| repo_name | path | text |
|---|---|---|
Paul-Verardi/nipyapi
|
nipyapi/registry/__init__.py
|
# coding: utf-8
"""
NiFi Registry REST API
The REST API provides an interface to a registry with operations for saving, versioning, reading NiFi flows and components.
OpenAPI spec version: 0.2.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into sdk package
from .models.access_policy import AccessPolicy
from .models.access_policy_summary import AccessPolicySummary
from .models.batch_size import BatchSize
from .models.bucket import Bucket
from .models.bucket_item import BucketItem
from .models.bundle import Bundle
from .models.component_difference import ComponentDifference
from .models.component_difference_group import ComponentDifferenceGroup
from .models.connectable_component import ConnectableComponent
from .models.controller_service_api import ControllerServiceAPI
from .models.current_user import CurrentUser
from .models.fields import Fields
from .models.link import Link
from .models.permissions import Permissions
from .models.position import Position
from .models.resource import Resource
from .models.resource_permissions import ResourcePermissions
from .models.tenant import Tenant
from .models.uri_builder import UriBuilder
from .models.user import User
from .models.user_group import UserGroup
from .models.versioned_connection import VersionedConnection
from .models.versioned_controller_service import VersionedControllerService
from .models.versioned_flow import VersionedFlow
from .models.versioned_flow_coordinates import VersionedFlowCoordinates
from .models.versioned_flow_difference import VersionedFlowDifference
from .models.versioned_flow_snapshot import VersionedFlowSnapshot
from .models.versioned_flow_snapshot_metadata import VersionedFlowSnapshotMetadata
from .models.versioned_funnel import VersionedFunnel
from .models.versioned_label import VersionedLabel
from .models.versioned_port import VersionedPort
from .models.versioned_process_group import VersionedProcessGroup
from .models.versioned_processor import VersionedProcessor
from .models.versioned_property_descriptor import VersionedPropertyDescriptor
from .models.versioned_remote_group_port import VersionedRemoteGroupPort
from .models.versioned_remote_process_group import VersionedRemoteProcessGroup
# import apis into sdk package
from .apis.access_api import AccessApi
from .apis.bucket_flows_api import BucketFlowsApi
from .apis.buckets_api import BucketsApi
from .apis.flows_api import FlowsApi
from .apis.items_api import ItemsApi
from .apis.policies_api import PoliciesApi
from .apis.tenants_api import TenantsApi
# import ApiClient
from .api_client import ApiClient
from .configuration import Configuration
configuration = Configuration()
|
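The registry sub-package above re-exports every generated model and API class and creates a module-level `Configuration`. A minimal usage sketch follows; the Registry URL is hypothetical, and `configuration.host` and `BucketsApi.get_buckets()` are the usual swagger-codegen names, which are not shown in the file above and should be verified against the generated `apis` package.

```python
import nipyapi.registry as registry

# Point the shared module-level Configuration at a local NiFi Registry
# (hypothetical URL - adjust for your deployment).
registry.configuration.host = 'http://localhost:18080/nifi-registry-api'

# BucketsApi is one of the API classes re-exported above; get_buckets()
# is the usual swagger-codegen accessor for GET /buckets (assumed name).
for bucket in registry.BucketsApi().get_buckets():
    print(bucket.identifier, bucket.name)
```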
Paul-Verardi/nipyapi
|
nipyapi/nifi/models/search_results_dto.py
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class SearchResultsDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'processor_results': 'list[ComponentSearchResultDTO]',
'connection_results': 'list[ComponentSearchResultDTO]',
'process_group_results': 'list[ComponentSearchResultDTO]',
'input_port_results': 'list[ComponentSearchResultDTO]',
'output_port_results': 'list[ComponentSearchResultDTO]',
'remote_process_group_results': 'list[ComponentSearchResultDTO]',
'funnel_results': 'list[ComponentSearchResultDTO]'
}
attribute_map = {
'processor_results': 'processorResults',
'connection_results': 'connectionResults',
'process_group_results': 'processGroupResults',
'input_port_results': 'inputPortResults',
'output_port_results': 'outputPortResults',
'remote_process_group_results': 'remoteProcessGroupResults',
'funnel_results': 'funnelResults'
}
def __init__(self, processor_results=None, connection_results=None, process_group_results=None, input_port_results=None, output_port_results=None, remote_process_group_results=None, funnel_results=None):
"""
SearchResultsDTO - a model defined in Swagger
"""
self._processor_results = None
self._connection_results = None
self._process_group_results = None
self._input_port_results = None
self._output_port_results = None
self._remote_process_group_results = None
self._funnel_results = None
if processor_results is not None:
self.processor_results = processor_results
if connection_results is not None:
self.connection_results = connection_results
if process_group_results is not None:
self.process_group_results = process_group_results
if input_port_results is not None:
self.input_port_results = input_port_results
if output_port_results is not None:
self.output_port_results = output_port_results
if remote_process_group_results is not None:
self.remote_process_group_results = remote_process_group_results
if funnel_results is not None:
self.funnel_results = funnel_results
@property
def processor_results(self):
"""
Gets the processor_results of this SearchResultsDTO.
The processors that matched the search.
:return: The processor_results of this SearchResultsDTO.
:rtype: list[ComponentSearchResultDTO]
"""
return self._processor_results
@processor_results.setter
def processor_results(self, processor_results):
"""
Sets the processor_results of this SearchResultsDTO.
The processors that matched the search.
:param processor_results: The processor_results of this SearchResultsDTO.
:type: list[ComponentSearchResultDTO]
"""
self._processor_results = processor_results
@property
def connection_results(self):
"""
Gets the connection_results of this SearchResultsDTO.
The connections that matched the search.
:return: The connection_results of this SearchResultsDTO.
:rtype: list[ComponentSearchResultDTO]
"""
return self._connection_results
@connection_results.setter
def connection_results(self, connection_results):
"""
Sets the connection_results of this SearchResultsDTO.
The connections that matched the search.
:param connection_results: The connection_results of this SearchResultsDTO.
:type: list[ComponentSearchResultDTO]
"""
self._connection_results = connection_results
@property
def process_group_results(self):
"""
Gets the process_group_results of this SearchResultsDTO.
The process groups that matched the search.
:return: The process_group_results of this SearchResultsDTO.
:rtype: list[ComponentSearchResultDTO]
"""
return self._process_group_results
@process_group_results.setter
def process_group_results(self, process_group_results):
"""
Sets the process_group_results of this SearchResultsDTO.
The process groups that matched the search.
:param process_group_results: The process_group_results of this SearchResultsDTO.
:type: list[ComponentSearchResultDTO]
"""
self._process_group_results = process_group_results
@property
def input_port_results(self):
"""
Gets the input_port_results of this SearchResultsDTO.
The input ports that matched the search.
:return: The input_port_results of this SearchResultsDTO.
:rtype: list[ComponentSearchResultDTO]
"""
return self._input_port_results
@input_port_results.setter
def input_port_results(self, input_port_results):
"""
Sets the input_port_results of this SearchResultsDTO.
The input ports that matched the search.
:param input_port_results: The input_port_results of this SearchResultsDTO.
:type: list[ComponentSearchResultDTO]
"""
self._input_port_results = input_port_results
@property
def output_port_results(self):
"""
Gets the output_port_results of this SearchResultsDTO.
The output ports that matched the search.
:return: The output_port_results of this SearchResultsDTO.
:rtype: list[ComponentSearchResultDTO]
"""
return self._output_port_results
@output_port_results.setter
def output_port_results(self, output_port_results):
"""
Sets the output_port_results of this SearchResultsDTO.
The output ports that matched the search.
:param output_port_results: The output_port_results of this SearchResultsDTO.
:type: list[ComponentSearchResultDTO]
"""
self._output_port_results = output_port_results
@property
def remote_process_group_results(self):
"""
Gets the remote_process_group_results of this SearchResultsDTO.
The remote process groups that matched the search.
:return: The remote_process_group_results of this SearchResultsDTO.
:rtype: list[ComponentSearchResultDTO]
"""
return self._remote_process_group_results
@remote_process_group_results.setter
def remote_process_group_results(self, remote_process_group_results):
"""
Sets the remote_process_group_results of this SearchResultsDTO.
The remote process groups that matched the search.
:param remote_process_group_results: The remote_process_group_results of this SearchResultsDTO.
:type: list[ComponentSearchResultDTO]
"""
self._remote_process_group_results = remote_process_group_results
@property
def funnel_results(self):
"""
Gets the funnel_results of this SearchResultsDTO.
The funnels that matched the search.
:return: The funnel_results of this SearchResultsDTO.
:rtype: list[ComponentSearchResultDTO]
"""
return self._funnel_results
@funnel_results.setter
def funnel_results(self, funnel_results):
"""
Sets the funnel_results of this SearchResultsDTO.
The funnels that matched the search.
:param funnel_results: The funnel_results of this SearchResultsDTO.
:type: list[ComponentSearchResultDTO]
"""
self._funnel_results = funnel_results
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, SearchResultsDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
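A short sketch of how the generated SearchResultsDTO behaves, using only the constructor, `__eq__`, and `to_dict()` defined above; the values are illustrative only.

```python
from nipyapi.nifi.models.search_results_dto import SearchResultsDTO

a = SearchResultsDTO(processor_results=[], funnel_results=[])
b = SearchResultsDTO(processor_results=[], funnel_results=[])

# __eq__ compares the underlying __dict__, so two models built the same way compare equal.
assert a == b
assert not (a != b)

# to_dict() keys are the snake_case attribute names from swagger_types;
# attributes never assigned stay None.
print(a.to_dict()['processor_results'])   # []
print(a.to_dict()['connection_results'])  # None
```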
Paul-Verardi/nipyapi
|
nipyapi/nifi/models/bulletin_entity.py
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class BulletinEntity(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'group_id': 'str',
'source_id': 'str',
'timestamp': 'str',
'node_address': 'str',
'can_read': 'bool',
'bulletin': 'BulletinDTO'
}
attribute_map = {
'id': 'id',
'group_id': 'groupId',
'source_id': 'sourceId',
'timestamp': 'timestamp',
'node_address': 'nodeAddress',
'can_read': 'canRead',
'bulletin': 'bulletin'
}
def __init__(self, id=None, group_id=None, source_id=None, timestamp=None, node_address=None, can_read=None, bulletin=None):
"""
BulletinEntity - a model defined in Swagger
"""
self._id = None
self._group_id = None
self._source_id = None
self._timestamp = None
self._node_address = None
self._can_read = None
self._bulletin = None
if id is not None:
self.id = id
if group_id is not None:
self.group_id = group_id
if source_id is not None:
self.source_id = source_id
if timestamp is not None:
self.timestamp = timestamp
if node_address is not None:
self.node_address = node_address
if can_read is not None:
self.can_read = can_read
if bulletin is not None:
self.bulletin = bulletin
@property
def id(self):
"""
Gets the id of this BulletinEntity.
:return: The id of this BulletinEntity.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this BulletinEntity.
:param id: The id of this BulletinEntity.
:type: int
"""
self._id = id
@property
def group_id(self):
"""
Gets the group_id of this BulletinEntity.
:return: The group_id of this BulletinEntity.
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""
Sets the group_id of this BulletinEntity.
:param group_id: The group_id of this BulletinEntity.
:type: str
"""
self._group_id = group_id
@property
def source_id(self):
"""
Gets the source_id of this BulletinEntity.
:return: The source_id of this BulletinEntity.
:rtype: str
"""
return self._source_id
@source_id.setter
def source_id(self, source_id):
"""
Sets the source_id of this BulletinEntity.
:param source_id: The source_id of this BulletinEntity.
:type: str
"""
self._source_id = source_id
@property
def timestamp(self):
"""
Gets the timestamp of this BulletinEntity.
When this bulletin was generated.
:return: The timestamp of this BulletinEntity.
:rtype: str
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""
Sets the timestamp of this BulletinEntity.
When this bulletin was generated.
:param timestamp: The timestamp of this BulletinEntity.
:type: str
"""
self._timestamp = timestamp
@property
def node_address(self):
"""
Gets the node_address of this BulletinEntity.
:return: The node_address of this BulletinEntity.
:rtype: str
"""
return self._node_address
@node_address.setter
def node_address(self, node_address):
"""
Sets the node_address of this BulletinEntity.
:param node_address: The node_address of this BulletinEntity.
:type: str
"""
self._node_address = node_address
@property
def can_read(self):
"""
Gets the can_read of this BulletinEntity.
Indicates whether the user can read a given resource.
:return: The can_read of this BulletinEntity.
:rtype: bool
"""
return self._can_read
@can_read.setter
def can_read(self, can_read):
"""
Sets the can_read of this BulletinEntity.
Indicates whether the user can read a given resource.
:param can_read: The can_read of this BulletinEntity.
:type: bool
"""
self._can_read = can_read
@property
def bulletin(self):
"""
Gets the bulletin of this BulletinEntity.
:return: The bulletin of this BulletinEntity.
:rtype: BulletinDTO
"""
return self._bulletin
@bulletin.setter
def bulletin(self, bulletin):
"""
Sets the bulletin of this BulletinEntity.
:param bulletin: The bulletin of this BulletinEntity.
:type: BulletinDTO
"""
self._bulletin = bulletin
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, BulletinEntity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
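As with the other generated models, BulletinEntity exposes each field through a property pair and pretty-prints itself via `to_str()`. A minimal sketch with made-up values:

```python
from nipyapi.nifi.models.bulletin_entity import BulletinEntity

entity = BulletinEntity(id=42, group_id='root', can_read=True)

# Attribute access goes through the generated properties.
print(entity.can_read)  # True

# __repr__ delegates to to_str(), which pformat()s the to_dict() output,
# so printing the entity shows all fields, including the unset ones.
print(entity)
```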
Paul-Verardi/nipyapi
|
nipyapi/nifi/models/status_history_dto.py
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class StatusHistoryDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'generated': 'str',
'component_details': 'dict(str, str)',
'field_descriptors': 'list[StatusDescriptorDTO]',
'aggregate_snapshots': 'list[StatusSnapshotDTO]',
'node_snapshots': 'list[NodeStatusSnapshotsDTO]'
}
attribute_map = {
'generated': 'generated',
'component_details': 'componentDetails',
'field_descriptors': 'fieldDescriptors',
'aggregate_snapshots': 'aggregateSnapshots',
'node_snapshots': 'nodeSnapshots'
}
def __init__(self, generated=None, component_details=None, field_descriptors=None, aggregate_snapshots=None, node_snapshots=None):
"""
StatusHistoryDTO - a model defined in Swagger
"""
self._generated = None
self._component_details = None
self._field_descriptors = None
self._aggregate_snapshots = None
self._node_snapshots = None
if generated is not None:
self.generated = generated
if component_details is not None:
self.component_details = component_details
if field_descriptors is not None:
self.field_descriptors = field_descriptors
if aggregate_snapshots is not None:
self.aggregate_snapshots = aggregate_snapshots
if node_snapshots is not None:
self.node_snapshots = node_snapshots
@property
def generated(self):
"""
Gets the generated of this StatusHistoryDTO.
When the status history was generated.
:return: The generated of this StatusHistoryDTO.
:rtype: str
"""
return self._generated
@generated.setter
def generated(self, generated):
"""
Sets the generated of this StatusHistoryDTO.
When the status history was generated.
:param generated: The generated of this StatusHistoryDTO.
:type: str
"""
self._generated = generated
@property
def component_details(self):
"""
Gets the component_details of this StatusHistoryDTO.
A Map of key/value pairs that describe the component that the status history belongs to
:return: The component_details of this StatusHistoryDTO.
:rtype: dict(str, str)
"""
return self._component_details
@component_details.setter
def component_details(self, component_details):
"""
Sets the component_details of this StatusHistoryDTO.
A Map of key/value pairs that describe the component that the status history belongs to
:param component_details: The component_details of this StatusHistoryDTO.
:type: dict(str, str)
"""
self._component_details = component_details
@property
def field_descriptors(self):
"""
Gets the field_descriptors of this StatusHistoryDTO.
The Descriptors that provide information on each of the metrics provided in the status history
:return: The field_descriptors of this StatusHistoryDTO.
:rtype: list[StatusDescriptorDTO]
"""
return self._field_descriptors
@field_descriptors.setter
def field_descriptors(self, field_descriptors):
"""
Sets the field_descriptors of this StatusHistoryDTO.
The Descriptors that provide information on each of the metrics provided in the status history
:param field_descriptors: The field_descriptors of this StatusHistoryDTO.
:type: list[StatusDescriptorDTO]
"""
self._field_descriptors = field_descriptors
@property
def aggregate_snapshots(self):
"""
Gets the aggregate_snapshots of this StatusHistoryDTO.
A list of StatusSnapshotDTO objects that provide the actual metric values for the component. If the NiFi instance is clustered, this will represent the aggregate status across all nodes. If the NiFi instance is not clustered, this will represent the status of the entire NiFi instance.
:return: The aggregate_snapshots of this StatusHistoryDTO.
:rtype: list[StatusSnapshotDTO]
"""
return self._aggregate_snapshots
@aggregate_snapshots.setter
def aggregate_snapshots(self, aggregate_snapshots):
"""
Sets the aggregate_snapshots of this StatusHistoryDTO.
A list of StatusSnapshotDTO objects that provide the actual metric values for the component. If the NiFi instance is clustered, this will represent the aggregate status across all nodes. If the NiFi instance is not clustered, this will represent the status of the entire NiFi instance.
:param aggregate_snapshots: The aggregate_snapshots of this StatusHistoryDTO.
:type: list[StatusSnapshotDTO]
"""
self._aggregate_snapshots = aggregate_snapshots
@property
def node_snapshots(self):
"""
Gets the node_snapshots of this StatusHistoryDTO.
The NodeStatusSnapshotsDTO objects that provide the actual metric values for the component, for each node. If the NiFi instance is not clustered, this value will be null.
:return: The node_snapshots of this StatusHistoryDTO.
:rtype: list[NodeStatusSnapshotsDTO]
"""
return self._node_snapshots
@node_snapshots.setter
def node_snapshots(self, node_snapshots):
"""
Sets the node_snapshots of this StatusHistoryDTO.
The NodeStatusSnapshotsDTO objects that provide the actual metric values for the component, for each node. If the NiFi instance is not clustered, this value will be null.
:param node_snapshots: The node_snapshots of this StatusHistoryDTO.
:type: list[NodeStatusSnapshotsDTO]
"""
self._node_snapshots = node_snapshots
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, StatusHistoryDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
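StatusHistoryDTO is the first model above with a dict-typed field (`component_details`), which exercises the dict branch of `to_dict()`. A sketch with placeholder values:

```python
from nipyapi.nifi.models.status_history_dto import StatusHistoryDTO

history = StatusHistoryDTO(
    generated='12:00:00 UTC',
    component_details={'Id': 'proc-1', 'Name': 'GetFile'},
)

# Plain string values have no to_dict(), so the dict branch of to_dict()
# copies the key/value pairs through unchanged.
print(history.to_dict()['component_details'])  # {'Id': 'proc-1', 'Name': 'GetFile'}
```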
Paul-Verardi/nipyapi
|
nipyapi/nifi/models/versioned_remote_process_group.py
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VersionedRemoteProcessGroup(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'identifier': 'str',
'name': 'str',
'comments': 'str',
'position': 'Position',
'target_uri': 'str',
'target_uris': 'str',
'communications_timeout': 'str',
'yield_duration': 'str',
'transport_protocol': 'str',
'local_network_interface': 'str',
'proxy_host': 'str',
'proxy_port': 'int',
'proxy_user': 'str',
'input_ports': 'list[VersionedRemoteGroupPort]',
'output_ports': 'list[VersionedRemoteGroupPort]',
'component_type': 'str',
'group_identifier': 'str'
}
attribute_map = {
'identifier': 'identifier',
'name': 'name',
'comments': 'comments',
'position': 'position',
'target_uri': 'targetUri',
'target_uris': 'targetUris',
'communications_timeout': 'communicationsTimeout',
'yield_duration': 'yieldDuration',
'transport_protocol': 'transportProtocol',
'local_network_interface': 'localNetworkInterface',
'proxy_host': 'proxyHost',
'proxy_port': 'proxyPort',
'proxy_user': 'proxyUser',
'input_ports': 'inputPorts',
'output_ports': 'outputPorts',
'component_type': 'componentType',
'group_identifier': 'groupIdentifier'
}
def __init__(self, identifier=None, name=None, comments=None, position=None, target_uri=None, target_uris=None, communications_timeout=None, yield_duration=None, transport_protocol=None, local_network_interface=None, proxy_host=None, proxy_port=None, proxy_user=None, input_ports=None, output_ports=None, component_type=None, group_identifier=None):
"""
VersionedRemoteProcessGroup - a model defined in Swagger
"""
self._identifier = None
self._name = None
self._comments = None
self._position = None
self._target_uri = None
self._target_uris = None
self._communications_timeout = None
self._yield_duration = None
self._transport_protocol = None
self._local_network_interface = None
self._proxy_host = None
self._proxy_port = None
self._proxy_user = None
self._input_ports = None
self._output_ports = None
self._component_type = None
self._group_identifier = None
if identifier is not None:
self.identifier = identifier
if name is not None:
self.name = name
if comments is not None:
self.comments = comments
if position is not None:
self.position = position
if target_uri is not None:
self.target_uri = target_uri
if target_uris is not None:
self.target_uris = target_uris
if communications_timeout is not None:
self.communications_timeout = communications_timeout
if yield_duration is not None:
self.yield_duration = yield_duration
if transport_protocol is not None:
self.transport_protocol = transport_protocol
if local_network_interface is not None:
self.local_network_interface = local_network_interface
if proxy_host is not None:
self.proxy_host = proxy_host
if proxy_port is not None:
self.proxy_port = proxy_port
if proxy_user is not None:
self.proxy_user = proxy_user
if input_ports is not None:
self.input_ports = input_ports
if output_ports is not None:
self.output_ports = output_ports
if component_type is not None:
self.component_type = component_type
if group_identifier is not None:
self.group_identifier = group_identifier
@property
def identifier(self):
"""
Gets the identifier of this VersionedRemoteProcessGroup.
The component's unique identifier
:return: The identifier of this VersionedRemoteProcessGroup.
:rtype: str
"""
return self._identifier
@identifier.setter
def identifier(self, identifier):
"""
Sets the identifier of this VersionedRemoteProcessGroup.
The component's unique identifier
:param identifier: The identifier of this VersionedRemoteProcessGroup.
:type: str
"""
self._identifier = identifier
@property
def name(self):
"""
Gets the name of this VersionedRemoteProcessGroup.
The component's name
:return: The name of this VersionedRemoteProcessGroup.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this VersionedRemoteProcessGroup.
The component's name
:param name: The name of this VersionedRemoteProcessGroup.
:type: str
"""
self._name = name
@property
def comments(self):
"""
Gets the comments of this VersionedRemoteProcessGroup.
The user-supplied comments for the component
:return: The comments of this VersionedRemoteProcessGroup.
:rtype: str
"""
return self._comments
@comments.setter
def comments(self, comments):
"""
Sets the comments of this VersionedRemoteProcessGroup.
The user-supplied comments for the component
:param comments: The comments of this VersionedRemoteProcessGroup.
:type: str
"""
self._comments = comments
@property
def position(self):
"""
Gets the position of this VersionedRemoteProcessGroup.
The component's position on the graph
:return: The position of this VersionedRemoteProcessGroup.
:rtype: Position
"""
return self._position
@position.setter
def position(self, position):
"""
Sets the position of this VersionedRemoteProcessGroup.
The component's position on the graph
:param position: The position of this VersionedRemoteProcessGroup.
:type: Position
"""
self._position = position
@property
def target_uri(self):
"""
Gets the target_uri of this VersionedRemoteProcessGroup.
The target URI of the remote process group. If target uri is not set, but uris are set, then returns the first url in the urls. If neither target uri nor uris are set, then returns null.
:return: The target_uri of this VersionedRemoteProcessGroup.
:rtype: str
"""
return self._target_uri
@target_uri.setter
def target_uri(self, target_uri):
"""
Sets the target_uri of this VersionedRemoteProcessGroup.
The target URI of the remote process group. If target uri is not set, but uris are set, then returns the first url in the urls. If neither target uri nor uris are set, then returns null.
:param target_uri: The target_uri of this VersionedRemoteProcessGroup.
:type: str
"""
self._target_uri = target_uri
@property
def target_uris(self):
"""
Gets the target_uris of this VersionedRemoteProcessGroup.
The target URI of the remote process group. If target uris is not set but target uri is set, then returns the single target uri. If neither target uris nor target uri is set, then returns null.
:return: The target_uris of this VersionedRemoteProcessGroup.
:rtype: str
"""
return self._target_uris
@target_uris.setter
def target_uris(self, target_uris):
"""
Sets the target_uris of this VersionedRemoteProcessGroup.
The target URI of the remote process group. If target uris is not set but target uri is set, then returns the single target uri. If neither target uris nor target uri is set, then returns null.
:param target_uris: The target_uris of this VersionedRemoteProcessGroup.
:type: str
"""
self._target_uris = target_uris
@property
def communications_timeout(self):
"""
Gets the communications_timeout of this VersionedRemoteProcessGroup.
The time period used for the timeout when communicating with the target.
:return: The communications_timeout of this VersionedRemoteProcessGroup.
:rtype: str
"""
return self._communications_timeout
@communications_timeout.setter
def communications_timeout(self, communications_timeout):
"""
Sets the communications_timeout of this VersionedRemoteProcessGroup.
The time period used for the timeout when communicating with the target.
:param communications_timeout: The communications_timeout of this VersionedRemoteProcessGroup.
:type: str
"""
self._communications_timeout = communications_timeout
@property
def yield_duration(self):
"""
Gets the yield_duration of this VersionedRemoteProcessGroup.
When yielding, this amount of time must elapse before the remote process group is scheduled again.
:return: The yield_duration of this VersionedRemoteProcessGroup.
:rtype: str
"""
return self._yield_duration
@yield_duration.setter
def yield_duration(self, yield_duration):
"""
Sets the yield_duration of this VersionedRemoteProcessGroup.
When yielding, this amount of time must elapse before the remote process group is scheduled again.
:param yield_duration: The yield_duration of this VersionedRemoteProcessGroup.
:type: str
"""
self._yield_duration = yield_duration
@property
def transport_protocol(self):
"""
Gets the transport_protocol of this VersionedRemoteProcessGroup.
The Transport Protocol that is used for Site-to-Site communications
:return: The transport_protocol of this VersionedRemoteProcessGroup.
:rtype: str
"""
return self._transport_protocol
@transport_protocol.setter
def transport_protocol(self, transport_protocol):
"""
Sets the transport_protocol of this VersionedRemoteProcessGroup.
The Transport Protocol that is used for Site-to-Site communications
:param transport_protocol: The transport_protocol of this VersionedRemoteProcessGroup.
:type: str
"""
allowed_values = ["RAW", "HTTP"]
if transport_protocol not in allowed_values:
raise ValueError(
"Invalid value for `transport_protocol` ({0}), must be one of {1}"
.format(transport_protocol, allowed_values)
)
self._transport_protocol = transport_protocol
@property
def local_network_interface(self):
"""
Gets the local_network_interface of this VersionedRemoteProcessGroup.
The local network interface to send/receive data. If not specified, any local address is used. If clustered, all nodes must have an interface with this identifier.
:return: The local_network_interface of this VersionedRemoteProcessGroup.
:rtype: str
"""
return self._local_network_interface
@local_network_interface.setter
def local_network_interface(self, local_network_interface):
"""
Sets the local_network_interface of this VersionedRemoteProcessGroup.
The local network interface to send/receive data. If not specified, any local address is used. If clustered, all nodes must have an interface with this identifier.
:param local_network_interface: The local_network_interface of this VersionedRemoteProcessGroup.
:type: str
"""
self._local_network_interface = local_network_interface
@property
def proxy_host(self):
"""
Gets the proxy_host of this VersionedRemoteProcessGroup.
:return: The proxy_host of this VersionedRemoteProcessGroup.
:rtype: str
"""
return self._proxy_host
@proxy_host.setter
def proxy_host(self, proxy_host):
"""
Sets the proxy_host of this VersionedRemoteProcessGroup.
:param proxy_host: The proxy_host of this VersionedRemoteProcessGroup.
:type: str
"""
self._proxy_host = proxy_host
@property
def proxy_port(self):
"""
Gets the proxy_port of this VersionedRemoteProcessGroup.
:return: The proxy_port of this VersionedRemoteProcessGroup.
:rtype: int
"""
return self._proxy_port
@proxy_port.setter
def proxy_port(self, proxy_port):
"""
Sets the proxy_port of this VersionedRemoteProcessGroup.
:param proxy_port: The proxy_port of this VersionedRemoteProcessGroup.
:type: int
"""
self._proxy_port = proxy_port
@property
def proxy_user(self):
"""
Gets the proxy_user of this VersionedRemoteProcessGroup.
:return: The proxy_user of this VersionedRemoteProcessGroup.
:rtype: str
"""
return self._proxy_user
@proxy_user.setter
def proxy_user(self, proxy_user):
"""
Sets the proxy_user of this VersionedRemoteProcessGroup.
:param proxy_user: The proxy_user of this VersionedRemoteProcessGroup.
:type: str
"""
self._proxy_user = proxy_user
@property
def input_ports(self):
"""
Gets the input_ports of this VersionedRemoteProcessGroup.
A Set of Input Ports that can be connected to, in order to send data to the remote NiFi instance
:return: The input_ports of this VersionedRemoteProcessGroup.
:rtype: list[VersionedRemoteGroupPort]
"""
return self._input_ports
@input_ports.setter
def input_ports(self, input_ports):
"""
Sets the input_ports of this VersionedRemoteProcessGroup.
A Set of Input Ports that can be connected to, in order to send data to the remote NiFi instance
:param input_ports: The input_ports of this VersionedRemoteProcessGroup.
:type: list[VersionedRemoteGroupPort]
"""
self._input_ports = input_ports
@property
def output_ports(self):
"""
Gets the output_ports of this VersionedRemoteProcessGroup.
A Set of Output Ports that can be connected to, in order to pull data from the remote NiFi instance
:return: The output_ports of this VersionedRemoteProcessGroup.
:rtype: list[VersionedRemoteGroupPort]
"""
return self._output_ports
@output_ports.setter
def output_ports(self, output_ports):
"""
Sets the output_ports of this VersionedRemoteProcessGroup.
A Set of Output Ports that can be connected to, in order to pull data from the remote NiFi instance
:param output_ports: The output_ports of this VersionedRemoteProcessGroup.
:type: list[VersionedRemoteGroupPort]
"""
self._output_ports = output_ports
@property
def component_type(self):
"""
Gets the component_type of this VersionedRemoteProcessGroup.
:return: The component_type of this VersionedRemoteProcessGroup.
:rtype: str
"""
return self._component_type
@component_type.setter
def component_type(self, component_type):
"""
Sets the component_type of this VersionedRemoteProcessGroup.
:param component_type: The component_type of this VersionedRemoteProcessGroup.
:type: str
"""
allowed_values = ["CONNECTION", "PROCESSOR", "PROCESS_GROUP", "REMOTE_PROCESS_GROUP", "INPUT_PORT", "OUTPUT_PORT", "REMOTE_INPUT_PORT", "REMOTE_OUTPUT_PORT", "FUNNEL", "LABEL", "CONTROLLER_SERVICE"]
if component_type not in allowed_values:
raise ValueError(
"Invalid value for `component_type` ({0}), must be one of {1}"
.format(component_type, allowed_values)
)
self._component_type = component_type
@property
def group_identifier(self):
"""
Gets the group_identifier of this VersionedRemoteProcessGroup.
The ID of the Process Group that this component belongs to
:return: The group_identifier of this VersionedRemoteProcessGroup.
:rtype: str
"""
return self._group_identifier
@group_identifier.setter
def group_identifier(self, group_identifier):
"""
Sets the group_identifier of this VersionedRemoteProcessGroup.
The ID of the Process Group that this component belongs to
:param group_identifier: The group_identifier of this VersionedRemoteProcessGroup.
:type: str
"""
self._group_identifier = group_identifier
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, VersionedRemoteProcessGroup):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
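Unlike the models above, VersionedRemoteProcessGroup has validating setters: `transport_protocol` and `component_type` reject values outside their `allowed_values` lists. A sketch showing the check, with an intentionally invalid value:

```python
from nipyapi.nifi.models.versioned_remote_process_group import (
    VersionedRemoteProcessGroup,
)

# The constructor routes arguments through the property setters, so 'HTTP' is validated here.
rpg = VersionedRemoteProcessGroup(name='remote-flow', transport_protocol='HTTP')

try:
    rpg.transport_protocol = 'UDP'  # not in ["RAW", "HTTP"]
except ValueError as err:
    # Invalid value for `transport_protocol` (UDP), must be one of ['RAW', 'HTTP']
    print(err)
```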
Paul-Verardi/nipyapi
|
nipyapi/nifi/models/controller_dto.py
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ControllerDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'comments': 'str',
'running_count': 'int',
'stopped_count': 'int',
'invalid_count': 'int',
'disabled_count': 'int',
'active_remote_port_count': 'int',
'inactive_remote_port_count': 'int',
'input_port_count': 'int',
'output_port_count': 'int',
'remote_site_listening_port': 'int',
'remote_site_http_listening_port': 'int',
'site_to_site_secure': 'bool',
'instance_id': 'str',
'input_ports': 'list[PortDTO]',
'output_ports': 'list[PortDTO]'
}
attribute_map = {
'id': 'id',
'name': 'name',
'comments': 'comments',
'running_count': 'runningCount',
'stopped_count': 'stoppedCount',
'invalid_count': 'invalidCount',
'disabled_count': 'disabledCount',
'active_remote_port_count': 'activeRemotePortCount',
'inactive_remote_port_count': 'inactiveRemotePortCount',
'input_port_count': 'inputPortCount',
'output_port_count': 'outputPortCount',
'remote_site_listening_port': 'remoteSiteListeningPort',
'remote_site_http_listening_port': 'remoteSiteHttpListeningPort',
'site_to_site_secure': 'siteToSiteSecure',
'instance_id': 'instanceId',
'input_ports': 'inputPorts',
'output_ports': 'outputPorts'
}
def __init__(self, id=None, name=None, comments=None, running_count=None, stopped_count=None, invalid_count=None, disabled_count=None, active_remote_port_count=None, inactive_remote_port_count=None, input_port_count=None, output_port_count=None, remote_site_listening_port=None, remote_site_http_listening_port=None, site_to_site_secure=None, instance_id=None, input_ports=None, output_ports=None):
"""
ControllerDTO - a model defined in Swagger
"""
self._id = None
self._name = None
self._comments = None
self._running_count = None
self._stopped_count = None
self._invalid_count = None
self._disabled_count = None
self._active_remote_port_count = None
self._inactive_remote_port_count = None
self._input_port_count = None
self._output_port_count = None
self._remote_site_listening_port = None
self._remote_site_http_listening_port = None
self._site_to_site_secure = None
self._instance_id = None
self._input_ports = None
self._output_ports = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if comments is not None:
self.comments = comments
if running_count is not None:
self.running_count = running_count
if stopped_count is not None:
self.stopped_count = stopped_count
if invalid_count is not None:
self.invalid_count = invalid_count
if disabled_count is not None:
self.disabled_count = disabled_count
if active_remote_port_count is not None:
self.active_remote_port_count = active_remote_port_count
if inactive_remote_port_count is not None:
self.inactive_remote_port_count = inactive_remote_port_count
if input_port_count is not None:
self.input_port_count = input_port_count
if output_port_count is not None:
self.output_port_count = output_port_count
if remote_site_listening_port is not None:
self.remote_site_listening_port = remote_site_listening_port
if remote_site_http_listening_port is not None:
self.remote_site_http_listening_port = remote_site_http_listening_port
if site_to_site_secure is not None:
self.site_to_site_secure = site_to_site_secure
if instance_id is not None:
self.instance_id = instance_id
if input_ports is not None:
self.input_ports = input_ports
if output_ports is not None:
self.output_ports = output_ports
@property
def id(self):
"""
Gets the id of this ControllerDTO.
The id of the NiFi.
:return: The id of this ControllerDTO.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this ControllerDTO.
The id of the NiFi.
:param id: The id of this ControllerDTO.
:type: str
"""
self._id = id
@property
def name(self):
"""
Gets the name of this ControllerDTO.
The name of the NiFi.
:return: The name of this ControllerDTO.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ControllerDTO.
The name of the NiFi.
:param name: The name of this ControllerDTO.
:type: str
"""
self._name = name
@property
def comments(self):
"""
Gets the comments of this ControllerDTO.
The comments for the NiFi.
:return: The comments of this ControllerDTO.
:rtype: str
"""
return self._comments
@comments.setter
def comments(self, comments):
"""
Sets the comments of this ControllerDTO.
The comments for the NiFi.
:param comments: The comments of this ControllerDTO.
:type: str
"""
self._comments = comments
@property
def running_count(self):
"""
Gets the running_count of this ControllerDTO.
The number of running components in the NiFi.
:return: The running_count of this ControllerDTO.
:rtype: int
"""
return self._running_count
@running_count.setter
def running_count(self, running_count):
"""
Sets the running_count of this ControllerDTO.
The number of running components in the NiFi.
:param running_count: The running_count of this ControllerDTO.
:type: int
"""
self._running_count = running_count
@property
def stopped_count(self):
"""
Gets the stopped_count of this ControllerDTO.
The number of stopped components in the NiFi.
:return: The stopped_count of this ControllerDTO.
:rtype: int
"""
return self._stopped_count
@stopped_count.setter
def stopped_count(self, stopped_count):
"""
Sets the stopped_count of this ControllerDTO.
The number of stopped components in the NiFi.
:param stopped_count: The stopped_count of this ControllerDTO.
:type: int
"""
self._stopped_count = stopped_count
@property
def invalid_count(self):
"""
Gets the invalid_count of this ControllerDTO.
The number of invalid components in the NiFi.
:return: The invalid_count of this ControllerDTO.
:rtype: int
"""
return self._invalid_count
@invalid_count.setter
def invalid_count(self, invalid_count):
"""
Sets the invalid_count of this ControllerDTO.
The number of invalid components in the NiFi.
:param invalid_count: The invalid_count of this ControllerDTO.
:type: int
"""
self._invalid_count = invalid_count
@property
def disabled_count(self):
"""
Gets the disabled_count of this ControllerDTO.
The number of disabled components in the NiFi.
:return: The disabled_count of this ControllerDTO.
:rtype: int
"""
return self._disabled_count
@disabled_count.setter
def disabled_count(self, disabled_count):
"""
Sets the disabled_count of this ControllerDTO.
The number of disabled components in the NiFi.
:param disabled_count: The disabled_count of this ControllerDTO.
:type: int
"""
self._disabled_count = disabled_count
@property
def active_remote_port_count(self):
"""
Gets the active_remote_port_count of this ControllerDTO.
The number of active remote ports contained in the NiFi.
:return: The active_remote_port_count of this ControllerDTO.
:rtype: int
"""
return self._active_remote_port_count
@active_remote_port_count.setter
def active_remote_port_count(self, active_remote_port_count):
"""
Sets the active_remote_port_count of this ControllerDTO.
The number of active remote ports contained in the NiFi.
:param active_remote_port_count: The active_remote_port_count of this ControllerDTO.
:type: int
"""
self._active_remote_port_count = active_remote_port_count
@property
def inactive_remote_port_count(self):
"""
Gets the inactive_remote_port_count of this ControllerDTO.
The number of inactive remote ports contained in the NiFi.
:return: The inactive_remote_port_count of this ControllerDTO.
:rtype: int
"""
return self._inactive_remote_port_count
@inactive_remote_port_count.setter
def inactive_remote_port_count(self, inactive_remote_port_count):
"""
Sets the inactive_remote_port_count of this ControllerDTO.
The number of inactive remote ports contained in the NiFi.
:param inactive_remote_port_count: The inactive_remote_port_count of this ControllerDTO.
:type: int
"""
self._inactive_remote_port_count = inactive_remote_port_count
@property
def input_port_count(self):
"""
Gets the input_port_count of this ControllerDTO.
The number of input ports contained in the NiFi.
:return: The input_port_count of this ControllerDTO.
:rtype: int
"""
return self._input_port_count
@input_port_count.setter
def input_port_count(self, input_port_count):
"""
Sets the input_port_count of this ControllerDTO.
The number of input ports contained in the NiFi.
:param input_port_count: The input_port_count of this ControllerDTO.
:type: int
"""
self._input_port_count = input_port_count
@property
def output_port_count(self):
"""
Gets the output_port_count of this ControllerDTO.
The number of output ports in the NiFi.
:return: The output_port_count of this ControllerDTO.
:rtype: int
"""
return self._output_port_count
@output_port_count.setter
def output_port_count(self, output_port_count):
"""
Sets the output_port_count of this ControllerDTO.
The number of output ports in the NiFi.
:param output_port_count: The output_port_count of this ControllerDTO.
:type: int
"""
self._output_port_count = output_port_count
@property
def remote_site_listening_port(self):
"""
Gets the remote_site_listening_port of this ControllerDTO.
The Socket Port on which this instance is listening for Remote Transfers of Flow Files. If this instance is not configured to receive Flow Files from remote instances, this will be null.
:return: The remote_site_listening_port of this ControllerDTO.
:rtype: int
"""
return self._remote_site_listening_port
@remote_site_listening_port.setter
def remote_site_listening_port(self, remote_site_listening_port):
"""
Sets the remote_site_listening_port of this ControllerDTO.
The Socket Port on which this instance is listening for Remote Transfers of Flow Files. If this instance is not configured to receive Flow Files from remote instances, this will be null.
:param remote_site_listening_port: The remote_site_listening_port of this ControllerDTO.
:type: int
"""
self._remote_site_listening_port = remote_site_listening_port
@property
def remote_site_http_listening_port(self):
"""
Gets the remote_site_http_listening_port of this ControllerDTO.
The HTTP(S) Port on which this instance is listening for Remote Transfers of Flow Files. If this instance is not configured to receive Flow Files from remote instances, this will be null.
:return: The remote_site_http_listening_port of this ControllerDTO.
:rtype: int
"""
return self._remote_site_http_listening_port
@remote_site_http_listening_port.setter
def remote_site_http_listening_port(self, remote_site_http_listening_port):
"""
Sets the remote_site_http_listening_port of this ControllerDTO.
The HTTP(S) Port on which this instance is listening for Remote Transfers of Flow Files. If this instance is not configured to receive Flow Files from remote instances, this will be null.
:param remote_site_http_listening_port: The remote_site_http_listening_port of this ControllerDTO.
:type: int
"""
self._remote_site_http_listening_port = remote_site_http_listening_port
@property
def site_to_site_secure(self):
"""
Gets the site_to_site_secure of this ControllerDTO.
Indicates whether or not Site-to-Site communications with this instance is secure (2-way authentication).
:return: The site_to_site_secure of this ControllerDTO.
:rtype: bool
"""
return self._site_to_site_secure
@site_to_site_secure.setter
def site_to_site_secure(self, site_to_site_secure):
"""
Sets the site_to_site_secure of this ControllerDTO.
Indicates whether or not Site-to-Site communications with this instance is secure (2-way authentication).
:param site_to_site_secure: The site_to_site_secure of this ControllerDTO.
:type: bool
"""
self._site_to_site_secure = site_to_site_secure
@property
def instance_id(self):
"""
Gets the instance_id of this ControllerDTO.
If clustered, the id of the Cluster Manager, otherwise the id of the NiFi.
:return: The instance_id of this ControllerDTO.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""
Sets the instance_id of this ControllerDTO.
If clustered, the id of the Cluster Manager, otherwise the id of the NiFi.
:param instance_id: The instance_id of this ControllerDTO.
:type: str
"""
self._instance_id = instance_id
@property
def input_ports(self):
"""
Gets the input_ports of this ControllerDTO.
The input ports available for sending data to the NiFi.
:return: The input_ports of this ControllerDTO.
:rtype: list[PortDTO]
"""
return self._input_ports
@input_ports.setter
def input_ports(self, input_ports):
"""
Sets the input_ports of this ControllerDTO.
The input ports available for sending data to the NiFi.
:param input_ports: The input_ports of this ControllerDTO.
:type: list[PortDTO]
"""
self._input_ports = input_ports
@property
def output_ports(self):
"""
Gets the output_ports of this ControllerDTO.
The output ports available for receiving data from the NiFi.
:return: The output_ports of this ControllerDTO.
:rtype: list[PortDTO]
"""
return self._output_ports
@output_ports.setter
def output_ports(self, output_ports):
"""
Sets the output_ports of this ControllerDTO.
The output ports available for receiving data from the NiFi.
:param output_ports: The output_ports of this ControllerDTO.
:type: list[PortDTO]
"""
self._output_ports = output_ports
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ControllerDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
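ControllerDTO follows the same pattern; note that `to_dict()` keeps the snake_case names from `swagger_types`, while the camelCase names in `attribute_map` are only applied when the generated ApiClient serialises the model to JSON. A sketch with illustrative counts:

```python
from nipyapi.nifi.models.controller_dto import ControllerDTO

ctrl = ControllerDTO(name='my-nifi', running_count=3, site_to_site_secure=False)

snapshot = ctrl.to_dict()
assert snapshot['running_count'] == 3
assert 'runningCount' not in snapshot  # camelCase only appears in the JSON wire format
```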
Paul-Verardi/nipyapi
|
nipyapi/nifi/models/system_diagnostics_snapshot_dto.py
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class SystemDiagnosticsSnapshotDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'total_non_heap': 'str',
'total_non_heap_bytes': 'int',
'used_non_heap': 'str',
'used_non_heap_bytes': 'int',
'free_non_heap': 'str',
'free_non_heap_bytes': 'int',
'max_non_heap': 'str',
'max_non_heap_bytes': 'int',
'non_heap_utilization': 'str',
'total_heap': 'str',
'total_heap_bytes': 'int',
'used_heap': 'str',
'used_heap_bytes': 'int',
'free_heap': 'str',
'free_heap_bytes': 'int',
'max_heap': 'str',
'max_heap_bytes': 'int',
'heap_utilization': 'str',
'available_processors': 'int',
'processor_load_average': 'float',
'total_threads': 'int',
'daemon_threads': 'int',
'uptime': 'str',
'flow_file_repository_storage_usage': 'StorageUsageDTO',
'content_repository_storage_usage': 'list[StorageUsageDTO]',
'provenance_repository_storage_usage': 'list[StorageUsageDTO]',
'garbage_collection': 'list[GarbageCollectionDTO]',
'stats_last_refreshed': 'str',
'version_info': 'VersionInfoDTO'
}
attribute_map = {
'total_non_heap': 'totalNonHeap',
'total_non_heap_bytes': 'totalNonHeapBytes',
'used_non_heap': 'usedNonHeap',
'used_non_heap_bytes': 'usedNonHeapBytes',
'free_non_heap': 'freeNonHeap',
'free_non_heap_bytes': 'freeNonHeapBytes',
'max_non_heap': 'maxNonHeap',
'max_non_heap_bytes': 'maxNonHeapBytes',
'non_heap_utilization': 'nonHeapUtilization',
'total_heap': 'totalHeap',
'total_heap_bytes': 'totalHeapBytes',
'used_heap': 'usedHeap',
'used_heap_bytes': 'usedHeapBytes',
'free_heap': 'freeHeap',
'free_heap_bytes': 'freeHeapBytes',
'max_heap': 'maxHeap',
'max_heap_bytes': 'maxHeapBytes',
'heap_utilization': 'heapUtilization',
'available_processors': 'availableProcessors',
'processor_load_average': 'processorLoadAverage',
'total_threads': 'totalThreads',
'daemon_threads': 'daemonThreads',
'uptime': 'uptime',
'flow_file_repository_storage_usage': 'flowFileRepositoryStorageUsage',
'content_repository_storage_usage': 'contentRepositoryStorageUsage',
'provenance_repository_storage_usage': 'provenanceRepositoryStorageUsage',
'garbage_collection': 'garbageCollection',
'stats_last_refreshed': 'statsLastRefreshed',
'version_info': 'versionInfo'
}
def __init__(self, total_non_heap=None, total_non_heap_bytes=None, used_non_heap=None, used_non_heap_bytes=None, free_non_heap=None, free_non_heap_bytes=None, max_non_heap=None, max_non_heap_bytes=None, non_heap_utilization=None, total_heap=None, total_heap_bytes=None, used_heap=None, used_heap_bytes=None, free_heap=None, free_heap_bytes=None, max_heap=None, max_heap_bytes=None, heap_utilization=None, available_processors=None, processor_load_average=None, total_threads=None, daemon_threads=None, uptime=None, flow_file_repository_storage_usage=None, content_repository_storage_usage=None, provenance_repository_storage_usage=None, garbage_collection=None, stats_last_refreshed=None, version_info=None):
"""
SystemDiagnosticsSnapshotDTO - a model defined in Swagger
"""
self._total_non_heap = None
self._total_non_heap_bytes = None
self._used_non_heap = None
self._used_non_heap_bytes = None
self._free_non_heap = None
self._free_non_heap_bytes = None
self._max_non_heap = None
self._max_non_heap_bytes = None
self._non_heap_utilization = None
self._total_heap = None
self._total_heap_bytes = None
self._used_heap = None
self._used_heap_bytes = None
self._free_heap = None
self._free_heap_bytes = None
self._max_heap = None
self._max_heap_bytes = None
self._heap_utilization = None
self._available_processors = None
self._processor_load_average = None
self._total_threads = None
self._daemon_threads = None
self._uptime = None
self._flow_file_repository_storage_usage = None
self._content_repository_storage_usage = None
self._provenance_repository_storage_usage = None
self._garbage_collection = None
self._stats_last_refreshed = None
self._version_info = None
if total_non_heap is not None:
self.total_non_heap = total_non_heap
if total_non_heap_bytes is not None:
self.total_non_heap_bytes = total_non_heap_bytes
if used_non_heap is not None:
self.used_non_heap = used_non_heap
if used_non_heap_bytes is not None:
self.used_non_heap_bytes = used_non_heap_bytes
if free_non_heap is not None:
self.free_non_heap = free_non_heap
if free_non_heap_bytes is not None:
self.free_non_heap_bytes = free_non_heap_bytes
if max_non_heap is not None:
self.max_non_heap = max_non_heap
if max_non_heap_bytes is not None:
self.max_non_heap_bytes = max_non_heap_bytes
if non_heap_utilization is not None:
self.non_heap_utilization = non_heap_utilization
if total_heap is not None:
self.total_heap = total_heap
if total_heap_bytes is not None:
self.total_heap_bytes = total_heap_bytes
if used_heap is not None:
self.used_heap = used_heap
if used_heap_bytes is not None:
self.used_heap_bytes = used_heap_bytes
if free_heap is not None:
self.free_heap = free_heap
if free_heap_bytes is not None:
self.free_heap_bytes = free_heap_bytes
if max_heap is not None:
self.max_heap = max_heap
if max_heap_bytes is not None:
self.max_heap_bytes = max_heap_bytes
if heap_utilization is not None:
self.heap_utilization = heap_utilization
if available_processors is not None:
self.available_processors = available_processors
if processor_load_average is not None:
self.processor_load_average = processor_load_average
if total_threads is not None:
self.total_threads = total_threads
if daemon_threads is not None:
self.daemon_threads = daemon_threads
if uptime is not None:
self.uptime = uptime
if flow_file_repository_storage_usage is not None:
self.flow_file_repository_storage_usage = flow_file_repository_storage_usage
if content_repository_storage_usage is not None:
self.content_repository_storage_usage = content_repository_storage_usage
if provenance_repository_storage_usage is not None:
self.provenance_repository_storage_usage = provenance_repository_storage_usage
if garbage_collection is not None:
self.garbage_collection = garbage_collection
if stats_last_refreshed is not None:
self.stats_last_refreshed = stats_last_refreshed
if version_info is not None:
self.version_info = version_info
@property
def total_non_heap(self):
"""
Gets the total_non_heap of this SystemDiagnosticsSnapshotDTO.
Total size of non heap.
:return: The total_non_heap of this SystemDiagnosticsSnapshotDTO.
:rtype: str
"""
return self._total_non_heap
@total_non_heap.setter
def total_non_heap(self, total_non_heap):
"""
Sets the total_non_heap of this SystemDiagnosticsSnapshotDTO.
Total size of non heap.
:param total_non_heap: The total_non_heap of this SystemDiagnosticsSnapshotDTO.
:type: str
"""
self._total_non_heap = total_non_heap
@property
def total_non_heap_bytes(self):
"""
Gets the total_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
Total number of bytes allocated to the JVM not used for heap
:return: The total_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._total_non_heap_bytes
@total_non_heap_bytes.setter
def total_non_heap_bytes(self, total_non_heap_bytes):
"""
Sets the total_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
Total number of bytes allocated to the JVM not used for heap
:param total_non_heap_bytes: The total_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:type: int
"""
self._total_non_heap_bytes = total_non_heap_bytes
@property
def used_non_heap(self):
"""
Gets the used_non_heap of this SystemDiagnosticsSnapshotDTO.
Amount of used non heap.
:return: The used_non_heap of this SystemDiagnosticsSnapshotDTO.
:rtype: str
"""
return self._used_non_heap
@used_non_heap.setter
def used_non_heap(self, used_non_heap):
"""
Sets the used_non_heap of this SystemDiagnosticsSnapshotDTO.
Amount of used non heap.
:param used_non_heap: The used_non_heap of this SystemDiagnosticsSnapshotDTO.
:type: str
"""
self._used_non_heap = used_non_heap
@property
def used_non_heap_bytes(self):
"""
Gets the used_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
Total number of bytes used by the JVM not in the heap space
:return: The used_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._used_non_heap_bytes
@used_non_heap_bytes.setter
def used_non_heap_bytes(self, used_non_heap_bytes):
"""
Sets the used_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
Total number of bytes used by the JVM not in the heap space
:param used_non_heap_bytes: The used_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:type: int
"""
self._used_non_heap_bytes = used_non_heap_bytes
@property
def free_non_heap(self):
"""
Gets the free_non_heap of this SystemDiagnosticsSnapshotDTO.
Amount of free non heap.
:return: The free_non_heap of this SystemDiagnosticsSnapshotDTO.
:rtype: str
"""
return self._free_non_heap
@free_non_heap.setter
def free_non_heap(self, free_non_heap):
"""
Sets the free_non_heap of this SystemDiagnosticsSnapshotDTO.
Amount of free non heap.
:param free_non_heap: The free_non_heap of this SystemDiagnosticsSnapshotDTO.
:type: str
"""
self._free_non_heap = free_non_heap
@property
def free_non_heap_bytes(self):
"""
Gets the free_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
Total number of free non-heap bytes available to the JVM
:return: The free_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._free_non_heap_bytes
@free_non_heap_bytes.setter
def free_non_heap_bytes(self, free_non_heap_bytes):
"""
Sets the free_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
Total number of free non-heap bytes available to the JVM
:param free_non_heap_bytes: The free_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:type: int
"""
self._free_non_heap_bytes = free_non_heap_bytes
@property
def max_non_heap(self):
"""
Gets the max_non_heap of this SystemDiagnosticsSnapshotDTO.
Maximum size of non heap.
:return: The max_non_heap of this SystemDiagnosticsSnapshotDTO.
:rtype: str
"""
return self._max_non_heap
@max_non_heap.setter
def max_non_heap(self, max_non_heap):
"""
Sets the max_non_heap of this SystemDiagnosticsSnapshotDTO.
Maximum size of non heap.
:param max_non_heap: The max_non_heap of this SystemDiagnosticsSnapshotDTO.
:type: str
"""
self._max_non_heap = max_non_heap
@property
def max_non_heap_bytes(self):
"""
Gets the max_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
The maximum number of bytes that the JVM can use for non-heap purposes
:return: The max_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._max_non_heap_bytes
@max_non_heap_bytes.setter
def max_non_heap_bytes(self, max_non_heap_bytes):
"""
Sets the max_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
The maximum number of bytes that the JVM can use for non-heap purposes
:param max_non_heap_bytes: The max_non_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:type: int
"""
self._max_non_heap_bytes = max_non_heap_bytes
@property
def non_heap_utilization(self):
"""
Gets the non_heap_utilization of this SystemDiagnosticsSnapshotDTO.
Utilization of non heap.
:return: The non_heap_utilization of this SystemDiagnosticsSnapshotDTO.
:rtype: str
"""
return self._non_heap_utilization
@non_heap_utilization.setter
def non_heap_utilization(self, non_heap_utilization):
"""
Sets the non_heap_utilization of this SystemDiagnosticsSnapshotDTO.
Utilization of non heap.
:param non_heap_utilization: The non_heap_utilization of this SystemDiagnosticsSnapshotDTO.
:type: str
"""
self._non_heap_utilization = non_heap_utilization
@property
def total_heap(self):
"""
Gets the total_heap of this SystemDiagnosticsSnapshotDTO.
Total size of heap.
:return: The total_heap of this SystemDiagnosticsSnapshotDTO.
:rtype: str
"""
return self._total_heap
@total_heap.setter
def total_heap(self, total_heap):
"""
Sets the total_heap of this SystemDiagnosticsSnapshotDTO.
Total size of heap.
:param total_heap: The total_heap of this SystemDiagnosticsSnapshotDTO.
:type: str
"""
self._total_heap = total_heap
@property
def total_heap_bytes(self):
"""
Gets the total_heap_bytes of this SystemDiagnosticsSnapshotDTO.
The total number of bytes that are available for the JVM heap to use
:return: The total_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._total_heap_bytes
@total_heap_bytes.setter
def total_heap_bytes(self, total_heap_bytes):
"""
Sets the total_heap_bytes of this SystemDiagnosticsSnapshotDTO.
The total number of bytes that are available for the JVM heap to use
:param total_heap_bytes: The total_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:type: int
"""
self._total_heap_bytes = total_heap_bytes
@property
def used_heap(self):
"""
Gets the used_heap of this SystemDiagnosticsSnapshotDTO.
Amount of used heap.
:return: The used_heap of this SystemDiagnosticsSnapshotDTO.
:rtype: str
"""
return self._used_heap
@used_heap.setter
def used_heap(self, used_heap):
"""
Sets the used_heap of this SystemDiagnosticsSnapshotDTO.
Amount of used heap.
:param used_heap: The used_heap of this SystemDiagnosticsSnapshotDTO.
:type: str
"""
self._used_heap = used_heap
@property
def used_heap_bytes(self):
"""
Gets the used_heap_bytes of this SystemDiagnosticsSnapshotDTO.
The number of bytes of JVM heap that are currently being used
:return: The used_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._used_heap_bytes
@used_heap_bytes.setter
def used_heap_bytes(self, used_heap_bytes):
"""
Sets the used_heap_bytes of this SystemDiagnosticsSnapshotDTO.
The number of bytes of JVM heap that are currently being used
:param used_heap_bytes: The used_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:type: int
"""
self._used_heap_bytes = used_heap_bytes
@property
def free_heap(self):
"""
Gets the free_heap of this SystemDiagnosticsSnapshotDTO.
Amount of free heap.
:return: The free_heap of this SystemDiagnosticsSnapshotDTO.
:rtype: str
"""
return self._free_heap
@free_heap.setter
def free_heap(self, free_heap):
"""
Sets the free_heap of this SystemDiagnosticsSnapshotDTO.
Amount of free heap.
:param free_heap: The free_heap of this SystemDiagnosticsSnapshotDTO.
:type: str
"""
self._free_heap = free_heap
@property
def free_heap_bytes(self):
"""
Gets the free_heap_bytes of this SystemDiagnosticsSnapshotDTO.
The number of bytes that are allocated to the JVM heap but not currently being used
:return: The free_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._free_heap_bytes
@free_heap_bytes.setter
def free_heap_bytes(self, free_heap_bytes):
"""
Sets the free_heap_bytes of this SystemDiagnosticsSnapshotDTO.
The number of bytes that are allocated to the JVM heap but not currently being used
:param free_heap_bytes: The free_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:type: int
"""
self._free_heap_bytes = free_heap_bytes
@property
def max_heap(self):
"""
Gets the max_heap of this SystemDiagnosticsSnapshotDTO.
Maximum size of heap.
:return: The max_heap of this SystemDiagnosticsSnapshotDTO.
:rtype: str
"""
return self._max_heap
@max_heap.setter
def max_heap(self, max_heap):
"""
Sets the max_heap of this SystemDiagnosticsSnapshotDTO.
Maximum size of heap.
:param max_heap: The max_heap of this SystemDiagnosticsSnapshotDTO.
:type: str
"""
self._max_heap = max_heap
@property
def max_heap_bytes(self):
"""
Gets the max_heap_bytes of this SystemDiagnosticsSnapshotDTO.
The maximum number of bytes that can be used by the JVM
:return: The max_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._max_heap_bytes
@max_heap_bytes.setter
def max_heap_bytes(self, max_heap_bytes):
"""
Sets the max_heap_bytes of this SystemDiagnosticsSnapshotDTO.
The maximum number of bytes that can be used by the JVM
:param max_heap_bytes: The max_heap_bytes of this SystemDiagnosticsSnapshotDTO.
:type: int
"""
self._max_heap_bytes = max_heap_bytes
@property
def heap_utilization(self):
"""
Gets the heap_utilization of this SystemDiagnosticsSnapshotDTO.
Utilization of heap.
:return: The heap_utilization of this SystemDiagnosticsSnapshotDTO.
:rtype: str
"""
return self._heap_utilization
@heap_utilization.setter
def heap_utilization(self, heap_utilization):
"""
Sets the heap_utilization of this SystemDiagnosticsSnapshotDTO.
Utilization of heap.
:param heap_utilization: The heap_utilization of this SystemDiagnosticsSnapshotDTO.
:type: str
"""
self._heap_utilization = heap_utilization
@property
def available_processors(self):
"""
Gets the available_processors of this SystemDiagnosticsSnapshotDTO.
Number of available processors if supported by the underlying system.
:return: The available_processors of this SystemDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._available_processors
@available_processors.setter
def available_processors(self, available_processors):
"""
Sets the available_processors of this SystemDiagnosticsSnapshotDTO.
Number of available processors if supported by the underlying system.
:param available_processors: The available_processors of this SystemDiagnosticsSnapshotDTO.
:type: int
"""
self._available_processors = available_processors
@property
def processor_load_average(self):
"""
Gets the processor_load_average of this SystemDiagnosticsSnapshotDTO.
The processor load average if supported by the underlying system.
:return: The processor_load_average of this SystemDiagnosticsSnapshotDTO.
:rtype: float
"""
return self._processor_load_average
@processor_load_average.setter
def processor_load_average(self, processor_load_average):
"""
Sets the processor_load_average of this SystemDiagnosticsSnapshotDTO.
The processor load average if supported by the underlying system.
:param processor_load_average: The processor_load_average of this SystemDiagnosticsSnapshotDTO.
:type: float
"""
self._processor_load_average = processor_load_average
@property
def total_threads(self):
"""
Gets the total_threads of this SystemDiagnosticsSnapshotDTO.
Total number of threads.
:return: The total_threads of this SystemDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._total_threads
@total_threads.setter
def total_threads(self, total_threads):
"""
Sets the total_threads of this SystemDiagnosticsSnapshotDTO.
Total number of threads.
:param total_threads: The total_threads of this SystemDiagnosticsSnapshotDTO.
:type: int
"""
self._total_threads = total_threads
@property
def daemon_threads(self):
"""
Gets the daemon_threads of this SystemDiagnosticsSnapshotDTO.
Number of daemon threads.
:return: The daemon_threads of this SystemDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._daemon_threads
@daemon_threads.setter
def daemon_threads(self, daemon_threads):
"""
Sets the daemon_threads of this SystemDiagnosticsSnapshotDTO.
Number of daemon threads.
:param daemon_threads: The daemon_threads of this SystemDiagnosticsSnapshotDTO.
:type: int
"""
self._daemon_threads = daemon_threads
@property
def uptime(self):
"""
Gets the uptime of this SystemDiagnosticsSnapshotDTO.
The uptime of the Java virtual machine
:return: The uptime of this SystemDiagnosticsSnapshotDTO.
:rtype: str
"""
return self._uptime
@uptime.setter
def uptime(self, uptime):
"""
Sets the uptime of this SystemDiagnosticsSnapshotDTO.
The uptime of the Java virtual machine
:param uptime: The uptime of this SystemDiagnosticsSnapshotDTO.
:type: str
"""
self._uptime = uptime
@property
def flow_file_repository_storage_usage(self):
"""
Gets the flow_file_repository_storage_usage of this SystemDiagnosticsSnapshotDTO.
The flowfile repository storage usage.
:return: The flow_file_repository_storage_usage of this SystemDiagnosticsSnapshotDTO.
:rtype: StorageUsageDTO
"""
return self._flow_file_repository_storage_usage
@flow_file_repository_storage_usage.setter
def flow_file_repository_storage_usage(self, flow_file_repository_storage_usage):
"""
Sets the flow_file_repository_storage_usage of this SystemDiagnosticsSnapshotDTO.
The flowfile repository storage usage.
:param flow_file_repository_storage_usage: The flow_file_repository_storage_usage of this SystemDiagnosticsSnapshotDTO.
:type: StorageUsageDTO
"""
self._flow_file_repository_storage_usage = flow_file_repository_storage_usage
@property
def content_repository_storage_usage(self):
"""
Gets the content_repository_storage_usage of this SystemDiagnosticsSnapshotDTO.
The content repository storage usage.
:return: The content_repository_storage_usage of this SystemDiagnosticsSnapshotDTO.
:rtype: list[StorageUsageDTO]
"""
return self._content_repository_storage_usage
@content_repository_storage_usage.setter
def content_repository_storage_usage(self, content_repository_storage_usage):
"""
Sets the content_repository_storage_usage of this SystemDiagnosticsSnapshotDTO.
The content repository storage usage.
:param content_repository_storage_usage: The content_repository_storage_usage of this SystemDiagnosticsSnapshotDTO.
:type: list[StorageUsageDTO]
"""
self._content_repository_storage_usage = content_repository_storage_usage
@property
def provenance_repository_storage_usage(self):
"""
Gets the provenance_repository_storage_usage of this SystemDiagnosticsSnapshotDTO.
The provenance repository storage usage.
:return: The provenance_repository_storage_usage of this SystemDiagnosticsSnapshotDTO.
:rtype: list[StorageUsageDTO]
"""
return self._provenance_repository_storage_usage
@provenance_repository_storage_usage.setter
def provenance_repository_storage_usage(self, provenance_repository_storage_usage):
"""
Sets the provenance_repository_storage_usage of this SystemDiagnosticsSnapshotDTO.
The provenance repository storage usage.
:param provenance_repository_storage_usage: The provenance_repository_storage_usage of this SystemDiagnosticsSnapshotDTO.
:type: list[StorageUsageDTO]
"""
self._provenance_repository_storage_usage = provenance_repository_storage_usage
@property
def garbage_collection(self):
"""
Gets the garbage_collection of this SystemDiagnosticsSnapshotDTO.
The garbage collection details.
:return: The garbage_collection of this SystemDiagnosticsSnapshotDTO.
:rtype: list[GarbageCollectionDTO]
"""
return self._garbage_collection
@garbage_collection.setter
def garbage_collection(self, garbage_collection):
"""
Sets the garbage_collection of this SystemDiagnosticsSnapshotDTO.
The garbage collection details.
:param garbage_collection: The garbage_collection of this SystemDiagnosticsSnapshotDTO.
:type: list[GarbageCollectionDTO]
"""
self._garbage_collection = garbage_collection
@property
def stats_last_refreshed(self):
"""
Gets the stats_last_refreshed of this SystemDiagnosticsSnapshotDTO.
When the diagnostics were generated.
:return: The stats_last_refreshed of this SystemDiagnosticsSnapshotDTO.
:rtype: str
"""
return self._stats_last_refreshed
@stats_last_refreshed.setter
def stats_last_refreshed(self, stats_last_refreshed):
"""
Sets the stats_last_refreshed of this SystemDiagnosticsSnapshotDTO.
When the diagnostics were generated.
:param stats_last_refreshed: The stats_last_refreshed of this SystemDiagnosticsSnapshotDTO.
:type: str
"""
self._stats_last_refreshed = stats_last_refreshed
@property
def version_info(self):
"""
Gets the version_info of this SystemDiagnosticsSnapshotDTO.
The nifi, os, java, and build version information
:return: The version_info of this SystemDiagnosticsSnapshotDTO.
:rtype: VersionInfoDTO
"""
return self._version_info
@version_info.setter
def version_info(self, version_info):
"""
Sets the version_info of this SystemDiagnosticsSnapshotDTO.
The nifi, os, java, and build version information
:param version_info: The version_info of this SystemDiagnosticsSnapshotDTO.
:type: VersionInfoDTO
"""
self._version_info = version_info
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, SystemDiagnosticsSnapshotDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
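# Illustrative usage sketch (assumed example values, not part of the generated client):
# snap = SystemDiagnosticsSnapshotDTO(max_heap='512 MB', max_heap_bytes=536870912)
# snap.to_dict()['max_heap_bytes']  # -> 536870912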
|
arsalhuda24/pedestrian-data-visualization
|
test.py
|
<gh_stars>0
from toolkit.loaders.loader_sdd import load_sdd, load_sdd_dir
import os, yaml
scene_name = 'quad'
scene_video_id = 'video3'
# fixme: replace OPENTRAJ_ROOT with the address to root folder of OpenTraj
sdd_root = os.path.join("/home/asyed/OpenTraj/", 'datasets', 'SDD')
annot_file = os.path.join(sdd_root, scene_name, scene_video_id, 'annotations.txt')
# load the homography values
with open(os.path.join(sdd_root, 'estimated_scales.yaml'), 'r') as hf:
scales_yaml_content = yaml.load(hf, Loader=yaml.FullLoader)
scale = scales_yaml_content[scene_name][scene_video_id]['scale']
traj_dataset = load_sdd(annot_file, scale=scale, scene_id=scene_name + '-' + scene_video_id,
drop_lost_frames=False, use_kalman=False)
print(traj_dataset)
path ="/home/asyed/Downloads/VAE-Ped/datasets/stanford/isvc/val/" + str(scene_name) +"_"+ str(scene_video_id[-1:]) +".txt"
df=traj_dataset.data[traj_dataset.data.label == "pedestrian"]
df=df.sort_values(by=["frame_id","agent_id"])
df = df.iloc[:,[0,1,2,3]]
df=df.reset_index()
# df=traj_dataset.data.iloc[:,[0,1,2,3]]
df1 = df.iloc[:,[1,2,3,4]]
print(df)
df1.to_csv(path, header=None, index=None, sep='\t', mode='w')
|
mastersign/prochestra
|
prochestra/__main__.py
|
<gh_stars>0
from . import command_line
exit(command_line())
|
mastersign/prochestra
|
prochestra/__init__.py
|
import subprocess
import datetime
import os
import argparse
import json
import shlex
import yaml
__version__ = "1.1.2"
class Prochestra(object):
def __init__(self, log=None, log_prefix="PROCHESTRA ", silent=False):
self.log_file = log
self.log_prefix = log_prefix
self.silent = silent
self.state = {}
def info(self, text, *args):
if self.log_file:
ts = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
print(ts + ' ' + self.log_prefix + text.format(*args),
file=self.log_file, flush=True)
def run(self, job_list):
jobs = job_list.jobs
self.state = dict.fromkeys(list(map(lambda j: j.id, jobs)), None)
for job in jobs:
if not all(map(lambda dep: self.state[dep] if dep in self.state else None, job.dependencies)):
self.info("{} SKIPPED", job.id)
continue
self.info("{} STARTED", job.id)
# combine the cmd and the args into one string, because of shell=True
argv = [' '.join([job.cmd] + [shlex.quote(arg) for arg in job.args])]
p = subprocess.run(argv, shell=True,
stderr=subprocess.DEVNULL if self.silent else self.log_file,
stdout=subprocess.DEVNULL if self.silent else self.log_file)
self.state[job.id] = p.returncode == 0
if p.returncode != 0:
print("Prochestra: job '{}' exited with code {}.".format(job.id, p.returncode))
self.info("{} FAILED WITH CODE {}.", job.id, p.returncode)
else:
self.info("{} FINISHED", job.id)
return all(self.state.values())
class Job(object):
def __init__(self, data):
self.id = data['id']
self.cmd = data['cmd']
args = data['args'] if 'args' in data else []
self.args = [args] if type(args) is str else args
dependencies = data['dependencies'] if 'dependencies' in data else []
self.dependencies = [dependencies] if type(dependencies) is str else dependencies
class JobList(object):
def __init__(self, data):
self.data = data
if 'jobs' in self.data:
self.jobs = list(map(lambda entry: Job(entry), self.data['jobs']))
else:
self.jobs = []
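# Illustrative job-list structure these classes expect (hypothetical YAML; field names
# taken from Job.__init__ above; 'args' and 'dependencies' may be a string or a list):
#
#   jobs:
#     - id: fetch
#       cmd: curl
#       args: ['-o', 'data.json', 'https://example.org/data.json']
#     - id: report
#       cmd: python report.py
#       dependencies: fetch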
def _file_format(path):
_, ext = os.path.splitext(path)
return ext[1:].lower() if ext.startswith('.') else ext.lower()
def _load_data(file, file_format):
if file_format == 'json':
data = json.load(file)
file.close()
elif file_format == 'yaml' or file_format == 'yml':
data = yaml.load(file, Loader=yaml.SafeLoader)
file.close()
else:
raise Exception("Unsupported file type: {}".format(file_format))
return data
class JobListFile(argparse.Action):
def __call__(self, parser, namespace, values, option_strings=None):
path = os.path.abspath(values)
try:
file = argparse.FileType('r', encoding='UTF-8')(path)
except argparse.ArgumentTypeError as ate:
raise argparse.ArgumentError(self, ate)
try:
data = _load_data(file, _file_format(path))
setattr(namespace, self.dest, JobList(data))
except Exception as e:
raise argparse.ArgumentError(self, "Error while loading jobfile: {}".format(e))
def command_line(argv=None):
parser = argparse.ArgumentParser(prog='prochestra')
parser.add_argument('jobfile', action=JobListFile,
help="A path to a JSON or YAML file with the job list.")
parser.add_argument('--log', '-l', type=argparse.FileType('a', encoding='UTF-8'),
help="A path to a log file to write the output to.")
parser.add_argument('--silent', '-s', action='store_true',
help="Dismiss the output of the executed processes.")
parser.add_argument('--version', '-v', action='version', version=__version__)
args = parser.parse_args(args=argv)
runner = Prochestra(log=args.log, silent=args.silent)
result = runner.run(args.jobfile)
return 0 if result else 1
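# Example invocation (illustrative), matching the argparse definition above; either the
# console script or `python -m prochestra` (see __main__.py) works:
#   prochestra jobs.yaml --log run.log --silent
# The process exits with 0 only when every job finished successfully.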
|
mastersign/prochestra
|
setup.py
|
#!/usr/bin/env python
import re
from setuptools import setup
import fastentrypoints
with open('prochestra/__init__.py') as source_file:
source_text = source_file.read()
version = re.compile(r'^__version__\s*=\s*"(.*)"', re.M).search(source_text).group(1)
setup(name='Prochestra',
packages=['prochestra'],
entry_points={'console_scripts': ['prochestra=prochestra:command_line']},
version=version,
description='Run multiple processes with dependencies with one call.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/mastersign/prochestra',
install_requires=['pyyaml >= 3.0.0'],
)
|
mastersign/prochestra
|
bin/prochestra-cli.py
|
#!/usr/bin/env python
import os
import sys
source_path = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(source_path)
if __name__ == '__main__':
from prochestra import command_line
exit(command_line())
|
rexgamer945/instagram-bot
|
instabot.py
|
# The original snippet omits the import; assuming the instabot-py package, it would be:
# from instabot_py import InstaBot
bot = InstaBot('login', 'password',
like_per_day=1000,
more_than_likes=10,
tag_list = ['love', 'coding', 'dev'],
max_like_for_one_tag=5,
log_mod = 0)
|
aaronamk/my-purchase.online
|
purchase.py
|
#!/usr/bin/python
import sys
import pyrebase
from getpass import getpass
config = {
    "apiKey": "<KEY>",
    "authDomain": "my-purchases-bb5f8.firebaseapp.com",
    "databaseURL": "https://my-purchases-bb5f8.firebaseio.com",
    "storageBucket": "my-purchases-bb5f8.appspot.com",
    "serviceAccount": "my-purchases-bb5f8-firebase-adminsdk-e272n-daddec61f9.json"
}
firebase = pyrebase.initialize_app(config)
auth = firebase.auth()
# Get a reference to the database service
db = firebase.database()
username = ""
password = ""
for arg_num in range(len(sys.argv)):
if ((sys.argv[arg_num]) == "-f"):
receipt = open(sys.argv[arg_num + 1], "r")
for line in receipt.read():
pass
if ((sys.argv[arg_num]) == "-u"):
username = sys.argv[arg_num + 1]
if ((sys.argv[arg_num]) == "-p"):
password = sys.argv[arg_num + 1]
if (username == ""):
username = input("email: ")
if (password == ""):
password = getpass("password: ")
#authenticate a user
user = auth.sign_in_with_email_and_password(username, password)
#archer = {"name": "<NAME>", "agency": "Figgis Agency"} db.child("agents").push(archer, user['<EMAIL>'])
#lana = {"name": "Lana Kane", "agency": "Figgis Agency"} db.child("agents").child("Lana").set(lana, user['idToken'])
print(db.child("Apple").get(user['idToken']).val())
|
oasys/checkov
|
checkov/common/util/docs_generator.py
|
<gh_stars>0
#!/usr/bin/env python
import re
from tabulate import tabulate
from checkov.arm.registry import arm_registry
from checkov.cloudformation.checks.resource.registry import cfn_registry as cfn_registry
from checkov.kubernetes.registry import registry as k8_registry
from checkov.serverless.registry import sls_registry
from checkov.terraform.checks.data.registry import data_registry
from checkov.terraform.checks.provider.registry import provider_registry
from checkov.terraform.checks.resource.registry import resource_registry
ID_PARTS_PATTERN = re.compile(r'(\D*)(\d*)')
def get_compare_key(c):
res = []
for match in ID_PARTS_PATTERN.finditer(c[0]):
text, number = match.groups()
numeric_value = int(number) if number else 0
# count number of leading zeros
same_number_ordering = len(number) - len(number.lstrip('0'))
res.append((text, numeric_value, same_number_ordering))
return res
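# For example, a row whose id is 'CKV_AWS_8' produces the key
# [('CKV_AWS_', 8, 0), ('', 0, 0)], so check ids sort by text prefix first and then numerically.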
def print_checks(framework="all"):
printable_checks_list = get_checks(framework)
print(
tabulate(printable_checks_list, headers=["Id", "Type", "Entity", "Policy", "IaC"], tablefmt="github",
showindex=True))
print("\n\n---\n\n")
def get_checks(framework="all"):
printable_checks_list = []
if framework == "terraform" or framework == "all":
for key in resource_registry.checks.keys():
for check in resource_registry.checks[key]:
printable_checks_list.append([check.id, "resource", key, check.name, "Terraform"])
for key in data_registry.checks.keys():
for check in data_registry.checks[key]:
printable_checks_list.append([check.id, "data", key, check.name, "Terraform"])
for key in provider_registry.checks.keys():
for check in provider_registry.checks[key]:
printable_checks_list.append([check.id, "provider", key, check.name, "Terraform"])
if framework == "cloudformation" or framework == "all":
for key in cfn_registry.checks.keys():
for check in cfn_registry.checks[key]:
printable_checks_list.append([check.id, "resource", key, check.name, "Cloudformation"])
if framework == "kubernetes" or framework == "all":
for key in k8_registry.checks.keys():
for check in k8_registry.checks[key]:
printable_checks_list.append([check.id, "PodSecurityPolicy", key, check.name, "Kubernetes"])
if framework == "serverless" or framework == "all":
for key in sls_registry.checks.keys():
for check in sls_registry.checks[key]:
printable_checks_list.append([check.id, "resource", key, check.name, "serverless"])
if framework == "arm" or framework == "all":
for key in arm_registry.checks.keys():
for check in arm_registry.checks[key]:
printable_checks_list.append([check.id, "resource", key, check.name, "arm"])
return sorted(printable_checks_list, key=get_compare_key)
if __name__ == '__main__':
print_checks()
|
oasys/checkov
|
checkov/version.py
|
<reponame>oasys/checkov
version = '1.0.514'
|
oasys/checkov
|
checkov/terraform/checks/resource/aws/LaunchConfigurationEBSEncryption.py
|
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class LaunchConfigurationEBSEncryption(BaseResourceValueCheck):
def __init__(self):
name = "Ensure all data stored in the Launch configuration EBS is securely encrypted"
id = "CKV_AWS_8"
supported_resources = ['aws_launch_configuration', 'aws_instance']
categories = [CheckCategories.ENCRYPTION]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
return "*_block_device/[0]/encrypted"
def scan_resource_conf(self, conf):
"""
Looks for encryption configuration at launch configuration:
https://www.terraform.io/docs/providers/aws/r/launch_configuration.html or https://www.terraform.io/docs/providers/aws/d/instance.html
:param conf: aws_launch_configuration configuration
:return: <CheckResult>
"""
have_root_block = 0
for key in conf.keys():
if "block_device" in key and "ephemeral" not in key:
if isinstance(conf[key][0], dict) and conf[key][0].get("encrypted") != [True]:
return CheckResult.FAILED
if "root_block_device" in key:
# Issue 496 - TF will create unencrypted EBS root by default if whole root_block_device block is omitted.
have_root_block = 1
if have_root_block == 0:
return CheckResult.FAILED
return CheckResult.PASSED
check = LaunchConfigurationEBSEncryption()
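# Illustrative resource configurations (hypothetical, in checkov's list-wrapped HCL form):
#   {'root_block_device': [{'encrypted': [True]}]}   -> CheckResult.PASSED
#   {'root_block_device': [{'encrypted': [False]}]}  -> CheckResult.FAILED
#   {}  (root_block_device omitted entirely)         -> CheckResult.FAILED (see the issue 496 note above)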
|
Thigos/Detector-de-Carros-OpenCV
|
Driver.py
|
<filename>Driver.py
import os
import cv2
import pyautogui
import mahotas
# Path to the video file
video_original = cv2.VideoCapture('teste.wmv')
# Directory where the script files are located
diret = os.path.dirname(os.path.abspath(__file__))
while True:
# Read a frame from the video
ret, frame = video_original.read()
# Convert to grayscale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Smooth the image with a Gaussian blur
suavizador = cv2.GaussianBlur(gray, (7, 7), 0)
# Use Otsu's method to find the threshold for binarizing the image (black or white)
otsu = mahotas.thresholding.otsu(suavizador)
# Copy the grayscale image
binar = gray.copy()
# Pixels above the Otsu threshold become white (255), everything else black (0)
binar[binar > otsu] = 255
binar[binar < 255] = 0
binar = cv2.bitwise_not(binar)
# Iterate over every file in the ML_RECORT folder
for nome in os.listdir(diret + '\\ML_RECORT'):
# Read the template image
carro = cv2.imread(diret + '\\ML_RECORT\\' + str(nome))
# Convert the template to grayscale
carroGray = cv2.cvtColor(carro, cv2.COLOR_BGR2GRAY)
# Get the width (w) and height (h) of the template
w, h = carroGray.shape[::-1]
# Match the template (carroGray) against the input image (binar)
res = cv2.matchTemplate(binar, carroGray, cv2.TM_CCOEFF)
limit = 200000000
# Get the match statistics
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# Check whether the match score is at least 200000000
if (max_val >= limit):
# Draw a rectangle and a label to identify the car
cv2.rectangle(frame, max_loc, (max_loc[0] + w, max_loc[1] + h), (0, 0, 255), 2)
fonte = cv2.FONT_HERSHEY_SIMPLEX
texto = "CARRO"
# Frames 136 and 236 are the points where the car is closest to the other car,
# so the program shows a warning to brake and would press 's', the brake key in the game
if(nome == "frame136.png" or nome == "frame236.png"):
texto = "FREAR"
#pyautogui.press('s')
cv2.putText(frame, texto, (max_loc[0], max_loc[1] - 5), fonte,
0.5, (0, 0, 255), 1, cv2.LINE_AA)
# Break out of the for loop so there are no frame conflicts
break
# Show the video
cv2.imshow("Driver", frame)
# Press 'q' to quit
key = cv2.waitKey(1)
if key == ord('q'):
break
cv2.destroyAllWindows()
|
SBonnietheoneandonly/MalaBot
|
thesaurus.py
|
import random
_words = {
'moved to': [
'teleported to',
'slinked away to',
'sneaked to',
'hopped to',
'ran away to'
],
'Hello': [
'Hello',
'Bonjour',
'Hola',
'Zdravstvuyte',
'Nǐn hǎo',
'Salve',
'Konnichiwa',
'Guten Tag',
'Olá',
'Goedendag',
'Annyeong'
],
'Goodbye': [
'Goodbye',
'Adios',
'Arrivederci',
'Au Revoir',
'Adeus',
'<NAME>',
'Sayōnara',
'Do svidaniya',
'Annyeong',
'Slan',
'Tot ziens'
]
}
def get_synonym(word):
return _words[word][random.randint(0, len(_words[word]) - 1)]
|
SBonnietheoneandonly/MalaBot
|
main.py
|
import os
import discord
from dotenv import load_dotenv
from thesaurus import get_synonym
from tts import text_to_pcm
from io import BytesIO
load_dotenv()
TOKEN = os.getenv('TOKEN')
client = discord.Client()
voice_client = None
message_queue = []
def after_play_finished(err):
# If another message has been queued, play it.
if message_queue:
msg = message_queue.pop(0)
voice_client.play(msg, after=after_play_finished)
async def update_bot_channel(guild):
global voice_client
all_channels = await guild.fetch_channels()
if not all_channels:
return
def get_num_members(idx):
channel = all_channels[idx]
if isinstance(channel, discord.VoiceChannel):
if channel == guild.afk_channel:
return 0
num_members = len(channel.voice_states)
if voice_client and voice_client.channel == channel:
num_members -= 1
return num_members
else:
return 0
# Find the channel with the most non-bot members and try to join it.
max_idx = max(range(len(all_channels)), key=lambda i: get_num_members(i))
if get_num_members(max_idx) > 0:
channel_to_join = all_channels[max_idx]
if voice_client:
if voice_client.channel != channel_to_join:
await voice_client.move_to(channel_to_join)
else:
voice_client = await channel_to_join.connect()
elif voice_client:
# Leave voice if there's nobody left on the server.
await voice_client.disconnect()
voice_client = None
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord!')
@client.event
async def on_voice_state_update(member, old_state, new_state):
# Don't send messages about ourself or any other bots!
if member.bot:
return
# Has the user moved channel?
new_channel = new_state.channel
old_channel = old_state.channel
if old_channel == new_channel:
return
# Check if the bot should go to the new channel.
guild = (old_channel and old_channel.guild) or (new_channel and new_channel.guild)
assert(guild)
await update_bot_channel(guild)
# Build a message based on the change. Treat 'afk' channel as if user disconnected.
now_in_channel = (new_channel and new_channel != guild.afk_channel)
was_in_channel = (old_channel and old_channel != guild.afk_channel)
message = None
if now_in_channel:
if was_in_channel:
message = f'{member.display_name} {get_synonym("moved to")} {new_channel.name}.'
else:
message = f'{get_synonym("Hello")} {member.display_name}!'
elif was_in_channel:
message = f'{get_synonym("Goodbye")} {member.display_name}!'
# Send the text-to-speech message, or queue it if the bot is already speaking.
if message and voice_client:
audio_stream = discord.PCMAudio(BytesIO(text_to_pcm(message)))
if voice_client.is_playing():
message_queue.append(audio_stream)
else:
voice_client.play(audio_stream, after=after_play_finished)
client.run(TOKEN)
|
SBonnietheoneandonly/MalaBot
|
tts.py
|
<reponame>SBonnietheoneandonly/MalaBot<filename>tts.py
from dotenv import load_dotenv
from google.cloud import texttospeech
load_dotenv()
client = texttospeech.TextToSpeechClient()
voice = texttospeech.VoiceSelectionParams(language_code="en-GB",
name="en-GB-Wavenet-F")
# NOTE: Discord actually wants 48KHz stereo (for some reason). But 96KHz mono works fine for now.
audio_config = texttospeech.AudioConfig(
audio_encoding=texttospeech.AudioEncoding.LINEAR16, sample_rate_hertz=96000)
def text_to_pcm(input):
synthesis_input = texttospeech.SynthesisInput(text=input)
response = client.synthesize_speech(input=synthesis_input,
voice=voice,
audio_config=audio_config)
return response.audio_content
|
bscott711/LLSpy
|
tests/test_gui.py
|
<gh_stars>0
import os
from llspy.gui.mainwindow import main_GUI
from PyQt5 import QtWidgets, QtCore
from pytestqt import qtbot
from llspy.llsdir import LLSdir
def test_basic_processing(qtbot):
testdata = os.path.join(os.path.dirname(__file__), 'testdata', 'sample')
LLSdir(testdata).reduce_to_raw(keepmip=False)
n_testfiles = len(os.listdir(testdata))
otfdir = os.path.join(os.path.dirname(__file__), 'testdata', 'otfs')
APP = QtWidgets.QApplication([])
mainGUI = main_GUI()
mainGUI.loadProgramDefaults()
mainGUI.setOTFdirPath(otfdir)
assert mainGUI.listbox.rowCount() == 0
mainGUI.listbox.addPath(testdata)
assert mainGUI.listbox.rowCount() == 1
with qtbot.waitSignal(mainGUI.sig_processing_done, timeout=12000):
qtbot.mouseClick(mainGUI.processButton, QtCore.Qt.LeftButton)
deconFolder = os.path.join(testdata, 'GPUdecon')
MIPfolder = os.path.join(deconFolder, 'MIPs')
assert os.path.isdir(deconFolder)
assert os.path.isdir(MIPfolder)
assert len(os.listdir(deconFolder)) == 3
assert len(os.listdir(MIPfolder)) == 1
LLSdir(testdata).reduce_to_raw(keepmip=False)
assert not os.path.isdir(deconFolder)
assert not os.path.isdir(MIPfolder)
assert len(os.listdir(testdata)) == n_testfiles
mainGUI.quitProgram(save=False)
# def test_spimagine_preview(qtbot):
# testdata = os.path.join(os.path.dirname(__file__), 'testdata', 'sample')
# otfdir = os.path.join(os.path.dirname(__file__), 'testdata', 'otfs')
# APP = QtWidgets.QApplication([])
# mainGUI = main_GUI()
# mainGUI.loadProgramDefaults()
# mainGUI.setOTFdirPath(otfdir)
# assert mainGUI.listbox.rowCount() == 0
# mainGUI.listbox.addPath(testdata)
# assert mainGUI.listbox.rowCount() == 1
# def preview_exists():
# assert len(mainGUI.spimwins)
# mainGUI.prevBackendSpimagineRadio.setChecked(True)
# qtbot.mouseClick(mainGUI.previewButton, QtCore.Qt.LeftButton)
# qtbot.waitUntil(preview_exists, timeout=10000)
# mainGUI.close_all_previews()
# assert len(mainGUI.spimwins) == 0
# mainGUI.quitProgram(save=False)
def test_matplotlib_preview(qtbot):
testdata = os.path.join(os.path.dirname(__file__), 'testdata', 'sample')
otfdir = os.path.join(os.path.dirname(__file__), 'testdata', 'otfs')
APP = QtWidgets.QApplication([])
mainGUI = main_GUI()
mainGUI.loadProgramDefaults()
mainGUI.setOTFdirPath(otfdir)
assert mainGUI.listbox.rowCount() == 0
mainGUI.listbox.addPath(testdata)
assert mainGUI.listbox.rowCount() == 1
def preview_exists():
assert len(mainGUI.spimwins)
mainGUI.prevBackendMatplotlibRadio.setChecked(True)
qtbot.mouseClick(mainGUI.previewButton, QtCore.Qt.LeftButton)
qtbot.waitUntil(preview_exists, timeout=10000)
mainGUI.close_all_previews()
assert len(mainGUI.spimwins) == 0
mainGUI.quitProgram(save=False)
|
bscott711/LLSpy
|
llspy/gui/watcher.py
|
import fnmatch
import logging
import os
import os.path as osp
import time
import numpy as np
from PyQt5 import QtCore
import llspy
from llspy.gui import workers
from llspy.gui.helpers import (
newWorkerThread,
shortname,
wait_for_file_close,
wait_for_folder_finished,
)
_watchdog = False
ActiveWatcher = None
MainHandler = None
Observer = None
try:
from watchdog import events
from watchdog.observers import Observer
_watchdog = True
except ImportError:
pass
logger = logging.getLogger() # set root logger
logger.setLevel(logging.DEBUG)
lhStdout = logger.handlers[0] # grab console handler so we can delete later
ch = logging.StreamHandler() # create new console handler
ch.setLevel(logging.ERROR) # with desired logging level
# ch.addFilter(logging.Filter('llspy')) # and any filters
logger.addHandler(ch) # add it to the root logger
logger.removeHandler(lhStdout) # and delete the original streamhandler
if _watchdog:
class ActiveWatcher(QtCore.QObject):
    """Watches an actively acquiring LLS directory and processes timepoints as they become ready."""
finished = QtCore.pyqtSignal()
stalled = QtCore.pyqtSignal()
status_update = QtCore.pyqtSignal(str, int)
def __init__(self, path, timeout=30):
super(ActiveWatcher, self).__init__()
self.path = path
self.timeout = timeout # seconds to wait for new file before giving up
self.inProcess = False
settext = llspy.util.find_filepattern(path, "*Settings.txt")
wait_for_file_close(settext)
time.sleep(1)  # give the settings file a moment to finish writing
self.E = llspy.LLSdir(path, False)
# TODO: probably need to check for files that are already there
self.tQueue = []
self.allReceived = False
self.worker = None
try:
app = QtCore.QCoreApplication.instance()
gui = next(
w for w in app.topLevelWidgets() if w.objectName() == "main_GUI"
)
self.opts = gui.getValidatedOptions()
except Exception:
raise
# timeout clock to make sure this directory doesn't stagnate
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.stall)
self.timer.start(self.timeout * 1000)
# Too strict?
fpattern = r"^.+_ch\d_stack\d{4}_\D*\d+.*_\d{7}msec_\d{10}msecAbs.*.tif"
# fpattern = '.*.tif'
handler = ActiveHandler(
str(self.E.path),
self.E.parameters.nc,
self.E.parameters.nt,
regexes=[fpattern],
ignore_directories=True,
)
handler.tReady.connect(self.add_ready)
handler.allReceived.connect(self.all_received)
handler.newfile.connect(self.newfile)
handler.check_for_existing_files()
self.observer = Observer()
self.observer.schedule(handler, self.path, recursive=False)
self.observer.start()
logger.info("New LLS directory now being watched: " + self.path)
@QtCore.pyqtSlot(str)
def newfile(self, path):
self.restart_clock()
self.status_update.emit(shortname(path), 2000)
def all_received(self):
self.allReceived = True
if not self.inProcess and len(self.tQueue):
self.terminate()
def restart_clock(self):
# restart the kill timer, since the handler is still alive
self.timer.start(self.timeout * 1000)
def add_ready(self, t):
# add the new timepoint to the Queue
self.tQueue.append(t)
# start processing
self.process()
def process(self):
if not self.inProcess and len(self.tQueue):
self.inProcess = True
timepoints = []
while len(self.tQueue):
timepoints.append(self.tQueue.pop())
timepoints = sorted(timepoints)
cRange = None # TODO: plug this in
self.tQueue = []
# note: it was important NOT to ditch partial tiffs with the active
# watcher since the files aren't finished yet...
w, thread = newWorkerThread(
workers.TimePointWorker,
self.path,
timepoints,
cRange,
self.opts,
False,
workerConnect={"previewReady": self.writeFile},
start=True,
)
self.worker = (timepoints, w, thread)
elif not any((self.inProcess, len(self.tQueue), not self.allReceived)):
self.terminate()
@QtCore.pyqtSlot(np.ndarray, float, float)
def writeFile(self, stack, dx, dz):
timepoints, worker, thread = self.worker
def write_stack(s, c=0, t=0):
if self.opts["nIters"] > 0:
outfolder = "GPUdecon"
proctype = "_decon"
else:
outfolder = "Deskewed"
proctype = "_deskewed"
if not self.E.path.joinpath(outfolder).exists():
self.E.path.joinpath(outfolder).mkdir()
corstring = "_COR" if self.opts["correctFlash"] else ""
basename = os.path.basename(self.E.get_files(c=c, t=t)[0])
filename = basename.replace(".tif", corstring + proctype + ".tif")
outpath = str(self.E.path.joinpath(outfolder, filename))
llspy.util.imsave(
llspy.util.reorderstack(np.squeeze(s), "zyx"),
outpath,
dx=self.E.parameters.dx,
dz=self.E.parameters.dzFinal,
)
if stack.ndim == 5:
if not stack.shape[0] == len(timepoints):
raise ValueError(
"Processed stacks length not equal to requested"
" number of timepoints processed"
)
for t in range(stack.shape[0]):
for c in range(stack.shape[1]):
s = stack[t][c]
write_stack(s, c, timepoints[t])
elif stack.ndim == 4:
for c in range(stack.shape[0]):
write_stack(stack[c], c, timepoints[0])
else:
write_stack(stack, t=timepoints[0])
thread.quit()
thread.wait()
self.inProcess = False
self.process() # check to see if there's more waiting in the queue
def stall(self):
self.stalled.emit()
logger.debug("WATCHER TIMEOUT REACHED!")
self.terminate()
@QtCore.pyqtSlot()
def terminate(self):
logger.debug("TERMINATING WATCHER")
self.observer.stop()
self.observer.join()
self.finished.emit()
class MainHandler(events.FileSystemEventHandler, QtCore.QObject):
foundLLSdir = QtCore.pyqtSignal(str)
lostListItem = QtCore.pyqtSignal(str)
def __init__(self):
super(MainHandler, self).__init__()
def on_created(self, event):
# Called when a file or directory is created.
if event.is_directory:
pass
else:
if "Settings.txt" in event.src_path:
wait_for_folder_finished(osp.dirname(event.src_path))
self.foundLLSdir.emit(osp.dirname(event.src_path))
def on_deleted(self, event):
# Called when a file or directory is created.
if event.is_directory:
app = QtCore.QCoreApplication.instance()
gui = next(
w for w in app.topLevelWidgets() if w.objectName() == "main_GUI"
)
# TODO: Is it safe to directly access main gui listbox here?
if len(gui.listbox.findItems(event.src_path, QtCore.Qt.MatchExactly)):
self.lostListItem.emit(event.src_path)
class ActiveHandler(events.RegexMatchingEventHandler, QtCore.QObject):
tReady = QtCore.pyqtSignal(int)
allReceived = QtCore.pyqtSignal() # don't expect to receive anymore
newfile = QtCore.pyqtSignal(str)
def __init__(self, path, nC, nT, **kwargs):
super(ActiveHandler, self).__init__(**kwargs)
self.path = path
self.nC = nC
self.nT = nT
# this assumes the experiment hasn't been stopped mid-stream
self.counter = np.zeros(self.nT)
def check_for_existing_files(self):
# this is here in case files already exist in the directory...
# we don't want the handler to miss them
# this is called by the parent after connecting the tReady signal
for f in os.listdir(self.path):
if fnmatch.fnmatch(f, "*tif"):
self.register_file(osp.join(self.path, f))
def on_created(self, event):
# Called when a file or directory is created.
self.register_file(event.src_path)
def register_file(self, path):
self.newfile.emit(path)
p = llspy.parse.parse_filename(osp.basename(path))
self.counter[p["stack"]] += 1
ready = np.where(self.counter == self.nC)[0]
# break counter for those timepoints
self.counter[ready] = np.nan
# can use <100 as a sign of timepoints still not finished
if len(ready):
# try to see if file has finished writing... does this work?
wait_for_file_close(path)
[self.tReady.emit(t) for t in ready]
# once all nC * nT has been seen emit allReceived
if all(np.isnan(self.counter)):
logger.debug("All Timepoints Received")
self.allReceived.emit()
|
bscott711/LLSpy
|
llspy/otf.py
|
<gh_stars>0
from .exceptions import OTFError
from .util import load_lib
from datetime import datetime, timedelta
import numpy as np
import re
import ctypes
import os
import logging
logger = logging.getLogger(__name__)
try:
    import pathlib as plib
    plib.Path()
except (ImportError, AttributeError):
    try:
        import pathlib2 as plib
    except ImportError:
        raise ImportError("no pathlib detected. For python2: pip install pathlib2")
otflib = load_lib("libradialft")
if not otflib:
logger.error("Could not load libradialft!")
else:
try:
shared_makeotf = otflib.makeOTF
shared_makeotf.restype = ctypes.c_int
shared_makeotf.argtypes = [
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_float,
ctypes.c_int,
ctypes.c_bool,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_int,
ctypes.c_bool,
]
except AttributeError as e:
logger.warning("Failed to properly import libradialft")
logger.error(e)
def requireOTFlib(func, *args, **kwargs):
def dec(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if not otflib:
raise Exception(
"Could not find libradialft library! OTF generation "
"will not be available:"
)
else:
raise e
return dec
@requireOTFlib
def makeotf(
psf,
otf=None,
lambdanm=520,
dz=0.1,
fixorigin=10,
bUserBackground=False,
background=90,
NA=1.25,
NIMM=1.3,
dr=0.102,
krmax=0,
bDoCleanup=False,
):
# krmax => "pixels outside this limit will be zeroed (overwriting estimated value from NA and NIMM)")
if otf is None:
otf = psf.replace(".tif", "_otf.tif")
shared_makeotf(
str.encode(psf),
str.encode(otf),
lambdanm,
dz,
fixorigin,
bUserBackground,
background,
NA,
NIMM,
dr,
krmax,
bDoCleanup,
)
return otf
# example: 20160825_488_totPSF_mb_0p5-0p42.tif
psffile_pattern = re.compile(
r"""
^(?P<date>\d{6}|\d{8}) # 6 or 8 digit date
_(?P<wave>\d+) # wavelength ... only digits following _ are used
_(?P<slmpattern>[a-zA-Z_]+) # slm pattern
_(?P<outerNA>[0-9p.]+) # outer NA, digits with . or p for decimal
[-_](?P<innerNA>[0-9p.]+) # inner NA, digits with . or p for decimal
(?P<isotf>_otf)?.tif$""", # optional _otf to specify that it is already an otf
re.VERBOSE,
)
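# Parsing the example filename above (illustrative), '20160825_488_totPSF_mb_0p5-0p42.tif' yields
# date='20160825', wave='488', slmpattern='totPSF_mb', outerNA='0p5', innerNA='0p42', isotf=None.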
default_otf_pattern = re.compile(
r"""
^(?P<wave>\d{3})
(?P<isotf>_otf)?
(?P<ispsf>_psf)?.tif$""",
re.VERBOSE,
)
def dir_has_otfs(dirname):
if os.path.isdir(str(dirname)):
if any(
[
(psffile_pattern.search(t) or default_otf_pattern.search(t))
for t in os.listdir(dirname)
]
):
return True
return False
def get_otf_dict(otfdir):
    """Build a dict mapping wavelength (int) to the OTF/PSF files found in otfdir.
    Each wavelength maps to {'default': path-or-None} plus (innerNA, outerNA) mask tuples,
    each of which holds a list of {'date', 'path', 'form', 'slm', 'otf'} records.
    """
otf_dict = {}
otfdir = plib.Path(otfdir)
for t in otfdir.glob("*tif"):
M = psffile_pattern.search(str(t.name))
if M:
M = M.groupdict()
wave = int(M["wave"])
if wave not in otf_dict:
otf_dict[wave] = {"default": None}
mask = (
float(M["innerNA"].replace("p", ".")),
float(M["outerNA"].replace("p", ".")),
)
if mask not in otf_dict[wave]:
otf_dict[wave][mask] = []
if not M["isotf"]:
matching_otf = otfdir.joinpath(t.name.replace(".tif", "_otf.tif"))
if not matching_otf.is_file():
matching_otf = None
else:
matching_otf = matching_otf
else:
matching_otf = None
otf_dict[wave][mask].append(
{
"date": datetime.strptime(M["date"], "%Y%m%d"),
"path": str(t),
"form": "otf" if M["isotf"] else "psf",
"slm": M["slmpattern"],
"otf": str(matching_otf),
}
)
else:
pathname = str(t.name)
M = default_otf_pattern.search(pathname)
if M:
M = M.groupdict()
wave = int(M["wave"])
if wave not in otf_dict:
otf_dict[wave] = {}
if not M["isotf"]:
newname = str(t).replace(".tif", "_otf.tif")
if M["ispsf"]:
newname = newname.replace("_psf", "")
pathname = newname
if not os.path.exists(newname):
makeotf(str(t), newname, lambdanm=int(wave), bDoCleanup=False)
otf_dict[wave]["default"] = str(otfdir.joinpath(pathname))
for wave in otf_dict.keys():
logger.debug("OTFdict wave: {}, masks: {}".format(wave, otf_dict[wave].keys()))
return otf_dict
def get_default_otf(wave, otfpath, approximate=True):
origwave = wave
otf_dict = get_otf_dict(otfpath)
waves_with_defaults = [k for k, v in otf_dict.items() if v["default"] is not None]
if wave not in waves_with_defaults:
if approximate:
for newwave in range(wave - 8, wave + 9):
if newwave in waves_with_defaults:
wave = newwave
if wave in otf_dict:
return otf_dict[wave]["default"]
else:
raise OTFError("No default OTF found for wavelength {}".format(origwave))
def choose_otf(
wave, otfpath, date=None, mask=None, direction="nearest", approximate=True
):
"""return otf with date closest to requested date.
if OTF doesn't exist, but PSF does, generate OTF and return the path.
direction can be {'nearest', 'before', 'after'}, where 'before' returns an
OTF that was collected before 'date' and 'after' returns one that was
collected after 'date.'
"""
if not dir_has_otfs(otfpath):
raise OTFError("Not a valid OTF path: {}".format(otfpath))
if not date:
date = datetime.now()
otf_dict = get_otf_dict(otfpath)
otflist = []
# if the exact wavelength is not matched, look for similar wavelengths...
if wave not in otf_dict:
if approximate:
for newwave in range(wave - 8, wave + 9):
if newwave in otf_dict:
wave = newwave
break
else:
return None
if wave not in otf_dict:
return None
# if the mask has been provided, use the OTFs from that mask
if mask is not None and mask in otf_dict[wave]:
otflist = otf_dict[wave][mask]
# if still empty, just return the default
if not len(otflist):
return get_default_otf(wave, otfpath, approximate)
if direction == "nearest":
minIdx = np.argmin([np.abs(i["date"] - date) for i in otflist])
elif direction == "before":
deltas = [date - i["date"] for i in otflist]
test = [d > timedelta(minutes=0) for d in deltas]
minIdx = next((idx for idx, is_before in enumerate(test) if is_before), None)
elif direction == "after":
deltas = [i["date"] - date for i in otflist]
test = [d > timedelta(minutes=0) for d in deltas]
minIdx = next((idx for idx, is_after in enumerate(test) if is_after), None)
else:
raise ValueError("Unknown direction argument: {}".format(direction))
if minIdx is None:
return get_default_otf(wave, otfpath, approximate)
matching_otfs = [
i
for i in otflist
if i["date"] == otflist[minIdx]["date"] and i["form"] == "otf"
]
if len(matching_otfs):
return matching_otfs[0]["path"]
else:
matching_psfs = [
i
for i in otflist
if i["date"] == otflist[minIdx]["date"] and i["form"] == "psf"
]
if matching_psfs:
# generate new OTF from PSF
return makeotf(
matching_psfs[0]["path"], lambdanm=int(wave), bDoCleanup=False
)
return get_default_otf(wave, otfpath, approximate)
|
bscott711/LLSpy
|
setup.py
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open
from os import path
import sys
with open('llspy/version.py') as f:
exec(f.read())
INCLUDELIBS = False
HERE = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(HERE, 'README.rst'), encoding='utf-8') as f:
README = f.read()
with open('LICENSE') as f:
LICENSE = f.read()
if sys.platform.startswith('win32'):
DATA_FILES = []
elif sys.platform.startswith('darwin'):
DATA_FILES = []
else:
DATA_FILES = []
PACKAGE_DATA = [path.join('gui', 'guiDefaults.ini'),
path.join('gui', 'img_window.ui'),
path.join('gui', 'before_after.png'),
path.join('gui', 'logo_dark.png'),
path.join('gui', 'logo_light.png')]
if INCLUDELIBS:
# add specific library by platform
if sys.platform.startswith('darwin'):
PACKAGE_DATA += [
'bin/*.app',
'lib/*.dylib',
]
elif sys.platform.startswith('win32'):
PACKAGE_DATA += [
path.join('bin', '*.exe'),
path.join('lib', '*.dll'),
]
else:
PACKAGE_DATA += [
'bin/cudaDeconv',
'bin/otfviewer',
'bin/radialft',
'lib/*.so',
]
setup(
name='llspy',
version=__version__,
description='Lattice Light Sheet Processing Tools',
long_description=README,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/tlambert03/LLSpy2',
license='BSD 3-clause',
packages=find_packages(exclude=('tests', 'docs', 'pyinstaller')),
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Visualization'
],
    python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
package_data={
'llspy': PACKAGE_DATA,
},
data_files=DATA_FILES,
install_requires=[
'numpy',
'scipy',
'tifffile',
'numba',
'voluptuous',
'click',
'watchdog',
'pyqt5',
'matplotlib',
'spimagine',
'gputools',
'sentry-sdk',
],
entry_points={
'console_scripts': [
'lls = llspy.bin.llspy_cli:cli',
'lls-gui = llspy.bin.llspy_gui:main'
],
},
)
|
bscott711/LLSpy
|
llspy/gui/mainwindow.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import json
import logging
import os
import os.path as osp
import numpy as np
from PyQt5 import QtCore, QtGui
from PyQt5 import QtWidgets as QtW
import llspy
import llspy.gui.exceptions as err
from fiducialreg.fiducialreg import RegFile, RegistrationError
from llspy.gui import workers
from llspy.gui.camcalibgui import CamCalibDialog
from llspy.gui.helpers import (
guirestore,
guisave,
newWorkerThread,
reveal,
shortname,
string_to_iterable,
)
from llspy.gui.img_dialog import ImgDialog
from llspy.gui.main_gui import Ui_Main_GUI
from llspy.gui.qtlogger import NotificationHandler
from .watcher import ActiveWatcher, MainHandler, Observer
logger = logging.getLogger() # set root logger
logger.setLevel(logging.DEBUG)
lhStdout = logger.handlers[0] # grab console handler so we can delete later
ch = logging.StreamHandler() # create new console handler
ch.setLevel(logging.ERROR) # with desired logging level
# ch.addFilter(logging.Filter('llspy')) # and any filters
logger.addHandler(ch) # add it to the root logger
logger.removeHandler(lhStdout) # and delete the original streamhandler
# import sys
# sys.path.append(osp.join(osp.abspath(__file__), os.pardir, os.pardir))
# Ui_Main_GUI = uic.loadUiType(osp.join(thisDirectory, 'main_gui.ui'))[0]
# form_class = uic.loadUiType('./llspy/gui/main_gui.ui')[0] # for debugging
# platform independent settings file
QtCore.QCoreApplication.setOrganizationName("llspy")
QtCore.QCoreApplication.setOrganizationDomain("llspy.com")
sessionSettings = QtCore.QSettings("llspy", "llspyGUI")
defaultSettings = QtCore.QSettings("llspy", "llspyDefaults")
# programDefaults are provided in guiDefaults.ini as a reasonable starting place
# this line finds the relative path depending on whether we're running in a
# pyinstaller bundle or live.
defaultINI = llspy.util.getAbsoluteResourcePath("gui/guiDefaults.ini")
programDefaults = QtCore.QSettings(defaultINI, QtCore.QSettings.IniFormat)
_napari = None
try:
import napari as _napari
if hasattr(_napari.view_layers, "view_multichannel"):
logger.warning("napari imported, but needs to be updated")
_napari = None
except ImportError:
logger.warning("napari unavailable.")
_SPIMAGINE_IMPORTED = False
if not sessionSettings.value("disableSpimagineCheckBox", False, type=bool):
try:
# raise ImportError("skipping")
with llspy.util.HiddenPrints():
from spimagine import DataModel, NumpyData
from spimagine.gui.mainwidget import MainWidget as spimagineWidget
_SPIMAGINE_IMPORTED = True
except ImportError as e:
print(e)
logger.error("could not import spimagine.")
class LLSDragDropTable(QtW.QTableWidget):
colHeaders = ["path", "name", "nC", "nT", "nZ", "nY", "nX", "angle", "dz", "dx"]
nCOLS = len(colHeaders)
# A signal needs to be defined on class level:
dropSignal = QtCore.pyqtSignal(list, name="dropped")
# This signal emits when a URL is dropped onto this list,
# and triggers handler defined in parent widget.
def __init__(self, parent=None):
super(LLSDragDropTable, self).__init__(0, self.nCOLS, parent)
self.setAcceptDrops(True)
self.setSelectionMode(QtW.QAbstractItemView.ExtendedSelection)
self.setSelectionBehavior(QtW.QAbstractItemView.SelectRows)
self.setEditTriggers(QtW.QAbstractItemView.DoubleClicked)
self.setGridStyle(3) # dotted grid line
self.llsObjects = {} # dict to hold LLSdir Objects when instantiated
self.setHorizontalHeaderLabels(self.colHeaders)
self.hideColumn(0) # column 0 is a hidden col for the full pathname
header = self.horizontalHeader()
header.setSectionResizeMode(1, QtW.QHeaderView.Stretch)
header.resizeSection(2, 27)
header.resizeSection(3, 45)
header.resizeSection(4, 40)
header.resizeSection(5, 40)
header.resizeSection(6, 40)
header.resizeSection(7, 40)
header.resizeSection(8, 48)
header.resizeSection(9, 48)
self.cellChanged.connect(self.onCellChanged)
@QtCore.pyqtSlot(int, int)
def onCellChanged(self, row, col):
# if it's not one of the last few columns that changed, ignore
if col < 7:
return
# must be the ACTIVE column that changed...
if col == self.currentColumn():
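            # temporarily disconnect to avoid re-entering this slot while the
            # value is reset/validated programmatically below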
self.cellChanged.disconnect(self.onCellChanged)
try:
val = float(self.currentItem().text())
except ValueError:
self.currentItem().setText("0.0")
raise err.InvalidSettingsError("Value entered was not a number")
try:
if col == 7:
if not (-90 < val < 90):
self.currentItem().setText("0.0")
raise err.InvalidSettingsError(
"angle must be between -90 and 90"
)
self.getLLSObjectByIndex(row).parameters["angle"] = val
if col == 8:
if not (0 < val < 20):
self.currentItem().setText("0.0")
raise err.InvalidSettingsError(
"dz must be between 0 and 20 (microns)"
)
self.getLLSObjectByIndex(row).parameters["dz"] = val
if col == 9:
if not (0 < val < 5):
self.currentItem().setText("0.0")
raise err.InvalidSettingsError(
"dx must be between 0 and 5 (microns)"
)
self.getLLSObjectByIndex(row).parameters["dx"] = val
# change color once updated
finally:
if (
(col == 7 and not (-90 < val < 90))
or (col == 8 and not (0 < val < 20))
or (col == 9 and not (0 < val < 5))
):
self.currentItem().setForeground(QtCore.Qt.white)
self.currentItem().setBackground(QtCore.Qt.red)
else:
self.currentItem().setForeground(QtCore.Qt.black)
self.currentItem().setBackground(QtCore.Qt.white)
self.cellChanged.connect(self.onCellChanged)
@QtCore.pyqtSlot(str)
def addPath(self, path):
try:
self.cellChanged.disconnect(self.onCellChanged)
except TypeError:
pass
if not (osp.exists(path) and osp.isdir(path)):
return
mainGUI = self.parent().parent().parent().parent().parent()
# If this folder is not on the list yet, add it to the list:
if not llspy.util.pathHasPattern(path, "*Settings.txt"):
if not mainGUI.allowNoSettingsCheckBox.isChecked():
logger.warning("No Settings.txt! Ignoring: {}".format(path))
return
# if it's already on the list, don't add it
if len(self.findItems(path, QtCore.Qt.MatchExactly)):
return
# if it's a folder containing files with "_Iter_" warn the user...
if llspy.util.pathHasPattern(path, "*Iter_*"):
if sessionSettings.value("warnIterFolder", True, type=bool):
box = QtW.QMessageBox()
box.setWindowTitle("Note")
box.setText(
"You have added a folder that appears to have been acquired"
' in Script Editor: it has "Iter_" in the filenames.\n\n'
"LLSpy generally assumes that each folder contains "
"a single position timelapse dataset (see docs for assumptions "
"about data format). Hit PROCESS ANYWAY to process this folder as is, "
"but it may yield unexpected results. You may also RENAME ITERS, "
"this will RENAME all files as if they were single experiments "
"acquired at different positions and place them into their own "
"folders (cannot be undone). Hit CANCEL to prevent adding this "
"item to the queue."
)
box.setIcon(QtW.QMessageBox.Warning)
box.addButton(QtW.QMessageBox.Cancel)
box.addButton("Process Anyway", QtW.QMessageBox.YesRole)
box.addButton("Rename Iters", QtW.QMessageBox.ActionRole)
box.setDefaultButton(QtW.QMessageBox.Cancel)
# pref = QtW.QCheckBox("Remember my answer")
# box.setCheckBox(pref)
reply = box.exec_()
if reply > 1000: # cancel hit
return
elif reply == 1: # rename iters hit
if not hasattr(self, "renamedPaths"):
self.renamedPaths = []
newfolders = llspy.llsdir.rename_iters(path)
self.renamedPaths.append(path)
# self.removePath(path)
[self.addPath(osp.join(path, p)) for p in newfolders]
return
elif reply == 0: # process anyway hit
pass
E = llspy.LLSdir(path)
if E.has_settings and not E.has_lls_tiffs:
if not E.is_compressed() and llspy.util.pathHasPattern(path, "*.tif"):
if sessionSettings.value("warnOnNoLLStiffs", True, type=bool):
box = QtW.QMessageBox()
box.setWindowTitle(
"Path has tiff files and Settings.txt file, but none of them match"
" the file pattern."
)
box.setText(
"Path has tiff files, but none of them match"
" the file pattern specified in the config tab. Please read "
"the section on filename parsing in the documentation for more info.\n\n"
"http://llspy.readthedocs.io/en/latest/main.html#parsing\n"
)
box.setIcon(QtW.QMessageBox.Warning)
box.addButton(QtW.QMessageBox.Ok)
box.setDefaultButton(QtW.QMessageBox.Ok)
# pref = QtW.QCheckBox("Just skip these folders in the future")
# box.setCheckBox(pref)
def setPref(value):
sessionSettings.setValue("warnOnNoLLStiffs", bool(value))
sessionSettings.sync()
# pref.stateChanged.connect(setPref)
box.exec_()
return
logger.info("Adding to queue: %s" % shortname(path))
rowPosition = self.rowCount()
self.insertRow(rowPosition)
item = [
path,
shortname(str(E.path)),
str(E.parameters.nc),
str(E.parameters.nt),
str(E.parameters.nz),
str(E.parameters.ny),
str(E.parameters.nx),
]
if E.has_settings:
item.extend(
[
"{:2.1f}".format(E.parameters.angle)
if E.parameters.samplescan
else "0",
"{:0.3f}".format(E.parameters.dz),
"{:0.3f}".format(E.parameters.dx),
]
)
else:
dx = E.parameters.dx or mainGUI.defaultDxSpin.value()
dz = E.parameters.dz or mainGUI.defaultDzSpin.value()
angle = E.parameters.angle or mainGUI.defaultAngleSpin.value()
item.extend(
["{:2.1f}".format(angle), "{:0.3f}".format(dz), "{:0.3f}".format(dx)]
)
E.parameters.angle = angle
            E.parameters.samplescan = angle > 0
E.parameters.dx = dx
E.parameters.dz = dz
for col, elem in enumerate(item):
entry = QtW.QTableWidgetItem(elem)
if col < 7:
entry.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
else:
entry.setFlags(
QtCore.Qt.ItemIsSelectable
| QtCore.Qt.ItemIsEnabled
| QtCore.Qt.ItemIsEditable
)
if not E.has_settings:
faintRed = QtGui.QBrush(QtGui.QColor(255, 0, 0, 30))
lightGray = QtGui.QBrush(QtGui.QColor(160, 160, 160))
entry.setForeground(lightGray)
entry.setBackground(faintRed)
self.setItem(rowPosition, col, entry)
if col > 7 and float(elem) == 0:
entry.setForeground(QtCore.Qt.white)
entry.setBackground(QtCore.Qt.red)
self.llsObjects[path] = E
self.cellChanged.connect(self.onCellChanged)
def selectedPaths(self):
selectedRows = self.selectionModel().selectedRows()
return [self.getPathByIndex(i.row()) for i in selectedRows]
def selectedObjects(self):
return [self.getLLSObjectByPath(p) for p in self.selectedPaths()]
@QtCore.pyqtSlot(str)
def removePath(self, path):
try:
self.llsObjects.pop(path)
except KeyError:
logger.warning("Could not remove path {} ... not in queue".format(path))
return
items = self.findItems(path, QtCore.Qt.MatchExactly)
for item in items:
self.removeRow(item.row())
if hasattr(self, "skipped_items"):
if path in self.skipped_items:
self.skipped_items.remove(path)
def getPathByIndex(self, index):
return self.item(index, 0).text()
def getLLSObjectByPath(self, path):
return self.llsObjects[path]
def getLLSObjectByIndex(self, index):
return self.llsObjects[self.getPathByIndex(index)]
def setRowBackgroudColor(self, row, color):
try:
self.cellChanged.disconnect(self.onCellChanged)
except TypeError:
pass
if isinstance(color, QtGui.QColor):
brush = QtGui.QBrush(color)
else:
brush = QtGui.QBrush(QtGui.QColor(color))
for col in range(self.nCOLS):
self.item(row, col).setBackground(brush)
if col > 7 and float(self.item(row, col).text()) == 0:
self.item(row, col).setForeground(QtCore.Qt.white)
self.item(row, col).setBackground(QtCore.Qt.red)
self.cellChanged.connect(self.onCellChanged)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls:
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
else:
event.ignore()
def dropEvent(self, event):
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
# links = []
for url in event.mimeData().urls():
# links.append(str(url.toLocalFile()))
self.addPath(str(url.toLocalFile()))
# self.dropSignal.emit(links)
# for url in links:
# self.listbox.addPath(url)
else:
event.ignore()
def keyPressEvent(self, event):
super(LLSDragDropTable, self).keyPressEvent(event)
if (
event.key() == QtCore.Qt.Key_Delete
or event.key() == QtCore.Qt.Key_Backspace
):
indices = self.selectionModel().selectedRows()
i = 0
for index in sorted(indices):
removerow = index.row() - i
path = self.getPathByIndex(removerow)
logger.info("Removing from queue: %s" % shortname(path))
self.removePath(path)
i += 1
class RegistrationTab(object):
def __init__(self):
self.RegCalibPathLoadButton.clicked.connect(self.setRegCalibPath)
self.GenerateRegFileButton.clicked.connect(self.generateCalibrationFile)
self.RegCalibPreviewButton.clicked.connect(self.previewRegistration)
self.RegFilePathLoadButton.clicked.connect(self.loadRegistrationFile)
self.RegCalib_channelRefCombo.clear()
self.RegCalib_channelRefModeCombo.clear()
def setRegCalibPath(self):
path = QtW.QFileDialog.getExistingDirectory(
self,
"Set Registration Calibration Directory",
"",
QtW.QFileDialog.ShowDirsOnly,
)
        if path is None or path == "":
return
RD = llspy.RegDir(path)
if not RD.isValid:
raise err.RegistrationError(
"Registration Calibration dir not valid: {}".format(RD.path)
)
self.RegCalibPathLineEdit.setText(path)
layout = self.RegCalibRefGroupLayout
group = self.RegCalibRefChannelsGroup
for cb in group.findChildren(QtW.QCheckBox):
layout.removeWidget(cb)
cb.setParent(None)
for wave in RD.parameters.channels.values():
box = QtW.QCheckBox(str(wave), group)
layout.addWidget(box)
box.setChecked(True)
def generateCalibrationFile(self):
group = self.RegCalibRefChannelsGroup
refs = [
int(cb.text()) for cb in group.findChildren(QtW.QCheckBox) if cb.isChecked()
]
path = self.RegCalibPathLineEdit.text()
if not path or path == "":
raise err.InvalidSettingsError("Please load a fiducial dataset path first")
if not len(refs):
raise err.InvalidSettingsError("Select at least one reference channel")
autoThresh = self.RegAutoThreshCheckbox.isChecked()
if autoThresh:
minbeads = int(self.RegMinBeadsSpin.value())
RD = llspy.RegDir(path, usejson=False, threshold="auto", mincount=minbeads)
else:
threshold = int(self.RegBeadThreshSpin.value())
RD = llspy.RegDir(path, threshold=threshold, usejson=False)
if not RD.isValid:
raise err.RegistrationError(
"Registration Calibration dir not valid: {}".format(RD.path)
)
outdir = QtW.QFileDialog.getExistingDirectory(
self,
"Chose destination for registration file",
"",
QtW.QFileDialog.ShowDirsOnly,
)
        if outdir is None or outdir == "":
return
class RegThread(QtCore.QThread):
finished = QtCore.pyqtSignal(str)
warning = QtCore.pyqtSignal(str, str)
def __init__(self, RD, outdir, refs):
QtCore.QThread.__init__(self)
self.RD = RD
self.outdir = outdir
self.refs = refs
def run(self):
try:
outfile, outstring = self.RD.write_reg_file(outdir, refs=self.refs)
counts = self.RD.cloudset().count
if np.std(counts) > 15:
outstr = "\n".join(
[
"wave: {}, beads: {}".format(channel, counts[i])
for i, channel in enumerate(self.RD.waves)
]
)
self.warning.emit(
"Suspicious Registration Result",
"Warning: there was a large variation in the number "
"of beads per channel. Auto-detection may have failed. "
"Try changing 'Min number of beads'...\n\n" + outstr,
)
except RegistrationError as e:
raise err.RegistrationError("Fiducial registration failed:", str(e))
# also write to appdir ... may use it later
# TODO: consider making app_dir a global APP attribute,
# like gpulist
from click import get_app_dir
appdir = get_app_dir("LLSpy")
if not os.path.isdir(appdir):
os.mkdir(appdir)
regdir = os.path.join(appdir, "regfiles")
if not os.path.isdir(regdir):
os.mkdir(regdir)
outfile2 = os.path.join(regdir, os.path.basename(outfile))
with open(outfile2, "w") as file:
file.write(outstring)
logger.debug("registration file output: {}".format(outfile))
logger.debug("registration file output: {}".format(outfile2))
self.finished.emit(outfile)
def finishup(outfile):
self.statusBar.showMessage(
"Registration file written: {}".format(outfile), 5000
)
self.loadRegistrationFile(outfile)
def notifyuser(title, msg):
QtW.QMessageBox.warning(self, title, msg, QtW.QMessageBox.Ok)
self.regthreads = []
regthread = RegThread(RD, outdir, refs)
regthread.finished.connect(finishup)
regthread.warning.connect(notifyuser)
self.regthreads.append(regthread)
self.statusBar.showMessage(
"Calculating registrations for ref channels: {}...".format(refs)
)
regthread.start()
# TODO: this is mostly duplicate functionality of loadRegObject below
def loadRegistrationFile(self, file=None):
if not file:
file = QtW.QFileDialog.getOpenFileName(
self,
"Choose registration file ",
os.path.expanduser("~"),
"Text Files (*.reg *.txt *.json)",
)[0]
        if file is None or file == "":
return
try:
with open(file) as json_data:
regdict = json.load(json_data)
refs = sorted(list(set([t["reference"] for t in regdict["tforms"]])))
# mov = set([t['moving'] for t in regdict['tforms']])
modes = ["None"]
modes.extend(
sorted(
list(
set(
[
t["mode"].title().replace("Cpd", "CPD")
for t in regdict["tforms"]
]
)
)
)
)
self.RegCalib_channelRefCombo.clear()
self.RegCalib_channelRefCombo.addItems([str(r) for r in refs])
self.RegCalib_channelRefModeCombo.clear()
self.RegCalib_channelRefModeCombo.addItems(modes)
self.RegFilePath.setText(file)
except json.decoder.JSONDecodeError as e:
raise err.RegistrationError("Failed to parse registration file", str(e))
except Exception as e:
raise err.RegistrationError("Failed to load registration file", str(e))
def previewRegistration(self):
RD = llspy.RegDir(self.RegCalibPathLineEdit.text())
if not RD.isValid:
raise err.RegistrationError(
"Registration Calibration dir not valid. Please check Fiducial Data path above."
)
if not self.RegFilePath.text():
QtW.QMessageBox.warning(
self,
"Must load registration file!",
"No registration file!\n\nPlease click load, "
"and load a registration file. Or use the "
"generate button to generate and load a new one.",
QtW.QMessageBox.Ok,
QtW.QMessageBox.NoButton,
)
return
@QtCore.pyqtSlot(np.ndarray, float, float, dict)
def displayRegPreview(array, dx=None, dz=None, params=None):
win = ImgDialog(
array,
info=params,
title="Registration Mode: {} -- RefWave: {}".format(
opts["regMode"], opts["regRefWave"]
),
)
win.overlayButton.click()
win.maxProjButton.click()
self.spimwins.append(win)
self.previewButton.setDisabled(True)
self.previewButton.setText("Working...")
try:
opts = self.getValidatedOptions()
except Exception:
self.previewButton.setEnabled(True)
self.previewButton.setText("Preview")
raise
opts["regMode"] = self.RegCalib_channelRefModeCombo.currentText()
if opts["regMode"].lower() == "none":
opts["doReg"] = False
else:
opts["doReg"] = True
opts["regRefWave"] = int(self.RegCalib_channelRefCombo.currentText())
opts["regCalibPath"] = self.RegFilePath.text()
opts["correctFlash"] = False
opts["medianFilter"] = False
opts["trimZ"] = (0, 0)
opts["trimY"] = (0, 0)
opts["trimX"] = (0, 0)
opts["nIters"] = 0
w, thread = newWorkerThread(
workers.TimePointWorker,
RD,
[0],
None,
opts,
workerConnect={"previewReady": displayRegPreview},
start=True,
)
w.finished.connect(lambda: self.previewButton.setEnabled(True))
w.finished.connect(lambda: self.previewButton.setText("Preview"))
self.previewthreads = (w, thread)
class main_GUI(QtW.QMainWindow, Ui_Main_GUI, RegistrationTab):
"""docstring for main_GUI"""
sig_abort_LLSworkers = QtCore.pyqtSignal()
sig_item_finished = QtCore.pyqtSignal()
sig_processing_done = QtCore.pyqtSignal()
def __init__(self, parent=None):
super(main_GUI, self).__init__(parent)
self.setupUi(self) # method inherited from form_class to init UI
self.setWindowTitle("LLSpy :: Lattice Light Sheet Processing")
self.setObjectName("main_GUI")
RegistrationTab.__init__(self)
self.LLSItemThreads = []
self.compressionThreads = []
self.argQueue = [] # holds all argument lists that will be sent to threads
self.aborted = False # current abort status
self.inProcess = False
self.observer = None # for watching the watchdir
self.activeWatchers = {}
self.spimwins = []
# delete and reintroduce custom LLSDragDropTable
self.listbox.setParent(None)
self.listbox = LLSDragDropTable(self)
self.process_tab_layout.insertWidget(0, self.listbox)
handler = NotificationHandler()
handler.emitSignal.connect(self.log.append)
logger.addHandler(handler)
self.camcorDialog = CamCalibDialog()
self.genFlashParams.clicked.connect(self.camcorDialog.show)
self.actionCamera_Calibration.triggered.connect(self.camcorDialog.show)
# connect buttons
self.previewButton.clicked.connect(self.onPreview)
self.processButton.clicked.connect(self.onProcess)
self.errorOptOutCheckBox.stateChanged.connect(self.toggleOptOut)
self.useBundledBinariesCheckBox.stateChanged.connect(self.checkBundled)
def toggleActiveGPU(val):
            gpunum = int(self.sender().objectName().replace("useGPU_", ""))
app = QtCore.QCoreApplication.instance()
if not hasattr(app, "gpuset"):
app.gpuset = set()
if val:
app.gpuset.add(gpunum)
logger.debug("GPU {} added to gpuset.".format(gpunum))
else:
if gpunum in app.gpuset:
app.gpuset.remove(gpunum)
logger.debug("GPU {} removed from gpuset.".format(gpunum))
logger.debug("GPUset now: {}".format(app.gpuset))
# add GPU checkboxes and add
try:
app = QtCore.QCoreApplication.instance()
if not hasattr(app, "gpuset"):
app.gpuset = set()
gpulist = llspy.cudabinwrapper.gpulist()
if len(gpulist):
for i, gpu in enumerate(gpulist):
box = QtW.QCheckBox(self.tab_config)
box.setChecked(True)
box.setObjectName("useGPU_{}".format(i))
box.setText(gpu.strip("GeForce"))
box.stateChanged.connect(toggleActiveGPU)
app.gpuset.add(i)
self.gpuGroupBoxLayout.addWidget(box)
else:
label = QtW.QLabel(self.tab_config)
                label.setText("No CUDA-capable GPUs detected")
self.gpuGroupBoxLayout.addWidget(label)
except llspy.cudabinwrapper.CUDAbinException as e:
            logger.warning(e)
pass
self.watchDirToolButton.clicked.connect(self.changeWatchDir)
self.watchDirCheckBox.stateChanged.connect(
lambda st: self.startWatcher() if st else self.stopWatcher()
)
# connect actions
self.actionReveal.triggered.connect(self.revealSelected)
self.actionMerge_MIPs_from_folder.triggered.connect(self.mergeMIPtool)
self.actionOpen_LLSdir.triggered.connect(self.openLLSdir)
self.actionRun.triggered.connect(self.onProcess)
self.actionAbort.triggered.connect(self.abort_workers)
self.actionClose_All_Previews.triggered.connect(self.close_all_previews)
self.actionPreview.triggered.connect(self.onPreview)
self.actionSave_Settings_as_Default.triggered.connect(self.saveCurrentAsDefault)
self.actionLoad_Default_Settings.triggered.connect(self.loadDefaultSettings)
self.actionReduce_to_Raw.triggered.connect(self.reduceSelected)
self.actionFreeze.triggered.connect(self.freezeSelected)
self.actionCompress_Folder.triggered.connect(self.compressSelected)
self.actionDecompress_Folder.triggered.connect(self.decompressSelected)
self.actionConcatenate.triggered.connect(self.concatenateSelected)
self.actionRename_Scripted.triggered.connect(self.renameSelected)
self.actionUndo_Rename_Iters.triggered.connect(self.undoRenameSelected)
self.actionAbout_LLSpy.triggered.connect(self.showAboutWindow)
self.actionHelp.triggered.connect(self.showHelpWindow)
# set validators for cRange and tRange fields
        ctrangeRX = QtCore.QRegExp(r"(\d[\d-]*,?)*")  # could be better
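        # accepts comma-separated values and ranges (e.g. "0,2-5,7") for the
        # channel/time range fields parsed by string_to_iterable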
ctrangeValidator = QtGui.QRegExpValidator(ctrangeRX)
self.processCRangeLineEdit.setValidator(ctrangeValidator)
self.processTRangeLineEdit.setValidator(ctrangeValidator)
self.previewCRangeLineEdit.setValidator(ctrangeValidator)
self.previewTRangeLineEdit.setValidator(ctrangeValidator)
# FIXME: this way of doing it clears the text field if you hit cancel
self.RegProcessPathPushButton.clicked.connect(self.setFiducialData)
self.RegProcessLoadRegFile.clicked.connect(self.loadProcessRegFile)
self.cudaDeconvPathToolButton.clicked.connect(self.setCudaDeconvPath)
self.otfFolderToolButton.clicked.connect(self.setOTFdirPath)
self.camParamTiffToolButton.clicked.connect(self.setCamParamPath)
self.RegProcessPathLineEdit.setText("")
self.RegProcessPathLineEdit.textChanged.connect(self.loadRegObject)
self.filenamePatternLineEdit.textChanged.connect(self.set_fname_pattern)
self.disableSpimagineCheckBox.clicked.connect(
lambda: QtW.QMessageBox.information(
self,
"Restart Required",
"Please quit and restart LLSpy for changes to take effect",
QtW.QMessageBox.Ok,
)
)
# connect worker signals and slots
self.sig_item_finished.connect(self.on_item_finished)
self.sig_processing_done.connect(self.on_proc_finished)
self.RegCalib_channelRefModeCombo.clear()
self.RegCalib_channelRefCombo.clear()
# Restore settings from previous session and show ready status
guirestore(self, sessionSettings, programDefaults)
self.availableCompression = []
# get compression options
for ctype in ["lbzip2", "bzip2", "pbzip2", "pigz", "gzip"]:
if llspy.util.which(ctype) is not None:
self.availableCompression.append(ctype)
self.compressTypeCombo.addItems(self.availableCompression)
if not self.availableCompression:
self.compressTypeCombo.clear()
self.compressTypeCombo.setDisabled(True)
self.compressRawCheckBox.setChecked(False)
self.compressRawCheckBox.setDisabled(True)
self.compressRawCheckBox.setText("no compression binaries found")
self.RegCalibPathLineEdit.setText("")
self.RegFilePath.setText("")
self.clock.display("00:00:00")
self.statusBar.showMessage("Ready")
# TODO: reenable when feature is ready
self.watchModeServerRadio.setChecked(True)
self.watchModeAcquisitionRadio.setDisabled(True)
if not ActiveWatcher:
self.watchModeGroupBox.setVisible(False)
self.watchModeLabel.setVisible(False)
self.watchDirCheckBox.setVisible(False)
self.watchDirLineEdit.setVisible(False)
self.watchDirToolButton.setVisible(False)
self.watcherStatus = QtW.QLabel()
self.statusBar.insertPermanentWidget(0, self.watcherStatus)
if not _SPIMAGINE_IMPORTED:
if _napari:
self.prevBackendNapariRadio.setChecked(True)
else:
self.prevBackendMatplotlibRadio.setChecked(True)
self.prevBackendSpimagineRadio.setDisabled(True)
self.prevBackendSpimagineRadio.setText("spimagine [unavailable]")
if not _napari:
if _SPIMAGINE_IMPORTED:
self.prevBackendSpimagineRadio.setChecked(True)
else:
self.prevBackendMatplotlibRadio.setChecked(True)
self.prevBackendNapariRadio.setDisabled(True)
self.prevBackendNapariRadio.setText("napari [unavailable]")
self.show()
self.raise_()
if self.watchDirCheckBox.isChecked():
self.startWatcher()
@QtCore.pyqtSlot()
def set_fname_pattern(self):
llspy.llsdir.__FPATTERN__ = self.filenamePatternLineEdit.text() + "{}"
@QtCore.pyqtSlot()
def startWatcher(self):
self.watchdir = self.watchDirLineEdit.text()
if osp.isdir(self.watchdir):
logger.info("Starting watcher on {}".format(self.watchdir))
# TODO: check to see if we need to save watchHandler
self.watcherStatus.setText("👁 {}".format(osp.basename(self.watchdir)))
watchHandler = MainHandler()
watchHandler.foundLLSdir.connect(self.on_watcher_found_item)
watchHandler.lostListItem.connect(self.listbox.removePath)
self.observer = Observer()
self.observer.schedule(watchHandler, self.watchdir, recursive=True)
self.observer.start()
@QtCore.pyqtSlot()
def stopWatcher(self):
if self.observer is not None and self.observer.is_alive():
self.observer.stop()
self.observer.join()
self.observer = None
            logger.info("Stopped watcher on {}".format(self.watchdir))
self.watchdir = None
if not self.observer:
self.watcherStatus.setText("")
for watcher in self.activeWatchers.values():
watcher.terminate()
@QtCore.pyqtSlot(str)
def on_watcher_found_item(self, path):
if self.watchModeAcquisitionRadio.isChecked():
# assume more files are coming (like during live acquisition)
activeWatcher = ActiveWatcher(path)
activeWatcher.finished.connect(activeWatcher.deleteLater)
activeWatcher.status_update.connect(self.statusBar.showMessage)
self.activeWatchers[path] = activeWatcher
elif self.watchModeServerRadio.isChecked():
# assumes folders are completely finished when dropped
self.listbox.addPath(path)
self.onProcess()
@QtCore.pyqtSlot()
def changeWatchDir(self):
self.watchDirLineEdit.setText(
QtW.QFileDialog.getExistingDirectory(
self,
"Choose directory to monitor for new LLSdirs",
"",
QtW.QFileDialog.ShowDirsOnly,
)
)
if self.watchDirCheckBox.isChecked():
self.stopWatcher()
self.startWatcher()
@QtCore.pyqtSlot(str)
def loadRegObject(self, path):
if path in (None, ""):
return
if not os.path.exists(path):
self.RegProcessPathLineEdit.setText("")
return
try:
RO = llspy.llsdir.get_regObj(path)
except json.decoder.JSONDecodeError as e:
self.RegProcessPathLineEdit.setText("")
raise err.RegistrationError("Failed to parse registration file", str(e))
except RegistrationError as e:
self.RegProcessPathLineEdit.setText("")
raise err.RegistrationError(
"Failed to load registration calibration data", str(e)
)
finally:
self.RegProcessChannelRefModeCombo.clear()
self.RegProcessChannelRefCombo.clear()
self.RegProcessChannelRefCombo.addItems([str(r) for r in RO.waves])
modeorder = [
"2step",
"translation",
"rigid",
"similarity",
"affine",
"cpd_affine",
"cpd_rigid",
"cpd_similarity",
"cpd_2step",
]
# RegDirs allow all modes, RegFiles only allow modes that were calculated
# at the time of file creation
if isinstance(RO, llspy.RegDir):
modes = [m.title().replace("Cpd", "CPD") for m in modeorder]
elif isinstance(RO, RegFile):
modes = [m.lower() for m in RO.modes]
modes = [m.title().replace("Cpd", "CPD") for m in modeorder if m in modes]
self.RegProcessChannelRefModeCombo.addItems(modes)
def setFiducialData(self):
path = QtW.QFileDialog.getExistingDirectory(
self,
"Set Registration Calibration Directory",
"",
QtW.QFileDialog.ShowDirsOnly,
)
if path is None or path == "":
return
else:
self.RegProcessPathLineEdit.setText(path)
def loadProcessRegFile(self, file=None):
if not file:
file = QtW.QFileDialog.getOpenFileName(
self,
"Choose registration file ",
os.path.expanduser("~"),
"Text Files (*.reg *.txt *.json)",
)[0]
        if file is None or file == "":
return
self.RegProcessPathLineEdit.setText(file)
def saveCurrentAsDefault(self):
if len(defaultSettings.childKeys()):
reply = QtW.QMessageBox.question(
self,
"Save Settings",
"Overwrite existing default GUI settings?",
QtW.QMessageBox.Yes | QtW.QMessageBox.No,
QtW.QMessageBox.No,
)
if reply != QtW.QMessageBox.Yes:
return
guisave(self, defaultSettings)
def loadProgramDefaults(self):
guirestore(self, QtCore.QSettings(), programDefaults)
def loadDefaultSettings(self):
if not len(defaultSettings.childKeys()):
reply = QtW.QMessageBox.information(
self,
"Load Settings",
"Default settings have not yet been saved. Use Save Settings",
)
if reply != QtW.QMessageBox.Yes:
return
guirestore(self, defaultSettings, programDefaults)
def openLLSdir(self):
qUrl = QtW.QFileDialog.getExistingDirectoryUrl(
self, "Choose LLSdir to add to list", options=QtW.QFileDialog.ShowDirsOnly
)
if qUrl.path() is not None:
self.listbox.addPath(qUrl.path())
def incrementProgress(self):
# with no values, simply increment progressbar
self.progressBar.setValue(self.progressBar.value() + 1)
def onPreview(self):
self.previewButton.setDisabled(True)
self.previewButton.setText("Working...")
if self.listbox.rowCount() == 0:
QtW.QMessageBox.warning(
self,
"Nothing Added!",
"Nothing to preview! Drop LLS experiment folders into the list",
QtW.QMessageBox.Ok,
QtW.QMessageBox.NoButton,
)
self.previewButton.setEnabled(True)
self.previewButton.setText("Preview")
return
# if there's only one item on the list show it
if self.listbox.rowCount() == 1:
firstRowSelected = 0
# otherwise, prompt the user to select one
else:
selectedRows = self.listbox.selectionModel().selectedRows()
if not len(selectedRows):
QtW.QMessageBox.warning(
self,
"Nothing Selected!",
"Please select an item (row) from the table to preview",
QtW.QMessageBox.Ok,
QtW.QMessageBox.NoButton,
)
self.previewButton.setEnabled(True)
self.previewButton.setText("Preview")
return
else:
# if they select multiple, chose the first one
firstRowSelected = selectedRows[0].row()
procTRangetext = self.previewTRangeLineEdit.text()
procCRangetext = self.previewCRangeLineEdit.text()
try:
self.lastopts = self.getValidatedOptions()
except Exception:
self.previewButton.setEnabled(True)
self.previewButton.setText("Preview")
raise
if procTRangetext:
tRange = string_to_iterable(procTRangetext)
else:
tRange = [0]
if procCRangetext:
cRange = string_to_iterable(procCRangetext)
if self.lastopts["correctFlash"] and sessionSettings.value(
"warnCameraCorPreview", True, type=bool
):
box = QtW.QMessageBox()
box.setWindowTitle("Note")
box.setText(
"You have selected to preview a subset of channels, but "
"have also selected Flash camera correction. Note that the camera "
"correction requires all channels to be enabled. Preview will not "
"reflect accurate camera correction."
)
box.setIcon(QtW.QMessageBox.Warning)
box.addButton(QtW.QMessageBox.Ok)
box.setDefaultButton(QtW.QMessageBox.Ok)
pref = QtW.QCheckBox("Don't remind me.")
box.setCheckBox(pref)
def dontRemind(value):
if value:
sessionSettings.setValue("warnCameraCorPreview", False)
else:
sessionSettings.setValue("warnCameraCorPreview", True)
sessionSettings.sync()
pref.stateChanged.connect(dontRemind)
box.exec_()
else:
cRange = None # means all channels
self.previewPath = self.listbox.getPathByIndex(firstRowSelected)
obj = self.listbox.getLLSObjectByPath(self.previewPath)
if not obj.parameters.isReady():
self.previewButton.setEnabled(True)
self.previewButton.setText("Preview")
raise err.InvalidSettingsError(
"Parameters are incomplete for this item. "
"Please add any missing/higlighted parameters."
)
if not os.path.exists(self.previewPath):
self.statusBar.showMessage(
"Skipping! path no longer exists: {}".format(self.previewPath), 5000
)
self.listbox.removePath(self.previewPath)
self.previewButton.setEnabled(True)
self.previewButton.setText("Preview")
return
w, thread = newWorkerThread(
workers.TimePointWorker,
obj,
tRange,
cRange,
self.lastopts,
workerConnect={
"previewReady": self.displayPreview,
"updateCrop": self.updateCrop,
},
start=True,
)
w.finished.connect(lambda: self.previewButton.setEnabled(True))
w.finished.connect(lambda: self.previewButton.setText("Preview"))
self.previewthreads = (w, thread)
@QtCore.pyqtSlot(int, int)
def updateCrop(self, width, offset):
self.cropWidthSpinBox.setValue(width)
self.cropShiftSpinBox.setValue(offset)
@QtCore.pyqtSlot(np.ndarray, float, float, dict)
def displayPreview(self, array, dx, dz, params=None):
if self.prevBackendNapariRadio.isChecked() and _napari:
cmaps = ("green", "magenta", "cyan", "red", "gray")
viewer = _napari.Viewer()
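            # scale z by dz/dx so the volume displays with (approximately) isotropic voxels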
_scale = (dz / dx, 1, 1)
            if len(params.get("cRange") or []) > 1:
viewer.add_image(
array.copy(),
channel_axis=-4,
colormap=cmaps,
name=[str(n) for n in params.get("wavelength")],
scale=_scale,
is_pyramid=False,
)
else:
viewer.add_image(
array.copy(), scale=_scale, blending="additive", colormap="gray",
)
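            # start at the middle plane of the first axis and switch to 3D display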
viewer.dims.set_point(0, viewer.dims.range[0][1] // 2)
viewer.dims.ndisplay = 3
self.spimwins.append(viewer)
elif self.prevBackendSpimagineRadio.isChecked() and _SPIMAGINE_IMPORTED:
if np.squeeze(array).ndim > 4:
arrays = [array[:, i] for i in range(array.shape[1])]
else:
arrays = [np.squeeze(array)]
for arr in arrays:
datamax = arr.max()
datamin = arr.min()
dataRange = datamax - datamin
vmin_init = datamin - dataRange * 0.02
vmax_init = datamax * 0.75
win = spimagineWidget()
win.setAttribute(QtCore.Qt.WA_DeleteOnClose)
win.setModel(DataModel(NumpyData(arr)))
win.setWindowTitle(shortname(self.previewPath))
win.transform.setStackUnits(dx, dx, dz)
win.transform.setGamma(0.9)
win.transform.setMax(vmax_init)
win.transform.setMin(vmin_init)
win.transform.setZoom(1.3)
# enable slice view by default
win.sliceWidget.checkSlice.setCheckState(2)
win.sliceWidget.glSliceWidget.interp = False
win.checkSliceView.setChecked(True)
win.sliceWidget.sliderSlice.setValue(int(arr.shape[-3] / 2))
# win.impListView.add_image_processor(myImp())
# win.impListView.add_image_processor(imageprocessor.LucyRichProcessor())
win.setLoopBounce(False)
win.settingsView.playInterval.setText("100")
win.resize(1500, 900)
win.show()
win.raise_()
# mainwidget doesn't know what order the colormaps are in
colormaps = win.volSettingsView.colormaps
win.volSettingsView.colorCombo.setCurrentIndex(
colormaps.index("inferno")
)
win.sliceWidget.glSliceWidget.set_colormap("grays")
# could have it rotating by default
# win.rotate()
self.spimwins.append(win)
else:
# FIXME: pyplot should not be imported in pyqt
# use https://matplotlib.org/2.0.0/api/backend_qt5agg_api.html
win = ImgDialog(array, info=params, title=shortname(self.previewPath))
self.spimwins.append(win)
@QtCore.pyqtSlot()
def close_all_previews(self):
if hasattr(self, "spimwins"):
for win in self.spimwins:
try:
win.closeMe()
except Exception:
try:
win.close()
except Exception:
pass
self.spimwins = []
def onProcess(self):
        # prevent additional button clicks while processing
self.disableProcessButton()
self.listbox.skipped_items = set()
if self.listbox.rowCount() == 0:
QtW.QMessageBox.warning(
self,
"Nothing Added!",
"Nothing to process! Drag and drop folders into the list",
QtW.QMessageBox.Ok,
QtW.QMessageBox.NoButton,
)
self.enableProcessButton()
return
        # store current options for this processing run. TODO: Unnecessary?
try:
self.optionsOnProcessClick = self.getValidatedOptions()
op = self.optionsOnProcessClick
if not (
op["nIters"]
or (op["keepCorrected"] and (op["correctFlash"] or op["medianFilter"]))
or op["saveDeskewedRaw"]
or op["doReg"]
):
self.show_error_window(
"Nothing done!",
"Nothing done!",
"No deconvolution, deskewing, image correction, "
"or registration performed. Check GUI options.",
"",
)
except Exception:
self.enableProcessButton()
raise
if not self.inProcess: # for now, only one item allowed processing at a time
self.inProcess = True
self.process_next_item()
else:
logger.warning("Ignoring request to process, already processing...")
def process_next_item(self):
# get path from first row and create a new LLSdir object
numskipped = len(self.listbox.skipped_items)
self.currentItem = self.listbox.item(numskipped, 1).text()
self.currentPath = self.listbox.getPathByIndex(numskipped)
obj = self.listbox.getLLSObjectByPath(self.currentPath)
def skip():
self.listbox.removePath(self.currentPath)
self.look_for_next_item()
return
if not os.path.exists(self.currentPath):
msg = "Skipping! path no longer exists: {}".format(self.currentPath)
logger.info(msg)
self.statusBar.showMessage(msg, 5000)
skip()
return
idx = 0 # might use this later to spawn more threads
opts = self.optionsOnProcessClick
# check if already processed
if llspy.util.pathHasPattern(
self.currentPath, "*" + llspy.config.__OUTPUTLOG__
):
if not opts["reprocess"]:
msg = "Skipping! Path already processed: {}".format(self.currentPath)
logger.info(msg)
self.statusBar.showMessage(msg, 5000)
skip()
return
if not len(QtCore.QCoreApplication.instance().gpuset):
self.on_proc_finished()
raise err.InvalidSettingsError("No GPUs selected. Check Config Tab")
self.statusBar.showMessage(
"Starting processing on {} ...".format(shortname(self.currentPath))
)
LLSworker, thread = newWorkerThread(
workers.LLSitemWorker,
obj,
idx,
opts,
workerConnect={
"finished": self.on_item_finished,
"status_update": self.statusBar.showMessage,
"progressMaxVal": self.progressBar.setMaximum,
"progressValue": self.progressBar.setValue,
"progressUp": self.incrementProgress,
"clockUpdate": self.clock.display,
"error": self.abort_workers,
"skipped": self.skip_item,
# 'error': self.errorstring # implement error signal?
},
)
self.LLSItemThreads.append((thread, LLSworker))
# connect mainGUI abort LLSworker signal to the new LLSworker
self.sig_abort_LLSworkers.connect(LLSworker.abort)
# prepare and start LLSworker:
# thread.started.connect(LLSworker.work)
thread.start() # this will emit 'started' and start thread's event loop
# recolor the first row to indicate processing
self.listbox.setRowBackgroudColor(numskipped, QtGui.QColor(0, 0, 255, 30))
self.listbox.clearSelection()
# start a timer in the main GUI to measure item processing time
self.timer = QtCore.QTime()
self.timer.restart()
def disableProcessButton(self):
        # turn Process button into a Cancel button and update menu items
self.processButton.clicked.disconnect()
self.processButton.setText("CANCEL")
self.processButton.clicked.connect(self.abort_workers)
self.processButton.setEnabled(True)
self.actionRun.setDisabled(True)
self.actionAbort.setEnabled(True)
def enableProcessButton(self):
        # change Process button back to "Process" and update menu items
self.processButton.clicked.disconnect()
self.processButton.clicked.connect(self.onProcess)
self.processButton.setText("Process")
self.actionRun.setEnabled(True)
self.actionAbort.setDisabled(True)
self.inProcess = False
@QtCore.pyqtSlot()
def on_proc_finished(self):
# reinit statusbar and clock
self.statusBar.showMessage("Ready")
self.clock.display("00:00:00")
self.inProcess = False
self.aborted = False
logger.info("Processing Finished")
self.enableProcessButton()
@QtCore.pyqtSlot()
def on_item_finished(self):
if len(self.LLSItemThreads):
thread, worker = self.LLSItemThreads.pop(0)
thread.quit()
thread.wait()
self.clock.display("00:00:00")
self.progressBar.setValue(0)
if self.aborted:
self.sig_processing_done.emit()
else:
try:
itemTime = QtCore.QTime(0, 0).addMSecs(self.timer.elapsed()).toString()
logger.info(
">" * 4
+ " Item {} finished in {} ".format(self.currentItem, itemTime)
+ "<" * 4
)
except AttributeError:
pass
self.listbox.removePath(self.currentPath)
self.currentPath = None
self.currentItem = None
self.look_for_next_item()
@QtCore.pyqtSlot(str)
def skip_item(self, path):
if len(self.LLSItemThreads):
thread, worker = self.LLSItemThreads.pop(0)
thread.quit()
thread.wait()
self.listbox.setRowBackgroudColor(len(self.listbox.skipped_items), "#FFFFFF")
self.listbox.skipped_items.add(path)
self.look_for_next_item()
@QtCore.pyqtSlot()
def abort_workers(self):
self.statusBar.showMessage("Aborting ...")
logger.info("Message sent to abort ...")
if len(self.LLSItemThreads):
self.aborted = True
self.sig_abort_LLSworkers.emit()
for row in range(self.listbox.rowCount()):
self.listbox.setRowBackgroudColor(row, "#FFFFFF")
# self.processButton.setDisabled(True) # will be reenabled when workers done
else:
self.sig_processing_done.emit()
def look_for_next_item(self):
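        # skipped items stay in the table (top rows), so any rows beyond the
        # skipped set are still waiting to be processed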
        if self.listbox.rowCount() > len(self.listbox.skipped_items):
            self.process_next_item()
        else:
            self.sig_processing_done.emit()
            for row in range(self.listbox.rowCount()):
                self.listbox.setRowBackgroudColor(row, "#FFFFFF")
def getValidatedOptions(self):
options = {
"correctFlash": self.camcorCheckBox.isChecked(),
"flashCorrectTarget": self.camcorTargetCombo.currentText(),
"medianFilter": self.medianFilterCheckBox.isChecked(),
"keepCorrected": self.saveCamCorrectedCheckBox.isChecked(),
"trimZ": (self.trimZ0SpinBox.value(), self.trimZ1SpinBox.value()),
"trimY": (self.trimY0SpinBox.value(), self.trimY1SpinBox.value()),
"trimX": (self.trimX0SpinBox.value(), self.trimX1SpinBox.value()),
"nIters": self.iterationsSpinBox.value()
if self.doDeconGroupBox.isChecked()
else 0,
"napodize": self.apodizeSpinBox.value(),
"nzblend": self.zblendSpinBox.value(),
# if bRotate == True and rotateAngle is not none: rotate based on sheet angle
# this will be done in the LLSdir function
"bRotate": self.rotateGroupBox.isChecked(),
"rotateRev": self.rotateReverseCheckBox.isChecked(),
"rotate": (
self.rotateOverrideSpinBox.value()
if self.rotateOverrideCheckBox.isChecked()
else None
),
"saveDeskewedRaw": self.saveDeskewedCheckBox.isChecked(),
# 'bsaveDecon': self.saveDeconvolvedCheckBox.isChecked(),
"MIP": tuple(
[
int(i)
for i in (
self.deconXMIPCheckBox.isChecked(),
self.deconYMIPCheckBox.isChecked(),
self.deconZMIPCheckBox.isChecked(),
)
]
),
"rMIP": tuple(
[
int(i)
for i in (
self.deskewedXMIPCheckBox.isChecked(),
self.deskewedYMIPCheckBox.isChecked(),
self.deskewedZMIPCheckBox.isChecked(),
)
]
),
"mergeMIPs": self.deconJoinMIPCheckBox.isChecked(),
# 'mergeMIPsraw': self.deskewedJoinMIPCheckBox.isChecked(),
"uint16": ("16" in self.deconvolvedBitDepthCombo.currentText()),
"uint16raw": ("16" in self.deskewedBitDepthCombo.currentText()),
"bleachCorrection": self.bleachCorrectionCheckBox.isChecked(),
"doReg": self.doRegistrationGroupBox.isChecked(),
"deleteUnregistered": self.discardUnregisteredCheckBox.isChecked(),
"regMode": (
self.RegProcessChannelRefModeCombo.currentText()
if self.RegProcessChannelRefModeCombo.currentText()
else "none"
),
"otfDir": self.otfFolderLineEdit.text()
if self.otfFolderLineEdit.text() != ""
else None,
"compressRaw": self.compressRawCheckBox.isChecked(),
"compressionType": self.compressTypeCombo.currentText(),
"reprocess": self.reprocessCheckBox.isChecked(),
"width": self.cropWidthSpinBox.value(),
"shift": self.cropShiftSpinBox.value(),
"cropPad": self.autocropPadSpinBox.value(),
"background": (
-1
if self.backgroundAutoRadio.isChecked()
else self.backgroundFixedSpinBox.value()
),
"padval": self.padValSpinBox.value(),
# 'FlatStart': self.flatStartCheckBox.isChecked(),
"dupRevStack": self.dupRevStackCheckBox.isChecked(),
"lzw": self.useLZWCheckBox.isChecked(),
# 'bRollingBall': self.backgroundRollingRadio.isChecked(),
# 'rollingBall': self.backgroundRollingSpinBox.value()
}
if options["nIters"] > 0 and not options["otfDir"]:
raise err.InvalidSettingsError(
"Deconvolution requested but no OTF available", "Check OTF path"
)
# otherwise a cudaDeconv error occurs... could FIXME in cudadeconv
if not options["saveDeskewedRaw"]:
options["rMIP"] = (0, 0, 0)
if options["correctFlash"]:
options["camparamsPath"] = self.camParamTiffLineEdit.text()
if not osp.isfile(options["camparamsPath"]):
raise err.InvalidSettingsError(
"Flash pixel correction requested, but camera parameters file "
"not provided.",
"Check CamParam Tiff path.\n\n"
"For information on how to generate this file for your camera,"
" see documentation at llspy.readthedocs.io",
)
else:
options["camparamsPath"] = None
rCalibText = self.RegProcessPathLineEdit.text()
        if rCalibText and rCalibText != "":
options["regCalibPath"] = rCalibText
else:
options["regCalibPath"] = None
if not self.RegProcessChannelRefCombo.currentText():
options["regRefWave"] = 0
else:
text = self.RegProcessChannelRefCombo.currentText()
if text.isdigit():
options["regRefWave"] = int(text)
else:
if options["doReg"]:
self.show_error_window(
"Problem with channel registration settings!",
"Registration Error",
"Channel registration was selected, "
"but the selected reference wavelength does not seem to be a "
"number. This may be an issue with filenaming convention. "
"Please read docs regarding data structure assumptions.",
)
else:
options["regRefWave"] = 0
if options["doReg"] and options["regCalibPath"] in (None, ""):
raise err.InvalidSettingsError(
"Registration requested, but calibration object not provided.",
"In the post-processing section, click Use RegFile to load a "
"previously generated registration file "
"or click Use Dataset to use a folder of fiducials. Registraion "
"files can be generated on the registration tab.",
)
if options["doReg"]:
ro = llspy.llsdir.get_regObj(options["regCalibPath"])
if not ro or not ro.isValid:
raise err.InvalidSettingsError(
"Registration requested, but calibration path does not point to"
" either a valid registration file or a fiducial marker dataset. "
"Check registration settings, or default registration folder in "
"config tab."
)
if self.croppingGroupBox.isChecked():
if self.cropAutoRadio.isChecked():
options["cropMode"] = "auto"
elif self.cropManualRadio.isChecked():
options["cropMode"] = "manual"
else:
options["cropMode"] = "none"
procCRangetext = self.processCRangeLineEdit.text()
if procCRangetext:
options["cRange"] = string_to_iterable(procCRangetext)
else:
options["cRange"] = None
procTRangetext = self.processTRangeLineEdit.text()
if procTRangetext:
options["tRange"] = string_to_iterable(procTRangetext)
else:
options["tRange"] = None
return options
def reduceSelected(self):
for item in self.listbox.selectedPaths():
llspy.LLSdir(item).reduce_to_raw(
keepmip=self.saveMIPsDuringReduceCheckBox.isChecked()
)
def freezeSelected(self):
for item in self.listbox.selectedPaths():
llspy.LLSdir(item).reduce_to_raw(
keepmip=self.saveMIPsDuringReduceCheckBox.isChecked()
)
self.compressItem(item)
def compressSelected(self):
[self.compressItem(item) for item in self.listbox.selectedPaths()]
def compressItem(self, item):
def has_tiff(path):
for f in os.listdir(path):
if f.endswith(".tif"):
return True
return False
# figure out what type of folder this is
if not has_tiff(item):
self.statusBar.showMessage(
"No tiffs to compress in " + shortname(item), 4000
)
return
worker, thread = newWorkerThread(
workers.CompressionWorker,
item,
"compress",
self.compressTypeCombo.currentText(),
workerConnect={
"status_update": self.statusBar.showMessage,
"finished": lambda: self.statusBar.showMessage(
"Compression finished", 4000
),
},
start=True,
)
self.compressionThreads.append((worker, thread))
def decompressSelected(self):
for item in self.listbox.selectedPaths():
if not llspy.util.find_filepattern(item, "*.tar*"):
self.statusBar.showMessage(
"No .tar file found in " + shortname(item), 4000
)
continue
            # bind the current loop variable as a default argument so each callback
            # refers to its own item rather than the last one iterated
            def onfinish(item=item):
                self.listbox.llsObjects[item]._register_tiffs()
                self.statusBar.showMessage("Decompression finished", 4000)
worker, thread = newWorkerThread(
workers.CompressionWorker,
item,
"decompress",
self.compressTypeCombo.currentText(),
workerConnect={
"status_update": self.statusBar.showMessage,
"finished": onfinish,
},
start=True,
)
self.compressionThreads.append((worker, thread))
def revealSelected(self):
selectedPaths = self.listbox.selectedPaths()
if len(selectedPaths):
for p in selectedPaths:
if os.path.exists(p):
reveal(p)
def concatenateSelected(self):
selectedPaths = self.listbox.selectedPaths()
if len(selectedPaths) > 1:
llspy.llsdir.concatenate_folders(selectedPaths)
[self.listbox.removePath(p) for p in selectedPaths]
[self.listbox.addPath(p) for p in selectedPaths]
def undoRenameSelected(self):
box = QtW.QMessageBox()
box.setWindowTitle("Undo Renaming")
box.setText(
"Do you want to undo all renaming that has occured in this session?, or chose a directory?"
)
box.setIcon(QtW.QMessageBox.Question)
box.addButton(QtW.QMessageBox.Cancel)
box.addButton("Undo Everything", QtW.QMessageBox.YesRole)
box.addButton("Choose Specific Directory", QtW.QMessageBox.ActionRole)
box.setDefaultButton(QtW.QMessageBox.Cancel)
reply = box.exec_()
if reply > 1000: # cancel hit
return
elif reply == 1: # action role hit
path = QtW.QFileDialog.getExistingDirectory(
self,
"Choose Directory to Undo",
os.path.expanduser("~"),
QtW.QFileDialog.ShowDirsOnly,
)
if path:
paths = [path]
else:
paths = []
elif reply == 0: # yes role hit
if (
not hasattr(self.listbox, "renamedPaths")
or not self.listbox.renamedPaths
):
return
paths = self.listbox.renamedPaths
for P in paths:
for root, subd, file in os.walk(P):
self.listbox.removePath(root)
for d in subd:
self.listbox.removePath(os.path.join(root, d))
llspy.llsdir.undo_rename_iters(P)
self.listbox.renamedPaths = []
def renameSelected(self):
if not hasattr(self.listbox, "renamedPaths"):
self.listbox.renamedPaths = []
for item in self.listbox.selectedPaths():
llspy.llsdir.rename_iters(item)
self.listbox.renamedPaths.append(item)
self.listbox.removePath(item)
[self.listbox.addPath(osp.join(item, p)) for p in os.listdir(item)]
def mergeMIPtool(self):
if len(self.listbox.selectedPaths()):
for obj in self.listbox.selectedObjects():
obj.mergemips()
else:
path = QtW.QFileDialog.getExistingDirectory(
self,
"Choose Directory with MIPs to merge",
os.path.expanduser("~"),
QtW.QFileDialog.ShowDirsOnly,
)
if path:
for axis in ["z", "y", "x"]:
llspy.llsdir.mergemips(path, axis, dx=0.102, delete=True)
def toggleOptOut(self, value):
        err._OPTOUT = bool(value)
def checkBundled(self, value):
if value:
try:
self.setBinaryPath(llspy.cudabinwrapper.get_bundled_binary())
except llspy.cudabinwrapper.CUDAbinException:
raise err.MissingBinaryError(
"Could not load bundled cudaDeconv. Check that it is installed. read docs"
)
else:
self.setBinaryPath(self.cudaDeconvPathLineEdit.text())
version = llspy.cudabinwrapper.get_version() or ""
if "error" in version.lower():
version = "NOT FOUND! is this an LLSpy cudaDeconv?\n"
logger.info("cudaDeconv version: {}".format(version))
def setBinaryPath(self, path):
workers._CUDABIN = path
logger.info("Using cudaDeconv binary: {}".format(workers._CUDABIN))
@QtCore.pyqtSlot()
def setCudaDeconvPath(self, path=None):
if not path:
path = QtW.QFileDialog.getOpenFileName(
self, "Choose cudaDeconv Binary", "/usr/local/bin/"
)[0]
if path:
if llspy.cudabinwrapper.is_cudaDeconv(path):
self.cudaDeconvPathLineEdit.setText(path)
if self.useBundledBinariesCheckBox.isChecked():
self.setBinaryPath(self.cudaDeconvPathLineEdit.text())
else:
QtW.QMessageBox.critical(
self,
"Invalid File",
"That file does not appear to be a valid cudaDeconv exectuable",
QtW.QMessageBox.Ok,
)
@QtCore.pyqtSlot()
def setOTFdirPath(self, path=None):
if not path:
path = QtW.QFileDialog.getExistingDirectory(
self,
"Choose OTF Directory",
os.path.expanduser("~"),
QtW.QFileDialog.ShowDirsOnly,
)
if path:
if llspy.otf.dir_has_otfs(path):
self.otfFolderLineEdit.setText(path)
else:
QtW.QMessageBox.warning(
self,
"Invalid OTF Directory",
"That folder does not appear to contain any OTF or PSF tif files",
QtW.QMessageBox.Ok,
)
@QtCore.pyqtSlot()
def setCamParamPath(self, path=None):
if not path:
path = QtW.QFileDialog.getOpenFileName(
self,
"Choose camera parameters tiff",
os.path.expanduser("~"),
"Image Files (*.tif *.tiff)",
)[0]
if path:
if llspy.camera.seemsValidCamParams(path):
self.camParamTiffLineEdit.setText(path)
else:
QtW.QMessageBox.critical(
self,
"Invalid File",
"That file does not appear to be a valid camera parameters tiff. "
"It must have >= 3 planes. See llspy.readthedocs.io for details.",
QtW.QMessageBox.Ok,
)
@QtCore.pyqtSlot(str, str, str, str)
def show_error_window(self, errMsg, title=None, info=None, detail=None):
self.msgBox = QtW.QMessageBox()
        if title is None or title == "":
title = "LLSpy Error"
self.msgBox.setWindowTitle(title)
# self.msgBox.setTextFormat(QtCore.Qt.RichText)
self.msgBox.setIcon(QtW.QMessageBox.Warning)
self.msgBox.setText(errMsg)
        if info is not None and info != "":
            self.msgBox.setInformativeText(info + "\n")
        if detail is not None and detail != "":
self.msgBox.setDetailedText(detail)
self.msgBox.exec_()
def showAboutWindow(self):
import datetime
now = datetime.datetime.now()
QtW.QMessageBox.about(
self,
"LLSpy",
"""LLSpy v.{}\n
Copyright © {}, President and Fellows of Harvard College. All rights reserved.\n\n
Developed by <NAME>\n\n
The cudaDeconv deconvolution program was written by <NAME> and by <NAME> at Janelia Research Campus, and modified by Talley Lambert for LLSpy. """.format(
llspy.__version__, now.year
),
)
def showHelpWindow(self):
QtW.QMessageBox.about(
self, "LLSpy", "Please see documentation at llspy.readthedocs.io"
)
def closeEvent(self, event):
""" triggered when close button is clicked on main window """
if self.listbox.rowCount() and self.confirmOnQuitCheckBox.isChecked():
box = QtW.QMessageBox()
box.setWindowTitle("Unprocessed items!")
box.setText("You have unprocessed items. Are you sure you want to quit?")
box.setIcon(QtW.QMessageBox.Warning)
box.addButton(QtW.QMessageBox.Yes)
box.addButton(QtW.QMessageBox.No)
box.setDefaultButton(QtW.QMessageBox.Yes)
pref = QtW.QCheckBox("Always quit without confirmation")
box.setCheckBox(pref)
pref.stateChanged.connect(
lambda value: self.confirmOnQuitCheckBox.setChecked(False)
if value
else self.confirmOnQuitCheckBox.setChecked(True)
)
reply = box.exec_()
# reply = box.question(self, 'Unprocessed items!',
# "You have unprocessed items. Are you sure you want to quit?",
# QtW.QMessageBox.Yes | QtW.QMessageBox.No,
# QtW.QMessageBox.Yes)
if reply != QtW.QMessageBox.Yes:
event.ignore()
return
# if currently processing, need to shut down threads...
if self.inProcess:
self.abort_workers()
self.sig_processing_done.connect(self.quitProgram)
else:
self.quitProgram()
def quitProgram(self, save=True):
if save:
guisave(self, sessionSettings)
sessionSettings.setValue("cleanExit", True)
sessionSettings.sync()
QtW.QApplication.quit()
if __name__ == "__main__":
print("main function moved to llspy.bin.llspy_gui")
|
xugangwu95/stellargraph
|
demos/link-prediction/random-walks/utils/cl_arguments_parser.py
|
<reponame>xugangwu95/stellargraph
# -*- coding: utf-8 -*-
#
# Copyright 2017-2018 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
def parse_args():
"""
Parses the command line arguments.
    Returns:
        An argparse.Namespace containing the parsed command line arguments.
"""
parser = argparse.ArgumentParser(
description="Run link prediction on homogeneous and heterogeneous graphs."
)
parser.add_argument(
"--dataset_name",
nargs="?",
default="cora",
help="The dataset name as stored in graphs.json",
)
parser.add_argument(
"--metapaths",
nargs="?",
default="",
help="The metapaths to use for random walks in the metapath2vec algorithm on heterogeneous networks",
)
parser.add_argument(
"--p",
nargs="?",
default=0.1,
help="Percent of edges to sample for positive and negative examples (valid values 0 < p < 1)",
)
parser.add_argument(
"--subgraph_size",
nargs="?",
default=0.1,
help="Percent of nodes for a subgraph of the input data when --subsample is specified (valid values 0 < subgraph_size < 1)",
)
parser.add_argument(
"--edge_type", nargs="?", default="friend", help="The edge type to predict"
)
parser.add_argument(
"--edge_attribute_label",
nargs="?",
default="date",
help="The attribute label by which to split edges",
)
parser.add_argument(
"--edge_attribute_threshold",
nargs="?",
default=None,
help="Any edge with attribute value less that the threshold cannot be removed from graph",
)
parser.add_argument(
"--attribute_is_datetime",
dest="attribute_is_datetime",
action="store_true",
help="If specified, the edge attribute to split on is considered datetime in format dd/mm/yyyy",
)
parser.add_argument(
"--hin",
dest="hin",
action="store_true",
help="If specified, it indicates that the input graph in a heterogenous network; otherwise, the input graph is assumed homogeneous",
)
parser.add_argument(
"--input_graph",
nargs="?",
default="~/Projects/data/cora/cora.epgm/",
help="Input graph filename",
)
parser.add_argument(
"--output_node_features",
nargs="?",
default="~/Projects/data/cora/cora.features/cora.emb",
help="Input graph filename",
)
parser.add_argument(
"--sampling_method",
nargs="?",
default="global",
help="Negative edge sampling method: local or global",
)
parser.add_argument(
"--sampling_probs",
nargs="?",
default="0.0, 0.25, 0.50, 0.25",
help="Negative edge sample probabilities (for local sampling method) with respect to distance from starting node",
)
parser.add_argument(
"--show_hist",
dest="show_histograms",
action="store_true",
help="If specified, a histogram of the distances between source and target nodes for \
negative edge samples will be plotted.",
)
parser.add_argument(
"--subsample",
dest="subsample_graph",
action="store_true",
help="If specified, then the original graph is randomly subsampled to 10% of the original size, \
with respect to the number of nodes",
)
return parser.parse_args()
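# A minimal usage sketch (not part of the original module): parse the arguments defined above and
# print a few of them. Run e.g. `python cl_arguments_parser.py --dataset_name cora`.
if __name__ == "__main__":
    args = parse_args()
    print("dataset:", args.dataset_name)
    print("input graph:", args.input_graph)
    print("negative edge sampling:", args.sampling_method, args.sampling_probs)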
|
xugangwu95/stellargraph
|
tests/mapper/test_directed_node_generator.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2019 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import pytest
import networkx as nx
import pandas as pd
from stellargraph.mapper import DirectedGraphSAGENodeGenerator
from stellargraph.core.graph import StellarGraph, StellarDiGraph
def create_simple_graph():
"""
Creates a simple directed graph for testing. The node ids are integers.
Returns:
A small, directed graph with 3 nodes and 2 edges in StellarDiGraph format.
"""
g = nx.DiGraph()
edges = [(1, 2), (2, 3)]
g.add_edges_from(edges)
nodes = list(g.nodes())
features = [(node, -1.0 * node) for node in nodes]
df = pd.DataFrame(features, columns=["id", "f0"]).set_index("id")
return StellarDiGraph(g, node_features=df)
class TestDirectedNodeGenerator(object):
"""
    Test various aspects of the directed GraphSAGE node generator, with the focus
on the sampled neighbourhoods and the extracted features.
"""
def sample_one_hop(self, num_in_samples, num_out_samples):
g = create_simple_graph()
nodes = list(g.nodes())
in_samples = [num_in_samples]
out_samples = [num_out_samples]
gen = DirectedGraphSAGENodeGenerator(g, len(g), in_samples, out_samples)
flow = gen.flow(node_ids=nodes, shuffle=False)
# Obtain tree of sampled features
features = gen.sample_features(nodes)
num_hops = len(in_samples)
tree_len = 2 ** (num_hops + 1) - 1
assert len(features) == tree_len
# Check node features
node_features = features[0]
assert len(node_features) == len(nodes)
assert node_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
assert node_features[idx, 0, 0] == -1.0 * node
# Check in-node features
in_features = features[1]
assert in_features.shape == (len(nodes), in_samples[0], 1)
for n_idx in range(in_samples[0]):
for idx, node in enumerate(nodes):
if node == 1:
# None -> 1
assert in_features[idx, n_idx, 0] == 0.0
elif node == 2:
# 1 -> 2
assert in_features[idx, n_idx, 0] == -1.0
elif node == 3:
# 2 -> 3
assert in_features[idx, n_idx, 0] == -2.0
else:
assert False
# Check out-node features
out_features = features[2]
assert out_features.shape == (len(nodes), out_samples[0], 1)
for n_idx in range(out_samples[0]):
for idx, node in enumerate(nodes):
if node == 1:
# 1 -> 2
assert out_features[idx, n_idx, 0] == -2.0
elif node == 2:
# 2 -> 3
assert out_features[idx, n_idx, 0] == -3.0
elif node == 3:
# 3 -> None
assert out_features[idx, n_idx, 0] == 0.0
else:
assert False
def test_one_hop(self):
# Test 1 in-node and 1 out-node sampling
self.sample_one_hop(1, 1)
# Test 0 in-nodes and 1 out-node sampling
self.sample_one_hop(0, 1)
# Test 1 in-node and 0 out-nodes sampling
self.sample_one_hop(1, 0)
# Test 0 in-nodes and 0 out-nodes sampling
self.sample_one_hop(0, 0)
# Test 2 in-nodes and 3 out-nodes sampling
self.sample_one_hop(2, 3)
def test_two_hop(self):
g = create_simple_graph()
nodes = list(g.nodes())
gen = DirectedGraphSAGENodeGenerator(
g, batch_size=len(g), in_samples=[1, 1], out_samples=[1, 1]
)
flow = gen.flow(node_ids=nodes, shuffle=False)
features = gen.sample_features(nodes)
num_hops = 2
tree_len = 2 ** (num_hops + 1) - 1
assert len(features) == tree_len
# Check node features
node_features = features[0]
assert len(node_features) == len(nodes)
assert node_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
assert node_features[idx, 0, 0] == -1.0 * node
# Check in-node features
in_features = features[1]
assert in_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
if node == 1:
# *None -> 1
assert in_features[idx, 0, 0] == 0.0
elif node == 2:
# *1 -> 2
assert in_features[idx, 0, 0] == -1.0
elif node == 3:
# *2 -> 3
assert in_features[idx, 0, 0] == -2.0
else:
assert False
# Check out-node features
out_features = features[2]
assert out_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
if node == 1:
# 1 -> *2
assert out_features[idx, 0, 0] == -2.0
elif node == 2:
# 2 -> *3
assert out_features[idx, 0, 0] == -3.0
elif node == 3:
# 3 -> *None
assert out_features[idx, 0, 0] == 0.0
else:
assert False
# Check in-in-node features
in_features = features[3]
assert in_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
if node == 1:
# *None -> None -> 1
assert in_features[idx, 0, 0] == 0.0
elif node == 2:
# *None -> 1 -> 2
assert in_features[idx, 0, 0] == 0.0
elif node == 3:
# *1 -> 2 -> 3
assert in_features[idx, 0, 0] == -1.0
else:
assert False
# Check in-out-node features
in_features = features[4]
assert in_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
if node == 1:
# *None <- None -> 1
assert in_features[idx, 0, 0] == 0.0
elif node == 2:
# *2 <- 1 -> 2
assert in_features[idx, 0, 0] == -2.0
elif node == 3:
# *3 <- 2 -> 3
assert in_features[idx, 0, 0] == -3.0
else:
assert False
# Check out-in-node features
out_features = features[5]
assert out_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
if node == 1:
# 1 -> 2 <- *1
assert out_features[idx, 0, 0] == -1.0
elif node == 2:
# 2 -> 3 <- *2
assert out_features[idx, 0, 0] == -2.0
elif node == 3:
# 3 -> None <- *None
assert out_features[idx, 0, 0] == 0.0
else:
assert False
# Check out-out-node features
out_features = features[6]
assert out_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
if node == 1:
# 1 -> 2 -> *3
assert out_features[idx, 0, 0] == -3.0
elif node == 2:
# 2 -> 3 -> *None
assert out_features[idx, 0, 0] == 0.0
elif node == 3:
# 3 -> None -> *None
assert out_features[idx, 0, 0] == 0.0
else:
assert False
|
xugangwu95/stellargraph
|
stellargraph/version.py
|
<reponame>xugangwu95/stellargraph
# Global version information
__version__ = "0.9.0b"
|
xugangwu95/stellargraph
|
tests/core/test_utils.py
|
<reponame>xugangwu95/stellargraph
# -*- coding: utf-8 -*-
#
# Copyright 2018-2019 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utils tests:
"""
import pytest
import random
import networkx as nx
import numpy as np
import scipy as sp
from stellargraph.core.utils import *
from stellargraph.core.graph import *
def example_graph(feature_size=None, n_edges=20, n_nodes=6, n_isolates=1):
G = nx.Graph()
n_noniso = n_nodes - n_isolates
edges = [
(random.randint(0, n_noniso - 1), random.randint(0, n_noniso - 1))
for _ in range(n_edges)
]
G.add_nodes_from(range(n_nodes))
G.add_edges_from(edges, label="default")
# Add example features
if feature_size is not None:
for v in G.nodes():
G.nodes[v]["feature"] = int(v) * np.ones(feature_size, dtype="int")
return StellarGraph(G, node_features="feature")
else:
return StellarGraph(G)
@pytest.fixture(scope="session", autouse=True)
def beforeall():
G = example_graph(feature_size=4, n_nodes=6, n_isolates=1, n_edges=20)
pytest.G = G
def test_normalize_adj():
node_list = list(pytest.G.nodes())
Aadj = nx.adjacency_matrix(pytest.G, nodelist=node_list)
csr = normalize_adj(Aadj)
dense = csr.todense()
assert 5 == pytest.approx(dense.sum(), 0.1)
assert csr.get_shape() == Aadj.get_shape()
csr = normalize_adj(Aadj, symmetric=False)
dense = csr.todense()
assert 5 == pytest.approx(dense.sum(), 0.1)
assert csr.get_shape() == Aadj.get_shape()
def test_normalized_laplacian():
node_list = list(pytest.G.nodes())
Aadj = nx.adjacency_matrix(pytest.G, nodelist=node_list)
laplacian = normalized_laplacian(Aadj)
assert 1 == pytest.approx(laplacian.sum(), 0.2)
assert laplacian.get_shape() == Aadj.get_shape()
laplacian = normalized_laplacian(Aadj, symmetric=False)
assert 1 == pytest.approx(laplacian.sum(), 0.2)
assert laplacian.get_shape() == Aadj.get_shape()
def test_rescale_laplacian():
node_list = list(pytest.G.nodes())
Aadj = nx.adjacency_matrix(pytest.G, nodelist=node_list)
rl = rescale_laplacian(normalized_laplacian(Aadj))
assert rl.max() < 1
assert rl.get_shape() == Aadj.get_shape()
def test_chebyshev_polynomial():
node_list = list(pytest.G.nodes())
Aadj = nx.adjacency_matrix(pytest.G, nodelist=node_list)
k = 2
cp = chebyshev_polynomial(rescale_laplacian(normalized_laplacian(Aadj)), k)
assert len(cp) == k + 1
assert np.array_equal(cp[0].todense(), sp.eye(Aadj.shape[0]).todense())
assert cp[1].max() < 1
assert 5 == pytest.approx(cp[2].todense()[:5, :5].sum(), 0.1)
def test_GCN_Aadj_feats_op():
node_list = list(pytest.G.nodes())
Aadj = nx.adjacency_matrix(pytest.G, nodelist=node_list)
features = pytest.G.get_feature_for_nodes(node_list)
features_, Aadj_ = GCN_Aadj_feats_op(features=features, A=Aadj, method="gcn")
assert np.array_equal(features, features_)
assert 6 == pytest.approx(Aadj_.todense().sum(), 0.1)
features_, Aadj_ = GCN_Aadj_feats_op(
features=features, A=Aadj, method="chebyshev", k=2
)
assert len(features_) == 4
    assert np.array_equal(features_[0], features)
assert np.array_equal(features_[1].todense(), sp.eye(Aadj.shape[0]).todense())
assert features_[2].max() < 1
assert 5 == pytest.approx(features_[3].todense()[:5, :5].sum(), 0.1)
assert Aadj.get_shape() == Aadj_.get_shape()
    # k must be an integer greater than or equal to 2
with pytest.raises(ValueError):
GCN_Aadj_feats_op(features=features, A=Aadj, method="chebyshev", k=1)
with pytest.raises(ValueError):
GCN_Aadj_feats_op(features=features, A=Aadj, method="chebyshev", k=2.0)
with pytest.raises(ValueError):
GCN_Aadj_feats_op(features=features, A=Aadj, method="chebyshev", k=None)
    # k must be a positive integer
with pytest.raises(ValueError):
GCN_Aadj_feats_op(features=features, A=Aadj, method="sgc", k=None)
with pytest.raises(ValueError):
GCN_Aadj_feats_op(features=features, A=Aadj, method="sgc", k=0)
with pytest.raises(ValueError):
GCN_Aadj_feats_op(features=features, A=Aadj, method="sgc", k=-191)
with pytest.raises(ValueError):
GCN_Aadj_feats_op(features=features, A=Aadj, method="sgc", k=2.0)
features_, Aadj_ = GCN_Aadj_feats_op(features=features, A=Aadj, method="sgc", k=2)
assert len(features_) == 6
assert np.array_equal(features, features_)
assert Aadj.get_shape() == Aadj_.get_shape()
# Check if the power of the normalised adjacency matrix is calculated correctly.
# First retrieve the normalised adjacency matrix using localpool filter.
features_, Aadj_norm = GCN_Aadj_feats_op(features=features, A=Aadj, method="gcn")
Aadj_norm = Aadj_norm.todense()
Aadj_power_2 = np.linalg.matrix_power(Aadj_norm, 2) # raise it to the power of 2
# Both matrices should have the same shape
assert Aadj_power_2.shape == Aadj_.get_shape()
# and the same values.
assert pytest.approx(Aadj_power_2) == Aadj_.todense()
|
xugangwu95/stellargraph
|
stellargraph/globalvar.py
|
# This file contains global attributes used throughout stellargraph
FEATURE_ATTR_NAME = "feature"
TARGET_ATTR_NAME = "target"
TYPE_ATTR_NAME = "label"
UNKNOWN_TARGET_ATTRIBUTE = "-1"
NODE_TYPE_DEFAULT = "default"
EDGE_TYPE_DEFAULT = "default"
|
xugangwu95/stellargraph
|
tests/layer/test_ppnp.py
|
<reponame>xugangwu95/stellargraph
from stellargraph.layer import PPNP
from stellargraph.mapper import FullBatchNodeGenerator
from stellargraph import StellarGraph
from stellargraph.core.utils import PPNP_Aadj_feats_op
import networkx as nx
import pandas as pd
import numpy as np
from tensorflow import keras
import pytest
def create_graph_features():
G = nx.Graph()
G.add_nodes_from(["a", "b", "c"])
G.add_edges_from([("a", "b"), ("b", "c"), ("a", "c")])
G = G.to_undirected()
return G, np.array([[1, 1], [1, 0], [0, 1]])
def test_PPNP_edge_cases():
G, features = create_graph_features()
adj = nx.to_scipy_sparse_matrix(G)
features, adj = PPNP_Aadj_feats_op(features, adj)
nodes = G.nodes()
node_features = pd.DataFrame.from_dict(
{n: f for n, f in zip(nodes, features)}, orient="index"
)
G = StellarGraph(G, node_features=node_features)
ppnp_sparse_failed = False
try:
generator = FullBatchNodeGenerator(G, sparse=True, method="ppnp")
except ValueError as e:
ppnp_sparse_failed = True
assert ppnp_sparse_failed
generator = FullBatchNodeGenerator(G, sparse=False, method="ppnp")
try:
ppnpModel = PPNP([2, 2], ["relu"], generator=generator, dropout=0.5)
except ValueError as e:
error = e
assert str(error) == "The number of layers should equal the number of activations"
try:
ppnpModel = PPNP([2], ["relu"], generator=[0, 1], dropout=0.5)
except TypeError as e:
error = e
assert str(error) == "Generator should be a instance of FullBatchNodeGenerator"
def test_PPNP_apply_dense():
G, features = create_graph_features()
adj = nx.to_scipy_sparse_matrix(G)
features, adj = PPNP_Aadj_feats_op(features, adj)
adj = adj[None, :, :]
nodes = G.nodes()
node_features = pd.DataFrame.from_dict(
{n: f for n, f in zip(nodes, features)}, orient="index"
)
G = StellarGraph(G, node_features=node_features)
generator = FullBatchNodeGenerator(G, sparse=False, method="ppnp")
ppnpModel = PPNP([2], ["relu"], generator=generator, dropout=0.5)
x_in, x_out = ppnpModel.node_model()
model = keras.Model(inputs=x_in, outputs=x_out)
# Check fit method
out_indices = np.array([[0, 1]], dtype="int32")
preds_1 = model.predict([features[None, :, :], out_indices, adj])
assert preds_1.shape == (1, 2, 2)
# Check fit_generator method
preds_2 = model.predict_generator(generator.flow(["a", "b"]))
assert preds_2.shape == (1, 2, 2)
assert preds_1 == pytest.approx(preds_2)
|
xugangwu95/stellargraph
|
tests/mapper/test_link_mappers.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mapper tests:
GraphSAGELinkMapper(
G: nx.Graph,
ids: List[Any],
link_labels: List[Any] or np.ndarray,
batch_size: int,
num_samples: List[int],
feature_size: Optional[int] = None,
name: AnyStr = None,
)
"""
import numpy as np
import networkx as nx
import pytest
import random
from stellargraph.mapper import *
from stellargraph.core.graph import *
from stellargraph.data.explorer import *
from stellargraph.data.unsupervised_sampler import *
def example_Graph_1(feature_size=None):
G = nx.Graph()
elist = [(1, 2), (2, 3), (1, 4), (3, 2)]
G.add_edges_from(elist)
# Add example features
if feature_size is not None:
for v in G.nodes():
G.nodes[v]["feature"] = np.ones(feature_size)
G = StellarGraph(G, node_features="feature")
return G
def example_DiGraph_1(feature_size=None):
G = nx.DiGraph()
elist = [(1, 2), (2, 3), (1, 4), (3, 2)]
G.add_edges_from(elist)
# Add example features
if feature_size is not None:
for v in G.nodes():
G.nodes[v]["feature"] = np.ones(feature_size)
G = StellarGraph(G, node_features="feature")
return G
def example_Graph_2(feature_size=None):
G = nx.Graph()
elist = [(1, 2), (2, 3), (1, 4), (4, 2)]
G.add_edges_from(elist)
# Add example features
if feature_size is not None:
for v in G.nodes():
G.nodes[v]["feature"] = int(v) * np.ones(feature_size)
G = StellarGraph(G, node_features="feature")
return G
def example_HIN_1(feature_size_by_type=None):
G = nx.Graph()
G.add_nodes_from([0, 1, 2, 3], label="movie")
G.add_nodes_from([4, 5], label="user")
G.add_edges_from([(0, 4), (1, 4), (1, 5), (2, 4), (3, 5)], label="rating")
G.add_edges_from([(4, 5)], label="friend")
# Add example features
if feature_size_by_type is not None:
for v, vdata in G.nodes(data=True):
nt = vdata["label"]
vdata["feature"] = int(v) * np.ones(feature_size_by_type[nt], dtype="int")
G = StellarGraph(G, node_features="feature")
return G
def example_HIN_homo(feature_size_by_type=None):
G = nx.Graph()
G.add_nodes_from([0, 1, 2, 3, 4, 5], label="user")
G.add_edges_from([(0, 4), (1, 4), (1, 5), (2, 4), (3, 5)], label="friend")
# Add example features
if feature_size_by_type is not None:
for v, vdata in G.nodes(data=True):
nt = vdata["label"]
vdata["feature"] = int(v) * np.ones(feature_size_by_type[nt], dtype="int")
G = StellarGraph(G, node_features="feature")
return G
def example_graph_random(feature_size=None, n_edges=20, n_nodes=6, n_isolates=1):
"""
Create random homogeneous graph
Args:
feature_size: Size of features for each node
n_edges: Number of edges
n_nodes: Number of nodes
n_isolates: Number of isolated nodes
Returns:
StellarGraph object
"""
connected = False
while not connected:
G = nx.Graph()
n_noniso = n_nodes - n_isolates
edges = [
(random.randint(0, n_noniso - 1), random.randint(0, n_noniso - 1))
for _ in range(n_edges)
]
G.add_nodes_from(range(n_nodes))
G.add_edges_from(edges, label="default")
# Check connectivity
connected = nx.is_connected(G.subgraph(range(n_noniso)))
# Add example features
if feature_size is not None:
for v in G.nodes():
G.nodes[v]["feature"] = int(v) * np.ones(feature_size, dtype="int")
return StellarGraph(G, node_features="feature")
else:
return StellarGraph(G)
def example_hin_random(
feature_size_by_type=None, nodes_by_type={}, n_isolates_by_type={}, edges_by_type={}
):
"""
Create random heterogeneous graph
Args:
feature_size_by_type: Dict of node types to feature size
nodes_by_type: Dict of node types to number of nodes
n_isolates_by_type: Dict of node types to number of isolates
edges_by_type: Dict of edge types to number of edges
Returns:
StellarGraph
Dictionary of node type to node labels
"""
check_isolates = False
while not check_isolates:
G = nx.Graph()
node_dict = {}
for nt in nodes_by_type:
nodes = ["{}_{}".format(nt, ii) for ii in range(nodes_by_type[nt])]
node_dict[nt] = nodes
G.add_nodes_from(nodes, label=nt)
for nt1, nt2 in edges_by_type:
nodes1 = node_dict[nt1]
nodes2 = node_dict[nt2]
niso1 = n_isolates_by_type.get(nt1, 0)
niso2 = n_isolates_by_type.get(nt2, 0)
nodes1 = nodes1[:-niso1] if niso1 > 0 else nodes1
            nodes2 = nodes2[:-niso2] if niso2 > 0 else nodes2
edges = [
(random.choice(nodes1), random.choice(nodes2))
for _ in range(edges_by_type[(nt1, nt2)])
]
G.add_edges_from(edges, label="{}_{}".format(nt1, nt2))
check_isolates = all(
sum(deg[1] == 0 for deg in nx.degree(G, nodes)) == n_isolates_by_type[nt]
for nt, nodes in node_dict.items()
)
# Add example features
if feature_size_by_type is not None:
nt_jj = 0
for nt, nodes in node_dict.items():
for ii, n in enumerate(nodes):
G.nodes[n]["feature"] = (ii + 10 * nt_jj) * np.ones(
feature_size_by_type[nt], dtype="int"
)
nt_jj += 1
G = StellarGraph(G, node_features="feature")
else:
G = StellarGraph(G)
return G, node_dict
class Test_GraphSAGELinkGenerator:
"""
Tests of GraphSAGELinkGenerator class
"""
n_feat = 4
batch_size = 2
num_samples = [2, 2]
def test_LinkMapper_constructor(self):
G = example_Graph_1(self.n_feat)
edge_labels = [0] * G.number_of_edges()
generator = GraphSAGELinkGenerator(
G, batch_size=self.batch_size, num_samples=self.num_samples
)
mapper = generator.flow(G.edges(), edge_labels)
assert generator.batch_size == self.batch_size
assert mapper.data_size == G.number_of_edges()
assert len(mapper.ids) == G.number_of_edges()
G = example_DiGraph_1(self.n_feat)
edge_labels = [0] * G.number_of_edges()
generator = GraphSAGELinkGenerator(
G, batch_size=self.batch_size, num_samples=self.num_samples
)
mapper = generator.flow(G.edges(), edge_labels)
assert generator.batch_size == self.batch_size
assert mapper.data_size == G.number_of_edges()
assert len(mapper.ids) == G.number_of_edges()
def test_GraphSAGELinkGenerator_1(self):
G = example_Graph_2(self.n_feat)
data_size = G.number_of_edges()
edge_labels = [0] * data_size
mapper = GraphSAGELinkGenerator(
G, batch_size=self.batch_size, num_samples=self.num_samples
).flow(G.edges(), edge_labels)
assert len(mapper) == 2
for batch in range(len(mapper)):
nf, nl = mapper[batch]
assert len(nf) == 3 * 2
for ii in range(2):
assert nf[ii].shape == (min(self.batch_size, data_size), 1, self.n_feat)
assert nf[ii + 2].shape == (
min(self.batch_size, data_size),
2,
self.n_feat,
)
assert nf[ii + 2 * 2].shape == (
min(self.batch_size, data_size),
2 * 2,
self.n_feat,
)
assert len(nl) == min(self.batch_size, data_size)
assert all(nl == 0)
with pytest.raises(IndexError):
nf, nl = mapper[2]
def test_GraphSAGELinkGenerator_shuffle(self):
def test_edge_consistency(shuffle):
G = example_Graph_2(1)
edges = list(G.edges())
edge_labels = list(range(len(edges)))
mapper = GraphSAGELinkGenerator(G, batch_size=2, num_samples=[0]).flow(
edges, edge_labels, shuffle=shuffle
)
assert len(mapper) == 2
for batch in range(len(mapper)):
nf, nl = mapper[batch]
e1 = edges[nl[0]]
e2 = edges[nl[1]]
assert nf[0][0, 0, 0] == e1[0]
assert nf[1][0, 0, 0] == e1[1]
assert nf[0][1, 0, 0] == e2[0]
assert nf[1][1, 0, 0] == e2[1]
test_edge_consistency(True)
test_edge_consistency(False)
# def test_GraphSAGELinkGenerator_2(self):
#
# G = example_Graph_1(self.n_feat)
# data_size = G.number_of_edges()
# edge_labels = [0] * data_size
#
# with pytest.raises(RuntimeWarning):
# GraphSAGELinkGenerator(
# G,
# G.edges(),
# edge_labels,
# batch_size=self.batch_size,
# num_samples=self.num_samples,
# feature_size=2 * self.n_feat,
# )
def test_GraphSAGELinkGenerator_not_Stellargraph(self):
G = nx.Graph()
elist = [(1, 2), (2, 3), (1, 4), (3, 2)]
G.add_edges_from(elist)
# Add example features
for v in G.nodes():
G.nodes[v]["feature"] = np.ones(1)
with pytest.raises(TypeError):
GraphSAGELinkGenerator(
G, batch_size=self.batch_size, num_samples=self.num_samples
)
def test_GraphSAGELinkGenerator_zero_samples(self):
G = example_Graph_1(self.n_feat)
data_size = G.number_of_edges()
edge_labels = [0] * data_size
mapper = GraphSAGELinkGenerator(
G, batch_size=self.batch_size, num_samples=[0]
).flow(G.edges(), edge_labels)
assert len(mapper) == 2
for ii in range(1):
nf, nl = mapper[ii]
assert len(nf) == 2 * 2
for j in range(len(nf)):
if j < self.batch_size:
assert nf[j].shape == (
min(self.batch_size, data_size),
1,
self.n_feat,
)
else:
assert nf[j].shape == (
min(self.batch_size, data_size),
0,
self.n_feat,
)
assert len(nl) == min(self.batch_size, data_size)
assert all(nl == 0)
def test_GraphSAGELinkGenerator_no_samples(self):
"""
The SampledBFS sampler, created inside the mapper, currently throws a ValueError when the num_samples list is empty.
This might change in the future, so this test might have to be re-written.
"""
G = example_Graph_2(self.n_feat)
data_size = G.number_of_edges()
edge_labels = [0] * data_size
mapper = GraphSAGELinkGenerator(
G, batch_size=self.batch_size, num_samples=[]
).flow(G.edges(), edge_labels)
assert len(mapper) == 2
with pytest.raises(ValueError):
nf, nl = mapper[0]
def test_GraphSAGELinkGenerator_no_targets(self):
"""
This tests link generator's iterator for prediction, i.e., without targets provided
"""
G = example_Graph_2(self.n_feat)
gen = GraphSAGELinkGenerator(
G, batch_size=self.batch_size, num_samples=self.num_samples
).flow(G.edges())
for i in range(len(gen)):
assert gen[i][1] is None
def test_GraphSAGELinkGenerator_isolates(self):
"""
Test for handling of isolated nodes
"""
n_feat = 4
n_batch = 2
n_samples = [2, 2]
# test graph
G = example_graph_random(
feature_size=n_feat, n_nodes=6, n_isolates=2, n_edges=10
)
# Check sizes with one isolated node
head_links = [(1, 5)]
gen = GraphSAGELinkGenerator(G, batch_size=n_batch, num_samples=n_samples).flow(
head_links
)
ne, nl = gen[0]
assert pytest.approx([1, 1, 2, 2, 4, 4]) == [x.shape[1] for x in ne]
# Check sizes with two isolated nodes
head_links = [(4, 5)]
gen = GraphSAGELinkGenerator(G, batch_size=n_batch, num_samples=n_samples).flow(
head_links
)
ne, nl = gen[0]
assert pytest.approx([1, 1, 2, 2, 4, 4]) == [x.shape[1] for x in ne]
def test_GraphSAGELinkGenerator_unsupervisedSampler_flow(self):
"""
        This tests the link generator's initialization for on-demand link generation, i.e., no pre-generated list of samples is provided to it.
"""
n_feat = 4
n_batch = 2
n_samples = [2, 2]
# test graph
G = example_graph_random(
feature_size=n_feat, n_nodes=6, n_isolates=2, n_edges=10
)
unsupervisedSamples = UnsupervisedSampler(G, nodes=G.nodes)
gen = GraphSAGELinkGenerator(G, batch_size=n_batch, num_samples=n_samples).flow(
unsupervisedSamples
)
        # The flow method is passed neither an UnsupervisedSampler object nor a list of samples
with pytest.raises(KeyError):
gen = GraphSAGELinkGenerator(
G, batch_size=n_batch, num_samples=n_samples
).flow("not_a_list_of_samples_or_a_sample_generator")
        # The flow method is passed nothing
with pytest.raises(TypeError):
gen = GraphSAGELinkGenerator(
G, batch_size=n_batch, num_samples=n_samples
).flow()
def test_GraphSAGELinkGenerator_unsupervisedSampler_sample_generation(self):
G = example_Graph_2(self.n_feat)
unsupervisedSamples = UnsupervisedSampler(G)
gen = GraphSAGELinkGenerator(
G, batch_size=self.batch_size, num_samples=self.num_samples
)
mapper = gen.flow(unsupervisedSamples)
assert mapper.data_size == 16
assert self.batch_size == 2
assert len(mapper) == 8
assert len(set(gen.head_node_types)) == 1
for batch in range(len(mapper)):
nf, nl = mapper[batch]
assert len(nf) == 3 * 2
for ii in range(2):
assert nf[ii].shape == (
min(self.batch_size, mapper.data_size),
1,
self.n_feat,
)
assert nf[ii + 2].shape == (
min(self.batch_size, mapper.data_size),
2,
self.n_feat,
)
assert nf[ii + 2 * 2].shape == (
min(self.batch_size, mapper.data_size),
2 * 2,
self.n_feat,
)
assert len(nl) == min(self.batch_size, mapper.data_size)
assert sorted(nl) == [0, 1]
with pytest.raises(IndexError):
nf, nl = mapper[8]
class Test_HinSAGELinkGenerator(object):
"""
Tests of HinSAGELinkGenerator class
"""
n_feat = {"user": 5, "movie": 10}
batch_size = 2
num_samples = [2, 3]
def test_HinSAGELinkGenerator_constructor(self):
# Constructor with a homogeneous graph:
G = example_HIN_homo(self.n_feat)
links = [(1, 4), (1, 5), (0, 4), (5, 0)] # ('user', 'user') links
link_labels = [0] * len(links)
gen = HinSAGELinkGenerator(
G,
batch_size=self.batch_size,
num_samples=self.num_samples,
head_node_types=["user", "user"],
)
mapper = gen.flow(links, link_labels)
assert mapper.data_size == len(links)
assert len(mapper.ids) == len(links)
assert tuple(gen.head_node_types) == ("user", "user")
# Constructor with a heterogeneous graph:
G = example_HIN_1(self.n_feat)
links = [(1, 4), (1, 5), (0, 4), (0, 5)] # ('movie', 'user') links
link_labels = [0] * len(links)
gen = HinSAGELinkGenerator(
G,
batch_size=self.batch_size,
num_samples=self.num_samples,
head_node_types=["movie", "user"],
)
mapper = gen.flow(links, link_labels)
assert mapper.data_size == len(links)
assert len(mapper.ids) == len(links)
assert mapper.data_size == len(link_labels)
assert tuple(gen.head_node_types) == ("movie", "user")
def test_HinSAGELinkGenerator_constructor_multiple_link_types(self):
G = example_HIN_1(self.n_feat)
        # first 3 are ('movie', 'user') links, the last is a ('user', 'movie') link.
links = [(1, 4), (1, 5), (0, 4), (5, 0)]
link_labels = [0] * len(links)
with pytest.raises(ValueError):
HinSAGELinkGenerator(
G,
batch_size=self.batch_size,
num_samples=self.num_samples,
head_node_types=["movie", "user"],
).flow(links, link_labels)
# all edges in G, which have multiple link types
links = G.edges()
link_labels = [0] * len(links)
with pytest.raises(ValueError):
HinSAGELinkGenerator(
G,
batch_size=self.batch_size,
num_samples=self.num_samples,
head_node_types=["user", "user"],
).flow(links, link_labels)
def test_HinSAGELinkGenerator_1(self):
G = example_HIN_1(self.n_feat)
links = [(1, 4), (1, 5), (0, 4), (0, 5)] # selected ('movie', 'user') links
data_size = len(links)
link_labels = [0] * data_size
mapper = HinSAGELinkGenerator(
G,
batch_size=self.batch_size,
num_samples=self.num_samples,
head_node_types=["movie", "user"],
).flow(links, link_labels)
assert len(mapper) == 2
for batch in range(len(mapper)):
nf, nl = mapper[batch]
assert len(nf) == 10
assert nf[0].shape == (self.batch_size, 1, self.n_feat["movie"])
assert nf[1].shape == (self.batch_size, 1, self.n_feat["user"])
assert nf[2].shape == (
self.batch_size,
self.num_samples[0],
self.n_feat["user"],
)
assert nf[3].shape == (
self.batch_size,
self.num_samples[0],
self.n_feat["user"],
)
assert nf[4].shape == (
self.batch_size,
self.num_samples[0],
self.n_feat["movie"],
)
assert nf[5].shape == (
self.batch_size,
np.multiply(*self.num_samples),
self.n_feat["user"],
)
assert nf[6].shape == (
self.batch_size,
np.multiply(*self.num_samples),
self.n_feat["movie"],
)
assert nf[7].shape == (
self.batch_size,
np.multiply(*self.num_samples),
self.n_feat["user"],
)
assert nf[8].shape == (
self.batch_size,
np.multiply(*self.num_samples),
self.n_feat["movie"],
)
assert nf[9].shape == (
self.batch_size,
np.multiply(*self.num_samples),
self.n_feat["user"],
)
with pytest.raises(IndexError):
nf, nl = mapper[2]
def test_HinSAGELinkGenerator_shuffle(self):
def test_edge_consistency(shuffle):
G = example_HIN_1({"user": 1, "movie": 1})
edges = [(1, 4), (1, 5), (0, 4), (0, 5)] # selected ('movie', 'user') links
data_size = len(edges)
edge_labels = np.arange(data_size)
mapper = HinSAGELinkGenerator(
G, batch_size=2, num_samples=[0], head_node_types=["movie", "user"]
).flow(edges, edge_labels, shuffle=shuffle)
assert len(mapper) == 2
for batch in range(len(mapper)):
nf, nl = mapper[batch]
e1 = edges[nl[0]]
e2 = edges[nl[1]]
assert nf[0][0, 0, 0] == e1[0]
assert nf[1][0, 0, 0] == e1[1]
assert nf[0][1, 0, 0] == e2[0]
assert nf[1][1, 0, 0] == e2[1]
test_edge_consistency(True)
test_edge_consistency(False)
def test_HinSAGELinkGenerator_no_targets(self):
"""
This tests link generator's iterator for prediction, i.e., without targets provided
"""
G = example_HIN_1(self.n_feat)
links = [(1, 4), (1, 5), (0, 4), (0, 5)] # selected ('movie', 'user') links
data_size = len(links)
gen = HinSAGELinkGenerator(
G,
batch_size=self.batch_size,
num_samples=self.num_samples,
head_node_types=["movie", "user"],
).flow(links)
for i in range(len(gen)):
assert gen[i][1] is None
def test_HinSAGELinkGenerator_isolates(self):
"""
        This tests the link generator's handling of isolated head nodes
"""
n_batch = 2
n_samples = [2, 2]
feature_size_by_type = {"A": 4, "B": 2}
nodes_by_type = {"A": 5, "B": 5}
n_isolates_by_type = {"A": 0, "B": 2}
edges_by_type = {("A", "A"): 5, ("A", "B"): 10}
Gh, hnodes = example_hin_random(
feature_size_by_type, nodes_by_type, n_isolates_by_type, edges_by_type
)
# Non-isolate + isolate
head_links = [(hnodes["A"][0], hnodes["B"][-1])]
gen = HinSAGELinkGenerator(
Gh, batch_size=n_batch, num_samples=n_samples, head_node_types=["A", "B"]
)
flow = gen.flow(head_links)
ne, nl = flow[0]
assert len(gen._sampling_schema[0]) == len(ne)
assert pytest.approx([1, 1, 2, 2, 2, 4, 4, 4, 4, 4]) == [x.shape[1] for x in ne]
# Two isolates
head_links = [(hnodes["B"][-2], hnodes["B"][-1])]
gen = HinSAGELinkGenerator(
Gh, batch_size=n_batch, num_samples=n_samples, head_node_types=["B", "B"]
)
flow = gen.flow(head_links)
ne, nl = flow[0]
assert len(gen._sampling_schema[0]) == len(ne)
assert pytest.approx([1, 1, 2, 2, 4, 4, 4, 4]) == [x.shape[1] for x in ne]
# With two isolates, all features are zero
assert all(pytest.approx(0) == x for x in ne[2:])
class Test_Attri2VecLinkGenerator:
"""
Tests of Attri2VecLinkGenerator class
"""
n_feat = 4
batch_size = 2
def test_LinkMapper_constructor(self):
G = example_Graph_1(self.n_feat)
edge_labels = [0] * G.number_of_edges()
generator = Attri2VecLinkGenerator(G, batch_size=self.batch_size)
mapper = generator.flow(G.edges(), edge_labels)
assert generator.batch_size == self.batch_size
assert mapper.data_size == G.number_of_edges()
assert len(mapper.ids) == G.number_of_edges()
G = example_DiGraph_1(self.n_feat)
edge_labels = [0] * G.number_of_edges()
generator = Attri2VecLinkGenerator(G, batch_size=self.batch_size)
mapper = generator.flow(G.edges(), edge_labels)
assert generator.batch_size == self.batch_size
assert mapper.data_size == G.number_of_edges()
assert len(mapper.ids) == G.number_of_edges()
def test_Attri2VecLinkGenerator_1(self):
G = example_Graph_2(self.n_feat)
data_size = G.number_of_edges()
edge_labels = [0] * data_size
mapper = Attri2VecLinkGenerator(G, batch_size=self.batch_size).flow(
G.edges(), edge_labels
)
assert len(mapper) == 2
for batch in range(len(mapper)):
nf, nl = mapper[batch]
assert len(nf) == 2
assert nf[0].shape == (min(self.batch_size, data_size), self.n_feat)
assert nf[1].shape == (min(self.batch_size, data_size),)
assert len(nl) == min(self.batch_size, data_size)
assert all(nl == 0)
with pytest.raises(IndexError):
nf, nl = mapper[2]
def test_edge_consistency(self):
G = example_Graph_2(1)
edges = list(G.edges())
nodes = list(G.nodes())
edge_labels = list(range(len(edges)))
mapper = Attri2VecLinkGenerator(G, batch_size=2).flow(edges, edge_labels)
assert len(mapper) == 2
for batch in range(len(mapper)):
nf, nl = mapper[batch]
e1 = edges[nl[0]]
e2 = edges[nl[1]]
assert nf[0][0, 0] == e1[0]
assert nf[1][0] == nodes.index(e1[1])
assert nf[0][1, 0] == e2[0]
assert nf[1][1] == nodes.index(e2[1])
def test_Attri2VecLinkGenerator_not_Stellargraph(self):
G = nx.Graph()
elist = [(1, 2), (2, 3), (1, 4), (3, 2)]
G.add_edges_from(elist)
# Add example features
for v in G.nodes():
G.nodes[v]["feature"] = np.ones(1)
with pytest.raises(TypeError):
Attri2VecLinkGenerator(G, batch_size=self.batch_size)
def test_Attri2VecLinkGenerator_no_targets(self):
"""
This tests link generator's iterator for prediction, i.e., without targets provided
"""
G = example_Graph_2(self.n_feat)
gen = Attri2VecLinkGenerator(G, batch_size=self.batch_size).flow(G.edges())
for i in range(len(gen)):
assert gen[i][1] is None
def test_Attri2VecLinkGenerator_unsupervisedSampler_flow(self):
"""
        This tests the link generator's initialization for on-demand link generation, i.e., no pre-generated list of samples is provided to it.
"""
n_feat = 4
n_batch = 2
# test graph
G = example_graph_random(
feature_size=n_feat, n_nodes=6, n_isolates=2, n_edges=10
)
unsupervisedSamples = UnsupervisedSampler(G, nodes=G.nodes)
gen = Attri2VecLinkGenerator(G, batch_size=n_batch).flow(unsupervisedSamples)
        # The flow method is passed neither an UnsupervisedSampler object nor a list of samples
with pytest.raises(KeyError):
gen = Attri2VecLinkGenerator(G, batch_size=n_batch).flow(
"not_a_list_of_samples_or_a_sample_generator"
)
        # The flow method is passed nothing
with pytest.raises(TypeError):
gen = Attri2VecLinkGenerator(G, batch_size=n_batch).flow()
def test_Attri2VecLinkGenerator_unsupervisedSampler_sample_generation(self):
G = example_Graph_2(self.n_feat)
unsupervisedSamples = UnsupervisedSampler(G)
mapper = Attri2VecLinkGenerator(G, batch_size=self.batch_size).flow(
unsupervisedSamples
)
assert mapper.data_size == 16
assert self.batch_size == 2
assert len(mapper) == 8
for batch in range(len(mapper)):
nf, nl = mapper[batch]
assert len(nf) == 2
assert nf[0].shape == (min(self.batch_size, mapper.data_size), self.n_feat)
assert nf[1].shape == (min(self.batch_size, mapper.data_size),)
assert len(nl) == min(self.batch_size, mapper.data_size)
assert sorted(nl) == [0, 1]
with pytest.raises(IndexError):
nf, nl = mapper[8]
|
xugangwu95/stellargraph
|
demos/link-prediction/hinsage/utils.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions for the movielens-recommender demo
"""
from numba import jit
import numpy as np
import pandas as pd
import networkx as nx
import os
@jit(nopython=True)
def remap_ids(data, uid_map, mid_map, uid_inx=0, mid_inx=1):
"""
Remap user and movie IDs
"""
Nm = mid_map.shape[0]
Nu = uid_map.shape[0]
for ii in range(data.shape[0]):
mid = data[ii, mid_inx]
uid = data[ii, uid_inx]
new_mid = np.searchsorted(mid_map, mid)
new_uid = np.searchsorted(uid_map, uid)
if new_mid < 0:
print(mid, new_mid)
        # Only map to index if found, else map to -1
if new_uid < Nu and (uid_map[new_uid] == uid):
data[ii, uid_inx] = new_uid + Nm
else:
data[ii, uid_inx] = -1
data[ii, mid_inx] = new_mid
def ingest_graph(data_path, config):
"""Ingest a graph from user-movie ratings"""
edgelist_name = os.path.join(data_path, config["input_files"]["ratings"])
columns = config["ratings_params"]["columns"]
usecols = config["ratings_params"]["usecols"]
sep = config["ratings_params"]["sep"]
header = config["ratings_params"].get("header")
# Load the edgelist:
ratings = pd.read_csv(
edgelist_name,
names=columns,
sep=sep,
header=header,
usecols=usecols,
engine="python",
dtype="int",
)
# Enumerate movies & users
mids = np.unique(ratings["mId"])
uids = np.unique(ratings["uId"])
# Filter data and transform
remap_ids(ratings.values, uids, mids)
# Node ID map back to movie and user IDs
movie_id_map = {i: "m_{}".format(mId) for i, mId in enumerate(mids)}
user_id_map = {i + len(mids): "u_{}".format(uId) for i, uId in enumerate(uids)}
id_map = {**movie_id_map, **user_id_map}
inv_id_map = dict(zip(id_map.values(), id_map.keys()))
# Create networkx graph
g = nx.from_pandas_edgelist(
ratings, source="uId", target="mId", edge_attr=True, create_using=nx.DiGraph()
)
# Add node types:
node_types = {inv_id_map["m_" + str(v)]: "movie" for v in mids}
node_types.update({inv_id_map["u_" + str(v)]: "user" for v in uids})
nx.set_node_attributes(g, name="label", values=node_types)
print(
"Graph statistics: {} users, {} movies, {} ratings".format(
sum([v[1]["label"] == "user" for v in g.nodes(data=True)]),
sum([v[1]["label"] == "movie" for v in g.nodes(data=True)]),
g.number_of_edges(),
)
)
return g, id_map, inv_id_map
def ingest_features(data_path, config, node_type):
"""Ingest fatures for nodes of node_type"""
filename = os.path.join(data_path, config["input_files"][node_type])
if node_type == "users":
parameters = config["user_feature_params"]
elif node_type == "movies":
parameters = config["movie_feature_params"]
else:
raise ValueError("Unknown node type {}".format(node_type))
columns = parameters.get("columns")
formats = parameters.get("formats")
usecols = parameters.get("usecols")
sep = parameters.get("sep", ",")
feature_type = parameters.get("feature_type")
dtype = parameters.get("dtype", "float32")
header = parameters.get("header")
# Load Data
data = pd.read_csv(
filename,
index_col=0,
names=columns,
sep=sep,
header=header,
engine="python",
usecols=usecols,
)
return data
def add_features_to_nodes(g, inv_id_map, user_features, movie_features):
"""Add user and movie features to graph nodes"""
movie_features_dict = {
k: np.array(movie_features.loc[k]) for k in movie_features.index
}
user_features_dict = {
k: np.array(user_features.loc[k]) for k in user_features.index
}
node_features = {}
for v in movie_features.index:
node_features.update({inv_id_map["m_" + str(v)]: movie_features_dict[v]})
for v in user_features.index:
node_features.update({inv_id_map["u_" + str(v)]: user_features_dict[v]})
nx.set_node_attributes(g, name="feature", values=node_features)
return g
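# A minimal end-to-end sketch of how the helpers above fit together (not part of the original
# module). `data_path` and `config` are assumptions: config must provide the keys read above,
# e.g. config["input_files"], config["ratings_params"], config["user_feature_params"], etc.
#
#     g, id_map, inv_id_map = ingest_graph(data_path, config)
#     user_features = ingest_features(data_path, config, node_type="users")
#     movie_features = ingest_features(data_path, config, node_type="movies")
#     g = add_features_to_nodes(g, inv_id_map, user_features, movie_features)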
|
xugangwu95/stellargraph
|
stellargraph/layer/ppnp.py
|
<filename>stellargraph/layer/ppnp.py<gh_stars>1-10
from tensorflow.keras.layers import Dense, Lambda, Layer, Dropout, Input
import tensorflow.keras.backend as K
import tensorflow as tf
import numpy as np
from .misc import SqueezedSparseConversion
from ..mapper import FullBatchNodeGenerator
from .preprocessing_layer import GraphPreProcessingLayer
class PPNPPropagationLayer(Layer):
"""
Implementation of Personalized Propagation of Neural Predictions (PPNP)
as in https://arxiv.org/abs/1810.05997.
Notes:
- The inputs are tensors with a batch dimension of 1:
Keras requires this batch dimension, and for full-batch methods
we only have a single "batch".
- There are three inputs required, the node features, the output
indices (the nodes that are to be selected in the final layer)
and the graph personalized page rank matrix
      - This class assumes that the personalized page rank matrix (as specified in the paper) is passed as
input to the Keras methods.
- The output indices are used when ``final_layer=True`` and the returned outputs
are the final-layer features for the nodes indexed by output indices.
- If ``final_layer=False`` all the node features are output in the same ordering as
given by the adjacency matrix.
Args:
units (int): dimensionality of output feature vectors
final_layer (bool): If False the layer returns output for all nodes,
if True it returns the subset specified by the indices passed to it.
"""
def __init__(self, units, final_layer=False, **kwargs):
if "input_shape" not in kwargs and "input_dim" in kwargs:
kwargs["input_shape"] = (kwargs.get("input_dim"),)
super().__init__(**kwargs)
self.units = units
self.final_layer = final_layer
def get_config(self):
"""
Gets class configuration for Keras serialization.
Used by keras model serialization.
Returns:
A dictionary that contains the config of the layer
"""
config = {"units": self.units, "final_layer": self.final_layer}
base_config = super().get_config()
return {**base_config, **config}
def compute_output_shape(self, input_shapes):
"""
Computes the output shape of the layer.
Assumes the following inputs:
Args:
input_shapes (tuple of ints)
Shape tuples can include None for free dimensions, instead of an integer.
Returns:
An input shape tuple.
"""
feature_shape, out_shape, *As_shapes = input_shapes
batch_dim = feature_shape[0]
if self.final_layer:
out_dim = out_shape[1]
else:
out_dim = feature_shape[1]
return batch_dim, out_dim, self.units
def build(self, input_shapes):
"""
Builds the layer
Args:
input_shapes (list of int): shapes of the layer's inputs (node features and adjacency matrix)
"""
self.built = True
def call(self, inputs):
"""
Applies the layer.
Args:
inputs (list): a list of 3 input tensors that includes
node features (size 1 x N x F),
output indices (size 1 x M)
graph personalized page rank matrix (size N x N),
where N is the number of nodes in the graph, and
F is the dimensionality of node features.
Returns:
Keras Tensor that represents the output of the layer.
"""
features, out_indices, *As = inputs
batch_dim, n_nodes, _ = K.int_shape(features)
if batch_dim != 1:
raise ValueError(
"Currently full-batch methods only support a batch dimension of one"
)
# Remove singleton batch dimension
features = K.squeeze(features, 0)
out_indices = K.squeeze(out_indices, 0)
# Propagate the features
A = As[0]
output = K.dot(A, features)
# On the final layer we gather the nodes referenced by the indices
if self.final_layer:
output = K.gather(output, out_indices)
# Add batch dimension back if we removed it
if batch_dim == 1:
output = K.expand_dims(output, 0)
return output
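# Illustrative sketch of what the propagation layer above computes (not part of the original
# module). Ignoring the singleton batch dimension, with a personalized page rank matrix A of
# shape (N, N) and node features X of shape (N, F):
#
#     output = A @ X                     # propagate features, shape (N, F)
#     if final_layer:
#         output = output[out_indices]   # keep only the requested nodes, shape (M, F)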
class PPNP:
"""
Implementation of Personalized Propagation of Neural Predictions (PPNP)
as in https://arxiv.org/abs/1810.05997.
The model minimally requires specification of the fully connected layer sizes as a list of ints
corresponding to the feature dimensions for each hidden layer,
    activation functions for each hidden layer, and a generator object.
To use this class as a Keras model, the features and pre-processed adjacency matrix
should be supplied using the :class:`FullBatchNodeGenerator` class. To have the appropriate
pre-processing the generator object should be instantiated as follows::
generator = FullBatchNodeGenerator(G, method="ppnp")
Notes:
- The inputs are tensors with a batch dimension of 1. These are provided by the \
:class:`FullBatchNodeGenerator` object.
- This assumes that the personalized page rank matrix is provided as input to
Keras methods. When using the :class:`FullBatchNodeGenerator` specify the
``method='ppnp'`` argument to do this pre-processing.
      - ``method='ppnp'`` requires that ``use_sparse=False`` and generates a dense personalized page rank matrix
- The nodes provided to the :class:`FullBatchNodeGenerator.flow` method are
used by the final layer to select the predictions for those nodes in order.
However, the intermediate layers before the final layer order the nodes
in the same way as the adjacency matrix.
- The size of the final fully connected layer must be equal to the number of classes to predict.
Args:
layer_sizes (list of int): list of output sizes of fully connected layers in the stack
activations (list of str): list of activations applied to each fully connected layer's output
generator (FullBatchNodeGenerator): an instance of FullBatchNodeGenerator class constructed on the graph of interest
bias (bool): toggles an optional bias in fully connected layers
dropout (float): dropout rate applied to input features of each layer
        kernel_regularizer (str): normalization applied to the kernels of fully connected layers
"""
def __init__(
self,
layer_sizes,
activations,
generator,
bias=True,
dropout=0.0,
kernel_regularizer=None,
):
if not isinstance(generator, FullBatchNodeGenerator):
raise TypeError("Generator should be a instance of FullBatchNodeGenerator")
if not len(layer_sizes) == len(activations):
raise ValueError(
"The number of layers should equal the number of activations"
)
self.layer_sizes = layer_sizes
self.activations = activations
self.bias = bias
self.dropout = dropout
self.kernel_regularizer = kernel_regularizer
self.generator = generator
self.support = 1
self.method = generator.method
# Check if the generator is producing a sparse matrix
self.use_sparse = generator.use_sparse
# Initialize a stack of fully connected layers
n_layers = len(self.layer_sizes)
self._layers = []
for ii in range(n_layers):
l = self.layer_sizes[ii]
a = self.activations[ii]
self._layers.append(Dropout(self.dropout))
self._layers.append(
Dense(
l,
activation=a,
use_bias=self.bias,
kernel_regularizer=self.kernel_regularizer,
)
)
self._layers.append(Dropout(self.dropout))
self._layers.append(
PPNPPropagationLayer(self.layer_sizes[-1], final_layer=True)
)
def __call__(self, x):
"""
Apply PPNP to the inputs.
The input tensors are expected to be a list of the following:
[
Node features shape (1, N, F),
Adjacency indices (1, E, 2),
Adjacency values (1, E),
Output indices (1, O)
]
where N is the number of nodes, F the number of input features,
E is the number of edges, O the number of output nodes.
Args:
x (Tensor): input tensors
Returns:
Output tensor
"""
x_in, out_indices, *As = x
# Currently we require the batch dimension to be one for full-batch methods
batch_dim, n_nodes, _ = K.int_shape(x_in)
if batch_dim != 1:
raise ValueError(
"Currently full-batch methods only support a batch dimension of one"
)
# Convert input indices & values to a sparse matrix
if self.use_sparse:
A_indices, A_values = As
Ainput = [
SqueezedSparseConversion(
shape=(n_nodes, n_nodes), dtype=A_values.dtype
)([A_indices, A_values])
]
# Otherwise, create dense matrix from input tensor
else:
Ainput = [Lambda(lambda A: K.squeeze(A, 0))(A) for A in As]
# TODO: Support multiple matrices?
if len(Ainput) != 1:
raise NotImplementedError(
"The APPNP method currently only accepts a single matrix"
)
h_layer = x_in
for layer in self._layers:
if isinstance(layer, PPNPPropagationLayer):
h_layer = layer([h_layer, out_indices] + Ainput)
else:
h_layer = layer(h_layer)
return h_layer
def node_model(self):
"""
Builds a PPNP model for node prediction
Returns:
            tuple: `(x_inp, x_out)`, where `x_inp` is the list of Keras input tensors for the PPNP model (node features, output node indices, and the personalized page rank matrix),
and `x_out` is a Keras tensor for the PPNP model output.
"""
# Placeholder for node features
N_nodes = self.generator.features.shape[0]
N_feat = self.generator.features.shape[1]
# Inputs for features & target indices
x_t = Input(batch_shape=(1, N_nodes, N_feat))
out_indices_t = Input(batch_shape=(1, None), dtype="int32")
# Create inputs for sparse or dense matrices
if self.use_sparse:
# Placeholders for the sparse adjacency matrix
A_indices_t = Input(batch_shape=(1, None, 2), dtype="int64")
A_values_t = Input(batch_shape=(1, None))
A_placeholders = [A_indices_t, A_values_t]
else:
# Placeholders for the dense adjacency matrix
A_m = Input(batch_shape=(1, N_nodes, N_nodes))
A_placeholders = [A_m]
# TODO: Support multiple matrices
x_inp = [x_t, out_indices_t] + A_placeholders
x_out = self(x_inp)
# Flatten output by removing singleton batch dimension
if x_out.shape[0] == 1:
self.x_out_flat = Lambda(lambda x: K.squeeze(x, 0))(x_out)
else:
self.x_out_flat = x_out
return x_inp, x_out
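# A minimal usage sketch (not part of the original module). `G`, `n_classes`, `train_node_ids`
# and `train_targets` are assumptions: G is a StellarGraph with node features. As the class
# docstring above notes, the generator must use method="ppnp" and sparse=False.
#
#     from tensorflow import keras
#     generator = FullBatchNodeGenerator(G, method="ppnp", sparse=False)
#     ppnp = PPNP(layer_sizes=[16, n_classes], activations=["relu", "softmax"], generator=generator)
#     x_inp, x_out = ppnp.node_model()
#     model = keras.Model(inputs=x_inp, outputs=x_out)
#     model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["acc"])
#     model.fit_generator(generator.flow(train_node_ids, train_targets), epochs=20)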
|
xugangwu95/stellargraph
|
demos/link-prediction/random-walks/utils/metapath2vec_feature_learning.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2018 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from gensim.models import Word2Vec
import time
import pandas as pd
from stellargraph.data import UniformRandomMetaPathWalk
class Metapath2VecFeatureLearning(object):
def __init__(self, nxG=None, embeddings_filename=r"metapath2vec_model.emb"):
self.nxG = nxG
self.G = None
self.model = None
self.embeddings_filename = embeddings_filename
#
# learn_embeddings() and fit() are based on the learn_embeddings() and main() functions in main.py of the
# reference implementation of Node2Vec.
#
def learn_embeddings(self, walks, d, k):
"""
Learn embeddings by optimizing the Skipgram objective using SGD.
Args:
            walks: The list of random walks, where each walk is a list of node ids
            d: The dimensionality of the learned node embeddings
            k: The context window size for the Skipgram (Word2Vec) model
Returns:
"""
time_b = time.time()
walks = [list(map(str, walk)) for walk in walks]
self.model = Word2Vec(
walks, size=d, window=k, min_count=0, sg=1, workers=2, iter=1
)
self.model.wv.save_word2vec_format(self.embeddings_filename)
print(
"({}) Time to learn embeddings {:.0f} seconds".format(
type(self).__name__, time.time() - time_b
)
)
def _assert_positive_int(self, val, msg=""):
"""
Raises ValueError exception if val is not a positive integer.
Args:
val: The value to check
msg: The message to return with the exception
Returns:
"""
        if not isinstance(val, int) or val <= 0:
raise ValueError(msg)
def _assert_positive(self, val, msg=""):
"""
Raises ValueError exception if val is not a positive number.
Args:
val: The value to check
msg: The message to return with the exception
Returns:
"""
if val <= 0:
raise ValueError(msg)
def fit(self, metapaths=None, d=128, r=10, l=80, k=10):
"""
Pipeline for representational learning for all nodes in a graph.
        :param metapaths: The list of metapaths (each a sequence of node types) to guide the random walks
        :param d: The dimensionality of the learned node embeddings
        :param r: The number of random walks per root node
        :param l: The maximum length of each random walk
        :param k: The context window size for the Skipgram (Word2Vec) model
        :return: None; the trained model is stored in self.model and saved to self.embeddings_filename
"""
self._assert_positive_int(d, msg="d should be positive integer")
self._assert_positive_int(r, msg="r should be positive integer")
self._assert_positive_int(l, msg="l should be positive integer")
self._assert_positive_int(k, msg="k should be positive integer")
start_time_fit = time.time()
# self.G = node2vec.Graph(self.nxG, False, p, q)
# self.G.preprocess_transition_probs()
metapath_walker = UniformRandomMetaPathWalk(self.nxG)
# walks = self.G.simulate_walks(r, l)
time_b = time.time()
walks = metapath_walker.run(
nodes=list(self.nxG.nodes()),
metapaths=metapaths,
length=l,
n=r,
node_type_attribute="label",
seed=None,
)
print(
"({}) Time for random walks {:.0f} seconds.".format(
type(self).__name__, time.time() - time_b
)
)
self.learn_embeddings(walks, d, k)
print("Total time for fit() was {:.0f}".format(time.time() - start_time_fit))
def from_file(self, filename):
"""
Helper function for loading a previously trained embedding model from disk, so that experiments can be
run quickly without waiting for metapath2vec training to finish.
:param filename: The filename storing the model
:return: None
"""
self.model = pd.read_csv(filename, delimiter=" ", skiprows=1, header=None)
self.model.iloc[:, 0] = self.model.iloc[:, 0].astype(
str
) # this is so that indexing works the same as having
# trained the model with self.fit()
self.model.index = self.model.iloc[:, 0]
self.model = self.model.drop(columns=[0])
print(self.model.head(2))
def select_operator_from_str(self, binary_operator):
if binary_operator == "l1":
return self.operator_l1
elif binary_operator == "l2":
return self.operator_l2
elif binary_operator == "avg":
return self.operator_avg
elif binary_operator == "h": # hadamard
return self.operator_hadamard
else:
raise ValueError("Invalid binary operator {}".format(binary_operator))
def operator_hadamard(self, u, v):
return u * v
def operator_avg(self, u, v):
return (u + v) / 2.0
def operator_l2(self, u, v):
return (u - v) ** 2
def operator_l1(self, u, v):
return np.abs(u - v)
def transform(self, edge_data, binary_operator="h"):
"""
It calculates edge features for the given binary operator applied to the node features in edge_data
:param edge_data: (2-tuple) It is a list of pairs of nodes that make an edge in the graph
:param binary_operator: The binary operator to apply to the node features to calculate an edge feature
:return: Features in X (Nxd array where N is the number of edges and d is the dimensionality of the edge
features that is the same as the dimensionality of the node features) and edge labels in y (0 for no edge
and 1 for edge).
"""
X = [] # data matrix, each row is a d-dimensional feature of an edge
func_bin_operator = self.select_operator_from_str(binary_operator)
for ids in edge_data[0]:
u_str = str(ids[0])
v_str = str(ids[1])
if type(self.model) is Word2Vec:
X.append(func_bin_operator(self.model[u_str], self.model[v_str]))
else: # Pandas Dataframe
X.append(
func_bin_operator(self.model.loc[u_str], self.model.loc[v_str])
)
return np.array(X), edge_data[1]
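# Editor's note: a hedged usage sketch, not part of the original file. It assumes `graph` is a
# graph object accepted by UniformRandomMetaPathWalk (e.g. a StellarGraph with a "label" node
# attribute) and `edge_examples` is a ([(u, v), ...], [0/1, ...]) tuple as expected by transform().
#
#   feature_learner = Metapath2VecFeatureLearning(nxG=graph)
#   feature_learner.fit(metapaths=[["user", "movie", "user"]], d=128, r=10, l=80, k=10)
#   X, y = feature_learner.transform(edge_examples, binary_operator="h")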
|
xugangwu95/stellargraph
|
stellargraph/layer/hinsage.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Heterogeneous GraphSAGE and compatible aggregator layers
"""
__all__ = ["HinSAGE", "MeanHinAggregator"]
import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow.keras import backend as K, Input
from tensorflow.keras.layers import Lambda, Dropout, Reshape
from tensorflow.keras.utils import Sequence
from tensorflow.keras import activations, initializers, regularizers, constraints
from typing import List, Callable, Tuple, Dict, Union, AnyStr
import itertools as it
import operator as op
import warnings
from ..mapper import HinSAGENodeGenerator, HinSAGELinkGenerator, NodeSequence, LinkSequence  # NodeSequence/LinkSequence are needed for the deprecation check in _get_sizes_from_generator
HinSAGEAggregator = Layer
class MeanHinAggregator(HinSAGEAggregator):
"""Mean Aggregator for HinSAGE implemented with Keras base layer
Args:
output_dim (int): Output dimension
bias (bool): Use bias in layer or not (Default False)
act (Callable or str): name of the activation function to use (must be a Keras
activation function), or alternatively, a TensorFlow operation.
kernel_initializer (str or func): The initialiser to use for the weights;
defaults to 'glorot_uniform'.
kernel_regularizer (str or func): The regulariser to use for the weights;
defaults to None.
kernel_constraint (str or func): The constraint to use for the weights;
defaults to None.
bias_initializer (str or func): The initialiser to use for the bias;
defaults to 'zeros'.
bias_regularizer (str or func): The regulariser to use for the bias;
defaults to None.
bias_constraint (str or func): The constraint to use for the bias;
defaults to None.
"""
def __init__(
self,
output_dim: int = 0,
bias: bool = False,
act: Union[Callable, AnyStr] = "relu",
**kwargs
):
self.output_dim = output_dim
if output_dim % 2 != 0:
raise ValueError("The output_dim must be a multiple of two.")
self.half_output_dim = output_dim // 2
self.has_bias = bias
self.act = activations.get(act)
self.nr = None
self.w_neigh = []
self.w_self = None
self.bias = None
self._get_regularisers_from_keywords(kwargs)
super().__init__(**kwargs)
def _get_regularisers_from_keywords(self, kwargs):
self.kernel_initializer = initializers.get(
kwargs.pop("kernel_initializer", "glorot_uniform")
)
self.kernel_regularizer = regularizers.get(
kwargs.pop("kernel_regularizer", None)
)
self.kernel_constraint = constraints.get(kwargs.pop("kernel_constraint", None))
self.bias_initializer = initializers.get(
kwargs.pop("bias_initializer", "zeros")
)
self.bias_regularizer = regularizers.get(kwargs.pop("bias_regularizer", None))
self.bias_constraint = constraints.get(kwargs.pop("bias_constraint", None))
def get_config(self):
"""
Gets class configuration for Keras serialization
"""
config = {
"output_dim": self.output_dim,
"bias": self.has_bias,
"act": activations.serialize(self.act),
"kernel_initializer": initializers.serialize(self.kernel_initializer),
"kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"bias_initializer": initializers.serialize(self.bias_initializer),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"bias_constraint": constraints.serialize(self.bias_constraint),
}
base_config = super().get_config()
return {**base_config, **config}
def build(self, input_shape):
"""
Builds layer
Args:
input_shape (list of list of int): Shape of input per neighbour type.
"""
# Weight matrix for each type of neighbour
# If there are no neighbours (input_shape[x][2]) for an input
# then do not create weights as they are not used.
self.nr = len(input_shape) - 1
self.w_neigh = [
self.add_weight(
name="w_neigh_" + str(r),
shape=(int(input_shape[1 + r][3]), self.half_output_dim),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
)
if input_shape[1 + r][2] > 0
else None
for r in range(self.nr)
]
# Weight matrix for self
self.w_self = self.add_weight(
name="w_self",
shape=(int(input_shape[0][2]), self.half_output_dim),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
)
# Optional bias
if self.has_bias:
self.bias = self.add_weight(
name="bias",
shape=[self.output_dim],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
)
super().build(input_shape)
def call(self, x, **kwargs):
"""
Apply MeanAggregation on input tensors, x
Args:
x: List of Keras Tensors with the following elements
- x[0]: tensor of self features shape (n_batch, n_head, n_feat)
- x[1+r]: tensors of neighbour features each of shape (n_batch, n_head, n_neighbour[r], n_feat[r])
Returns:
Keras Tensor representing the aggregated embeddings in the input.
"""
# Calculate the mean vectors over the neighbours of each relation (edge) type
neigh_agg_by_relation = []
for r in range(self.nr):
# The neighbour input tensors for relation r
z = x[1 + r]
# If there are neighbours aggregate over them
if z.shape[2] > 0:
z_agg = K.dot(K.mean(z, axis=2), self.w_neigh[r])
# Otherwise add a synthetic zero vector
else:
z_shape = K.shape(z)
w_shape = self.half_output_dim
z_agg = tf.zeros((z_shape[0], z_shape[1], w_shape))
neigh_agg_by_relation.append(z_agg)
# Calculate the self vector shape (n_batch, n_head, n_out_self)
from_self = K.dot(x[0], self.w_self)
# Sum the contributions from all neighbour averages shape (n_batch, n_head, n_out_neigh)
from_neigh = sum(neigh_agg_by_relation) / self.nr
# Concatenate self + neighbour features, shape (n_batch, n_head, n_out)
total = K.concatenate(
[from_self, from_neigh], axis=2
) # YT: this corresponds to concat=Partial
# TODO: implement concat=Full and concat=False
return self.act((total + self.bias) if self.has_bias else total)
def compute_output_shape(self, input_shape):
"""
Computes the output shape of the layer.
Assumes that the layer will be built to match the input shape provided.
Args:
input_shape (tuple of ints)
Shape tuples can include `None` for free dimensions, instead of an integer.
Returns:
An input shape tuple.
"""
return input_shape[0][0], input_shape[0][1], self.output_dim
class HinSAGE:
"""
Implementation of the GraphSAGE algorithm extended for heterogeneous graphs with Keras layers.
To use this class as a Keras model, the features and graph should be supplied using the
:class:`HinSAGENodeGenerator` class for node inference models or the
:class:`HinSAGELinkGenerator` class for link inference models. The `.build` method should
be used to create a Keras model from the `HinSAGE` object.
Currently the class supports node or link prediction models which are built depending on whether
a `HinSAGENodeGenerator` or `HinSAGELinkGenerator` object is specified.
The models are built for a single node or link type. For example if you have nodes of types 'A' and 'B'
you can build a link model for only a single pair of node types, for example ('A', 'B'), which should
be specified in the `HinSAGELinkGenerator`.
If you feed links into the model that do not have these node types (in correct order) an error will be
raised.
Examples:
Creating a two-level HinSAGE node classification model on nodes of type 'A' with hidden node sizes of 8 and 4
and 10 neighbours sampled at each layer using an existing :class:`StellarGraph` object `G`
containing the graph and node features::
generator = HinSAGENodeGenerator(
G, batch_size=50, num_samples=[10,10], head_node_type='A'
)
gat = HinSAGE(
layer_sizes=[8, 4],
activations=["relu","softmax"],
generator=generator,
)
x_inp, predictions = gat.build()
Creating a two-level HinSAGE link classification model on node pairs of type ('A', 'B')
with hidden node sizes of 8 and 4 and 5 neighbours sampled at each layer::
generator = HinSAGELinkGenerator(
G, batch_size=50, num_samples=[5,5], head_node_types=('A','B')
)
gat = HinSAGE(
layer_sizes=[8, 4],
activations=["relu","softmax"],
generator=generator,
)
x_inp, predictions = gat.build()
Note that passing a `NodeSequence` or `LinkSequence` object from the `generator.flow(...)` method
as the `generator=` argument is now deprecated and the base generator object should be passed instead.
For more details, please see the HinSAGE demo notebooks:
demos/node-classification/hinsage/yelp-example.py
Args:
layer_sizes (list): Hidden feature dimensions for each layer
generator (HinSAGENodeGenerator or HinSAGELinkGenerator):
If specified, required model arguments such as the number of samples
will be taken from the generator object. See note below.
aggregator (HinSAGEAggregator): The HinSAGE aggregator to use; defaults to the `MeanHinAggregator`.
bias (bool): If True (default), a bias vector is learnt for each layer.
dropout (float): The dropout supplied to each layer; defaults to no dropout.
normalize (str): The normalization used after each layer; defaults to L2 normalization.
activations (list): Activations applied to each layer's output;
defaults to ['relu', ..., 'relu', 'linear'].
kernel_regularizer (str or func): The regulariser to use for the weights of each layer;
defaults to None.
Note::
If a generator is not specified, then additional keyword arguments must be supplied:
* n_samples (list): The number of samples per layer in the model.
* input_neighbor_tree: A list of (node_type, [children]) tuples that specify the
subtree to be created by the HinSAGE model.
* input_dim (dict): The input dimensions for each node type as a dictionary of the form
`{node_type: feature_size}`.
* multiplicity (int): The number of nodes to process at a time. This is 1 for a node inference
and 2 for link inference (currently no others are supported).
"""
def __init__(
self,
layer_sizes,
generator=None,
aggregator=None,
bias=True,
dropout=0.0,
normalize="l2",
activations=None,
**kwargs
):
# Set the aggregator layer used in the model
if aggregator is None:
self._aggregator = MeanHinAggregator
elif issubclass(aggregator, Layer):
self._aggregator = aggregator
else:
raise TypeError("Aggregator should be a subclass of Keras Layer")
# Set the normalization layer used in the model
if normalize == "l2":
self._normalization = Lambda(lambda x: K.l2_normalize(x, axis=-1))
elif normalize is None or normalize == "none" or normalize == "None":
self._normalization = Lambda(lambda x: x)
else:
raise ValueError(
"Normalization should be either 'l2' or 'none'; received '{}'".format(
normalize
)
)
# Get the sampling tree, input_dim, and num_samples from the generator
# if no generator these must be supplied in kwargs
if generator is not None:
self._get_sizes_from_generator(generator)
else:
self._get_sizes_from_keywords(kwargs)
# Set parameters for the model
self.n_layers = len(self.n_samples)
self.bias = bias
self.dropout = dropout
# Neighbourhood info per layer
self.neigh_trees = self._eval_neigh_tree_per_layer(
[li for li in self.subtree_schema if len(li[1]) > 0]
)
# Depth of each input tensor i.e. number of hops from root nodes
self._depths = [
self.n_layers
+ 1
- sum([1 for li in [self.subtree_schema] + self.neigh_trees if i < len(li)])
for i in range(len(self.subtree_schema))
]
# Dict of {node type: dimension} per layer
self.dims = [
dim
if isinstance(dim, dict)
else {k: dim for k, _ in ([self.subtree_schema] + self.neigh_trees)[layer]}
for layer, dim in enumerate([self.input_dims] + layer_sizes)
]
# Activation function for each layer
if activations is None:
activations = ["relu"] * (self.n_layers - 1) + ["linear"]
elif len(activations) != self.n_layers:
raise ValueError(
"Invalid number of activations; require one function per layer"
)
self.activations = activations
# Optional regulariser, etc. for weights and biases
self._get_regularisers_from_keywords(kwargs)
# Aggregator functions for each layer
self._build_aggregators()
def _get_sizes_from_generator(self, generator):
"""
Sets n_samples and input_feature_size from the generator.
Args:
generator: The supplied generator.
"""
if not isinstance(generator, (HinSAGELinkGenerator, HinSAGENodeGenerator)):
errmsg = "Generator should be an instance of HinSAGELinkGenerator or HinSAGENodeGenerator"
if isinstance(generator, (NodeSequence, LinkSequence)):
errmsg = (
"Passing a Sequence object as the generator to HinSAGE is no longer supported. "
+ errmsg
)
raise TypeError(errmsg)
self.n_samples = generator.num_samples
self.subtree_schema = generator.schema.type_adjacency_list(
generator.head_node_types, len(self.n_samples)
)
self.input_dims = generator.graph.node_feature_sizes()
self.multiplicity = generator.multiplicity
def _get_sizes_from_keywords(self, kwargs):
"""
Sets n_samples and input_feature_size from the keywords.
Args:
kwargs: The additional keyword arguments.
"""
try:
self.n_samples = kwargs["n_samples"]
self.input_dims = kwargs["input_dim"]
self.multiplicity = kwargs["multiplicity"]
self.subtree_schema = kwargs["input_neighbor_tree"]
except KeyError:
raise ValueError(
"Generator not provided: "
"n_samples, input_dim, multiplicity, and input_neighbour_tree must be specified."
)
@staticmethod
def _eval_neigh_tree_per_layer(input_tree):
"""
Function to evaluate the neighbourhood tree structure for every layer. The tree
structure at each layer is a truncated version of the previous layer.
Args:
input_tree: Neighbourhood tree for the input batch
Returns:
List of neighbourhood trees
"""
reduced = [
li
for li in input_tree
if all(li_neigh < len(input_tree) for li_neigh in li[1])
]
return (
[input_tree]
if len(reduced) == 0
else [input_tree] + HinSAGE._eval_neigh_tree_per_layer(reduced)
)
def _get_regularisers_from_keywords(self, kwargs):
regularisers = {}
for param_name in [
"kernel_initializer",
"kernel_regularizer",
"kernel_constraint",
"bias_initializer",
"bias_regularizer",
"bias_constraint",
]:
param_value = kwargs.pop(param_name, None)
if param_value is not None:
regularisers[param_name] = param_value
self._regularisers = regularisers
def _build_aggregators(self):
# Dict of {node type: aggregator} per layer
self._aggs = [
{
node_type: self._aggregator(
output_dim,
bias=self.bias,
act=self.activations[layer],
**self._regularisers
)
for node_type, output_dim in self.dims[layer + 1].items()
}
for layer in range(self.n_layers)
]
def __call__(self, xin: List):
"""
Apply aggregator layers
Args:
x (list of Tensor): Batch input features
Returns:
Output tensor
"""
def apply_layer(x: List, layer: int):
"""
Compute the list of output tensors for a single HinSAGE layer
Args:
x (List[Tensor]): Inputs to the layer
layer (int): Layer index
Returns:
Outputs of applying the aggregators as a list of Tensors
"""
layer_out = []
for i, (node_type, neigh_indices) in enumerate(self.neigh_trees[layer]):
# The shape of the head node is used for reshaping the neighbour inputs
head_shape = K.int_shape(x[i])[1]
# Apply dropout and reshape neighbours per node per layer
neigh_list = [
Dropout(self.dropout)(
Reshape(
(
head_shape,
self.n_samples[self._depths[i]],
self.dims[layer][self.subtree_schema[neigh_index][0]],
)
)(x[neigh_index])
)
for neigh_index in neigh_indices
]
# Apply dropout to head inputs
x_head = Dropout(self.dropout)(x[i])
# Apply aggregator to head node and reshaped neighbour nodes
layer_out.append(self._aggs[layer][node_type]([x_head] + neigh_list))
return layer_out
# Form HinSAGE layers iteratively
self.layer_tensors = []
h_layer = xin
for layer in range(0, self.n_layers):
h_layer = apply_layer(h_layer, layer)
self.layer_tensors.append(h_layer)
# Remove neighbourhood dimension from output tensors
# note that at this point h_layer contains the output tensor of the top (last applied) layer of the stack
h_layer = [
Reshape(K.int_shape(x)[2:])(x) for x in h_layer if K.int_shape(x)[1] == 1
]
# Return final layer output tensor with optional normalization
return (
self._normalization(h_layer[0])
if len(h_layer) == 1
else [self._normalization(xi) for xi in h_layer]
)
def _input_shapes(self) -> List[Tuple[int, int]]:
"""
Returns the input shapes for the tensors of the supplied neighbourhood type tree
Returns:
A list of tuples giving the shape (number of nodes, feature size) for
the corresponding item in the neighbourhood type tree (self.subtree_schema)
"""
neighbor_sizes = list(it.accumulate([1] + self.n_samples, op.mul))
def get_shape(stree, cnode, level=0):
adj = stree[cnode][1]
size_dict = {
cnode: (neighbor_sizes[level], self.input_dims[stree[cnode][0]])
}
if len(adj) > 0:
size_dict.update(
{
k: s
for a in adj
for k, s in get_shape(stree, a, level + 1).items()
}
)
return size_dict
input_shapes = dict()
for ii in range(len(self.subtree_schema)):
input_shapes_ii = get_shape(self.subtree_schema, ii)
# Update input_shapes if input_shapes_ii.keys() are not already in input_shapes.keys():
if (
len(set(input_shapes_ii.keys()).intersection(set(input_shapes.keys())))
== 0
):
input_shapes.update(input_shapes_ii)
return [input_shapes[ii] for ii in range(len(self.subtree_schema))]
def build(self):
"""
Builds a HinSAGE model for node or link/node pair prediction, depending on the generator used to construct
the model (whether it is a node or link/node pair generator).
Returns:
tuple: (x_inp, x_out), where ``x_inp`` is a list of Keras input tensors
for the specified HinSAGE model (either node or link/node pair model) and ``x_out`` contains
model output tensor(s) of shape (batch_size, layer_sizes[-1]).
"""
# Create tensor inputs
x_inp = [Input(shape=s) for s in self._input_shapes()]
# Output from HinSAGE model
x_out = self(x_inp)
return x_inp, x_out
def default_model(self, flatten_output=True):
warnings.warn(
"The .default_model() method will be deprecated in future versions. "
"Please use .build() method instead.",
PendingDeprecationWarning,
)
return self.build()
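# Editor's note: a hedged end-to-end sketch, not part of the original file, extending the class
# docstring example above; `G`, `train_node_ids` and `train_targets` are hypothetical.
#
#   from tensorflow.keras import Model, layers, optimizers
#
#   generator = HinSAGENodeGenerator(G, batch_size=50, num_samples=[10, 10], head_node_type="A")
#   hinsage = HinSAGE(layer_sizes=[32, 32], activations=["relu", "relu"], generator=generator)
#   x_inp, x_out = hinsage.build()
#   predictions = layers.Dense(train_targets.shape[1], activation="softmax")(x_out)
#   model = Model(inputs=x_inp, outputs=predictions)
#   model.compile(optimizer=optimizers.Adam(0.005), loss="categorical_crossentropy")
#   model.fit_generator(generator.flow(train_node_ids, train_targets), epochs=10)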
|
pletzer/crush
|
sum_forces.py
|
<filename>sum_forces.py
import vtk
from argparse import ArgumentParser
from glob import glob
import numpy
import os.path
def main(vtk_file_patterns):
for filename in glob(vtk_file_patterns):
print(f'file: {filename}')
reader = vtk.vtkPolyDataReader()
reader.SetFileName(filename)
reader.Update()
polydata = reader.GetOutput()
npoints = polydata.GetNumberOfPoints()
ncells = polydata.GetNumberOfCells()
print(f'number of points: {npoints}')
print(f'number of cells : {ncells}')
# create a point array
sum_forces = vtk.vtkDoubleArray()
sum_forces.SetNumberOfComponents(3)
sum_forces.SetNumberOfTuples(npoints)
sum_forces.SetName('sum_forces')
polydata.GetPointData().AddArray(sum_forces)
sum_forces_data = numpy.zeros((npoints, 3), numpy.float64)
# add the contribution from each two sets of points
p0 = numpy.zeros(3, numpy.float64)
p1 = numpy.zeros(3, numpy.float64)
points = polydata.GetPoints()
force_array = polydata.GetCellData().GetScalars('force')
for i in range(ncells):
cell = polydata.GetCell(i)
ptids = cell.GetPointIds()
npts = ptids.GetNumberOfIds()
assert(npts == 2)
pi0 = ptids.GetId(0)
pi1 = ptids.GetId(1)
points.GetPoint(pi0, p0)
points.GetPoint(pi1, p1)
# force
force = p1 - p0
force /= numpy.sqrt(force.dot(force))
force *= force_array.GetTuple(i)[0]
sum_forces_data[pi0, :] -= force
sum_forces_data[pi1, :] += force
sum_forces.SetVoidArray(sum_forces_data, npoints * 3, 1)  # size is the total number of values (3 components per point)
# save the new files
writer = vtk.vtkPolyDataWriter()
ofilename = 'sum_forces_' + os.path.basename(filename)
writer.SetFileName(ofilename)
writer.SetInputData(polydata)
writer.Update()
if __name__ == '__main__':
parser = ArgumentParser(description='Compute the total force at each node.')
parser.add_argument('-i', dest='vtk_file_patterns', help='Specify intrs VTK file pattern (e.g. where_data/triax-intrs-*.vtk)')
args = parser.parse_args()
main(args.vtk_file_patterns)
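# Editor's note: an illustrative invocation, not part of the original file; the pattern below is
# taken from the --help text and quoted so the shell does not expand the wildcard:
#
#   python sum_forces.py -i 'where_data/triax-intrs-*.vtk'
#
# Each matching polydata file is expected to contain 2-point (line) cells with a scalar cell
# array named "force"; the script writes sum_forces_<input basename>.vtk to the current
# directory with a per-point "sum_forces" vector array added.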
|
devangi2000/coding-concepts
|
Coding Questions with Solutions/arrayAsHill.py
|
<gh_stars>0
"""
An array of integers is a hill if:
it is strictly increasing at the beginning;
after that it is constant;
after that it is strictly decreasing.
The first block (increasing) and the last block (decreasing) may be absent; it is allowed that both of these blocks are absent.
For example, the following three arrays are hills: [5, 7, 11, 11, 2, 1], [4, 4, 2], [7],
but the following three are not: [5, 5, 6, 6, 1], [1, 2, 1, 2], [4, 5, 5, 6].
Write a program that checks if an array is a hill.
Input Format
The first line contains integer n (1 ≤ n ≤ 100) — the number of elements in the array.
The second line contains n integers a1, a2, ..., an (1 ≤ ai ≤ 1000) — the elements of the array.
Output Format
Print "yes" if the given array is a hill. Otherwise, print "no".
"""
num = int(input())
arr = list(map(int, input().split()))
i=0
while i<num-1 and arr[i]<arr[i+1]:
i+=1
while i<num-1 and arr[i]==arr[i+1]:
i+=1
while i<num-1 and arr[i]>arr[i+1]:
i+=1
if(i==num-1):
print('yes')
else:
print('no')
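# Editor's note: a hedged, self-contained variant (not part of the original submission) wrapping
# the same three-scan check in a function so it can be exercised without stdin:
#
#   def is_hill(arr):
#       i, n = 0, len(arr)
#       while i < n - 1 and arr[i] < arr[i + 1]:   # strictly increasing prefix
#           i += 1
#       while i < n - 1 and arr[i] == arr[i + 1]:  # constant plateau
#           i += 1
#       while i < n - 1 and arr[i] > arr[i + 1]:   # strictly decreasing suffix
#           i += 1
#       return i == n - 1
#
#   assert is_hill([5, 7, 11, 11, 2, 1]) and is_hill([4, 4, 2]) and is_hill([7])
#   assert not is_hill([5, 5, 6, 6, 1]) and not is_hill([1, 2, 1, 2]) and not is_hill([4, 5, 5, 6])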
|
Maddin-619/dotfiles
|
.config/qtile/config.py
|
<gh_stars>0
# Copyright (c) 2010 <NAME>
# Copyright (c) 2010, 2014 dequis
# Copyright (c) 2012 <NAME>
# Copyright (c) 2012-2014 <NAME>
# Copyright (c) 2012 <NAME>
# Copyright (c) 2013 horsik
# Copyright (c) 2013 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import subprocess
import socket
import requests
from Xlib import display as xdisplay
from libqtile import qtile
from libqtile.config import Key, Screen, Group, Drag, Click, Match
from libqtile.command import lazy
from libqtile.log_utils import logger
from libqtile import layout, bar, widget, hook
from typing import List # noqa: F401
##### DEFINING SOME VARIABLES #####
mod = "mod4" # Sets mod key to SUPER/WINDOWS
myTerm = "alacritty" # My terminal of choice
# The Qtile config file location
myConfig = "/home/martin/.config/qtile/config.py"
batteryNames = ['BAT0', 'cw2015-battery']
backlightNames = ['intel_backlight', 'edp-backlight']
#### HELPER FUNCTIONS ####
batteryName = ''
for name in batteryNames:
if os.path.exists("/sys/class/power_supply/" + name):
batteryName = name
backlightName = ''
for name in backlightNames:
if os.path.exists("/sys/class/backlight/" + name):
backlightName = name
def get_num_monitors():
num_monitors = 0
try:
display = xdisplay.Display()
screen = display.screen()
resources = screen.root.xrandr_get_screen_resources()
for output in resources.outputs:
monitor = display.xrandr_get_output_info(
output, resources.config_timestamp)
preferred = False
if hasattr(monitor, "preferred"):
preferred = monitor.preferred
elif hasattr(monitor, "num_preferred"):
preferred = monitor.num_preferred
if preferred:
num_monitors += 1
except Exception:
# always setup at least one monitor
return 1
else:
return num_monitors
def getBatteryCapacity():
icons = ['', '', '', '', '', '', '', '', '', '']
capacity = int(subprocess.check_output(
["cat", "/sys/class/power_supply/" + batteryName + "/capacity"]).decode("utf-8").strip())
charging = subprocess.check_output(
["cat", "/sys/class/power_supply/" + batteryName + "/status"]).decode("utf-8").strip()
icon = ''
if charging == 'Charging':
icon = ''
elif charging == 'Full':
icon = ''
else:
# guard against capacity == 100, which would index past the end of `icons`
icon = icons[min(capacity // 10, len(icons) - 1)]
return '{0} {1} %'.format(icon, capacity)
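# Editor's note (illustrative, not part of the original config): on Linux,
# /sys/class/power_supply/<battery>/capacity holds an integer percentage (e.g. "87") and
# .../status holds "Charging", "Discharging", "Full" or "Not charging", so the widget text
# comes out as e.g. "<icon> 87 %".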
##### KEYBINDINGS #####
keys = [
# The essentials
Key(
[mod], "Return",
lazy.spawn(myTerm) # Open terminal
),
Key(
[mod, "shift"], "Return", # Dmenu Run Launcher
lazy.spawn("dmenu_run -p 'Run: '")
),
Key(
[mod], "Tab",
lazy.next_layout() # Toggle through layouts
),
Key(
[mod, "shift"], "c",
lazy.window.kill() # Kill active window
),
Key(
[mod, "shift"], "r",
lazy.restart() # Restart Qtile
),
Key(
[mod, "shift"], "q",
lazy.shutdown() # Shutdown Qtile
),
# Switch focus to specific monitor (out of three)
Key([mod], "w",
# Keyboard focus to screen(0)
lazy.to_screen(0)
),
Key([mod], "e",
# Keyboard focus to screen(1)
lazy.to_screen(1)
),
Key([mod], "r",
# Keyboard focus to screen(2)
lazy.to_screen(2)
),
# Switch focus of monitors
Key([mod], "period",
lazy.next_screen() # Move monitor focus to next screen
),
Key([mod], "comma",
lazy.prev_screen() # Move monitor focus to prev screen
),
# Treetab controls
Key([mod, "control"], "k",
lazy.layout.section_up() # Move up a section in treetab
),
Key([mod, "control"], "j",
lazy.layout.section_down() # Move down a section in treetab
),
# Window controls
Key(
[mod], "k",
lazy.layout.down() # Switch between windows in current stack pane
),
Key(
[mod], "j",
lazy.layout.up() # Switch between windows in current stack pane
),
Key(
[mod, "shift"], "k",
lazy.layout.shuffle_down() # Move windows down in current stack
),
Key(
[mod, "shift"], "j",
lazy.layout.shuffle_up() # Move windows up in current stack
),
Key(
[mod], "h",
lazy.layout.grow(), # Grow size of current window (XmonadTall)
lazy.layout.increase_nmaster(), # Increase number in master pane (Tile)
),
Key(
[mod], "l",
lazy.layout.shrink(), # Shrink size of current window (XmonadTall)
lazy.layout.decrease_nmaster(), # Decrease number in master pane (Tile)
),
Key(
[mod], "n",
lazy.layout.normalize() # Restore all windows to default size ratios
),
Key(
[mod], "m",
# Toggle a window between minimum and maximum sizes
lazy.layout.maximize()
),
Key(
[mod, "shift"], "f",
lazy.window.toggle_floating() # Toggle floating
),
# Stack controls
Key(
[mod, "shift"], "space",
lazy.layout.rotate(), # Swap panes of split stack (Stack)
# Switch which side main pane occupies (XmonadTall)
lazy.layout.flip()
),
Key(
[mod], "space",
# Switch window focus to other pane(s) of stack
lazy.layout.next()
),
Key(
[mod, "control"], "Return",
# Toggle between split and unsplit sides of stack
lazy.layout.toggle_split()
),
# Dmenu scripts launched with ALT + CTRL + KEY
Key(
["mod1", "control"], "e",
lazy.spawn("./.dmenu/dmenu-edit-configs.sh")
),
# My applications launched with SUPER + ALT + KEY
Key(
[mod], "s",
lazy.spawn('pavucontrol-qt')
),
# Special Keybindings
Key(
[], "XF86AudioRaiseVolume",
lazy.spawn(
'sh -c "pactl set-sink-mute @DEFAULT_SINK@ false ; pactl set-sink-volume @DEFAULT_SINK@ +5%"')
),
Key(
[], "XF86AudioLowerVolume",
lazy.spawn(
'sh -c "pactl set-sink-mute @DEFAULT_SINK@ false ; pactl set-sink-volume @DEFAULT_SINK@ -5%"')
),
Key(
[], "XF86AudioMute",
lazy.spawn("pactl set-sink-mute @DEFAULT_SINK@ toggle")
),
Key(
[], "XF86MonBrightnessUp",
lazy.spawn('light -A 10')
),
Key(
[], "XF86MonBrightnessDown",
lazy.spawn(
'light -U 10')
),
]
##### GROUPS #####
group_names = [("WWW", {'layout': 'monadtall'}),
("DEV", {'layout': 'monadtall'}),
("SYS", {'layout': 'monadtall'}),
("DOC", {'layout': 'monadtall'}),
("VBOX", {'layout': 'monadtall'}),
("CHAT", {'layout': 'monadtall'}),
("MUS", {'layout': 'monadtall'}),
("VID", {'layout': 'monadtall'}),
("GFX", {'layout': 'floating'})]
groups = [Group(name, **kwargs) for name, kwargs in group_names]
for i, (name, kwargs) in enumerate(group_names, 1):
# Switch to another group
keys.append(Key([mod], str(i), lazy.group[name].toscreen()))
# Send current window to another group
keys.append(Key([mod, "shift"], str(i), lazy.window.togroup(name)))
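# Editor's note (illustrative, not part of the original config): with the nine groups above,
# the loop generates bindings such as
#   mod + 1         -> switch to the "WWW" group
#   mod + shift + 1 -> move the focused window to the "WWW" group
# and likewise for keys 2-9 mapping to DEV, SYS, DOC, VBOX, CHAT, MUS, VID and GFX.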
##### DEFAULT THEME SETTINGS FOR LAYOUTS #####
layout_theme = {"border_width": 2,
"margin": 4,
"border_focus": "#1793D1",
"border_normal": "#1D2330"
}
##### THE LAYOUTS #####
layouts = [
layout.Max(),
layout.Stack(num_stacks=2),
# layout.MonadWide(**layout_theme),
# layout.Bsp(**layout_theme),
#layout.Stack(stacks=2, **layout_theme),
# layout.Columns(**layout_theme),
# layout.RatioTile(**layout_theme),
# layout.VerticalTile(**layout_theme),
#layout.Tile(shift_windows=True, **layout_theme),
# layout.Matrix(**layout_theme),
# layout.Zoomy(**layout_theme),
layout.MonadTall(**layout_theme),
layout.Floating(**layout_theme)
]
##### COLORS #####
colors = [["#282a36", "#282a36"], # panel background
["#434758", "#434758"], # background for current screen tab
["#ffffff", "#ffffff"], # font color for group names
["#ff5555", "#ff5555"], # background color for layout widget
["#3C6D7E", "#3C6D7E"], # dark green gradiant for other screen tabs
["#0093DD", "#0093DD"]] # background color for pacman widget
##### PROMPT #####
prompt = "{0}@{1}: ".format(os.environ["USER"], socket.gethostname())
##### DEFAULT WIDGET SETTINGS #####
widget_defaults = dict(
font="MesloLGM Nerd Font",
fontsize=12,
padding=2,
background=colors[2]
)
extension_defaults = widget_defaults.copy()
try:
public_ip = requests.get('https://ifconfig.me').text
except requests.RequestException:
public_ip = "Heumaden"
##### WIDGETS #####
def init_widgets_list():
widgets_list = [
widget.Sep(
linewidth=0,
padding=6,
foreground=colors[2],
background=colors[0]
),
widget.GroupBox(
fontsize=12,
margin_y=4,
margin_x=0,
padding_y=4,
padding_x=4,
borderwidth=3,
active=colors[2],
inactive=colors[2],
rounded=False,
highlight_method="block",
this_current_screen_border=colors[4],
this_screen_border=colors[1],
other_current_screen_border=colors[0],
other_screen_border=colors[0],
foreground=colors[2],
background=colors[0]
),
widget.Prompt(
prompt=prompt,
padding=10,
foreground=colors[3],
background=colors[1]
),
widget.Sep(
linewidth=0,
padding=10,
foreground=colors[2],
background=colors[0]
),
widget.WindowName(
foreground=colors[4],
background=colors[0],
padding=5
),
widget.Notify(
padding=5,
default_timeout=5,
background=colors[5],
foreground=colors[2]
),
widget.TextBox(
text='\ue0b2',
font='Hack Nerd Font',
background=colors[0],
foreground=colors[5],
padding=0,
fontsize=18
),
widget.TextBox(
text=" 🖬",
foreground=colors[2],
background=colors[5],
padding=0,
fontsize=14
),
widget.Memory(
foreground=colors[2],
background=colors[5],
padding=5
),
widget.TextBox(
text='\ue0b2',
font='Hack Nerd Font',
background=colors[5],
foreground=colors[4],
padding=0,
fontsize=18
),
widget.CPUGraph(
background=colors[4],
samples=90,
),
widget.TextBox(
text='\ue0b2',
font='Hack Nerd Font',
background=colors[4],
foreground=colors[5],
padding=0,
fontsize=18
),
widget.TextBox(
text=" ↯",
foreground=colors[2],
background=colors[5],
padding=0,
fontsize=14
),
widget.Net(
foreground=colors[2],
background=colors[5],
padding=5
),
widget.TextBox(
text='\ue0b2',
font='Hack Nerd Font',
background=colors[5],
foreground=colors[4],
padding=0,
fontsize=18
),
widget.GenPollText(
func=getBatteryCapacity,
background=colors[4],
),
widget.TextBox(
text='\ue0b2',
font='Hack Nerd Font',
background=colors[4],
foreground=colors[5],
padding=0,
fontsize=18
),
widget.TextBox(
text=" 🔊",
foreground=colors[2],
background=colors[5],
padding=0,
fontsize=14
),
widget.PulseVolume(
foreground=colors[2],
background=colors[5],
padding=5,
limit_max_volume=True,
volume_app="pavucontrol-qt"
),
widget.TextBox(
text='\ue0b2',
font='Hack Nerd Font',
background=colors[5],
foreground=colors[4],
padding=0,
fontsize=18
),
widget.Backlight(
backlight_name=backlightName,
change_command='light -S {0}',
fmt=' {}',
foreground=colors[2],
background=colors[4],
padding=5,
),
widget.TextBox(
text='\ue0b2',
font='Hack Nerd Font',
background=colors[4],
foreground=colors[5],
padding=0,
fontsize=18
),
widget.CurrentLayoutIcon(
custom_icon_paths=[os.path.expanduser("~/.config/qtile/icons")],
foreground=colors[2],
background=colors[5],
padding=0,
scale=0.7
),
widget.CurrentLayout(
foreground=colors[2],
background=colors[5],
padding=5
),
widget.TextBox(
text='\ue0b2',
font='Hack Nerd Font',
background=colors[5],
foreground=colors[4],
padding=0,
fontsize=18
),
widget.Wttr(
foreground=colors[2],
background=colors[4],
format=1,
location={
public_ip: public_ip,
'Stuttgart': 'Stuttgart',
},
mouse_callbacks={
'Button1': lambda: qtile.cmd_spawn(myTerm + ' --hold -e wttr ' + public_ip)
}
),
widget.TextBox(
text='\ue0b2',
font='Hack Nerd Font',
background=colors[4],
foreground=colors[5],
padding=0,
fontsize=18
),
widget.TextBox(
text=" 🕒",
foreground=colors[2],
background=colors[5],
padding=5,
fontsize=14
),
widget.Clock(
foreground=colors[2],
background=colors[5],
format="%a, %b %d - %H:%M"
),
widget.TextBox(
text='\ue0b2',
font='Hack Nerd Font',
background=colors[5],
foreground=colors[4],
padding=0,
fontsize=18
),
widget.KeyboardLayout(
configured_keyboards=['us', 'de'],
foreground=colors[2],
background=colors[4],
padding=5
),
widget.Sep(
linewidth=0,
padding=5,
foreground=colors[0],
background=colors[4]
),
widget.Systray(
background=colors[0],
padding=5
),
widget.Sep(
linewidth=0,
padding=7,
foreground=colors[0],
background=colors[0]
),
]
return widgets_list
##### SCREENS ##### (TRIPLE MONITOR SETUP or ONE MONITOR)
def init_widgets_primary_screen():
widgets = init_widgets_list()
return widgets
def init_widgets_secondary_screen():
widgets = init_widgets_list()
return widgets[:-2]
def init_screens(num_monitors):
if num_monitors == 1:
return [Screen(top=bar.Bar(widgets=init_widgets_primary_screen(), opacity=0.95, size=20))]
else:
screens = [Screen(top=bar.Bar(
widgets=init_widgets_primary_screen(), opacity=0.95, size=20))]
for _ in range(num_monitors - 1):
screens.append(Screen(top=bar.Bar(
widgets=init_widgets_secondary_screen(), opacity=0.95, size=20)))
return screens
if __name__ in ["config", "__main__"]:
num_monitors = get_num_monitors()
logger.warning('number of screens: {0}'.format(num_monitors))
screens = init_screens(num_monitors)
##### DRAG FLOATING WINDOWS #####
mouse = [
Drag([mod], "Button1", lazy.window.set_position_floating(),
start=lazy.window.get_position()),
Drag([mod], "Button3", lazy.window.set_size_floating(),
start=lazy.window.get_size()),
Click([mod], "Button2", lazy.window.bring_to_front())
]
dgroups_key_binder = None
dgroups_app_rules = [] # type: List
main = None
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
floating_layout = layout.Floating(float_rules=[
*layout.Floating.default_float_rules,
Match(title="emulator"),
])
auto_fullscreen = True
focus_on_window_activation = "smart"
##### STARTUP APPLICATIONS #####
@hook.subscribe.startup_once
def start_once():
home = os.path.expanduser('~')
subprocess.call([home + '/.config/qtile/autostart.sh'])
# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this
# string besides java UI toolkits; you can see several discussions on the
# mailing lists, GitHub issues, and other WM documentation that suggest setting
# this string if your java app doesn't work correctly. We may as well just lie
# and say that we're a working one by default.
#
# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in
# java that happens to be on java's whitelist.
wmname = "LG3D"
|
Maddin-619/dotfiles
|
.config/qtile/custom_widgets.py
|
import os
import shlex
from libqtile.widget import base
from typing import Dict # noqa: F401
class Backlight(base.InLoopPollText):
"""A simple widget to show the current brightness of a monitor"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('get_brightness_cmd',
"xrandr --verbose | awk '/Brightness/ { print $2; exit }'", 'Shell command that prints the current brightness as a 0-1 fraction'),
('update_interval', .2, 'The delay in seconds between updates'),
('step', 10, 'Percent to change the backlight by on each scroll step'),
('format', '{icon}{percent: 2.0%}', 'Display format'),
('change_command',
'xrandr --output eDP-1 --brightness {0}', 'Execute command to change value')
]
icons = ['', '', '', '', '', '', '', '']
def __init__(self, **config):
base.InLoopPollText.__init__(self, **config)
self.add_defaults(Backlight.defaults)
self.future = None
def _get_brightness(self):
brightness = self.call_process(
self.get_brightness_cmd, **{"shell": True})
return float(brightness)
def poll(self):
try:
brightness = self._get_brightness()
except RuntimeError as e:
return 'Error: {}'.format(e)
return self.format.format(percent=brightness, icon=Backlight.icons[max(int(brightness*100//12.5)-1, 0)])
def change_backlight(self, value):
self.call_process(shlex.split(self.change_command.format(value)))
def button_press(self, x, y, button):
if self.future and not self.future.done():
return
try:
brightness = self._get_brightness()
new = now = brightness * 100
except RuntimeError as e:
new = now = 100
return 'Error: {}'.format(e)
if button == 5: # down
new = max(now - self.step, 0)
elif button == 4: # up
new = min(now + self.step, 100)
if new != now:
self.future = self.qtile.run_in_executor(self.change_backlight,
new / 100)
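# Editor's note: a hedged usage sketch, not part of the original file. It assumes this module
# sits next to config.py so it can be imported as `custom_widgets`, and `widgets_list` is the
# list built in config.py's init_widgets_list():
#
#   from custom_widgets import Backlight
#
#   widgets_list.append(
#       Backlight(
#           step=5,
#           format='{icon}{percent: 2.0%}',
#           change_command='xrandr --output eDP-1 --brightness {0}',
#       )
#   )
#
# Scrolling on the widget then raises or lowers the software brightness in 5% steps.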
|
lukemerrett/PythonPet
|
Helpers/datehelper.py
|
__author__ = '<NAME>'
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
def add_seconds_to_date(date, seconds):
return date + timedelta(seconds=seconds)
def is_date_earlier_than_today(date):
return date <= todays_date()
def get_difference_as_relative_delta(latest_date, earliest_date):
return relativedelta(latest_date, earliest_date)
def get_total_seconds_difference(latest_date, earliest_date):
return (latest_date - earliest_date).total_seconds()
def todays_date():
return datetime.now()
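# Editor's note: a small illustrative usage, not part of the original module:
#
#   deadline = add_seconds_to_date(todays_date(), 120)
#   is_date_earlier_than_today(deadline)                   # False until ~2 minutes pass
#   get_total_seconds_difference(deadline, todays_date())  # roughly 120.0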
|
lukemerrett/PythonPet
|
PetObjects/pet.py
|
<filename>PetObjects/pet.py
__author__ = '<NAME>'
from PetObjects.status import PetStatus
from PetObjects.age import Age
from PetObjects.bladder import Bladder
from PetObjects.hunger import HungerLevel
class Pet:
Age = None
HungerLevel = None
Name = None
Bladder = None
def __init__(self):
pass
def hatch(self, environment, name):
"""
Sets the original details for the pet
"""
self.Age = Age()
self.Bladder = Bladder(environment)
self.HungerLevel = HungerLevel(self.Bladder)
self.Name = name
def get_pets_status(self):
"""
Returns an object outlining the status of the pet
:return: An object showing the status of the pet
"""
return PetStatus(
self.Age.has_pet_reached_its_lifespan(),
self.HungerLevel.is_hungry(),
self.Bladder.needs_the_toilet(),
self.Name)
|
lukemerrett/PythonPet
|
pythonpet.py
|
<reponame>lukemerrett/PythonPet
__author__ = '<NAME>'
from EnvironmentObjects.environment import Environment
from PetObjects.pet import Pet
console_options = None
environment = None
myPet = None
def run_option(x):
if x in console_options:
console_options[x]()
else:
print("You didn't enter a valid option")
wait()
def wait():
print("Waiting")
def console_loop():
pets_status = myPet.get_pets_status()
if pets_status.has_reached_its_lifespan:
print('Your pet has died!')
exit()
print('\n%s\'s status' % myPet.Name)
print('----------\n')
print(myPet.Age.current_age_string())
print('Hungry: ' + ('Yes! Feed Me!' if pets_status.is_hungry else 'Not yet'))
print('Needs the Toilet: ' + ('Bursting!' if pets_status.needs_the_toilet else 'Nope'))
print('Poop in the Cage: ' + str(environment.current_poop_level()))
print('')
print('What would you like to do:\n')
print('1. Feed Pet')
print('2. Go to Toilet')
print('3. Clean up the Poop')
print('4. Wait')
print('5. Exit')
number_chosen = input('Choose a number (1,2,3 etc): ')
run_option(number_chosen)
# Entry Point
if __name__ == '__main__':
environment = Environment()
myPet = Pet()
print('What would you like to call your pet? ')
pets_name = input('Name: ')
myPet.hatch(environment, pets_name)
console_options = {
'1': myPet.HungerLevel.feed_pet,
'2': myPet.Bladder.go_to_toilet,
'3': environment.clean_up_poop,
'4': wait,
'5': exit
}
print('Your pet has hatched; welcome it to the new world!')
while True:
console_loop()
exit()
|
lukemerrett/PythonPet
|
PetObjects/age.py
|
__author__ = '<NAME>'
from random import randint
from Helpers import datehelper
class Age:
__minimum_potential_lifespan_in_seconds = 86400 # 1 day
__total_potential_lifespan_in_seconds = 31536000 # 1 year
__birth_date = None
__lifespan_in_seconds = None
def __init__(self):
self.__birth_date = datehelper.todays_date()
self.__lifespan_in_seconds = randint(
self.__minimum_potential_lifespan_in_seconds,
self.__total_potential_lifespan_in_seconds)
def current_age_in_seconds(self):
"""
Gets the current age of the pet in seconds
:return: The total age of the pet in seconds
"""
return datehelper.get_total_seconds_difference(datehelper.todays_date(),self.__birth_date)
def current_age_string(self):
"""
Gets the age of the pet in a human readable string
:return: A human readable form of the pets age
"""
current_age = datehelper.get_difference_as_relative_delta(datehelper.todays_date(), self.__birth_date)
return "Your pet is currently %d years %d months %d days %d hours %d minutes old" % (
current_age.years,
current_age.months,
current_age.days,
current_age.hours,
current_age.minutes)
def has_pet_reached_its_lifespan(self):
"""
Returns a value indicating whether the pet has reached its current lifespan
:return: True if the pet is dead, false if the pet is still kicking around.
"""
time_of_death = datehelper.add_seconds_to_date(self.__birth_date, self.__lifespan_in_seconds)
return datehelper.is_date_earlier_than_today(time_of_death)
|
lukemerrett/PythonPet
|
EnvironmentObjects/environment.py
|
__author__ = '<NAME>'
class Environment:
__poop_level = None
def __init__(self):
self.__poop_level = 0
def current_poop_level(self):
return self.__poop_level
def pet_has_pooped(self):
self.__poop_level += 1
def clean_up_poop(self):
self.__poop_level = 0
|
lukemerrett/PythonPet
|
PetObjects/hunger.py
|
<gh_stars>0
__author__ = '<NAME>'
from Helpers import datehelper
class HungerLevel:
__seconds_until_hungry = 120 # 2 minutes
__last_fed = None
__bladder = None
def __init__(self, bladder):
self.__last_fed = datehelper.todays_date()
self.__bladder = bladder
def is_hungry(self):
date_pet_is_hungry = datehelper.add_seconds_to_date(self.__last_fed, self.__seconds_until_hungry)
return datehelper.is_date_earlier_than_today(date_pet_is_hungry)
def feed_pet(self):
self.__last_fed = datehelper.todays_date()
self.__bladder.has_been_fed()
print("Your pet has been fed")
|
lukemerrett/PythonPet
|
PetObjects/status.py
|
__author__ = '<NAME>'
class PetStatus:
has_reached_its_lifespan = None
is_hungry = None
needs_the_toilet = None
name = None
def __init__(self, has_reached_its_lifespan, is_hungry, needs_the_toilet, name):
self.has_reached_its_lifespan = has_reached_its_lifespan
self.is_hungry = is_hungry
self.needs_the_toilet = needs_the_toilet
self.name = name
|
lukemerrett/PythonPet
|
PetObjects/bladder.py
|
__author__ = '<NAME>'
class Bladder:
__bladder_level = None
__bursting_level = 5
__environment = None
def __init__(self, environment):
self.__bladder_level = 0
self.__environment = environment
def has_been_fed(self):
self.__bladder_level += 1
def needs_the_toilet(self):
return self.__bladder_level >= self.__bursting_level
def go_to_toilet(self):
self.__bladder_level = 0
self.__environment.pet_has_pooped()
|
araAxelFT/QRgen
|
src/main.py
|
<filename>src/main.py
import tkinter
import qrcode
from tkinter import *
from tkinter import ttk
import platform
import os
from PIL import ImageTk, Image
if(platform.system() == "Darwin"):
save_path = "../images/"
files = os.listdir(save_path)
else:
save_path = "../images/"
files = os.listdir(save_path)
file_count = len(files)
def gen():
global file_count
file_count = file_count + 1
input_data = input_entry.get()
qr = qrcode.QRCode(
version=1,
box_size=10,
border=5)
qr.add_data(input_data)
qr.make(fit=True)
img = qr.make_image(fill='black', back_color='white')
img.save(f"{save_path}qr{file_count}.png")
root.python_image = tkinter.PhotoImage(file=f"{save_path}qr{file_count}.png")
ttk.Label(root, image=root.python_image).pack()
root = Tk()
input_entry = Entry(root, width=50)
input_entry.pack()
gen_button = Button(root, text="Gen", command=gen)
gen_button.pack()
root.mainloop()
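# Editor's note: a hedged, GUI-free sketch of the same QR generation step, not part of the
# original file; handy for checking qrcode output without Tk:
#
#   import qrcode
#
#   qr = qrcode.QRCode(version=1, box_size=10, border=5)
#   qr.add_data("https://example.com")
#   qr.make(fit=True)
#   qr.make_image(fill='black', back_color='white').save("example_qr.png")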
|
CoryK8nn8dy/clist-dashboard
|
post.py
|
<gh_stars>0
class Post:
def __init__(self, title, price, date, time):
self.title = title
self.price = price
self.date = date
self.time = time
def display(self):
print('Title:', self.title)
print('Price:', self.price)
print('Date:', self.date)
print('Time:', self.time)
|
CoryK8nn8dy/clist-dashboard
|
clist.py
|
from requests import get
from bs4 import BeautifulSoup
from post import Post
def read_posts(url_filename):
posts_file = open(url_filename, "r")
post_urls = posts_file.readlines()
posts = []
for post_url in post_urls:
# remove new line from URL
post_url = post_url.rstrip('\n')
response = get(post_url) # HTTP GET for c.l. post
# extract HTML from page
html_soup = BeautifulSoup(response.text, 'html.parser')
# get post title
post_title = html_soup.h2.span.find("span", {"id":"titletextonly"}).text
# get post price
post_price = html_soup.h2.span.find("span", {"class": "price"}).text
# get post date and time
post_datetime = html_soup\
.find("p", {"class": "postinginfo reveal"})\
.find("time", {"class": "date timeago"})\
.text
# separate date and time
datetime_list = post_datetime.split(' ') # split string on spaces
datetime_list.remove('\n') # remove all lone newline chars
# remove all empty strings from the list
while '' in datetime_list:
datetime_list.remove('')
# remove all newline characters that may follow the date or time
for i in range(len(datetime_list)):
datetime_list[i] = datetime_list[i].rstrip()
# get date
post_date = datetime_list[0]
# get time
post_time = datetime_list[1]
# instantiate post object and append it to the list
posts.append(Post(post_title, post_price, post_date, post_time))
return posts
def display_posts(posts_list):
print("-"*80)
for post in posts_list:
# display the post
post.display()
print("-"*80)
if __name__ == "__main__":
posts_objs = read_posts('posts.txt')
display_posts(posts_objs)
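# Editor's note (illustrative, not part of the original file): posts.txt is expected to contain
# one craigslist post URL per line, e.g.
#
#   https://<city>.craigslist.org/.../some-listing/<post-id>.html
#
# Each URL is fetched, the title, price, date and time are scraped from the post page, and
# display_posts() prints every resulting Post.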
|
parroit/editablejqgrid
|
models/plugin_editable_jqgrid.py
|
from gluon.storage import Storage
class plugin_editable_jqgrid_Description(object):
def __init__(self,table_name,field_name):
self.table_name=table_name
self.field_name=field_name
def description(self):
try:
description=self.__getattribute__(self.table_name)[self.field_name]
except:
description="Error"
return description
from gluon.html import XmlComponent
class plugin_editable_jqgrid(XmlComponent):
text="""
<script type="text/javascript">
jQuery(document).ready(function(){
create_grid("#%(grid_name)s",'#pager-%(grid_name)s','%(url)s',%(col_names)s,%(col_models)s,'%(caption)s','%(edit_url)s');
});
new_lookups=%(lookups)s;
for(key in new_lookups)
lookups[key]=new_lookups[key];
</script>
<table id="%(grid_name)s"></table>
<div id="pager-%(grid_name)s"></div>
"""
@staticmethod
def set_description_column(table,field_name):
table.virtualfields.append(plugin_editable_jqgrid_Description(table._tablename,field_name))
def get_custom_element_function(self,field):
from gluon.sqlhtml import OptionsWidget
if OptionsWidget.has_options(field):
widget=SQLFORM.widgets.options.widget(field,'no_selection').xml()
script= """
function (value, options) {
var el = document.createElement('div');
el.innerHTML='%s'.replace('>'+value+'<',' selected="selected">'+value+'<');
el.children[0].style.width="100%%";
return el;
}""" % widget;
return script;
elif field.type=='boolean':
return "get_bool_widget"
else:
if field.type=='time':
calendar="el.children[0].onfocus=function(){time_setup(this.attributes['id'].value);};"
elif field.type=='date':
calendar="el.children[0].onfocus=function(){calendar_setup_date(this.attributes['id'].value);};"
elif field.type=='datetime':
calendar="el.children[0].onfocus=function(){calendar_setup_datetime(this.attributes['id'].value);};"
elif field.type=='double':
calendar="el.children[0].onfocus=function(){double_setup(this);};"
elif field.type=='integer':
calendar="el.children[0].onfocus=function(){integer_setup(this);};"
else:
calendar=""
if field.widget:
widget=field.widget(field,'a_value').xml().replace('<','\<').replace('>','\>').replace("'","\\'")
else:
widget=SQLFORM.widgets[field.type].widget(field,'a_value').xml()
str="""
function (value, options) {var el = document.createElement('div'); el.innerHTML='%s'.replace('a_value',value);
%s
el.children[0].style.width="100%%";
return el;
}"""
return str% (widget,calendar);
def get_custom_value_function(self,field):
if field.type=='boolean':
return "get_bool_value"
elif field.widget and field.widget.func_name=='nicEdit':
return "function (elem) {return nicEditors.findEditor('%s').getContent();}" % (field._tablename+"_"+field.name)
else:
return "get_normal_value"
def get_custom_formatter_function(self,grid_name,field):
if field.type=='boolean':
return """function (cellvalue, options, rowObject) {
var checked;
if (cellvalue)
checked="checked='checked'";
else
checked='';
return "<input type='checkbox' " + checked + " disabled='disabled'/>";
}"""
elif self.has_lookups(field):
return """
function (cellvalue, options, rowObject)
{
if (cellvalue ==null)
text='';
else {
text=lookups['%s-%s'][cellvalue];
if (text+''=='undefined')
text='unknown:'+cellvalue;
}
return text;
}
""" % (grid_name,field.name)
else:
return "undefined"
def get_lookups(self,field):
requires = field.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
if requires:
if hasattr(requires[0], 'options'):
options = requires[0].options()
return ','.join(["'%s':'%s'" % o for o in options])
def has_lookups(self,field):
from gluon.sqlhtml import OptionsWidget
return OptionsWidget.has_options(field)
def __init__(self,table,fieldname=None,fieldvalue=None,grid_name='grid',columns=None,col_width=80,height=300,db_name='db',caption='',id_represent=['id',None]):
if not columns:
columns=table.fields
if not 'id' in columns:
columns.insert(0,'id')
if fieldname and fieldname in columns:
columns.remove(fieldname)
self.params=Storage()
if caption=='':
caption=table._tablename
self.params.grid_name=grid_name
self.params.caption=caption
import urllib
columns_param='@'.join(columns)
if id_represent[1]:
import cgi
from StringIO import StringIO
id_represent_url=''
out=StringIO(id_represent_url)
cgi.mimetools.encode(StringIO(cgi.urlparse.unquote(str(id_represent[1]))),out,'base64')
id_represent_url=out.getvalue()
else:
id_represent_url="None"
self.params.url=URL(a=request.application,c='plugin_editable_jqgrid',f='call',args=['json','get_rows',table._tablename,db_name,columns_param,fieldname,fieldvalue,id_represent[0],id_represent_url])
self.params.edit_url=URL(a=request.application,c='plugin_editable_jqgrid',f='call',args=['json','save_row',table._tablename,db_name,fieldname,fieldvalue])
self.params.lookups='{'+','.join(["'"+grid_name+'-'+f+"':{"+self.get_lookups(table[f])+"}" for f in columns if self.has_lookups(table[f])])+'}'
def get_col_header(col_name):
if col_name=='id':
return id_represent[0]
else:
return table[col_name].label
self.params.col_names='['+','.join(["'"+get_col_header(f)+"'" for f in columns])+']'
self.params.col_models= "[{name:'id',index:'id', width:85, editable: false},\n"
def options():
return (self.get_custom_formatter_function(grid_name,table[f]),f,f,self.get_custom_element_function(table[f]),self.get_custom_value_function(table[f]))
self.params.col_models+=','.join(["{formatter:%s,name:'%s',index:'%s',editable:true,edittype:'custom',editoptions:{custom_element: %s, custom_value:%s}}\n" % options() for f in columns if f!='id'])+']'
response.files.append(URL(r=request,c='static/plugin_editable_jqgrid',f='jquery-ui-1.10.3.custom.min.js'))
response.files.append(URL(r=request,c='static/plugin_editable_jqgrid',f='jquery-ui-1.10.3.custom.min.css'))
response.files.append(URL(r=request,c='static/plugin_editable_jqgrid/i18n',f='grid.locale-it.js'))
response.files.append(URL(r=request,c='static/plugin_editable_jqgrid/js',f='jquery.jqGrid.js'))
response.files.append(URL(r=request,c='static/plugin_editable_jqgrid',f='ui.jqgrid.css'))
response.files.append(URL(r=request,c='static/plugin_editable_jqgrid',f='custom.jqgrid.css'))
response.files.append(URL(r=request,c='static/plugin_editable_jqgrid',f='plugin_editable_jqgrid.js'))
def xml(self):
return self.text % self.params
def __str__(self):
return self.xml()
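# Editor's note: a hedged usage sketch, not part of the original file. In a web2py controller or
# view the component would typically be instantiated against a table, e.g.
#
#   grid = plugin_editable_jqgrid(db.mytable, grid_name='mygrid', caption='My table')
#   # in the view:  {{=grid}}
#
# `db.mytable` is a hypothetical table; the component emits the <table>/<div> markup plus the
# jqGrid setup script from `text` above, wired to this plugin's call() controller actions.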
|
parroit/editablejqgrid
|
controllers/plugin_editable_jqgrid.py
|
<filename>controllers/plugin_editable_jqgrid.py
import cgi
def contains_html(field):
return field.__dict__.has_key('contains_html') and field.contains_html
def non_textfield(field):
return field.type!="string" and field.type!="text"
def call():
"""
exposes services. for example:
http://..../[app]/default/call/jsonrpc
decorate with @services.jsonrpc the functions to expose
supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
"""
session.forget()
return service()
@service.json
def save_row():
table=get_db()[request.args[2]]
if request.vars.oper=='edit':
from gluon.storage import Storage
cust=Storage()
for f in table.fields:
if request.vars.has_key(f) and f!='id':
#here non-html fields are escaped
field=table[f]
value=request.vars[f]
if value=='' and field.default and request.vars.id=='new':
value=field.default
if non_textfield(field) or contains_html(field):
cust[f]=value
else:
cust[f]=cgi.escape(value)
if request.args[4]!='None':
cust[request.args[4]]=request.args[5]
if request.vars.id=='new':
return dict(Id=table.insert(**cust))
else:
get_db()(table.id==request.vars.id).update(**cust)
return dict(Id=request.vars.id)
else:
if request.vars.oper=='del':
for del_id in request.vars.id.split(','):
get_db()(table.id==del_id).delete()
def get_db():
return eval(request.args[3])
@service.json
def get_rows():
table=get_db()[request.args[2]]
fields = table.fields
rows = []
page = int(request.vars.page)
pagesize = int(request.vars.rows)
limitby = (page * pagesize - pagesize,page * pagesize)
orderby = table[request.vars.sidx]
if orderby and request.vars.sord == 'desc': orderby = ~orderby
filter_field_name=request.args[5]
filter_field_value=request.args[6]
id_represent=''
from StringIO import StringIO
out=StringIO(id_represent)
import cgi
if request.args[8]=='None':
id_represent='%(id)s'
else:
cgi.mimetools.decode(StringIO(request.args[8]),out,'base64')
id_represent=out.getvalue()
id_represent='<a href="%s">%s</a>' %(id_represent,request.args[7])
field_names=request.args[4].split('@')
fields=[table[f] for f in field_names]
searchField=request.vars.searchField
searchString=request.vars.searchString
searchOper={'eq':lambda a,b: a==b,
'nq':lambda a,b: a!=b,
'gt':lambda a,b: a>b,
'ge':lambda a,b: a>=b,
'lt':lambda a,b: a<b,
'le':lambda a,b: a<=b,
'bw':lambda a,b: a.like(b+'%'),
'bn':lambda a,b: ~a.like(b+'%'),
'ew':lambda a,b: a.like('%'+b),
'en':lambda a,b: ~a.like('%'+b),
'cn':lambda a,b: a.like('%'+b+'%'),
'nc':lambda a,b: ~a.like('%'+b+'%'),
'in':lambda a,b: a.belongs(b.split()),
'ni':lambda a,b: ~a.belongs(b.split())}\
[request.vars.searchOper or 'eq']
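    # Illustration (added comment): with searchOper 'cn' and searchString 'foo', the selected lambda
    # builds table[searchField].like('%foo%'), which the DAL translates into a SQL LIKE filter below.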
if filter_field_name!='None':
dbset = get_db()(table[filter_field_name]==filter_field_value)
else:
dbset = get_db()(table.id>0)
if searchField: dbset=dbset(searchOper(table[searchField],searchString))
for r in dbset.select(limitby=limitby,orderby=orderby):
vals = []
for f in fields:
if f.name =='id':
vals.append(id_represent % r)
else:
rep = f.represent
#if rep:
# vals.append(rep(r[f.name]))
#else:
vals.append(r[f.name])
rows.append(dict(id=r.id,cell=vals))
    total = dbset.count() # count the filtered set so paging reflects any active filter/search
pages = int(total/pagesize)
if total % pagesize > 0: pages += 1
data = dict(total=pages,page=page,rows=rows)
return data
|
visr/Py2Jl.jl
|
ast_to_json.py
|
<gh_stars>10-100
import ast
import typing as t
import numbers
import json
from wisepy.talking import Talking
from Redy.Tools.PathLib import Path
talking = Talking()
def to_dict(node: t.Union[ast.AST, str, numbers.Number, list]):
if isinstance(node, complex):
return {"class": "complex", "real": node.real, "imag": node.imag}
elif isinstance(node, str):
return node
elif isinstance(node, numbers.Number):
return node
elif isinstance(node, list):
return [to_dict(each) for each in node]
elif isinstance(node, ast.AST):
data = {
"class": node.__class__.__name__,
**{
field: to_dict(value)
for field, value in ast.iter_fields(node)
}
}
if hasattr(node, 'lineno'):
data['lineno'] = node.lineno
if hasattr(node, 'col_offset'):
data['colno'] = node.col_offset
return data
return node
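# Illustrative example (added, not part of the original module): to_dict(ast.parse("x = 1"))
# returns a JSON-ready structure roughly like
#   {"class": "Module", "body": [{"class": "Assign", "targets": [...], "value": {...},
#    "lineno": 1, "colno": 0}], ...}
# (exact node and field names vary with the Python version); json.dump can serialize it directly.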
@talking.alias('file')
def from_file(input: 'filename', to: 'filename'):
"""
from python source to json file
"""
path = Path(input)
with path.open('r') as fr, Path(to).open('w') as fw:
try:
data = to_dict(ast.parse(fr.read()))
data['name'] = path.relative()[:-3] # remove `.py`
json.dump([str(path), data], fw, indent=2)
except SyntaxError as e:
print(e)
pass
@talking.alias('text')
def from_code(input: 'text', to: 'filename'):
"""
from python source code to json file
"""
with Path(to).open('w') as fw:
try:
data = to_dict(ast.parse(input))
data['name'] = 'Default'
json.dump(['<stdin>', data], fw, indent=2)
except SyntaxError:
pass
if __name__ == '__main__':
talking.on()
|
Svenskithesource/Streamable-finder
|
main.py
|
<reponame>Svenskithesource/Streamable-finder
# Made by svenskithesource#2815
import requests, time, random, string, os
def find():
with open("streamable_failed.txt", "r") as f:
lines = [line.rstrip() for line in f.readlines() if line != "\n"]
random_code = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
url = "https://streamable.com/" + random_code
if url not in lines:
req = requests.get(url)
return {"url": url, "status_code": req.status_code}
else:
return {"url": url, "status_code": 400}
def main():
if not os.path.exists("./streamable.txt"):
open("streamable.txt", "w").close()
if not os.path.exists("./streamable_failed.txt"):
open("streamable_failed.txt", "w").close()
while True:
find_streamable = find()
if find_streamable["status_code"] == 200:
with open("streamable.txt", "a") as f:
f.write(find_streamable["url"] + "\n")
print(find_streamable["url"])
elif find_streamable["status_code"] == 429:
print("Getting rate limited! Waiting 1 min.")
time.sleep(60)
print("Starting again!")
elif find_streamable["status_code"] == 404:
with open("streamable_failed.txt", "a") as f:
f.write(find_streamable["url"] + "\n")
else:
print("Failed", find_streamable["url"])
if __name__ == '__main__':
main()
|
ids1024/cannon-bluetooth-remote
|
remote.py
|
#!/usr/bin/env python3
import sys
from subprocess import Popen, PIPE
DEVICE_NAME = "PY"
BUTTON_RELEASE = 0b10000000
BUTTON_FOCUS = 0b01000000
BUTTON_TELE = 0b00100000
BUTTON_WIDE = 0b00010000
MODE_IMMEDIATE = 0b00001100
MODE_DELAY = 0b00000100
MODE_MOVIE = 0b00001000
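# Added note: a command combines one button bit with one mode field, e.g. cmd 'ri' writes
# BUTTON_RELEASE | MODE_IMMEDIATE = 0b10001100, while 'fd' would write BUTTON_FOCUS | MODE_DELAY.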
device = sys.argv[1]
cmd = sys.argv[2]
if cmd != 'pair':
if cmd[0] == 'r':
button = BUTTON_RELEASE
elif cmd[0] == 'f':
button = BUTTON_FOCUS
elif cmd[0] == 't':
button = BUTTON_TELE
elif cmd[0] == 'w':
button = BUTTON_WIDE
else:
print("No command")
if cmd[1] == 'i':
mode = MODE_IMMEDIATE
elif cmd[1] == 'm':
mode = MODE_MOVIE
elif cmd[1] == 'd':
mode = MODE_DELAY
else:
print("No Mode")
p = Popen(['btgatt-client', '-d', device],
stdout=PIPE, stdin=PIPE, universal_newlines=True)
def wait_contain(s):
while True:
line = p.stdout.readline()
if s in line:
break
def write_value(*values):
p.stdin.write("write-value " + ' '.join(map(str, values)) + '\n')
p.stdin.flush()
wait_contain("Write successful")
wait_contain("GATT discovery procedures complete")
write_value(0xf504, 3, *map(ord, DEVICE_NAME))
if cmd != 'pair':
write_value(0xf506, button | mode)
|
rwgardner/reconchess-strangefish
|
strangefish/strangefish.py
|
<filename>strangefish/strangefish.py
import multiprocessing as mp
import os
from collections import defaultdict
from functools import partial
from time import time
from tqdm import tqdm
from typing import Optional, List, Tuple, Set, Callable
import chess.engine
from reconchess import Player, Color, GameHistory, WinReason, Square
from strangefish import defaults
from strangefish.strategies.auxiliary_strategies import contingency_strategy
from strangefish.utilities import board_matches_sense, move_would_happen_on_board, populate_next_board_set
from strangefish.utilities.player_logging import create_main_logger
from strangefish.utilities.timing import Timer
# Parameters for minor bot behaviors
RC_DISABLE_PBAR = os.getenv('RC_DISABLE_PBAR', 'false').lower() == 'true' # Flag to disable the tqdm progress bars
WAIT_LOOP_RATE_LIMIT = 1 # minimum seconds spent looping in self.while_we_wait()
# Parameters for switching to the emergency backup plan
BOARD_SET_LIMIT = 1_000_000 # number of boards in set at which we stop processing and revert to backup plan
TIMEOUT_RESIGN_LIMIT = 10 # number of seconds left at which we stop processing and revert to backup plan
AVG_BOARD_EXP = 33 # number of moves on each board: mean 33, std 10 according to ~320k boards in logs
class StrangeFish(Player):
"""
StrangeFish is the main skeleton of our reconchess-playing bot. Its primary role is to manage the set of all
possible board states based on the given information. Decision making for sense and move choices are handed off to
one of our strategy functions.
StrangeFish alone does not use the Stockfish chess engine, but most of our strategies do use it to make sensing and
moving decisions. In order to run StrangeFish with one of those strategies, you'll need to download Stockfish from
https://stockfishchess.org/download/ and create an environment variable called STOCKFISH_EXECUTABLE that is the path
to the downloaded Stockfish executable.
"""
def __init__(
self,
choose_sense: Callable[[Set[str], bool, List[Square], List[chess.Move], float], Square] = defaults.choose_sense,
choose_move: Callable[[Set[str], bool, List[chess.Move], float], chess.Move] = defaults.choose_move,
while_we_wait: Optional[Callable[[Set[str], bool], None]] = defaults.while_we_wait,
end_game: Optional[Callable[[Set[str]], None]] = defaults.end_game,
pool_size: Optional[int] = 2,
log_to_file=True,
save_debug_history=False,
rc_disable_pbar=RC_DISABLE_PBAR,
):
"""
Set up StrangeFish with decision-making capabilities inherited from another function.
:param choose_sense: A callable produced by the strategy function which chooses and returns the sense square
:param choose_move: A callable produced by the strategy function which chooses and returns the move
:param while_we_wait: An optional callable produced by the strategy function which uses time between our turns
:param end_game: An optional callable produced by the strategy function which (typically) shuts down StockFish
:param pool_size: Number of processes to use when multiprocessing board set expansion and filtering
:param log_to_file: A boolean flag to turn on/off logging to file gameLogs/StrangeFish.log
:param save_debug_history: A boolean flag to turn on/off the generation of a turn-by-turn internal history
:param rc_disable_pbar: A boolean flag to turn on/off the tqdm progress bars
"""
self._choose_sense = choose_sense
self._choose_move = choose_move
self._while_we_wait = while_we_wait
self._end_game = end_game
self.boards: Set[str] = set()
self.next_turn_boards: defaultdict[Set] = defaultdict(set)
self.next_turn_boards_unsorted: Set[str] = set()
self.color = None
self.turn_num = None
self.pool = mp.Pool(pool_size)
self.save_debug_history = save_debug_history
self.debug_memory = []
self.rc_disable_pbar = rc_disable_pbar
self.timeout_resign = None # flag used to skip later turn processes if we have run out of time
self.logger = create_main_logger(log_to_file=log_to_file)
self.logger.debug("A new StrangeFish player was initialized.")
def _game_state_log(self, step_name='-'): # Save game state for advanced replay
if self.save_debug_history:
info = {
'name': __name__,
'color': chess.COLOR_NAMES[self.color],
'turn': self.turn_num,
'step': step_name,
'boards': list(self.boards),
}
self.debug_memory.append(info)
def _emergency_plan(self): # Switch to emergency backup plan
self.boards = set()
self.next_turn_boards = {None: set()}
self._choose_sense, self._choose_move = contingency_strategy()
setattr(self, 'while_we_wait', None)
def get_debug_history(self): # Get possible board states from each turn
return self.debug_memory
def handle_game_start(self, color: Color, board: chess.Board, opponent_name: str):
color_name = chess.COLOR_NAMES[color]
self.logger.info('Starting a new game as %s against %s.', color_name, opponent_name)
self.boards = {board.epd(en_passant='xfen')}
self.color = color
self.turn_num = 0
self.timeout_resign = False
# Save game state for advanced replay
if self.color == chess.BLACK:
self._game_state_log()
self._game_state_log()
def handle_opponent_move_result(self, captured_my_piece: bool, capture_square: Optional[Square]):
self.turn_num += 1
self.logger.debug("Starting turn %d.", self.turn_num)
# Do not "handle_opponent_move_result" if no one has moved yet
if self.turn_num == 1 and self.color == chess.WHITE:
self._game_state_log()
return
if captured_my_piece:
self.logger.debug('Opponent captured my piece at %s.', chess.SQUARE_NAMES[capture_square])
else:
self.logger.debug("Opponent's move was not a capture.")
self.logger.debug('Already calculated scores for %d possible boards, '
'approximately %d x %d = %d boards left to analyse.',
len(self.next_turn_boards[None]), len(self.boards),
AVG_BOARD_EXP, (AVG_BOARD_EXP * len(self.boards)))
# Check for board set over-growth and switch to emergency plan if needed
if not captured_my_piece and \
(len(self.next_turn_boards[None]) + (AVG_BOARD_EXP * len(self.boards))) > BOARD_SET_LIMIT:
self.logger.warning("Board set grew too large, switching to contingency plan. "
"Set size expected to grow to %d; limit is %d",
len(self.next_turn_boards[None]) + (AVG_BOARD_EXP * len(self.boards)),
BOARD_SET_LIMIT)
self._emergency_plan()
# If creation of new board set didn't complete during op's turn (self.boards will not be empty)
if self.boards:
new_board_set = populate_next_board_set(self.boards, self.color, self.pool,
rc_disable_pbar=self.rc_disable_pbar)
for square in new_board_set.keys():
self.next_turn_boards[square] |= new_board_set[square]
# Get this turn's board set from a dictionary keyed by the possible capture squares
self.boards = self.next_turn_boards[capture_square]
self.logger.debug('Finished expanding and filtering the set of possible board states. '
'There are %d possible boards at the start of our turn %d.',
len(self.boards), self.turn_num)
# Save game state for advanced replay
self._game_state_log('post-op-move')
def choose_sense(self, sense_actions: List[Square], move_actions: List[chess.Move], seconds_left: float
) -> Optional[Square]:
# Check if time is up (or if we already changed to the emergency plan)
if not self.timeout_resign and seconds_left <= TIMEOUT_RESIGN_LIMIT:
            self.logger.warning('Time is nearly up, switching to the backup plan.')
self._emergency_plan()
self.timeout_resign = True
self.logger.debug('Choosing a sensing square for turn %d with %d boards and %.0f seconds remaining.',
self.turn_num, len(self.boards), seconds_left)
# The option to pass isn't included in the reconchess input
move_actions += [chess.Move.null()]
with Timer(self.logger.debug, 'choosing sense location'):
# Pass the needed information to the decision-making function to choose a sense square
sense_choice = self._choose_sense(self.boards, self.color, sense_actions, move_actions, seconds_left)
self.logger.debug('Chose to sense %s', chess.SQUARE_NAMES[sense_choice] if sense_choice else 'nowhere')
return sense_choice
def handle_sense_result(self, sense_result: List[Tuple[Square, Optional[chess.Piece]]]):
# Filter the possible board set to only boards which would have produced the observed sense result
num_before = len(self.boards)
i = tqdm(
self.boards,
disable=self.rc_disable_pbar,
desc=f'{chess.COLOR_NAMES[self.color]} Filtering {len(self.boards)} boards by sense results',
unit='boards',
)
self.boards = {board_epd for board_epd in
self.pool.imap_unordered(partial(board_matches_sense, sense_result=sense_result), i)
if board_epd is not None}
self.logger.debug('There were %d possible boards before sensing and %d after.', num_before, len(self.boards))
# Save game state for advanced replay
self._game_state_log('post-sense')
def choose_move(self, move_actions: List[chess.Move], seconds_left: float) -> Optional[chess.Move]:
# Currently, move_actions is passed by reference, so if we add the null move here it will be in the list twice
# since we added it in choose_sense also. Instead of removing this line altogether, I'm leaving a check so we
# are prepared in the case that reconchess is updated to pass a copy of the move_actions list instead.
if chess.Move.null() not in move_actions:
move_actions += [chess.Move.null()]
self.logger.debug('Choosing move for turn %d from %d moves over %d boards with %.2f seconds remaining.',
self.turn_num, len(move_actions), len(self.boards), seconds_left)
with Timer(self.logger.debug, 'choosing move'):
# Pass the needed information to the decision-making function to choose a move
move_choice = self._choose_move(self.boards, self.color, move_actions, seconds_left)
self.logger.debug('The chosen move was %s', move_choice)
# reconchess uses None for the null move, so correct the function output if that was our choice
return move_choice if move_choice != chess.Move.null() else None
def handle_move_result(self, requested_move: Optional[chess.Move], taken_move: Optional[chess.Move],
captured_opponent_piece: bool, capture_square: Optional[Square]):
self.logger.debug('The requested move was %s and the taken move was %s.', requested_move, taken_move)
if captured_opponent_piece:
self.logger.debug('Move %s was a capture!', taken_move)
num_boards_before_filtering = len(self.boards)
if requested_move is None:
requested_move = chess.Move.null()
if taken_move is None:
taken_move = chess.Move.null()
# Filter the possible board set to only boards on which the requested move would have resulted in the taken move
i = tqdm(
self.boards,
disable=self.rc_disable_pbar,
desc=f'{chess.COLOR_NAMES[self.color]} Filtering {len(self.boards)} boards by move results',
unit='boards',
)
self.boards = {
board_epd for board_epd in
self.pool.imap_unordered(partial(move_would_happen_on_board, requested_move, taken_move,
captured_opponent_piece, capture_square), i)
if board_epd is not None
}
self.logger.debug('There were %d possible boards before filtering and %d after.',
num_boards_before_filtering, len(self.boards))
# Save game state for advanced replay
self._game_state_log('post-move')
self._game_state_log()
# Re-initialize the set of boards for next turn (filled in while_we_wait and/or handle_opponent_move_result)
self.next_turn_boards = defaultdict(set)
self.next_turn_boards_unsorted = set()
def while_we_wait(self):
start_time = time()
self.logger.debug('Running the "while_we_wait" method. '
f'{len(self.boards)} boards left to expand for next turn.')
our_king_square = chess.Board(tuple(self.boards)[0]).king(self.color) if len(self.boards) else None
while time() - start_time < WAIT_LOOP_RATE_LIMIT:
# If there are still boards in the set from last turn, remove one and expand it by all possible moves
if len(self.boards):
new_board_set = populate_next_board_set({self.boards.pop()}, self.color, rc_disable_pbar=True)
for square in new_board_set.keys():
self.next_turn_boards[square] |= new_board_set[square]
if square != our_king_square:
self.next_turn_boards_unsorted |= new_board_set[square]
# If all of last turn's boards have been expanded, pass to the sense/move function's waiting method
elif self._while_we_wait:
self._while_we_wait(self.next_turn_boards_unsorted, self.color)
def handle_game_end(self, winner_color: Optional[Color], win_reason: Optional[WinReason], game_history: GameHistory
):
self.logger.info('I %s by %s', "won" if winner_color == self.color else "lost",
win_reason.name if hasattr(win_reason, "name") else win_reason)
self.pool.terminate()
self._end_game()
|
rwgardner/reconchess-strangefish
|
strangefish/strategies/multiprocessing_strategies.py
|
<gh_stars>1-10
import json
import multiprocessing as mp
import random
from collections import defaultdict
from dataclasses import dataclass
from time import sleep, time
from typing import List, Set, Callable, Tuple
import chess.engine
import numpy as np
from reconchess import Square
from tqdm import tqdm
from strangefish.utilities import SEARCH_SPOTS, stockfish, simulate_move, simulate_sense, \
generate_rbc_moves, generate_moves_without_opponent_pieces, force_promotion_to_queen
from strangefish.utilities.rbc_move_score import calculate_score, ScoreConfig
from strangefish.utilities.player_logging import create_sub_logger
SCORE_ROUNDOFF = 1e-5
@dataclass
class MoveConfig:
mean_score_factor: float = 0.9 # relative contribution of a move's average outcome on its compound score
min_score_factor: float = 0.1 # relative contribution of a move's worst outcome on its compound score
max_score_factor: float = 0.0 # relative contribution of a move's best outcome on its compound score
threshold_score_factor: float = 0.01 # fraction below best compound score in which any move will be considered
sense_by_move: bool = True # Use bonus score to encourage board set reduction by attempted moves
force_promotion_queen: bool = True # change all pawn-promotion moves to choose queen, otherwise it's often a knight
@dataclass
class TimeConfig:
turns_to_plan_for: int = 7 # fixed number of turns over which the remaining time will be divided
min_time_for_turn: float = 1.0 # minimum time to allocate for a turn
time_for_sense: float = 0.8 # fraction of turn spent in choose_sense
time_for_move: float = 0.2 # fraction of turn spent in choose_move
calc_time_per_move: float = 0.001 # starting time estimate for move score calculation
# Add a hash method for chess.Board objects so that they can be tested for uniqueness. For our purposes, a unique EPD
# string is adequate; it contains piece positions, castling rights, and en-passant square, but not turn counters.
chess.Board.__hash__ = lambda self: hash(self.epd(en_passant='xfen'))
# Create a cache key for the requested board and move (keyed based on the move that would result from that request)
def make_cache_key(board: chess.Board, move: chess.Move = chess.Move.null(), prev_turn_score: int = None):
move = simulate_move(board, move) or chess.Move.null()
return (board.epd(en_passant="xfen") + ' ' + move.uci() + ' ' +
(str(prev_turn_score) if prev_turn_score is not None else '-'))
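# Illustration (added comment): for the standard starting position and the move e2e4 with no
# prev_turn_score, the key looks something like
#   "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - e2e4 -"
# i.e. the board EPD, the move that would actually be taken, and '-' for the missing score.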
# Each "worker" runs a StockFish engine and waits for requested scores
def worker(request_queue, response_queue, score_config, num_threads):
logger = create_sub_logger('stockfish_queue_worker')
engine = stockfish.create_engine()
if num_threads:
engine.configure({'Threads': num_threads})
while True:
if not request_queue.empty():
board, move, prev_turn_score = request_queue.get()
try:
score = calculate_score(board=board, move=move, prev_turn_score=prev_turn_score or 0,
engine=engine, score_config=score_config)
except chess.engine.EngineTerminatedError:
logger.error('Stockfish engine died while analysing (%s).',
make_cache_key(board, move, prev_turn_score))
# If the analysis crashes the engine, something went really wrong. This tends to happen when the
# board is not valid, but that is meant to be filtered in calculate_score. Just in case, do not
# re-query the engine, instead assign the move a conservative score (here: as though into check).
response_queue.put({make_cache_key(board, move, prev_turn_score): score_config.into_check_score})
engine = stockfish.create_engine()
if num_threads:
engine.configure({'Threads': num_threads})
else:
response_queue.put({make_cache_key(board, move, prev_turn_score): score})
else:
sleep(0.001)
def create_strategy(
move_config: MoveConfig = MoveConfig(),
score_config: ScoreConfig = ScoreConfig(),
time_config: TimeConfig = TimeConfig(),
board_weight_90th_percentile: float = 5000,
boards_per_centipawn: int = 30,
num_workers: int = 2,
num_threads: int = None,
checkmate_sense_override: bool = True,
while_we_wait_extension: bool = True,
load_cache_data: bool = False,
rc_disable_pbar: bool = False
)\
-> Tuple[Callable[[Set[str], bool, List[Square], List[chess.Move], float], Square],
Callable[[Set[str], bool, List[chess.Move], float], chess.Move],
Callable[[Set[str], bool], None],
Callable[[None], None]]:
"""
Constructs callable functions corresponding to input configurations for parallelized decision-impact based sensing
and compound score based moving decisions.
Before sensing, all possible moves are scored on each board by Stockfish with a set of heuristics for evaluating
board states unique to RBC, then each move is ranked based on a weighted-average score and on best- and worst-case
scores among possible boards. Move scores are computed for sub-sets of boards corresponding to each possible sense
result, and sensing choices are made to maximize the expected change in move scores before and after the sense
result. Move scores are re-computed based on the observed sense result and the highest-scored move is made.
Additionally, both sense and move strategies have small score incentives to reduce the set of possible boards. When
time does not allow all possible boards to be evaluated, a random sample is taken.
:param move_config: A dataclass of parameters which determine the move strategy's compound score
:param score_config: A dataclass of parameters which determine the centi-pawn score assigned to a board's strength
:param time_config: A dataclass of parameters which determine how time is allocated between turns
:param board_weight_90th_percentile: The centi-pawn score associated with a 0.9 weight in the board set
:param boards_per_centipawn: The scaling factor for combining decision-impact and set-reduction sensing
:param num_workers: The number of StockFish engines to create for scoring moves
:param num_threads: The number of threads for StockFish engine configuration (config skipped if None)
:param checkmate_sense_override: A bool which toggles the corner-case sensing strategy for winning from checkmate
:param while_we_wait_extension: A bool that toggles the scoring of boards that could be reached two turns ahead
:param load_cache_data: A bool that tells whether to "warm up" the cache from a file of pre-calculated scores
:param rc_disable_pbar: A bool which turns off tqdm progress bars if True
:return: A tuple of callable functions (sense, move, ponder, exit)
"""
logger = create_sub_logger('multiprocessing_strategies')
logger.debug('Creating new instance of multiprocessing strategies.')
# Initialize a list to store calculation time data for dynamic time management
score_calc_times = []
# Estimate calculation time based on data stored so far this game (and a provided starting datum)
def calc_time_per_move() -> float:
n0 = 100
t0 = time_config.calc_time_per_move * n0
total_num = n0 + sum(n for n, t in score_calc_times)
total_time = t0 + sum(t for n, t in score_calc_times)
return total_time / total_num
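    # Worked example (added): with the default 0.001 s prior, one observation of 50 scores taking
    # 0.2 s gives (0.1 + 0.2) / (100 + 50) = 0.002 s per score; the prior damps early noise.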
# Determine how much of the remaining time should be spent on (the rest of) the current turn.
def allocate_time(seconds_left: float, fraction_turn_passed: float = 0):
turns_left = time_config.turns_to_plan_for - fraction_turn_passed # account for previous parts of turn
equal_time_split = seconds_left / turns_left
return max(equal_time_split, time_config.min_time_for_turn)
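    # Worked example (added): with turns_to_plan_for=7 and 140 s remaining at the start of a turn,
    # the turn is allotted 140 / 7 = 20 s, and never less than min_time_for_turn.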
# Convert a board strength score into a probability for use in weighted averages (here using the logistic function)
def weight_board_probability(score):
return 1 / (1 + np.exp(-2 * np.log(3) / board_weight_90th_percentile * score))
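    # Worked values (added): a score of 0 maps to weight 0.5, +board_weight_90th_percentile to 0.9,
    # and -board_weight_90th_percentile to 0.1, since exp(-2*ln(3)) == 1/9.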
# If requested, pre-load the board/move score cache from a file
if load_cache_data:
logger.debug('Loading cached scores from file.')
with open('strangefish/score_cache.json', 'r') as file:
score_data = json.load(file)
score_cache = score_data['cache']
boards_in_cache = set(score_data['boards'])
else:
score_cache = dict()
boards_in_cache = set()
# Create the multiprocessing queues for communication with multiple StockFish engines
request_queue = mp.Queue()
response_queue = mp.Queue()
# Memoized calculation of the score associated with one move on one board
def memo_calc_score(board: chess.Board, move: chess.Move = chess.Move.null(), prev_turn_score: int = None):
key = make_cache_key(board, move, prev_turn_score)
if key in score_cache:
return score_cache[key]
request_queue.put((board, move, prev_turn_score))
return None
# Handler for requested scores. Filters for unique requests, then gets cached or calculated results.
def memo_calc_set(requests):
filtered_requests = {(board, simulate_move(board, move) or chess.Move.null(), prev_turn_score)
for board, move, prev_turn_score in requests}
start = time()
results = {make_cache_key(board, move, prev_turn_score):
memo_calc_score(board, move, prev_turn_score)
for board, move, prev_turn_score in filtered_requests}
num_new = sum(1 for score in results.values() if score is None)
while any(score is None for score in results.values()):
response = response_queue.get()
score_cache.update(response)
results.update(response)
duration = time() - start
if num_new:
score_calc_times.append((num_new, duration))
return results
# Add a new board to the cache (evaluate the board's strength and relative score for every possible move).
def cache_board(board: chess.Board):
board.turn = not board.turn
op_score = memo_calc_set([(board, chess.Move.null(), None)])[make_cache_key(board)]
board.turn = not board.turn
boards_in_cache.add(board.epd(en_passant="xfen"))
memo_calc_set([(board, move, -op_score) for move in generate_moves_without_opponent_pieces(board)])
# Randomly sample from the board set, but also include all of the boards which are already in the cache.
def cache_favored_random_sample(board_set: Set[str], sample_size):
prescored_boards = board_set & boards_in_cache
return list(prescored_boards) + random.sample(board_set - prescored_boards,
min(len(board_set) - len(prescored_boards), sample_size))
# Randomly choose one board from the board set, excluding boards which are already in the cache.
def choose_uncached_board(board_set: Set[str]):
uncached_boards = board_set - boards_in_cache
return random.choice(tuple(uncached_boards)) if uncached_boards else None
# Create and start the requested number of StockFish "worker" processes
workers = [mp.Process(target=worker, args=(request_queue, response_queue, score_config, num_threads)) for _ in range(num_workers)]
for process in workers:
process.start()
def sense_strategy(board_set: Set[str], our_color: bool,
sense_actions: List[Square], moves: List[chess.Move],
seconds_left: float):
"""
Choose a sense square to maximize the expected effect on move scores (to best inform the next move decision).
This strategy randomly samples from the current board set, then weights the likelihood of each board being the
true state by an estimate of the opponent's position's strength. All possible moves are scored on these boards,
and the combinations of scores for each possible sense result (since each sense would validate/invalidate
different boards) are calculated. The sense square is chosen to maximize the change in move scores from before
to after the sense.
Centipawn points are also added per board for an expected board set size reduction by a sense choice. If the
board set size is large enough, this becomes the dominant decision-making influence.
Finally, a corner case is added to pinpoint the opponent's king in cases where we are (nearly) sure that their
king is in check on all possible boards.
"""
# Don't sense if there is nothing to learn from it
if len(board_set) == 1:
return None
# Allocate remaining time and use that to determine the sample_size for this turn
time_for_turn = allocate_time(seconds_left)
time_for_phase = time_for_turn * time_config.time_for_sense
time_per_move = calc_time_per_move()
time_per_board = time_per_move * len(moves)
sample_size = max(num_workers, int(time_for_phase / time_per_board))
logger.debug('In sense phase with %.2f seconds left. Allocating %.2f seconds for this turn and %.2f seconds '
'for this sense step. Estimating %.4f seconds per calc over %d moves is %.4f seconds per '
'board score so we have time for %d boards.',
seconds_left, time_for_turn, time_for_phase, time_per_move,
len(moves), time_per_board, sample_size)
# Initialize some parameters for tracking information about possible sense results
num_occurances = defaultdict(lambda: defaultdict(float))
weighted_probability = defaultdict(lambda: defaultdict(float))
total_weighted_probability = 0
sense_results = defaultdict(lambda: defaultdict(set))
sense_possibilities = defaultdict(set)
king_locations = defaultdict(lambda: defaultdict(set))
# Get a random sampling of boards from the board set
board_sample = cache_favored_random_sample(board_set, sample_size)
# Initialize arrays for board and move data (dictionaries work here, too, but arrays were faster)
board_sample_weights = np.zeros(len(board_sample))
move_scores = np.zeros([len(moves), len(board_sample)])
logger.debug('Sampled %d boards out of %d for sensing.', len(board_sample), len(board_set))
# Get board position strengths before move for all boards in sample (to take advantage of parallel processing)
board_score_reqs = []
for board_epd in board_sample:
board = chess.Board(board_epd)
board.turn = not our_color
board_score_reqs.append((board, chess.Move.null(), None))
board_score_dict = memo_calc_set(board_score_reqs)
for num_board, board_epd in enumerate(tqdm(board_sample, disable=rc_disable_pbar,
desc=f'{chess.COLOR_NAMES[our_color]} '
'Calculating choose_sense scores '
f'{len(moves)} moves in {len(board_set)} boards',
unit='boards')):
board = chess.Board(board_epd)
board.turn = not our_color
op_score = board_score_dict[make_cache_key(board)]
board_sample_weights[num_board] = weight_board_probability(op_score)
total_weighted_probability += board_sample_weights[num_board]
board.turn = our_color
boards_in_cache.add(board.epd(en_passant="xfen")) # Record that this board (and all moves) are in our cache
move_score_dict = memo_calc_set([(board, move, -op_score) for move in moves]) # Score all moves
# Place move scores into array for later logical indexing
for num_move, move in enumerate(moves):
move_scores[num_move, num_board] = move_score_dict[make_cache_key(board, move, -op_score)]
# Gather information about sense results for each square on each board (and king locations)
for square in SEARCH_SPOTS:
sense_result = simulate_sense(board, square)
num_occurances[square][sense_result] += 1
weighted_probability[square][sense_result] += board_sample_weights[num_board]
sense_results[board_epd][square] = sense_result
sense_possibilities[square].add(sense_result)
king_locations[square][sense_result].add(board.king(not our_color))
# Take a different strategy if we are sure they are in checkmate (the usual board weight math fails there)
if checkmate_sense_override and \
all(board_sample_weights == weight_board_probability(score_config.into_check_score)):
logger.debug("All scores indicate checkmate, therefore sensing based on king location.")
num_king_squares = {square: np.mean([len(n) for n in king_locations[square].values()])
for square in SEARCH_SPOTS}
min_num_king_squares = min(num_king_squares.values())
sense_choice = random.choice([square for square, n in num_king_squares.items()
if n == min_num_king_squares])
return sense_choice
# Calculate the mean, min, and max scores for each move across the board set (or at least the random sample)
full_set_mean_scores = (np.average(move_scores, axis=1, weights=board_sample_weights))
full_set_min_scores = (np.min(move_scores, axis=1))
full_set_max_scores = (np.max(move_scores, axis=1))
# Find the expected change in move scores caused by any sense choice
sense_impact = defaultdict(lambda: defaultdict(float))
for square in tqdm(SEARCH_SPOTS, disable=rc_disable_pbar,
desc=f'{chess.COLOR_NAMES[our_color]} Evaluating sense impacts '
f'for {len(board_set)} boards', unit='squares'):
possible_results = sense_possibilities[square]
for sense_result in possible_results:
if len(possible_results) > 1:
subset_index = [i for i, board_epd in enumerate(board_sample)
if sense_result == sense_results[board_epd][square]]
subset_move_scores = move_scores[:, subset_index]
subset_board_weights = board_sample_weights[subset_index]
# Calculate the mean, min, and max scores for each move across the board sub-set
sub_set_mean_scores = (np.average(subset_move_scores, axis=1, weights=subset_board_weights))
sub_set_min_scores = (np.min(subset_move_scores, axis=1))
sub_set_max_scores = (np.max(subset_move_scores, axis=1))
# Subtract the full set scores from the sub-set scores (and take the absolute value)
change_in_mean_scores = np.abs(sub_set_mean_scores - full_set_mean_scores)
change_in_min_scores = np.abs(sub_set_min_scores - full_set_min_scores)
change_in_max_scores = np.abs(sub_set_max_scores - full_set_max_scores)
# Combine the mean, min, and max changes in scores based on the config settings
change_in_compound_score = (
change_in_mean_scores * move_config.mean_score_factor +
change_in_min_scores * move_config.min_score_factor +
change_in_max_scores * move_config.max_score_factor
)
# The impact of this sense result is the resulting average change in absolute value of move scores
sense_impact[square][sense_result] = float(np.mean(change_in_compound_score))
else:
sense_impact[square][sense_result] = 0
# Calculate the expected mean change in centipawn score for each sense square
mean_sense_impact = {
square:
sum([sense_impact[square][sense_result] * weighted_probability[square][sense_result]
for sense_result in sense_possibilities[square]]) / total_weighted_probability
for square in SEARCH_SPOTS
}
# Also calculate the expected board set reduction for each sense square (scale from board sample to full set)
expected_set_reduction = {
square:
len(board_set) *
(1 - (1 / len(board_sample) / total_weighted_probability) *
sum([num_occurances[square][sense_result] * weighted_probability[square][sense_result]
for sense_result in sense_possibilities[square]]))
for square in SEARCH_SPOTS
}
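        # Derivation note (added): the expected number of boards left after sensing a square is
        # the sum over sense results of P(result) * N(result), with P(result) estimated as
        # weighted_probability / total_weighted_probability and N(result) scaled up from the sample
        # as (num_occurances / len(board_sample)) * len(board_set); subtracting that sum from
        # len(board_set) gives the expected reduction computed above.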
# Combine the decision-impact and set-reduction estimates
sense_score = {square:
mean_sense_impact[square] + (expected_set_reduction[square] / boards_per_centipawn)
for square in SEARCH_SPOTS}
max_sense_score = max(sense_score.values())
sense_choice = random.choice([square for square, score in sense_score.items()
if abs(score - max_sense_score) < SCORE_ROUNDOFF])
return sense_choice
def move_strategy(board_set: Set[str], our_color: bool,
moves: List[chess.Move],
seconds_left: float):
"""
Choose the move with the maximum score calculated from a combination of mean, min, and max possibilities.
This strategy randomly samples from the current board set, then weights the likelihood of each board being the
true state by an estimate of the opponent's position's strength. Each move is scored on each board, and the
resulting scores are assessed together by looking at the worst-case score, the average score, and the best-case
score. The relative contributions of these components to the compound score are determined by a config object.
If requested by the config, bonus points are awarded to moves based on the expected number of boards removed
from the possible set by attempting that move. Deterministic move patterns are reduced by randomly choosing a
move that is within a few percent of the maximum score.
"""
# Allocate remaining time and use that to determine the sample_size for this turn
time_for_turn = allocate_time(seconds_left, fraction_turn_passed=1-time_config.time_for_move)
time_for_phase = time_for_turn * time_config.time_for_move
time_per_move = calc_time_per_move()
time_per_board = time_per_move * len(moves)
sample_size = max(1, int(time_for_phase / time_per_board))
logger.debug('In move phase with %.2f seconds left. Allowing up to %.2f seconds for this move step. '
'Estimating %.4f seconds per calc over %d moves is %.4f seconds per '
'board score so we have time for %d boards.',
seconds_left, time_for_turn, time_per_move,
len(moves), time_per_board, sample_size)
move_scores = defaultdict(list)
weighted_sum_move_scores = defaultdict(float)
# Initialize some parameters for tracking information about possible move results
num_occurances = defaultdict(lambda: defaultdict(int))
weighted_probability = defaultdict(lambda: defaultdict(float))
move_possibilities = defaultdict(set)
total_weighted_probability = 0
# Get a random sampling of boards from the board set
board_sample = cache_favored_random_sample(board_set, sample_size)
logger.debug('Sampled %d boards out of %d for moving.', len(board_sample), len(board_set))
# Get board position strengths before move for all boards in sample (to take advantage of parallel processing)
board_score_reqs = []
for board_epd in board_sample:
board = chess.Board(board_epd)
board.turn = not our_color
board_score_reqs.append((board, chess.Move.null(), None))
board_score_dict = memo_calc_set(board_score_reqs)
for board_epd in tqdm(board_sample, disable=rc_disable_pbar,
desc=f'{chess.COLOR_NAMES[our_color]} Calculating choose_move scores '
f'{len(moves)} moves in {len(board_set)} boards', unit='boards'):
board = chess.Board(board_epd)
board.turn = not our_color
op_score = board_score_dict[make_cache_key(board)]
board_weight = weight_board_probability(op_score)
total_weighted_probability += board_weight
board.turn = our_color
boards_in_cache.add(board.epd(en_passant="xfen")) # Record that this board (and all moves) are in our cache
move_score_dict = memo_calc_set([(board, move, -op_score) for move in moves]) # Score all moves
# Gather scores and information about move results for each requested move on each board
for move in moves:
score = move_score_dict[make_cache_key(board, move, -op_score)]
move_scores[move].append(score)
weighted_sum_move_scores[move] += score * board_weight
sim_move = simulate_move(board, move) or chess.Move.null()
move_result = (sim_move, board.is_capture(sim_move))
move_possibilities[move].add(move_result)
num_occurances[move][move_result] += 1
weighted_probability[move][move_result] += board_weight
# Combine the mean, min, and max possible scores based on config settings
compound_score = {move: (
weighted_sum_move_scores[move] / total_weighted_probability * move_config.mean_score_factor +
min(scores) * move_config.min_score_factor +
max(scores) * move_config.max_score_factor
) for (move, scores) in move_scores.items()}
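        # Worked example (added, default MoveConfig): a move with weighted-mean +120, worst-case -300,
        # and best-case +400 scores 0.9*120 + 0.1*(-300) + 0.0*400 = 78 centipawns.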
# Add centipawn points to a move based on an estimate of the board set reduction caused by that move
if move_config.sense_by_move:
compound_score = {move: score + 1 / boards_per_centipawn * len(board_set) *
(1 - (1 / len(board_sample) / total_weighted_probability) *
sum([num_occurances[move][move_result] * weighted_probability[move][move_result]
for move_result in move_possibilities[move]]))
for move, score in compound_score.items()}
# Determine the minimum score a move needs to be considered
highest_score = max(compound_score.values())
threshold_score = highest_score - abs(highest_score) * move_config.threshold_score_factor
# Create a list of all moves which scored above the threshold
move_options = [move for move, score in compound_score.items() if score >= threshold_score]
# Eliminate move options which we know to be illegal (mainly for replay clarity)
move_options = [move for move in move_options
if move in {taken_move for taken_move, _ in move_possibilities[move]}]
# Randomly choose one of the remaining moves
move_choice = random.choice(move_options)
return force_promotion_to_queen(move_choice) if move_config.force_promotion_queen else move_choice
def while_we_wait(board_set: Set[str], our_color: bool):
"""
Calculate scores for moves on next turn's boards. Store to cache for later processing acceleration.
"""
if board_set:
uncached_board_epd = choose_uncached_board(board_set)
# If there are still boards for next turn without scores calculated, calculate move scores for one
if uncached_board_epd:
board = chess.Board(uncached_board_epd)
cache_board(board)
# Otherwise, calculate move scores for a random board that could be reached in two turns
elif while_we_wait_extension:
board = chess.Board(random.choice(tuple(board_set)))
board.push(random.choice(list(generate_rbc_moves(board))))
board.push(random.choice(list(generate_rbc_moves(board))))
if board.king(chess.WHITE) is not None and board.king(chess.BLACK) is not None:
cache_board(board)
else:
sleep(0.001)
def end_game():
"""
Quit the StockFish engine instance(s) associated with this strategy once the game is over.
"""
logger.debug("During this game, averaged %.5f seconds per score using search depth %d and %d workers.",
calc_time_per_move(), score_config.search_depth, num_workers)
# Shut down the StockFish "workers"
        for process in workers:
            process.terminate()
        for process in workers:
            process.join()
# Return the callable functions so they can be used by StrangeFish
return sense_strategy, move_strategy, while_we_wait, end_game
|
rwgardner/reconchess-strangefish
|
strangefish/utilities/modified_rc_connect.py
|
<reponame>rwgardner/reconchess-strangefish<gh_stars>1-10
import multiprocessing
import signal
import sys
import time
import traceback
import yaml
import click
import requests
import chess
from reconchess import Player, RemoteGame, play_turn, ChessJSONDecoder
from reconchess.scripts.rc_connect import RBCServer, ranked_mode, unranked_mode, check_package_version
from strangefish import StrangeFish
from strangefish.strategies import multiprocessing_strategies
from strangefish.strategies.multiprocessing_strategies import MoveConfig, TimeConfig, ScoreConfig
from strangefish.utilities import ignore_one_term
from strangefish.utilities.player_logging import create_main_logger, create_sub_logger
class OurRemoteGame(RemoteGame):
def __init__(self, *args, **kwargs):
self.logger = create_sub_logger('game_communications')
super().__init__(*args, **kwargs)
def is_op_turn(self):
status = self._get('game_status')
return not status['is_over'] and not status['is_my_turn']
def _get(self, endpoint, decoder_cls=ChessJSONDecoder):
self.logger.debug(f"Getting '{endpoint}'")
return super()._get(endpoint, decoder_cls)
def _post(self, endpoint, obj):
self.logger.debug(f"Posting '{endpoint}' -> {obj}")
return super()._post(endpoint, obj)
def our_play_remote_game(server_url, game_id, auth, player: Player):
game = OurRemoteGame(server_url, game_id, auth)
logger = create_sub_logger('game_moderator')
op_name = game.get_opponent_name()
our_color = game.get_player_color()
logger.debug("Setting up remote game %d playing %s against %s.",
game_id, chess.COLOR_NAMES[our_color], op_name)
player.handle_game_start(our_color, game.get_starting_board(), op_name)
game.start()
turn_num = 0
while not game.is_over():
turn_num += 1
logger.info("Playing turn %2d. (%3.0f seconds left.)", turn_num, game.get_seconds_left())
play_turn(game, player, end_turn_last=False)
logger.info(" Done turn %2d.", turn_num)
if hasattr(player, 'while_we_wait') and getattr(player, 'while_we_wait'):
while game.is_op_turn():
player.while_we_wait()
winner_color = game.get_winner_color()
win_reason = game.get_win_reason()
game_history = game.get_game_history()
logger.debug("Ending remote game %d against %s.", game_id, op_name)
player.handle_game_end(winner_color, win_reason, game_history)
return winner_color, win_reason, game_history
def accept_invitation_and_play(server_url, auth, invitation_id, finished):
# make sure this process doesn't react to the first interrupt signal
signal.signal(signal.SIGINT, ignore_one_term)
player = get_player_from_config()
logger = create_sub_logger('invitations')
logger.debug('Accepting invitation %d.', invitation_id)
server = RBCServer(server_url, auth)
game_id = server.accept_invitation(invitation_id)
logger.info('Invitation %d accepted. Playing game %d.', invitation_id, game_id)
try:
our_play_remote_game(server_url, game_id, auth, player)
logger.debug('Finished game %d.', game_id)
except:
logger.error('Fatal error in game %d.', game_id)
traceback.print_exc()
server.error_resign(game_id)
player.handle_game_end(None, None, None)
logger.critical('Game %d closed on account of error.', game_id)
finally:
server.finish_invitation(invitation_id)
finished.value = True
logger.debug('Game %d ended. Invitation %d closed.', game_id, invitation_id)
def listen_for_invitations(server, max_concurrent_games):
logger = create_sub_logger('server_manager')
connected = False
process_by_invitation = {}
finished_by_invitation = {}
while True:
try:
# get unaccepted invitations
invitations = server.get_invitations()
# set max games on server if this is the first successful connection after being disconnected
if not connected:
logger.info('Connected successfully to server!')
connected = True
server.set_max_games(max_concurrent_games)
# filter out finished processes
finished_invitations = []
for invitation in process_by_invitation.keys():
if not process_by_invitation[invitation].is_alive() or finished_by_invitation[invitation].value:
finished_invitations.append(invitation)
for invitation in finished_invitations:
logger.info(f'Terminating process for invitation {invitation}')
process_by_invitation[invitation].terminate()
del process_by_invitation[invitation]
del finished_by_invitation[invitation]
# accept invitations until we have #max_concurrent_games processes alive
for invitation in invitations:
# only accept the invitation if we have room and the invite doesn't have a process already
if invitation not in process_by_invitation:
logger.debug(f'Received invitation {invitation}.')
if len(process_by_invitation) < max_concurrent_games:
# start the process for playing a game
finished = multiprocessing.Value('b', False)
process = multiprocessing.Process(
target=accept_invitation_and_play,
args=(server.server_url, server.session.auth, invitation, finished))
process.start()
# store the process so we can check when it finishes
process_by_invitation[invitation] = process
finished_by_invitation[invitation] = finished
else:
logger.info(f'Not enough game slots to play invitation {invitation}.')
unranked_mode(server)
max_concurrent_games += 1
except requests.RequestException as e:
connected = False
logger.exception('Failed to connect to server')
print(e)
except Exception:
logger.exception("Error in invitation processing: ")
traceback.print_exc()
time.sleep(5)
def get_player_from_config():
create_main_logger(log_to_file=True)
logger = create_sub_logger('config_loading')
file_loaded = False
while not file_loaded:
logger.debug("Loading config.yaml for player settings.")
try:
with open('config.yml') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
player = player_configuration(config)
file_loaded = True
except Exception:
logger.exception("Something went wrong loading config.yaml. Attempting to load backup_config.yaml next.")
try:
with open('backup_config.yml') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
player = player_configuration(config)
file_loaded = True
except Exception:
logger.exception("Also failed to load backup_config.yaml. Repeating loading loop.")
time.sleep(0.1)
return player
def player_configuration(config):
logger = create_sub_logger('config_loading')
strategy_config = config.pop('multiprocessing_strategies')
score_config = ScoreConfig(**strategy_config.pop('score_config'))
move_config = MoveConfig(**strategy_config.pop('move_config'))
time_config = TimeConfig(**strategy_config.pop('time_config'))
strategy = multiprocessing_strategies.create_strategy(
**strategy_config,
score_config=score_config,
move_config=move_config,
time_config=time_config,
)
strangefish = StrangeFish(*strategy, **config)
logger.debug(
"Created a StrangeFish player using multiprocessing_strategies with the following configuration: "
f"score_config = {score_config}, "
f"move_config = {move_config}, "
f"time_config = {time_config}, "
f"other strategy arguments = {strategy_config}, "
f"other player arguments = {config}, "
)
return strangefish
@click.command()
@click.argument('username')
@click.argument('password')
@click.option('--server-url', 'server_url', default='https://rbc.jhuapl.edu', help='URL of the server.')
@click.option('--max-concurrent-games', 'max_concurrent_games', type=int, default=1, help='Maximum games to play at once.')
@click.option('--ranked', 'ranked', type=bool, default=False, help='Play for leaderboard ELO.')
@click.option('--keep-version', 'keep_version', type=bool, default=True, help='Keep existing leaderboard version num.')
def main(username, password, server_url, max_concurrent_games, ranked, keep_version):
create_main_logger(log_to_file=True)
logger = create_sub_logger('modified_rc_connect')
auth = username, password
server = RBCServer(server_url, auth)
# verify we have the correct version of reconchess package
check_package_version(server)
def handle_term(signum, frame):
# reset to default response to interrupt signals
signal.signal(signal.SIGINT, signal.SIG_DFL)
logger.warning('Received terminate signal, waiting for games to finish and then exiting.')
unranked_mode(server)
sys.exit(0)
signal.signal(signal.SIGINT, handle_term)
# tell the server whether we want to do ranked matches or not
if ranked:
ranked_mode(server, keep_version)
else:
unranked_mode(server)
listen_for_invitations(server, max_concurrent_games)
if __name__ == '__main__':
main()
|
rwgardner/reconchess-strangefish
|
strangefish/strategies/auxiliary_strategies.py
|
import random
def _choose_randomly(_, __, choices, *args, **kwargs):
return random.choice(choices)
def _do_nothing(*args, **kwargs):
pass
def random_strategy():
choose_sense = _choose_randomly
choose_move = _choose_randomly
while_we_wait = _do_nothing
end_game = _do_nothing
return choose_sense, choose_move, while_we_wait, end_game
def idle_strategy():
choose_sense = _do_nothing
choose_move = _do_nothing
while_we_wait = _do_nothing
end_game = _do_nothing
return choose_sense, choose_move, while_we_wait, end_game
def contingency_strategy():
choose_sense = _do_nothing
choose_move = _choose_randomly
return choose_sense, choose_move
|
rwgardner/reconchess-strangefish
|
strangefish/utilities/player_logging.py
|
<filename>strangefish/utilities/player_logging.py<gh_stars>1-10
import logging
import sys
import os
TAGS = [
'\N{four leaf clover}',
'\N{skull}',
'\N{bacon}',
'\N{spouting whale}',
'\N{fire}',
'\N{eagle}',
'\N{smiling face with sunglasses}',
'\N{beer mug}',
'\N{rocket}',
'\N{snake}',
'\N{butterfly}',
'\N{jack-o-lantern}',
'\N{white medium star}',
'\N{hot beverage}',
'\N{earth globe americas}',
'\N{red apple}',
'\N{robot face}',
'\N{sunflower}',
'\N{doughnut}',
'\N{crab}',
'\N{soccer ball}',
'\N{hibiscus}',
]
concise_format = '%(process)-5d %(asctime)8s {} %(message)s'
verbose_format = logging.Formatter('%(name)s - %(process)d - %(asctime)s - '
'%(levelname)s from %(module)s.%(funcName)s, line %(lineno)d: '
'"%(message)s"')
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.INFO)
LOG_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'gameLogs')
if not os.path.exists(LOG_DIR):
os.mkdir(LOG_DIR)
def create_main_logger(log_to_file: bool = False) -> logging.Logger:
logger = logging.getLogger(name=f"strangefish.{os.getpid()}")
if not logger.handlers:
logger.setLevel(logging.DEBUG)
stdout_handler.setFormatter(logging.Formatter(concise_format.format(TAGS[os.getpid() % len(TAGS)]), "%I:%M:%S"))
logger.addHandler(stdout_handler)
if log_to_file and len(logger.handlers) < 2:
file_handler = logging.FileHandler(os.path.join(LOG_DIR, f'StrangeFish.log'))
file_handler.setFormatter(verbose_format)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
return logger
def create_sub_logger(name: str) -> logging.Logger:
logger = logging.getLogger(name=f"strangefish.{os.getpid()}.{name}")
logger.setLevel(logging.DEBUG)
if not logger.handlers:
logger.addHandler(logging.NullHandler())
return logger
|
rwgardner/reconchess-strangefish
|
strangefish/utilities/rbc_move_score.py
|
<reponame>rwgardner/reconchess-strangefish<filename>strangefish/utilities/rbc_move_score.py<gh_stars>1-10
from dataclasses import dataclass
import chess.engine
from reconchess import is_psuedo_legal_castle
from reconchess.utilities import capture_square_of_move
from strangefish.utilities import simulate_move
@dataclass
class ScoreConfig:
capture_king_score: float = 50_000 # bonus points for a winning move
checkmate_score: int = 30_000 # point value of checkmate
into_check_score: float = -40_000 # point penalty for moving into check
search_depth: int = 8 # Stockfish engine search ply
reward_attacker: float = 300 # Bonus points if move sets up attack on enemy king
require_sneak: bool = True # Only reward bonus points to aggressive moves if they are sneaky (aren't captures)
def calculate_score(engine: chess.engine.SimpleEngine,
board, move=chess.Move.null(),
prev_turn_score=0,
score_config: ScoreConfig = ScoreConfig()):
pov = board.turn
if move != chess.Move.null() and not is_psuedo_legal_castle(board, move):
if not board.is_pseudo_legal(move):
# check for sliding move alternate results, and score accordingly
revised_move = simulate_move(board, move)
if revised_move is not None:
return calculate_score(engine, board, revised_move, prev_turn_score, score_config)
return calculate_score(engine, board, chess.Move.null(), prev_turn_score, score_config)
if board.is_capture(move):
        if board.piece_at(capture_square_of_move(board, move)).piece_type == chess.KING:
return score_config.capture_king_score
next_board = board.copy()
next_board.push(move)
next_board.clear_stack()
if next_board.was_into_check():
return score_config.into_check_score
engine_result = engine.analyse(next_board, chess.engine.Limit(depth=score_config.search_depth))
score = engine_result['score'].pov(pov).score(mate_score=score_config.checkmate_score)
# Add bonus board position score if king is attacked
    king_attackers = next_board.attackers(pov, next_board.king(not pov)) # squares of our pieces that attack the enemy king
if king_attackers: # if there are any such pieces...
if not score_config.require_sneak: # and we don't require the attackers to be sneaky
score += score_config.reward_attacker # add the bonus points
# or if we do require the attackers to be sneaky, either the last move was not a capture (which would give away
# our position) or there are now attackers other than the piece that moves (discovered check)
elif not next_board.is_capture(move) or any([square != move.to_square for square in king_attackers]):
score += score_config.reward_attacker # add the bonus points
score -= prev_turn_score
return score
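# Usage sketch (added, illustrative only):
#   from strangefish.utilities import stockfish
#   engine = stockfish.create_engine()
#   score = calculate_score(engine, chess.Board(), chess.Move.from_uci("e2e4"))
# Scores are from the point of view of the side to move on `board`; prev_turn_score lets callers
# express a move's value relative to the position before it was played.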
|
rwgardner/reconchess-strangefish
|
strangefish/utilities/timing.py
|
from time import time
class Timer:
def __init__(self, log_func, message: str):
self.log_func = log_func
self.message = message
self.start = None
def __enter__(self):
self.start = time()
# self.log_func('Starting ' + self.message)
def __exit__(self, exc_type, exc_val, exc_tb):
duration = time() - self.start
self.log_func('Finished ' + self.message + f' in {duration:,.4g} seconds.')
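# Illustrative usage sketch (not part of the original module): any callable that
# accepts a string (print, a logger method, ...) works as the log function.
if __name__ == '__main__':
    from time import sleep
    with Timer(print, 'a short nap'):
        sleep(0.1)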
|
rwgardner/reconchess-strangefish
|
strangefish/__init__.py
|
<filename>strangefish/__init__.py
from .strangefish import StrangeFish
|
rwgardner/reconchess-strangefish
|
strangefish/utilities/stockfish.py
|
<filename>strangefish/utilities/stockfish.py
import os
import chess.engine
# make sure stockfish environment variable exists
if "STOCKFISH_EXECUTABLE" not in os.environ:
raise KeyError('This bot requires an environment variable called "STOCKFISH_EXECUTABLE"'
' pointing to the Stockfish executable')
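# Note (illustrative, not part of the original module): the variable has to be
# set before this module is imported, e.g. in the shell (hypothetical path):
#   export STOCKFISH_EXECUTABLE=/path/to/stockfish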
# make sure there is actually a file
STOCKFISH_EXECUTABLE = os.getenv('STOCKFISH_EXECUTABLE')
if not os.path.exists(STOCKFISH_EXECUTABLE):
raise ValueError('No stockfish executable found at "{}"'.format(STOCKFISH_EXECUTABLE))
def create_engine():
engine = chess.engine.SimpleEngine.popen_uci(STOCKFISH_EXECUTABLE)
# engine.configure({'Threads': os.cpu_count()})
return engine
|
rwgardner/reconchess-strangefish
|
scripts/example.py
|
<filename>scripts/example.py
import traceback
from datetime import datetime
import chess
from reconchess import LocalGame, play_local_game
from reconchess.bots.trout_bot import TroutBot
from strangefish import StrangeFish
from strangefish.strategies import multiprocessing_strategies
def main():
white_bot_name, black_bot_name = 'TroutBot', 'StrangeFish'
game = LocalGame()
try:
winner_color, win_reason, history = play_local_game(
TroutBot(),
StrangeFish(*multiprocessing_strategies.create_strategy()),
game=game
)
winner = 'Draw' if winner_color is None else chess.COLOR_NAMES[winner_color]
except:
traceback.print_exc()
game.end()
winner = 'ERROR'
history = game.get_game_history()
print('Game Over!')
print('Winner: {}!'.format(winner))
timestamp = datetime.now().strftime('%Y_%m_%d-%H_%M_%S')
replay_path = '{}-{}-{}-{}.json'.format(white_bot_name, black_bot_name, winner, timestamp)
print('Saving replay to {}...'.format(replay_path))
history.save(replay_path)
if __name__ == '__main__':
main()
|
rwgardner/reconchess-strangefish
|
strangefish/utilities/__init__.py
|
from functools import partial
from typing import Iterable, Optional, Set
from collections import defaultdict
from tqdm import tqdm
import signal
import chess
from reconchess.utilities import without_opponent_pieces, is_illegal_castle, is_psuedo_legal_castle, slide_move, \
moves_without_opponent_pieces, pawn_capture_moves_on, capture_square_of_move
# These are the possible squares to search–all squares that aren't on the edge of the board.
SEARCH_SPOTS = [
9, 10, 11, 12, 13, 14,
17, 18, 19, 20, 21, 22,
25, 26, 27, 28, 29, 30,
33, 34, 35, 36, 37, 38,
41, 42, 43, 44, 45, 46,
49, 50, 51, 52, 53, 54,
]
SEARCH_OFFSETS = [-9, -8, -7, -1, 0, 1, 7, 8, 9]
# Generate all RBC-legal moves for a board
def generate_rbc_moves(board: chess.Board) -> Iterable[chess.Move]:
for move in board.pseudo_legal_moves:
yield move
for move in without_opponent_pieces(board).generate_castling_moves():
if not is_illegal_castle(board, move):
yield move
yield chess.Move.null()
# Generate all possible moves from just our own pieces
def generate_moves_without_opponent_pieces(board: chess.Board) -> Iterable[chess.Move]:
for move in moves_without_opponent_pieces(board):
yield move
for move in pawn_capture_moves_on(board):
yield move
yield chess.Move.null()
# Produce a sense result from a hypothetical true board and a sense square
def simulate_sense(board, square): # copied (with modifications) from LocalGame
if square is None:
# don't sense anything
sense_result = []
else:
if square not in list(chess.SQUARES):
raise ValueError('LocalGame::sense({}): {} is not a valid square.'.format(square, square))
rank, file = chess.square_rank(square), chess.square_file(square)
sense_result = []
for delta_rank in [1, 0, -1]:
for delta_file in [-1, 0, 1]:
if 0 <= rank + delta_rank <= 7 and 0 <= file + delta_file <= 7:
sense_square = chess.square(file + delta_file, rank + delta_rank)
sense_result.append((sense_square, board.piece_at(sense_square)))
return tuple(sense_result)
# test an attempted move on a board to see what move is actually taken
def simulate_move(board, move):
if move == chess.Move.null():
return None
# if it's a legal move, don't change it at all (generate_pseudo_legal_moves does not include pseudo-legal castles)
if move in board.generate_pseudo_legal_moves() or is_psuedo_legal_castle(board, move):
return move
if is_illegal_castle(board, move):
return None
# if the piece is a sliding piece, slide it as far as it can go
piece = board.piece_at(move.from_square)
if piece.piece_type in [chess.PAWN, chess.ROOK, chess.BISHOP, chess.QUEEN]:
move = slide_move(board, move)
return move if move in board.generate_pseudo_legal_moves() else None
# check if a taken move would have happened on a board
def validate_move_on_board(epd, requested_move: Optional[chess.Move], taken_move: Optional[chess.Move],
captured_opponent_piece: bool, capture_square: Optional[chess.Square]) -> bool:
board = chess.Board(epd)
# if the taken move was a capture...
if captured_opponent_piece:
# the board is invalid if the capture would not have happened
if not board.is_capture(taken_move):
return False
# the board is invalid if the captured piece would have been the king
# (wrong if it really was the king, but then the game is over)
captured_piece = board.piece_at(capture_square)
if captured_piece and captured_piece.piece_type == chess.KING:
return False
# if the taken move was not a capture...
elif taken_move != chess.Move.null():
# the board is invalid if a capture would have happened
if board.is_capture(taken_move):
return False
# invalid if the requested move would have not resulted in the taken move
if (simulate_move(board, requested_move) or chess.Move.null()) != taken_move:
return False
# otherwise the board is still valid
return True
# Expand one turn's boards into next turn's set by all possible moves. Store as dictionary keyed by capture square.
def populate_next_board_set(board_set: Set[str], my_color, pool=None, rc_disable_pbar: bool = False):
next_turn_boards = defaultdict(set)
iter_boards = tqdm(board_set, disable=rc_disable_pbar, unit='boards',
desc=f'{chess.COLOR_NAMES[my_color]} Expanding {len(board_set)} boards into new set')
all_pairs = (pool.imap_unordered(partial(get_next_boards_and_capture_squares, my_color), iter_boards)
if pool else map(partial(get_next_boards_and_capture_squares, my_color), iter_boards))
for pairs in all_pairs:
for capture_square, next_epd in pairs:
next_turn_boards[capture_square].add(next_epd)
return next_turn_boards
# Check if a board could have produced these sense results
def board_matches_sense(board_epd, sense_result):
board = chess.Board(board_epd)
for square, piece in sense_result:
if board.piece_at(square) != piece:
return None
return board_epd
# Check if a requested move - taken move pair would have been produced on this board
def move_would_happen_on_board(requested_move, taken_move, captured_opponent_piece, capture_square, board_epd):
if validate_move_on_board(board_epd, requested_move, taken_move, captured_opponent_piece, capture_square):
return push_move_to_epd(board_epd, taken_move)
# Change an EPD string to reflect a move
def push_move_to_epd(epd, move):
board = chess.Board(epd)
board.push(move)
return board.epd(en_passant='xfen')
# Generate tuples of next turn's boards and capture squares for one current board
def get_next_boards_and_capture_squares(my_color, board_epd):
board = chess.Board(board_epd)
# Calculate all possible opponent moves from this board state
board.turn = not my_color
pairs = []
for move in generate_rbc_moves(board):
next_board = board.copy()
next_board.push(move)
capture_square = capture_square_of_move(board, move)
next_epd = next_board.epd(en_passant='xfen')
pairs.append((capture_square, next_epd))
return pairs
# Change any promotion moves to choose queen
def force_promotion_to_queen(move: chess.Move):
return move if len(move.uci()) == 4 else chess.Move.from_uci(move.uci()[:4] + 'q')
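# For illustration (not part of the original module):
#   force_promotion_to_queen(chess.Move.from_uci('a7a8'))  -> a7a8 unchanged (no promotion given)
#   force_promotion_to_queen(chess.Move.from_uci('a7a8n')) -> a7a8q (promotion forced to queen)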
def ignore_one_term(signum, frame): # Let a sub-process survive the first ctrl-c call for graceful game exiting
# reset to default response to interrupt signals
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
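# Illustrative usage sketch (not part of the original module): exercise the pure
# board helpers on the opening position. Only python-chess is needed, so this
# runs without Stockfish or an RBC server.
if __name__ == '__main__':
    demo_board = chess.Board()
    print(sum(1 for _ in generate_rbc_moves(demo_board)), 'RBC moves from the start position')
    # Sensing square e4 returns the 3x3 window around it (all empty squares here).
    print(simulate_sense(demo_board, chess.E4))
    # The rook on a1 is blocked by its own pawn, so the requested slide is rejected.
    print(simulate_move(demo_board, chess.Move.from_uci('a1a3')))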
|
rwgardner/reconchess-strangefish
|
strangefish/defaults.py
|
<gh_stars>1-10
import random
def _choose_randomly(_, __, choices, *args, **kwargs):
return random.choice(choices)
def _do_nothing(*args, **kwargs):
pass
choose_move = _choose_randomly
choose_sense = _choose_randomly
while_we_wait = _do_nothing
end_game = _do_nothing
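# Note (illustrative, not part of the original module): these callables appear to
# be drop-in defaults for the strategy functions passed to StrangeFish (compare
# scripts/example.py, which builds them via multiprocessing_strategies instead).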
|
rwgardner/reconchess-strangefish
|
scripts/cache_warmup.py
|
<gh_stars>1-10
import json
from typing import Set
from tqdm import tqdm
import chess.engine
from reconchess.utilities import moves_without_opponent_pieces, pawn_capture_moves_on
from strangefish.utilities import stockfish, populate_next_board_set, simulate_move
from strangefish.utilities.rbc_move_score import calculate_score, ScoreConfig
score_config: ScoreConfig = ScoreConfig(search_depth=16)
engine = stockfish.create_engine()
score_cache = dict()
boards_in_cache = set()
# Create a cache key for the requested board and move (keyed based on the move that would result from that request)
def make_cache_key(board: chess.Board, move: chess.Move = chess.Move.null(), prev_turn_score: int = None):
move = simulate_move(board, move) or chess.Move.null()
return (board.epd(en_passant="xfen") + ' ' + move.uci() + ' ' +
(str(prev_turn_score) if prev_turn_score is not None else '-'))
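# For illustration (not part of the original script): on the opening position the
# key for a requested e2e4 with no previous-turn score would be
#   'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - e2e4 -'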
# Memoized calculation of the score associated with one move on one board
def memo_calc_score(board: chess.Board, move: chess.Move = chess.Move.null(), prev_turn_score=None):
key = make_cache_key(board, move, prev_turn_score)
if key in score_cache:
return score_cache[key]
result = calculate_score(board=board, move=move, prev_turn_score=prev_turn_score or 0,
score_config=score_config, engine=engine)
score_cache[key] = result
return result
def main():
# Initialize the board set as the opening position
board_set: Set[str] = set()
board_set.add(chess.Board().epd(en_passant="xfen"))
# Find all boards that can be reached in num_half_turns moves
num_half_turns = 1
color_to_play = chess.WHITE
for i in range(num_half_turns):
next_turn_boards = populate_next_board_set(board_set, not color_to_play)
board_set |= set().union(*next_turn_boards.values())
color_to_play = not color_to_play
# Calculate and cache scores for all moves on all boards in the set
for board_epd in tqdm(board_set, desc=f'Scoring moves from {len(board_set)} early-game board states', unit='boards'):
board = chess.Board(board_epd)
if board.king(chess.WHITE) and board.king(chess.BLACK):
board.turn = not board.turn
score = memo_calc_score(board=board)
board.turn = not board.turn
boards_in_cache.add(board.epd(en_passant="xfen"))
for move in moves_without_opponent_pieces(board) + pawn_capture_moves_on(board) + [chess.Move.null()]:
memo_calc_score(board=board, move=move, prev_turn_score=-score)
# Store the cache as a json file
with open('strangefish/score_cache.json', 'w') as file:
json.dump({
'cache': score_cache,
'boards': list(boards_in_cache)
}, file)
# Shut down Stockfish
try:
engine.quit()
except chess.engine.EngineTerminatedError:
pass
if __name__ == '__main__':
main()
|
moshe742/weight
|
weight/db/queries.py
|
<gh_stars>1-10
from datetime import datetime
from sqlalchemy import desc
from weight.db import SessionScope
from weight.db.models import WeightData
def get_weight_data(session, id):
return session.query(WeightData).filter_by(id=id).first()
def get_weights_data(session):
return session.query(WeightData).order_by(desc(WeightData.date)).all()
def add_weight_data(data, session):
date = datetime.now()
if data['date']:
date = data['date']
weight_data = WeightData(date=date,
old_weight=data['old_weight'],
new_weight=data['new_weight'])
session.add(weight_data)
return weight_data
def edit_weight_data(data, session, id):
weight_to_edit = session.query(WeightData).filter_by(id=id).first()
for key, val in data.items():
setattr(weight_to_edit, key, val)
return weight_to_edit
def delete_weight_data(data, session):
weight_to_delete = session.query(WeightData).filter_by(id=data['id']).first()
session.delete(weight_to_delete)
return weight_to_delete
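# Illustrative usage sketch (not part of the original module). It assumes the
# SessionScope context manager from weight.db and a reachable database; the
# numbers below are made up.
#
#   from weight.db import SessionScope
#
#   with SessionScope() as session:
#       add_weight_data({'date': None, 'old_weight': 80.5, 'new_weight': 80.1}, session)
#       for row in get_weights_data(session):
#           print(row)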
|
moshe742/weight
|
weight/home.py
|
<reponame>moshe742/weight
import logging
from datetime import date
from flask import request, render_template, jsonify, redirect, url_for
from weight import app
from weight.db import SessionScope
from weight.forms import WeightForm
from weight.db.queries import (
add_weight_data,
edit_weight_data,
delete_weight_data,
get_weights_data,
get_weight_data,
)
logging.basicConfig(filename='weight.log', level=logging.DEBUG)
logger = logging.getLogger(__name__)
@app.route('/', methods=['GET'])
def get_weight():
logger.info('start get weight')
with SessionScope() as session:
weights = get_weights_data(session)
dates = []
weight_data_old = []
weight_data_new = []
for item in weights:
dates.append(item.date.strftime('%Y-%m-%d'))
weight_data_old.append(item.old_weight)
weight_data_new.append(item.new_weight)
logger.info('return render template')
return render_template('weight/index.html', weights=weights, weight_data_old=weight_data_old,
dates=dates, weight_data_new=weight_data_new)
@app.route('/get_weight_data')
def get_data_weight():
logger.info('start get weight data')
with SessionScope() as session:
weights = get_weights_data(session)
dates = []
weight_data_old = []
weight_data_new = []
for item in weights:
dates.append(item.date.isoformat())
weight_data_old.append(item.old_weight)
weight_data_new.append(item.new_weight)
weight_data_old = {
'x': dates,
'y': weight_data_old,
'mode': 'lines+markers',
'name': 'old weight',
}
weight_data_new = {
'x': dates,
'y': weight_data_new,
'mode': 'lines+markers',
'name': 'new weight',
}
logger.info('return jsonify')
return jsonify({
'old_weight': weight_data_old,
'new_weight': weight_data_new,
})
@app.route('/add_weight', methods=['GET', 'POST'])
def add_weight():
logger.info('start add weight')
form = WeightForm()
if form.validate_on_submit():
with SessionScope() as session:
add_weight_data(request.form, session)
return redirect(url_for('get_weight', _external=True))
else:
logger.info('in else')
form.date.data = date.today()
logger.info('return render template')
return render_template('weight/add_weight.html', form=form, change='add')
@app.route('/edit_weight/<id>', methods=['GET', 'POST'])
def edit_weight(id):
form = WeightForm()
if form.validate_on_submit():
with SessionScope() as session:
edit_weight_data(request.form, session, id)
return redirect(url_for('get_weight', _external=True))
else:
with SessionScope() as session:
weight_data = get_weight_data(session, id)
form.date.data = weight_data.date
form.old_weight.data = weight_data.old_weight
form.new_weight.data = weight_data.new_weight
return render_template('weight/add_weight.html', form=form, change='edit')
|
moshe742/weight
|
weight/db/models.py
|
<filename>weight/db/models.py
from weight.db import Base
from sqlalchemy import Column, Integer, Date, Float
class WeightData(Base):
__tablename__ = 'weight_data'
id = Column(Integer, primary_key=True)
date = Column(Date)
old_weight = Column(Float)
new_weight = Column(Float, nullable=True)
def __repr__(self):
return f"<WeightData(id={self.id}, date={self.date}, " \
f"old weight={self.old_weight})>"
def __str__(self):
return f"date: {self.date}, old weight: {self.old_weight}"
|
moshe742/weight
|
weight/db/__init__.py
|
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from sqlalchemy.ext.declarative import declarative_base
class SessionScope:
def __init__(self):
pass
def __enter__(self):
self.session = Session(engine)
return self.session
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.session.commit()
except:
self.session.rollback()
finally:
self.session.close()
engine = create_engine("postgresql://moshe:He1!oworld@localhost/weight", echo=True)
Base = declarative_base()
|
moshe742/weight
|
weight/forms.py
|
from flask_wtf import FlaskForm
from wtforms import DateField, FloatField, SubmitField
from wtforms.validators import DataRequired
class WeightForm(FlaskForm):
date = DateField('date', validators=[DataRequired()])
old_weight = FloatField('old weight')
new_weight = FloatField('new weight')
submit = SubmitField('add weight')
|
moshe742/weight
|
weight/db/create_db.py
|
<gh_stars>1-10
import os
from weight.db import Base, engine
from weight.db.models import WeightData
Base.metadata.create_all(engine)
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
# from alembic.config import Config
# from alembic import command
# alembic_cfg = Config(f"{BASE_PATH}/alembic.ini")
# command.stamp(alembic_cfg, "head")
|
DivinaThomas/dropBoxReplica_CloudComputing
|
sharefile.py
|
import webapp2
import jinja2
from google.appengine.api import users
from google.appengine.ext import ndb
import os
from user import User
from directory import Directory
import random
JINJA_ENVIRONMENT = jinja2.Environment(
loader = jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions = ['jinja2.ext.autoescape'],
autoescape = True
)
class ShareFile(webapp2.RequestHandler):
def post(self):
self.response.headers['Content-Type'] = 'text/html'
if self.request.get('button') == 'Share':
directory_id = self.request.get('directory_id')
share_file_name = self.request.get('share_file_name')
index = self.request.get('index')
owner_email_id = self.request.get('owner_email_id')
user = users.get_current_user()
logout = users.create_logout_url('/')
template_values = {
'share_file_name' : share_file_name,
'directory_id': directory_id,
'index' : index,
'owner_email_id' : owner_email_id,
'user': user,
'logout': logout
# 'upload_url': blobstore.create_upload_url('/uploadfilehandler'),
}
template = JINJA_ENVIRONMENT.get_template('sharefile.html')
self.response.write(template.render(template_values))
if self.request.get('button') == 'Check email_id' :
directory_id = self.request.get('directory_id')
share_file_name = self.request.get('share_file_name')
index = int(self.request.get('index'))
email_id = self.request.get('email_id')
directory_key = ndb.Key(Directory,directory_id)
directory = directory_key.get()
blob_key = directory.blobs[index]
owner_email_id = self.request.get('owner_email_id')
user_counter = 0
id = email_id + '/'
shared_user_key = ndb.Key(Directory,id)
shared_user = shared_user_key.get()
user = users.get_current_user()
logout = users.create_logout_url('/')
if shared_user == None :
error_message = 'Sorry, a user with this email id does not exist. Please check the email id'
template_values = {
'error_message' : error_message,
'user' : user,
'logout' : logout
}
template = JINJA_ENVIRONMENT.get_template('error.html')
self.response.write(template.render(template_values))
#all_directories = Directory.query.fetch()
#for each_directory in all_directories :
# if each_directory.id == key :
# user_counter = user_counter + 1
# break
else :
shared_user.shared_files.append(share_file_name)
shared_user.shared_files_blobs.append(blob_key)
shared_user.shared_file_owner.append(owner_email_id)
shared_user.put()
self.redirect('/main')
#if user_counter > 0 :
|
DivinaThomas/dropBoxReplica_CloudComputing
|
main.py
|
<gh_stars>0
import webapp2
import jinja2
from google.appengine.api import users
from google.appengine.ext import ndb
import os
from user import User
from directory import Directory
JINJA_ENVIRONMENT = jinja2.Environment(
loader = jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions = ['jinja2.ext.autoescape'],
autoescape = True
)
class Main(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/html'
directory = ' '
user = users.get_current_user()
logout = users.create_logout_url('/')
directory_key=''
list_directories = []
previous_directory_id = ''
previous_directory_name = ''
list_files = []
SharedFiles = []
SharedFileOwner = []
if user:
url = users.create_logout_url(self.request.uri)
url_string = 'logout'
myuser_key = ndb.Key(User, user.user_id())
myuser = myuser_key.get()
if myuser == None:
welcome = 'Welcome to the application'
myuser = User(id=user.user_id())
myuser.email = user.email()
myuser.put()
id_of_root_directory = user.email()+"/"
directory = Directory(id=id_of_root_directory)
directory.directory_path = '/'
directory.directory_name = '/'
directory.put()
else :
id_of_root_directory = user.email()+"/"
directory_key = ndb.Key(Directory,id_of_root_directory)
directory = directory_key.get()
for each in directory.list_of_directories :
preprocess = each.replace('u','').replace(' ','').replace('\'','')
sub_directory_key = ndb.Key(Directory,preprocess)
sub_directory = sub_directory_key.get()
list_directories.append(sub_directory.directory_name)
for each in directory.list_of_files :
list_files.append(each)
for index,each in enumerate(directory.shared_files):
SharedFiles.append(each)
owner = directory.shared_file_owner[index]
SharedFileOwner.append(owner)
path = directory.directory_path
template_values = {
'id_of_root_directory' : id_of_root_directory,
'list_directories' : list_directories,
'user' : user,
'logout' : logout,
'previous_directory_id' : directory.prev_directory,
'previous_directory_name' : previous_directory_name,
'list_files' : list_files,
'shared_files': SharedFiles,
'shared_file_owner' : SharedFileOwner,
'path' : path
}
template = JINJA_ENVIRONMENT.get_template('main.html')
self.response.write(template.render(template_values))
def post(self):
if self.request.get('button') == '../' :
current_directory_id = self.request.get('directory_id')
current_directory_key = ndb.Key(Directory,current_directory_id)
current_directory = current_directory_key.get()
list_directories = []
list_files = []
for each in current_directory.list_of_directories :
preprocess = each.replace('u','').replace(' ','').replace('\'','')
sub_directory_key = ndb.Key(Directory,preprocess)
sub_directory = sub_directory_key.get()
list_directories.append(sub_directory.directory_name)
for each in current_directory.list_of_files :
list_files.append(each)
SharedFiles = []
SharedFileOwner = []
for index, each in enumerate(current_directory.shared_files):
SharedFiles.append(each)
owner = current_directory.shared_file_owner[index]
SharedFileOwner.append(owner)
path = current_directory.directory_path
logout = users.create_logout_url('/')
user = users.get_current_user()
previous_directory_name = ''
template_values = {
'id_of_root_directory' : current_directory_id,
'check' : current_directory.prev_directory,
'list_directories' : list_directories,
'user' : user,
'logout' : logout,
'previous_directory_id' : current_directory.prev_directory,
'previous_directory_name' : previous_directory_name,
'list_files' : list_files,
'shared_files': SharedFiles,
'shared_file_owner': SharedFileOwner,
'path': path
}
template = JINJA_ENVIRONMENT.get_template('main.html')
self.response.write(template.render(template_values))
else :
self.response.headers['Content-Type'] = 'text/html'
list_directories = []
list_files = []
directory_name = self.request.get('hidden_directory_name')
current_directory_id = self.request.get('directory_id')
current_directory_key = ndb.Key(Directory,current_directory_id)
current_directory = current_directory_key.get()
id_of_clicked_directory = ''
for each in current_directory.list_of_directories :
preprocess = each.replace('u','').replace(' ','').replace('\'','')
sub_directory_key = ndb.Key(Directory,preprocess)
sub_directory = sub_directory_key.get()
if sub_directory.directory_name == directory_name :
id_of_clicked_directory = preprocess
break
key_of_clicked_directory = ndb.Key(Directory,id_of_clicked_directory)
clicked_directory = key_of_clicked_directory.get()
for each in clicked_directory.list_of_directories :
preprocess = each.replace('u','').replace(' ','').replace('\'','')
sub_directory_key = ndb.Key(Directory,preprocess)
sub_directory = sub_directory_key.get()
list_directories.append(sub_directory.directory_name)
for each in clicked_directory.list_of_files :
list_files.append(each)
SharedFiles = []
SharedFileOwner = []
for index, each in enumerate(clicked_directory.shared_files):
SharedFiles.append(each)
owner = clicked_directory.shared_file_owner[index]
SharedFileOwner.append(owner)
path = clicked_directory.directory_path
logout = users.create_logout_url('/')
clicked_directory_id = clicked_directory.key.id() #change the name to id
user = users.get_current_user()
template_values = {
'id_of_root_directory' : clicked_directory_id,
'check' : current_directory.prev_directory,
'list_directories' : list_directories,
'user' : user,
'logout' : logout,
'previous_directory_id' : clicked_directory.prev_directory,
'previous_directory_name' : clicked_directory.prev_directory,
'list_files' : list_files,
'shared_files' : SharedFiles,
'shared_file_owner' : SharedFileOwner,
'path': path
}
template = JINJA_ENVIRONMENT.get_template('main.html')
self.response.write(template.render(template_values))
|
DivinaThomas/dropBoxReplica_CloudComputing
|
restore.py
|
<reponame>DivinaThomas/dropBoxReplica_CloudComputing
from google.appengine.ext import ndb
class Restore(ndb.Model):
user = ndb.StringProperty()
file_name = ndb.StringProperty()
blob_key = ndb.BlobKeyProperty()
|
DivinaThomas/dropBoxReplica_CloudComputing
|
user.py
|
from google.appengine.ext import ndb
from directory import Directory
class User(ndb.Model):
email = ndb.StringProperty()
#directory_info = ndb.StructuredProperty(Directory, repeated=True)
|
DivinaThomas/dropBoxReplica_CloudComputing
|
uploadfilehandler.py
|
from google.appengine.ext import blobstore
from google.appengine.ext import ndb
from google.appengine.ext.webapp import blobstore_handlers
from directory import Directory
import webapp2
import jinja2
import os
from google.appengine.api import users
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True
)
class UploadFileHandler(blobstore_handlers.BlobstoreUploadHandler):
def post(self):
error_message = ''
upload = self.get_uploads()[0]
blobinfo = blobstore.BlobInfo(upload.key())
filename = blobinfo.filename
directory_id = self.request.get('directory_id')
directory_key = ndb.Key(Directory,directory_id)
directory = directory_key.get()
file_exists_counter = 0
for each in directory.list_of_files :
if each == filename :
error_message = 'Sorry, a file with this name already exists'
file_exists_counter = file_exists_counter+1
break
if file_exists_counter == 0 :
directory.list_of_files.append(filename)
directory.blobs.append(upload.key())
directory.put()
self.redirect('/')
else :
user = users.get_current_user()
logout = users.create_logout_url('/')
template_values = {
'user': user,
'logout': logout,
'error_message': error_message,
}
template = JINJA_ENVIRONMENT.get_template('error.html')
self.response.write(template.render(template_values))
|
DivinaThomas/dropBoxReplica_CloudComputing
|
restorefile.py
|
import webapp2
import jinja2
import os
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from directory import Directory
from uploadfilehandler import UploadFileHandler
from directory import Directory
from restore import Restore
from google.appengine.api import users
import random
from downloadfilehandler import DownloadFileHandler
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True
)
class RestoreFile(webapp2.RequestHandler):
def post(self):
self.response.headers['Content-Type'] = 'text/html'
if self.request.get('button') == 'Bin' :
email_id = self.request.get('email_id')
all_files = Restore.query(Restore.user == email_id).fetch()
user = users.get_current_user()
logout = users.create_logout_url('/')
template_values = {
'all_files': all_files,
'email_id': email_id,
'user': user,
'logout': logout
}
template = JINJA_ENVIRONMENT.get_template('restorefile.html')
self.response.write(template.render(template_values))
if self.request.get('button') == 'Restore' :
email_id = self.request.get('email_id')
file_id = self.request.get('file_id')
file_key = ndb.Key(Restore,file_id)
file = file_key.get()
directory_id = email_id +'/'
directory_key = ndb.Key(Directory,directory_id)
directory = directory_key.get()
directory.list_of_files.append(file.file_name)
directory.blobs.append(file.blob_key)
directory.put()
file_key.delete()
self.redirect('/main')
|
DivinaThomas/dropBoxReplica_CloudComputing
|
login.py
|
import webapp2
import jinja2
from google.appengine.api import users
from google.appengine.ext import ndb
import os
from user import User
from main import Main
from adddirectory import AddDirectory
from deletedirectory import DeleteDirectory
from uploadfilehandler import UploadFileHandler
from addfile import AddFile
from downloadfilehandler import DownloadFileHandler
from deletefile import DeleteFile
from sharefile import ShareFile
from restorefile import RestoreFile
from sharedfiles import SharedFiles
JINJA_ENVIRONMENT = jinja2.Environment(
loader = jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions = ['jinja2.ext.autoescape'],
autoescape = True
)
class Login(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/html'
user = users.get_current_user()
if user :
self.redirect('/main')
else :
self.redirect(users.create_login_url(self.request.uri))
app = webapp2.WSGIApplication ([
('/main',Main),
('/',Login),
('/adddirectory',AddDirectory),
('/deletedirectory',DeleteDirectory),
('/uploadfilehandler',UploadFileHandler),
('/addfile',AddFile),
('/downloadfilehandler',DownloadFileHandler),
('/deletefile',DeleteFile),
('/sharefile',ShareFile),
('/restorefile',RestoreFile),
('/sharedfiles',SharedFiles),
], debug=True)
|
DivinaThomas/dropBoxReplica_CloudComputing
|
addfile.py
|
import webapp2
import jinja2
import os
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from google.appengine.api import users
from directory import Directory
from uploadfilehandler import UploadFileHandler
from directory import Directory
import random
from downloadfilehandler import DownloadFileHandler
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True
)
class AddFile(webapp2.RequestHandler):
def post(self):
self.response.headers['Content-Type'] = 'text/html'
directory_id = self.request.get('directory_id')
directory_key = ndb.Key('Directory', directory_id)
directory = directory_key.get()
logout = users.create_logout_url('/')
user = users.get_current_user()
template_values = {
'directory_id' : directory_id,
'user' : user,
'logout' : logout,
'upload_url' : blobstore.create_upload_url('/uploadfilehandler'),
}
template = JINJA_ENVIRONMENT.get_template('addfile.html')
self.response.write(template.render(template_values))
|
DivinaThomas/dropBoxReplica_CloudComputing
|
downloadfilehandler.py
|
import webapp2
import jinja2
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from directory import Directory
class DownloadFileHandler(blobstore_handlers.BlobstoreDownloadHandler):
def post(self):
self.response.headers['Content-Type'] = 'text/html'
if self.request.get('button') == 'Download Shared File':
index = int(self.request.get('index'))
directory_id = self.request.get('directory_id')
directory_key = ndb.Key(Directory, directory_id)
directory = directory_key.get()
file_name = self.request.get('file_name')
self.send_blob(directory.shared_files_blobs[index])
if self.request.get('button') == 'Download':
index = int(self.request.get('index'))
directory_id = self.request.get('directory_id')
directory_key = ndb.Key(Directory, directory_id)
directory = directory_key.get()
file_name = self.request.get('file_name')
directory.put()
self.send_blob(directory.blobs[index])
|
DivinaThomas/dropBoxReplica_CloudComputing
|
sharedfiles.py
|
<gh_stars>0
import webapp2
import jinja2
from google.appengine.api import users
from google.appengine.ext import ndb
import os
from user import User
from directory import Directory
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True
)
class SharedFiles(webapp2.RequestHandler):
def post(self):
email_id =self.request.get('email_id')
SharedFiles = []
SharedFileOwner = []
id = email_id + '/'
key = ndb.Key(Directory, id)
dir = key.get()
for index, each in enumerate(dir.shared_files):
SharedFiles.append(each)
owner = dir.shared_file_owner[index]
SharedFileOwner.append(owner)
user = users.get_current_user()
logout = users.create_logout_url('/')
template_values = {
'id_of_root_directory': id,
'logout': logout,
'user' : user,
'shared_files' : SharedFiles,
'shared_file_owner' : SharedFileOwner
}
template = JINJA_ENVIRONMENT.get_template('sharedfiles.html')
self.response.write(template.render(template_values))
|
DivinaThomas/dropBoxReplica_CloudComputing
|
deletefile.py
|
import webapp2
import jinja2
import os
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from directory import Directory
from google.appengine.api import users
from uploadfilehandler import UploadFileHandler
from directory import Directory
import random
from downloadfilehandler import DownloadFileHandler
from restore import Restore
import random
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True
)
class DeleteFile(webapp2.RequestHandler):
def post(self):
self.response.headers['Content-Type'] = 'text/html'
if self.request.get('button') == 'Delete' :
delete_file_name = self.request.get('delete_file_name')
directory_id = self.request.get('directory_id')
directory_key = ndb.Key(Directory,directory_id)
directory = directory_key.get()
index = int(self.request.get('index'))
blob_key = directory.blobs[index]
del directory.list_of_files[index]
del directory.blobs[index]
directory.put()
random_id = str(random.randint(0, 999999))
random_key = ndb.Key(Directory, random_id)
random_value = random_key.get()
if random_value != None:
while (random_value != None):
random_id = str(random.randint(0, 999999))
random_key = ndb.Key(Directory, random_id)
random_value = random_key.get()
if random_value == None:
new_file = Restore(id=random_id)
new_file.put()
new_file_key = ndb.Key(Restore,random_id)
file = new_file_key.get()
file.file_name = delete_file_name
file.blob_key = blob_key
user = users.get_current_user()
email = user.email()
file.user = email
file.put()
self.redirect('/main')
if self.request.get('button') == 'Delete Permanently':
file_id = self.request.get('file_id')
file_key = ndb.Key(Restore,file_id)
file = file_key.get()
print(file)
blob_key = file.blob_key
file_key.delete()
blobstore.delete(blob_key)
self.redirect('/main')
if self.request.get('button') == 'Delete Shared File' :
directory_id = self.request.get('directory_id')
directory_key = ndb.Key(Directory,directory_id)
directory = directory_key.get()
delete_file_name = self.request.get('delete_file_name')
index = int(self.request.get('index'))
del directory.shared_files[index]
del directory.shared_files_blobs[index]
del directory.shared_file_owner[index]
directory.put()
self.redirect('/main')
#new_directory = Directory(id=unique_id)
#new_directory.directory_name = directory_name
#new_directory.prev_directory = directory_id
#new_path = directory_path + directory_name + '/'
#new_directory.directory_path = new_path
#new_directory.put()
# search = Directory.query(Directory.directory_name == directory_name).fetch()
#directory.list_of_directories.append(random_id)
#directory.put()
|
DivinaThomas/dropBoxReplica_CloudComputing
|
directory.py
|
<reponame>DivinaThomas/dropBoxReplica_CloudComputing
from google.appengine.ext import ndb
class Directory(ndb.Model):
prev_directory = ndb.StringProperty()
directory_name = ndb.StringProperty()
directory_path = ndb.StringProperty()
list_of_directories = ndb.StringProperty(repeated=True)
list_of_files = ndb.StringProperty(repeated=True)
blobs = ndb.BlobKeyProperty(repeated=True)
shared_files = ndb.StringProperty(repeated=True)
shared_files_blobs = ndb.BlobKeyProperty(repeated=True)
shared_file_owner = ndb.StringProperty(repeated=True)
#directory_name = ndb.StringProperty()
#file_info = ndb.StructuredProperty(File, repeated=True)
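# Illustrative note (not part of the original module): main.py creates the root
# entity for each user with id user.email() + '/' and '/' as both directory_name
# and directory_path; adddirectory.py then appends the ids of sub-directories to
# list_of_directories.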
|
DivinaThomas/dropBoxReplica_CloudComputing
|
adddirectory.py
|
<reponame>DivinaThomas/dropBoxReplica_CloudComputing<gh_stars>0
import webapp2
import jinja2
from google.appengine.api import users
from google.appengine.ext import ndb
import os
from user import User
from directory import Directory
import random
JINJA_ENVIRONMENT = jinja2.Environment(
loader = jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions = ['jinja2.ext.autoescape'],
autoescape = True
)
class AddDirectory(webapp2.RequestHandler):
def post(self):
self.response.headers['Content-Type'] = 'text/html'
url = ''
url_string = ''
welcome = 'Welcome back'
directory = ' '
user = users.get_current_user()
logout = users.create_logout_url('/')
if self.request.get('button') == 'Add a Directory' :
directory_id = self.request.get('directory_id')
user = users.get_current_user()
template_values = {
'directory_id' : directory_id,
'user' : user,
'logout' : logout
}
template = JINJA_ENVIRONMENT.get_template('adddirectory.html')
self.response.write(template.render(template_values))
if self.request.get('button') == 'Submit' :
directory_id = self.request.get('directory_id')
directory_key = ndb.Key(Directory,directory_id)
directory = directory_key.get()
directory_path = directory.directory_path
directory_name = self.request.get('unique_directory_name')
checking_unique_directory_name = 0
for each in directory.list_of_directories :
preprocess = each.replace('u','').replace(' ','').replace('\'','')
sub_directory_key = ndb.Key(Directory,preprocess)
sub_directory = sub_directory_key.get()
if sub_directory.directory_name == directory_name :
checking_unique_directory_name = checking_unique_directory_name + 1
break
if checking_unique_directory_name == 0 :
random_id = str(random.randint(0, 999999))
random_key = ndb.Key(Directory,random_id)
random_value = random_key.get()
if random_value != None :
while (random_value!=None) :
random_id = str(random.randint(0, 999999))
random_key = ndb.Key(Directory, random_id)
random_value = random_key.get()
if random_value == None :
new_directory = Directory(id=random_id)
new_directory.directory_name = directory_name
new_directory.prev_directory = directory_id
new_path = directory_path + directory_name + '/'
new_directory.directory_path = new_path
new_directory.put()
#search = Directory.query(Directory.directory_name == directory_name).fetch()
directory.list_of_directories.append(random_id)
directory.put()
self.redirect('/main')
else :
error_message = "Please enter a unique directory name. The directory name entered exists"
user = users.get_current_user()
template_values = {
'directory_id' : directory_id,
'error_message' : error_message,
'user' : user,
'logout' : logout
}
template = JINJA_ENVIRONMENT.get_template('adddirectory.html')
self.response.write(template.render(template_values))
|
DivinaThomas/dropBoxReplica_CloudComputing
|
deletedirectory.py
|
import webapp2
import jinja2
from google.appengine.api import users
from google.appengine.ext import ndb
import os
from user import User
from directory import Directory
import random
JINJA_ENVIRONMENT = jinja2.Environment(
loader = jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions = ['jinja2.ext.autoescape'],
autoescape = True
)
class DeleteDirectory(webapp2.RequestHandler):
def post(self):
self.response.headers['Content-Type'] = 'text/html'
if self.request.get('button') == 'Delete' :
directory_id = self.request.get('directory_id')
directory_key = ndb.Key('Directory',directory_id)
directory = directory_key.get()
# template_values = {
# 'directory_id' : directory,
# }
#
# template = JINJA_ENVIRONMENT.get_template('adddirectory.html')
# self.response.write(template.render(template_values))
delete_directory_name = self.request.get('delete_directory_name')
delete_counter = 0
for index,each in enumerate(directory.list_of_directories) :
preprocessed_id = each.replace('u','').replace(' ','').replace('\'','')
sub_directory_key = ndb.Key(Directory,preprocessed_id)
sub_directory = sub_directory_key.get()
if sub_directory.directory_name == delete_directory_name:
if sub_directory.list_of_files == [] :
if sub_directory.list_of_directories == [] :
sub_directory_key.delete()
del directory.list_of_directories[index]
directory.put()
delete_counter = delete_counter + 1
break
if delete_counter > 0:
self.redirect('/main')
else :
user = users.get_current_user()
logout = users.create_logout_url('/')
error_message = 'This directory has files or directories in it, so it cannot be deleted'
template_values = {
# 'collection' : new_file,
'error_message': error_message,
'user' : user,
'logout' : logout
# 'directory_id' : directory_id,
# 'upload_url': blobstore.create_upload_url('/uploadfilehandler'),
}
template = JINJA_ENVIRONMENT.get_template('error.html')
self.response.write(template.render(template_values))
|
dutitello/parAnsys
|
paransys/__init__.py
|
<gh_stars>1-10
# -*- coding: UTF-8 -*-
#----------------------------------------------------------------------------#
# PARANSYS docs are also available in https://dutitello.github.io/parAnsys/ #
#----------------------------------------------------------------------------#
from paransys.ansys import *
from paransys.montecarlo import *
from paransys.form import *
|
dutitello/parAnsys
|
setup.py
|
<filename>setup.py
import setuptools
try:
import numpy
except:
raise Exception('Please install NumPy first.')
try:
import scipy
except:
raise Exception('Please install SciPy first.')
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="paransys",
version="0.1",
author="<NAME>",
author_email="<EMAIL>",
description="PARANSYS: Python pArametric Reliability Analysis on ANSYS",
url="https://github.com/dutitello/parAnsys",
long_description=long_description,
long_description_content_type="text/markdown",
packages=['paransys'],
python_requires='>=3.6',
license='MIT',
classifiers=[
"Programming Language :: Python :: 3",
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft :: Windows',
],
install_requires=['numpy>=1.18.0',
'scipy>=1.17']
)
|
dutitello/parAnsys
|
examples/AngTang_FORM_67-Implicit.py
|
"""
Running example 6.7 from Ang & Tang 1984 with an Implicit limit state function
"""
import paransys
# Call ParAnsys
form = paransys.FORM()
# Console log On
form.Info(True)
# Create random variables
form.CreateVar('y', 'gauss', 40, cv=0.125)
form.CreateVar('z', 'gauss', 50, cv=0.050)
form.CreateVar('m', 'gauss', 1000, cv=0.200)
# A Python function that will serve as the implicit limit state function
def myFunc(y, z, m):
# Just to show that we can do a lot of things here...
# Some conditional
if m > 1e27:
m = 1e27
# Determine the result
result = y * z - m
# Print the result externally to PARANSYS
print('~~~The LS result is: {}'.format(result))
# Send return
return result
# There are two ways to use implicit functions:
# Setting a Python function in the place of the LS string, as next line:
form.SetLimState(myFunc)
# Or setting the Python function as userf and using it inside LS string:
#form.SetLimState("userf(y=y, z=z, m=m)", userf=myFunc)
# Run
values = form.Run(dh=0.01, meth='iHLRF')
|
dutitello/parAnsys
|
examples/AngTang_FORM_69.py
|
"""
Running example 6.9 from Ang & Tang 1984
"""
import paransys
# Call ParAnsys
form = paransys.FORM()
# Console log On
form.Info(True)
# Create random variables
form.CreateVar('y', 'gauss', 40, cv=0.125)
form.CreateVar('z', 'gauss', 50, cv=0.050)
form.CreateVar('m', 'gauss', 1000, cv=0.200)
form.CreateVar('c', 'const', 0)
# Create limit state
form.SetLimState('y*z-m+c')
# Set correlation
form.SetCorrel('y', 'z', 0.40)
# Run
values = form.Run(dh=0.01, meth='iHLRF', tolRel=0.001)
form.ExportDataCSV('AngTang-FORM-69', 'Comentarios?')
|
dutitello/parAnsys
|
examples/BeckCantilever.py
|
<reponame>dutitello/parAnsys
"""
This is a PARANSYS-ANSYS problem that is really quick to run, just for validation.
Example 1 from <NAME>. and <NAME>., 2006, "Structural Reliability Analysis Using Deterministic Finite Element Programs", Latin American Journal of Solids and Structures, 3, pp. 197-222.
- https://www.lajss.org/index.php/LAJSS/article/download/101/95/
- https://www.researchgate.net/publication/228570899_Structural_reliability_analysis_using_deterministic_finite_element_programs
Here some parameters need to be set:
- form.ANSYS:
- exec_loc = ansys executable location
- run_location = where ansys will work (an empty dir)
- form.SetANSYSModel:
- directory = where the file BeckCantileverAPDL.inp is located
"""
import paransys
import numpy as np
# Call ParAnsys
form = paransys.FORM()
form.ANSYS(exec_loc='C:\\Program Files\\ANSYS Inc\\v194\\ansys\\bin\\winx64\\ansys194.exe',
run_location='C:\\Temp\\wk',
jobname='file', nproc=4, override=True, cleardir=False, add_flags='')
# Console log On
form.Info(True)
# Set ANSYS Model
form.SetANSYSModel(inputname='BeckCantileverAPDL.inp', extrafiles=[], directory='C:\\ANSYS\\Conf_Dissert\\quali_parAnsys\\_tests\\')
# Create random variables
form.CreateVar('q', 'norm', 1.15, cv=0.029)
form.CreateVar('l', 'norm', 60, cv=0.01)
form.CreateVar('b', 'norm', 4, cv=0.03)
form.CreateVar('h', 'normal', 1, cv=0.03)
form.CreateVar('sy', 'norm', 3600, cv=0.083)
# ANSYS Random variables
form.SetANSYSVar('q')
form.SetANSYSVar('l')
form.SetANSYSVar('b')
form.SetANSYSVar('h')
# Set ANSYS output
form.SetANSYSOutVar('stress')
# Create limit state
form.SetLimState('sy-stress')
# Run
values = form.Run(maxIter=12, tolRel=0.001, dh=0.005, diff='forward', meth='iHLRF')
# Export the results to CSV
form.ExportDataCSV('BeckCantilever')
|
dutitello/parAnsys
|
examples/AngTang_MCAI_69-Implicit.py
|
<reponame>dutitello/parAnsys<filename>examples/AngTang_MCAI_69-Implicit.py
"""
Running example 6.9 from Ang & Tang 1984
"""
import paransys
import numpy as np
# Call ParAnsys
mc = paransys.MonteCarlo()
# Console log On
mc.Info(True)
# Create random variables
mc.CreateVar('y', 'gauss', 40, cv=0.125)
mc.CreateVar('z', 'gauss', 50, cv=0.050)
mc.CreateVar('m', 'gauss', 1000, std=0.200*1000)
# A Python function that will serve as the implicit limit state function
def myFunc(y, z, m):
# Just to show that we can do a lot of things here...
# Some conditional
if m > 1e27:
m = 1e27
# Determine the result
result = y * z - m
# Print the result externally to PARANSYS
print('~~~The LS result is: {}'.format(result))
# Send return
return result
# There are two ways to use implicit functions:
# Setting a Python function in the place of the LS string, as next line:
mc.CreateLimState(myFunc)
# Or setting the Python function as userf and using it inside LS string:
#mc.CreateLimState("userf(y=y, z=z, m=m)", userf=myFunc)
# Define correlation
mc.SetCorrel('y', 'z', 0.40)
# Sampling for first limit state (0)
# It makes failures show up earlier =)
k = 2
# For GAUSS and GUMBEL it is better to use STD, and for LOGN, CV ;)
mc.SetRandomVarSampl('y', 0, 'gauss', 40*(1-k*0.125), std=0.125*40)
mc.SetRandomVarSampl('z', 0, 'gauss', 50*(1-k*0.050), std=0.050*50)
mc.SetRandomVarSampl('m', 0, 'gauss', 1000*(1+k*0.200), std=0.200*1000)
# Running
values = mc.Run(100, 1000, 0.05, 0.005)
# Export
#mc.ExportDataCSV('AngTang-MCAI-69', 'Comentarios?')
# Figures
#mc.Graph(['N_Pf', 'N_Beta', 'N_CVPf'], show=True)
|
dutitello/parAnsys
|
examples/AngTang_MCAI_610.py
|
<filename>examples/AngTang_MCAI_610.py
"""
Running example 6.10 from Ang & Tang 1984
"""
import paransys
import numpy as np
# Call ParAnsys
mc = paransys.MonteCarlo()
# Console log On
mc.Info(True)
# Create random variables
mc.CreateVar('y', 'logn', 40, cv=0.125)
mc.CreateVar('z', 'logn', 50, cv=0.050)
mc.CreateVar('m', 'gumbel', 1000, cv=0.200)
mc.CreateVar('c', 'const', 0)
# First limit state (0)
mc.CreateLimState('y*z-m+c')
# Define correlation
mc.SetCorrel('y', 'z', 0.40)
# Sampling for first limit state (0)
# It makes failures show up earlier =)
k = 2
# For GAUSS and GUMBEL it is better to use the ORIGINAL STD, and for LOGN, CV ;)
mc.SetRandomVarSampl('y', 0, 'logn', 40*(1-k*0.125), cv=0.125)
mc.SetRandomVarSampl('z', 0, 'logn', 50*(1-k*0.050), cv=0.050)
mc.SetRandomVarSampl('m', 0, 'gumbel', 1000*(1+k*0.200), std=0.200*1000)
# Running
values = mc.Run(100, 1000, 0.05, 0.005)
# Export
#mc.ExportDataCSV('AngTang-MCAI-610', 'Comentarios?')
# Figures
#mc.Graph(['N_Pf', 'N_Beta', 'N_CVPf'], show=True)
|
dutitello/parAnsys
|
paransys/montecarlo.py
|
# -*- coding: UTF-8 -*-
"""
This module performs Monte Carlo simulations using Python.
Please read the class docstring for more.
Docs are available at https://dutitello.github.io/parAnsys/
"""
import os
import time
from paransys.ansys import ANSYS
import numpy as np
import scipy.stats
import math
from math import *
class MonteCarlo(object):
"""
This class performs Monte Carlo simulations inside Python using ParAnsys as
a connection with ANSYS to evaluate FEM models.
It is possible to run Monte Carlo simulations without using ANSYS, just
defining the limit state equation and all variables.
This code was made following the idea of ANSYS being a tool for getting the
ultimate load of the structure. This works by applying a displacement at the
loaded node and then getting the largest reaction force on that node;
in this way the limit state defined here is 'R-S', where R are the
values obtained from ANSYS and S the values generated in Python. It's also
possible to work applying the true load in ANSYS; it's just necessary to
formulate a valid limit state equation.
|
ATTENTION: When using ANSYS the weight of results from ANSYS variables
are determined using the weights of all ANSYS input variables.
|
**To do**
1) When the structure has more than one limit state, the PDF of the sampling
distribution is the sum of all limit states' sampling distributions times their
sampling weights (h(x) = w1.h1(x) + w2.h2(x) + ... + wi.hi(x)).
The division of simulations for each cycle by the limit state weights is
already done.
2) When the sampling distribution is different from the real distribution, Pf
comes out wrong, so it cannot be used for now.
|
|
**Class methods:**
"""
#---------------------------------------------------------------------------
def __init__(self):
"""
"""
# Definition of self variables that only exist on MonteCarlo
# Dictionary with variables distrib and parameters
self.variableDistrib = {}
self.samplingDistrib = {}
self.variableConst = {}
self.corlist = {}
self.controls = {}
self.ansysvars = []
self.limstates = {}
#
self.PrintR = False
# Control if ANSYS is being used
self._ANSYS = False
pass
def ANSYS(self, exec_loc=None, run_location=os.getcwd()+'\\ansys_anl\\', jobname='file',
nproc=2, override=False, cleardir=False, add_flags=''):
"""
If ANSYS will be used, this defines the ANSYS properties needed to initialize
the paransys.ANSYS class.
Parameters
----------
exec_loc : str, obligatory
Location of ANSYS executable file.
run_location : str, optional
ANSYS working directory. Recomended to be a separated directory.
Defaults to ansys_anl on current directory.
jobname : str, optional
ANSYS jobname. Defaults to 'file'.
nproc : int, optional
Number of processors. Defaults to 2.
override : bool, optional
Attempts to delete the .lock file at working directory.
It's useful when ANSYS was interrupted.
Defaults to False
cleardir : bool, optional
Delete all the files from ANSYS working directory when call the Run command.
Defaults to False
add_flags : str, optional
Additional flags to be called with ANSYS.
If it's an academic version use add_flags='-aa_r'
Do not use '-b -i -o'
Flags can be found at https://www.sharcnet.ca/Software/Ansys/16.2.3/en-us/help/ans_ope/Hlp_G_OPE3_1.html
"""
self.ansys = ANSYS(exec_loc=exec_loc, run_location=run_location,
jobname=jobname, nproc=nproc, override=override,
cleardir=cleardir, add_flags=add_flags)
self._ANSYS = True
def Info(self, act=False):
"""
Turn on/off the return of the commands to Python.
Parameters
----------
act : bool, obligatory
True turns the return of the commands to Python on, and False turns it off.
"""
self.PrintR = act
if self._ANSYS:
self.ansys.Info(act)
#return self._PrintR('Now the commands will send a return to Python (like this).')
def _PrintR(self, value):
"""
Internal function to print or not the return of commands based on command Info()
"""
if self.PrintR:
return print(value, flush=True)
else:
pass
def SetANSYSModel(self, inputname, extrafiles=[], directory=os.getcwd()):
"""
Set the input script file to be used on ANSYS and extra files that should
be copied together.
All these files must be in the same directory set in the directory parameter.
Parameters
----------
inputname : str, obligatory
Name with extension of the script that will be executed in the analysis.
The script must be written in terms of the INPUT variables defined here
(as ANSYS parameters), and must define/calculate ANSYS parameters with
the results using the names defined here.
extrafiles : list of strings, optional
A list of strings containing extra files (with extension) that are necessary to
run the script analysis; it could be a MODEL with the MESH already generated,
for example.
An example of extrafiles list is:
extrafiles = ['temps.txt', 'model1.ans', 'file.db']
directory : str, optional
If the script is not in the current running Python directory you should
give the entire location; if it's in a subdirectory of the current directory
you can use '/dirname/filename.ext'.
Defaults to current running Python directory.
"""
if self._ANSYS:
self.ansys.SetModel(inputname, extrafiles, directory)
else:
exception = Exception('ANSYS not declared yet. Before set ANSYS '+
'model you must define ANSYS properties with ANSYS(...).')
raise exception
def SetANSYSOutVar(self, name):
"""
Defines a parameter/variable from the ANSYS APDL script as a variable that
returns values to Python.
Parameters
----------
name : str, obligatory
Variable/Parameter name, as defined in APDL script.
"""
if self._ANSYS:
self.ansys.CreateVarOut(name)
else:
exception = Exception('ANSYS not declared yet. Before set ANSYS '+
'variables you must define ANSYS properties with ANSYS(...).')
raise exception
# Internal for variable distrib definition
def _VarDistrib(self, type, name, distrib, mean, std, cv, par1, par2, limst=0):
"""
Internal function that verifies the values for CreateVar
and SetRandomVarSampl.
Parameters
----------
type : int, obligatory
type = 0: for CreateVar -> variableDistrib
type = 1: for SetRandomVarSampl -> samplingDistrib
limst : integer, obligatory for type=1 with more than 1 limit state
Limit state ID that will use current sampling distribution.
"""
# Verify the variable name
if name in ['', None, ' ']:
exception = Exception('You must define the name of the variable that will receive the values.')
raise exception
else:
name = name.lower()
# Sampling distribution not created yet
if type == 1 and name not in self.variableDistrib:
exception = Exception(('Variable %s is not declared yet. ' +
'Before setting the sampling distribution you must create it ' +
'with CreateVar().') % name)
raise exception
# Set distribution name as lower case
distrib = distrib.lower()
# CV or STD?
if distrib not in ['constant', 'const', 'cons', 'c']:
if cv != None:
std = cv*mean
elif mean == 0.0:
cv = 1e99
else:
cv = std/mean
# Verify the distribution and then determine the parameters
# Gaussian variable
if distrib in ['gauss', 'gaus', 'gaussian', 'normal', 'norm']:
# Type 0 = Random Variable distribution
if type == 0:
self.variableDistrib[name] = ['gauss', mean, std, cv]
return self._PrintR('Variable %s defined as Gaussian with mean=%f, std. dev.=%f, CV=%f.'
% (name, mean, std, cv))
# Type 1 = Sampling distribution
elif type == 1:
# Verify if samplingDistrib[limst] exists
try:
self.samplingDistrib[limst]
except:
# Not created yet
self.samplingDistrib[limst] = {}
self.samplingDistrib[limst][name] = ['gauss', mean, std, cv]
else:
# Already created
self.samplingDistrib[limst][name] = ['gauss', mean, std, cv]
return self._PrintR('Variable %s sampling defined as Gaussian with mean=%f, std. dev.=%f, CV=%f for limit state %d.'
% (name, mean, std, cv, limst))
# Lognormal distribution
elif distrib in ['lognormal', 'logn', 'ln', 'log', 'lognorm']:
# Distribution parameters are qsi, related to std dev, and
# lmbd, related to mean.
#qsi = math.sqrt(math.log(1 + (cv)**2))
#lmbd = math.log(mean) - 0.5*qsi**2
# Type 0 = Random Variable distribution
if type == 0:
self.variableDistrib[name] = ['logn', mean, std, cv]
return self._PrintR('Variable %s defined as LogNormal with mean=%f, std. dev.=%f, CV=%f.'
% (name, mean, std, cv))
# Type 1 = Sampling distribution
elif type == 1:
# Verify if samplingDistrib[limst] exists
try:
self.samplingDistrib[limst]
except:
# Not created yet
self.samplingDistrib[limst] = {}
self.samplingDistrib[limst][name] = ['logn', mean, std, cv]
else:
# Already created
self.samplingDistrib[limst][name] = ['logn', mean, std, cv]
return self._PrintR('Variable %s sampling defined as LogNormal with mean=%f, std. dev.=%f, CV=%f for limit state %d.'
% (name, mean, std, cv, limst))
# Gumbel distribution
elif distrib in ['gumbel', 'gumb', 'type1']:
# Type 0 = Random Variable distribution
if type == 0:
self.variableDistrib[name] = ['gumbel', mean, std, cv]
return self._PrintR('Variable %s defined as Gumbel with mean=%f, std. dev.=%f, CV=%f.'
% (name, mean, std, cv))
# Type 1 = Sampling distribution
elif type == 1:
# Verify if samplingDistrib[limst] exists
try:
self.samplingDistrib[limst]
except:
# Not created yet
self.samplingDistrib[limst] = {}
self.samplingDistrib[limst][name] = ['gumbel', mean, std, cv]
else:
# Already created
self.samplingDistrib[limst][name] = ['gumbel', mean, std, cv]
return self._PrintR('Variable %s sampling defined as Gumbel with mean=%f, std. dev.=%f, CV=%f for limit state %d.'
% (name, mean, std, cv, limst))
# Constant value
elif distrib in ['constant', 'const', 'cons', 'c']:
# Type 0 = Random Variable distribution
if type == 0:
self.variableConst[name] = mean
return self._PrintR('Variable %s defined as Constant with value=%f'
% (name, mean))
# Type 1 = Sampling distribution
elif type == 1:
# Not possible, break
exception = Exception('Error on %s: You can not use a constant value for sampling!'
% (distrib.upper()))
raise exception
else:
exception = Exception('Distribution %s set on variable %s is not recognized.'
% (distrib.upper(), name))
raise exception
# Setting distribution of variables
def CreateVar(self, name, distrib, mean, std=0, cv=None, par1=None, par2=None):
"""
Create a Random Variable.
If it will be used in ANSYS it also needs to be declared there, so after this use:
>>> mc.SetANSYSVar(name)
Parameters
----------
name : str, obligatory
Name of variable.
distrib : str, obligatory
Probabilistic variable distribution type.
For all distributions Mean and Std are related to Normal distribution
(the code determines the parameters for the desired distribution).
Available types are:
* gaussian (or gauss, normal);
* lognormal (or log, logn, ln, lognorm);
* gumbel (or gumb, type1);
* constant (or const) - Constant value (doesn't need std).
mean : float, obligatory
Mean of the variable values.
std : float, optional
Standard deviation of the variable. You must define std or cv for variables
that aren't constant; if both (cv and std) are declared, std will be used.
For LogNormal variables it's recommended to use CV!
cv : float, optional
Coefficient of Variation of the variable. You must define std or cv for variables
that aren't constant; if both (cv and std) are declared, std will be used.
For LogNormal variables it's recommended to use CV!
par1 and par2 : float, optional
Parameters for future implementations.
"""
self._VarDistrib(type=0, name=name, limst=None, distrib=distrib, mean=mean, std=std, cv=cv, par1=par1, par2=par2)
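# Usage sketch (illustrative only; 'mc' stands for an instance of this class and the
# variable names/values are hypothetical). Mean and std/cv always refer to the moments
# described above:
# >>> mc.CreateVar('fy', 'lognormal', mean=250.0, cv=0.10)   # lognormal, CV preferred
# >>> mc.CreateVar('q', 'gumbel', mean=10.0, std=2.0)        # gumbel from mean/std
# >>> mc.CreateVar('b', 'constant', mean=0.30)               # constant, no std needed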
# Sampling definition
def SetRandomVarSampl(self, name, limst, distrib, mean, std=0, cv=None, par1=None, par2=None):
"""
Sets the sampling distribution of a variable to perform Importance
Sampling in the simulations.
ATTENTION: When using ANSYS the weight of results from ANSYS variables
are determined using the weights of all ANSYS input variables.
Parameters
----------
name : str, obligatory
Name of variable.
limst : integer, obligatory for type=1 with more than 1 limit state
Limit state ID that will use current sampling distribution.
distrib : str, obligatory
Probabilistic variable distribution type.
For all distributions Mean and Std are related to Normal distribution
(the code determines the parameters for the desired distribution).
Available types are:
* gaussian (or gauss, normal);
* lognormal (or log, logn, ln, lognorm);
* gumbel (or gumb, type1);
* constant (or const) - Constant value (doesn't need std).
mean : float, obligatory
Mean of the variable values.
std : float, optional
Standard deviation of the variable. You must define std or cv for variables
that aren't constant; if both (cv and std) are declared, std will be used.
For LogNormal variables it's recommended to use CV!
cv : float, optional
Coefficient of Variation of the variable. You must define std or cv for variables
that aren't constant; if both (cv and std) are declared, std will be used.
For LogNormal variables it's recommended to use CV!
par1 and par2 : float, optional
Parameters for future implementations.
"""
if limst > (len(self.limstates)-1):
exception = Exception('Limit state %d is not created yet. Please create it before set sampling distribution.' % limst)
raise exception
self._VarDistrib(type=1, name=name, limst=limst, distrib=distrib, mean=mean, std=std, cv=cv, par1=par1, par2=par2)
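# Usage sketch (illustrative; assumes limit state 0 was already created with
# CreateLimState() and variable 'q' with CreateVar()). It shifts the sampling of 'q'
# towards the failure region for Importance Sampling:
# >>> mc.SetRandomVarSampl('q', 0, 'gumbel', mean=14.0, std=2.0)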
def SetCorrel(self, var1, var2, correl):
"""
Set the correlation between two variables.
The values will be transformed by the Nataf process before running.
Parameters
----------
var1 : str, obligatory
First variable name.
var2 : str, obligatory
Second variable name.
correl : float, obligatory
Correlation between var1 and var2.
"""
# Set lower
var1 = var1.lower()
var2 = var2.lower()
# Verify if it's already created
if var1 not in self.variableDistrib:
exception = Exception('Variable "%s" is not declared yet. ' % var1 +
'Before set the correlation you must create the random ' +
'variable with CreateVar().')
raise exception
if var2 not in self.variableDistrib:
exception = Exception('Variable "%s" is not declared yet. ' % var2 +
'Before set the correlation you must create the random ' +
'variable with CreateVar().')
raise exception
if var1 == var2:
exception = Exception(
'You cannot set the correlation of a variable with itself.')
raise exception
if correl < -1 or correl > 1:
exception = Exception('Correlation must be a value between -1 and +1.')
raise exception
# Store correlations on correlation list self.corlist
self.corlist[var1, var2] = correl
self.corlist[var2, var1] = correl
self._PrintR('Correlation between \"%s\" and \"%s\" set as %f.' %
(var1, var2, correl))
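# Usage sketch (illustrative; variables 'fy' and 'q' are assumed to have been created
# with CreateVar()):
# >>> mc.SetCorrel('fy', 'q', 0.30)
# The value is stored symmetrically in corlist and later converted to an equivalent
# normal-space correlation by the Nataf model before the Cholesky decomposition.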
def _SetControls(self, Ns, Nmaxcycles, CVPf=0.00, tolAdPt=False):
"""
Set the controls of simulation process.
Parameters
----------
Ns : integer, obligatory
Number of simulations performed on each cycle.
After each cycle the convergence of the simulation is verified.
When using Importance Sampling with Adaptive Sampling, after each
cycle the new sampling point will be determined.
Nmaxcycles : integer, obligatory
Maximum number of cycles to be performed, if CVPf is not reached on
Nmaxcycles the simulation will be interrupted.
CVPf : float, optional
Target value of Probability Failure Coefficient of Variation, when
reached the simulation stops.
tolAdPt : float or False, optional
Maximum relative tolerance for the adaptive sampling point search.
If the value is "False" it disables adaptive sampling; the simulations
will always use the point set by the user.
"""
if Ns >= 0 and Nmaxcycles >= 0 and CVPf >= 0 and (tolAdPt == False or tolAdPt >= 0):
# Save controls variable
self.controls['Ns'] = Ns
self.controls['Nmaxcycles'] = Nmaxcycles
self.controls['CVPf'] = CVPf
self.controls['tolAdPt'] = tolAdPt
self._PrintR('Process controls set as:')
self._PrintR(' Simulations per cycle: %d' % Ns)
self._PrintR(' Maximum number of cycles: %d' % Nmaxcycles)
self._PrintR(' Total maximum number of simulations: %2.3E' % (Ns*Nmaxcycles))
self._PrintR(' Target CVPf: %2.3E' % CVPf)
if tolAdPt != False:
self._PrintR(' Maximum relative tolerance for adaptive sampling point search: %2.3E' % tolAdPt)
else:
exception = Exception('Error while setting simulation controls. Please verify the set values.')
raise exception
def CreateLimState(self, equat, weight=1.00, userf=None):
"""
Create and set a new limit state.
The ID numbers of limit states are generated automatically, starting at 0
for the first one.
**Current version supports only one limit state!**
ATTENTION: When using ANSYS the weight of results from ANSYS variables
are determined using the weights of all ANSYS input variables.
Parameters
----------
equat : str, obligatory
String with the equation of the limit state. It must be written as a
function of the defined variables (In and Out).
weight : float, obligatory only with more than 1 limit state
The weight of the current limit state; it determines how the simulations
are distributed between all the limit states.
The sum of the weights of all limit states must be 1.00, so, if there is
just one limit state, its weight should be 1.00.
userf : function, optional
An user defined function that can be used inside the limit state
equation, called inside equat as ``userf()``. Each limit state has its
own userf, but you can use the same Python function for all limit states.
For example, you can create a complex Python function with loops, ifs
and whatever else to evaluate the R part of your limit state function
for a concrete beam. An example is shown below.
First example: if ANSYS returns the maximum load on a truss as variable
FxMAX, and applied loads to be tested are ``(g+q)*sin(theta)``, where
``g``, ``q``, theta are defined random variables created with ``CreateVar()``.
.. code-block:: python
mc.CreateLimState(equat='FxMAX-(g+q)*sin(theta)', weight=1.00)
Note that you can use math expressions as ``sin()``, ``cos()``, ``tan()``, ``sqrt()``
from Python math module inside the equation.
|
Second example: you have a steel bar in tension with no hardening.
Its stress is a function of ``(strain, fy, E)``, where ``strain`` is the current
deformation, ``fy`` is the yield stress and ``E`` the elastic modulus;
you can create inside your code a function like:
.. code-block:: python
def stress(strain, fy, E):
if strain > fy/E:
return fy
else:
return strain*E
And now, defining ``userf=stress``, we can:
.. code-block:: python
mc.CreateLimState(equat='userf(strain,fy,E)-q', weight=1.00, userf=stress)
where ``strain``, ``fy``, ``E`` and ``q`` are random variables
(``def`` cannot be used as a name here since it is a Python reserved word).
Note that the function inside the limit state equation should be
called as ``userf()`` with the parameters from ``stress``.
Or we can do the same using the functions instead of the string:
.. code-block:: python
mc.CreateLimState(equat=stress)
"""
if weight <= 0 or weight > 1.00:
exception = Exception('The weight of the limit state must be greater than zero and less than or equal to 1.00.')
raise exception
# Add current limit state to the list
act = len(self.limstates)
#---------------
#
# TAKE A LOOK HERE !
#
#
#
#
#
#
#
# For now just 1 limit state with weight=1
# this part prevents more than 1 limit states.
#
#
#
#
weight = 1
if act >= 1:
exception = Exception('Current version of ParAnsys does not support more than one limit state.')
raise exception
#else:
# self._PrintR('\n\nCurrent version of ParAnsys supports only one limit state, do not try to set another.\n\n')
#
#
#
#
#
#
#---------------
if(type(equat) == str):
# String equation
# Change equation to lowercase
equat = equat.lower()
else:
self._userf = None
# Fourth value limstates[act][3] will be the Ns for LS
self.limstates[act] = [equat, weight, userf, 0]
#
try:
self.samplingDistrib[act]
except:
self.samplingDistrib[act] = {}
# Verify the sum of all limit state weights until now
# it isn't possible to verify the equation since we don't have the ANSYS output values yet
tempsum = 0.00
for each in self.limstates:
tempsum += self.limstates[each][1]
if tempsum > 1.00:
exception = Exception('The sum of limit states weights exceeded 1.00, please verify the inserted values.')
raise exception
return self._PrintR('Limit state %d defined as \"%s\" with weight %f, the sum of weights until now is %f.' %(act, equat, weight, tempsum))
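# Usage sketch (illustrative only; the variable and ANSYS output names are hypothetical
# and must have been declared beforehand with CreateVar()/SetANSYSOutVar()):
# >>> mc.CreateVar('g', 'gauss', mean=5.0, cv=0.10)
# >>> mc.CreateVar('q', 'gumbel', mean=3.0, cv=0.25)
# >>> mc.CreateVar('theta', 'gauss', mean=0.52, std=0.02)
# >>> mc.CreateLimState(equat='fxmax-(g+q)*sin(theta)', weight=1.00)
# Here 'fxmax' would be an ANSYS output variable; math functions such as sin(), cos()
# and sqrt() can be used inside the equation string.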
def SetANSYSVar(self, name):
"""
Mark a Random variable as ANSYS variable.
ATTENTION: When using ANSYS the weight of results from ANSYS variables
are determined using the weights of all ANSYS input variables.
Parameters
----------
name : str, obligatory
Name of variable.
"""
# Verify ANSYS object
if self._ANSYS == False:
exception = Exception('ANSYS not declared yet. Before setting ANSYS '+
'variables you must define the ANSYS properties with ANSYS(...).')
raise exception
# Verify the variable name
if name in ['', None, ' ']:
exception = Exception('You must define the name of the variable.')
raise exception
else:
name = name.lower()
if name not in self.variableDistrib and name not in self.variableConst:
exception = Exception('This variable name is not declared. '+
'Only Random variables can be set as ANSYS variables.\n'+
'Please use CreateVar() to declare it.')
raise exception
# Declare it on ansys object
self.ansys.CreateVarIn(name)
# If exists and isn't an ANSYS variable yet append to ansysvars
#if name not in self.ansysvars:
# self.ansysvars.append(name)
# return self._PrintR('Variable %s set as an ANSYS variable.' % name)
#else:
# return self._PrintR('Variable %s is already set as an ANSYS variable.' % name)
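# Typical ANSYS coupling sketch (illustrative only; the model file and variable names
# are assumptions, not part of this module):
# >>> mc.ANSYS(...)                                   # ANSYS properties, see ANSYS(...)
# >>> mc.SetANSYSModel(inputname='model.inp', extrafiles=[], directory='.')
# >>> mc.CreateVar('h', 'gauss', mean=1.0, cv=0.05)
# >>> mc.SetANSYSVar('h')             # 'h' is now sent to the APDL model as an input
# >>> mc.SetANSYSOutVar('stress')     # 'stress' is read back from the APDL model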
def _GenRandomVW(self, varDistrib, sampDist, NCValues_h):
"""
Internal function to generate random values and their weights
(when sampDist is not False).
Parameters
----------
varDistrib : list, obligatory
A list with the real distribution parameters,
[distrib, mean, std, par1, par2]
sampDist : list or False, obligatory
A list with the sampling distribution parameters, like varDistrib.
It can also be "False"; in this case there is no sampling distribution.
NCValues_h : 1D np.array
INPUT normalized correlated (or not) values from the sampling distribution (h_X(x))
Returns
-------
[randval, weights, NCValues_f] where:
* randval : float
Random values generated.
* weights : float
Weight of each value, when sampDist is being used.
* NCValues_f : 1D np.array
OUTPUT Normalized correlated (or not) values for real distribution (f_X(x))
"""
# Initial values
Nsi = len(NCValues_h)
randval = np.zeros(Nsi)
weights = np.zeros(Nsi)
NCValues_f = np.zeros(Nsi)
# Verify the existence of sampling distrib
if sampDist == False:
# Current distribution for generate values is varDistrib
curDist = varDistrib
else:
curDist = sampDist
# Generate it
# Gauss distribution
if curDist[0] == 'gauss':
# Sampling distribution values (X_h)
randval = curDist[1] + curDist[2]*NCValues_h
# Weights
# Not sampling
if sampDist == False:
NCValues_f = NCValues_h.copy()
weights[:] = 1
# Real distrib is gaussian:
elif varDistrib[0] == 'gauss':
# Real normal reduced correlated values (Z_f)
NCValues_f = (randval - varDistrib[1])/varDistrib[2]
# Weights
weights = (scipy.stats.norm.pdf(randval, varDistrib[1], varDistrib[2]) / scipy.stats.norm.pdf(NCValues_f, 0, 1)) \
/ (scipy.stats.norm.pdf(randval, curDist[1], curDist[2]) / scipy.stats.norm.pdf(NCValues_h, 0, 1))
# Real distrib is logn:
#elif varDistrib[0] is 'logn':
# eq for varDistrib
# qsiv = math.sqrt(math.log(1 + (varDistrib[2]/varDistrib[1])**2))
# lambdav = math.log(varDistrib[1]) - 0.5*qsiv**2
# weights = (1/(qsiv*randval)*scipy.stats.norm.pdf(np.log(randval), lambdav, qsiv)) \
# / scipy.stats.norm.pdf(randval, curDist[1], curDist[2])
else:
exception = Exception('For now the sampling distribution must be of the same type as the real distribution of the variable.')
raise exception
# Lognormal distribution
elif curDist[0] == 'logn':
# equiv std dev
qsic = math.sqrt(math.log(1 + (curDist[3])**2))
# equiv mean
lambdac = math.log(curDist[1]) - 0.5*qsic**2
# Random values
randval = np.exp(lambdac + qsic*NCValues_h)
#np.random.lognormal(lambdac, qsic, Nsi)
# Weights
# Not sampling
if sampDist == False:
NCValues_f = NCValues_h.copy()
weights[:] = 1
# Real distrib is logn:
elif varDistrib[0] == 'logn':
# eq for varDistrib
qsiv = math.sqrt(math.log(1 + (varDistrib[3])**2))
lambdav = math.log(varDistrib[1]) - 0.5*qsiv**2
# Real normal reduced correlated values (Z_f)
NCValues_f = (lambdac-lambdav+qsic*NCValues_h)/qsiv
weights = (scipy.stats.norm.pdf(np.log(randval), lambdav, qsiv)/qsiv/scipy.stats.norm.pdf(NCValues_f)) \
/ (scipy.stats.norm.pdf(np.log(randval), lambdac, qsic)/qsic/scipy.stats.norm.pdf(NCValues_h))
# Old and wrong implementation (the shape parameter 's' was never defined):
# scipy.stats.lognorm.pdf(x, s, loc, scale)
#weights = scipy.stats.lognorm.pdf(randval, s, lambdav, qsiv) \
# / scipy.stats.lognorm.pdf(randval, s, lambdac, qsic)
# Real distrib is gaussian:
#elif varDistrib[0] is 'gauss':
# # weights = pdfgauss/pdflogn
# weights = scipy.stats.norm.pdf(randval, varDistrib[1], varDistrib[2]) \
# / (scipy.stats.norm.pdf(np.log(randval), lambdac, qsic)/(qsic*randval))
else:
exception = Exception('For now the sampling distribution must be of the same type as the real distribution of the variable.')
raise exception
# Gumbel distribution
elif curDist[0] == 'gumbel':
# scale parm - eqv to std
sclc = math.sqrt(6)*curDist[2]/math.pi
# loc parm - equiv to mean
locc = curDist[1] - 0.57721*sclc
# Random values
randval = scipy.stats.gumbel_r.ppf(scipy.stats.norm.cdf(NCValues_h), loc=locc, scale=sclc)
#randval = np.random.gumbel(locc, sclc, Nsi)
# Weights
# Not sampling
if sampDist == False:
NCValues_f = NCValues_h.copy()
weights[:] = 1
# Real distrib is gumbel
elif varDistrib[0] == 'gumbel':
# eq for varDistrib
sclv = math.sqrt(6)*varDistrib[2]/math.pi
locv = varDistrib[1] - 0.57721*sclv
# Real normal reduced correlated values (Z_f)
NCValues_f = scipy.stats.norm.ppf(scipy.stats.gumbel_r.cdf(randval, loc=locv, scale=sclv))
weights = (scipy.stats.gumbel_r.pdf(randval, loc=locv, scale=sclv)/scipy.stats.norm.pdf(NCValues_f)) \
/ (scipy.stats.gumbel_r.pdf(randval, loc=locc, scale=sclc)/scipy.stats.norm.pdf(NCValues_h))
else:
exception = Exception('For now the sampling distribution must be of the same type as the real distribution of the variable.')
raise exception
# Return
return [randval, weights, NCValues_f]
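# Standalone sketch of the lognormal branch above (illustrative; the numbers are
# arbitrary and not taken from the original text):
# >>> import math
# >>> import numpy as np
# >>> mean, cv = 40.0, 0.125
# >>> qsi = math.sqrt(math.log(1 + cv**2))     # equivalent normal std. dev.
# >>> lmbd = math.log(mean) - 0.5*qsi**2       # equivalent normal mean
# >>> z = np.random.normal(0.0, 1.0, 5)        # reduced standard normal samples
# >>> x = np.exp(lmbd + qsi*z)                 # lognormal realizations
# When no sampling distribution is used the weights stay equal to 1, exactly as in the
# 'sampDist == False' branches of this function.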
def Run(self, Ns, Nmaxcycles, CVPf=0.00, tolAdPt=False):
"""
Run the Monte Carlo simulation.
Parameters
----------
Ns : integer, obligatory
Number of simulations performed on each cycle.
After each cycle the convergence of the simulation is verified.
When using Importance Sampling with Adaptive Sampling, after each
cycle the new sampling point will be determined.
Nmaxcycles : integer, obligatory
Maximum number of cycles to be performed, if CVPf is not reached on
Nmaxcycles the simulation will be interrupted.
CVPf : float, optional
Target value of Probability Failure Coefficient of Variation, when
reached the simulation stops.
tolAdPt : float or False, optional
Maximum relative tolerance for the adaptive sampling point search.
If the value is "False" it disables adaptive sampling; the simulations
will always use the point set by the user.
**Returns a dictionary with:**
* stnumb : integer
Status of solution, values can be found after this list.
* Pf : float
Probability of failure.
* Beta : float
Reliability index.
* CVPf : float
Coefficient of Variation of Probability of failure
* {SamplingPoints} : dictionary of dictionaries
Dictionary with the sampling points used, or found in the case of
adaptive sampling, for each variable of each limit state.
(SamplingPoints[eachLS][eachVar])
* cycles : int
Number of cycles performed to obtain the solution.
* distparms : dictionary of dictionaries
Returns the mean (gMean) and standard deviation (gStd) of each limit state function.
**Status values:**
* 0: no problem;
* 1: warning, maximum of cycles reached with no convergence of CVPf;
* 99: undefined error!
"""
#---------------
#
#
#
# TAKE A LOOK HERE !
#
#
#
#
# For now just 1 limit state with weight=1
#
#
#
#
#
#
#
#---------------
#-----------------------------------------------------------------------
# Set the controls
self._SetControls(Ns=Ns, Nmaxcycles=Nmaxcycles, CVPf=CVPf, tolAdPt=tolAdPt)
#if self.controls == {}:
# exception = Exception('Before Run the Monte Carlo simulation you '+
# 'must define the controls of simulation '+
# 'process with SetControls().')
# raise exception
# Adaptive condition
if tolAdPt != False:
adapt = True
else:
adapt = False
# Time at the beginning
timei = time.time()
# It starts with undefined error
stnumb = 99
self._PrintR('\nStarting simulation cycles.')
#-----------------------------------------------------------------------
# Before all we have to set current
#
#
# For each cycle we need to generate all data, pass the ANSYS data to the
# self.ansys object, run it, get the results, process the Monte Carlo results,
# and calibrate a new point (if necessary)
#
#=======================================================================
# Before first Run:
# Create variables;
# Set initial values;
# Verify controls of process;
#
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# Verify if ANSYS is being used
if self._ANSYS:
# Set Length
self.ansys.SetLength(Ns)
# Verify if the model is set
if self.ansys.Model == {}:
exception = Exception('Before running ANSYS you must define the model that will be analysed with SetANSYSModel().')
raise exception
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# Sampling Point For each Limit State (dictionary of dictionaries)
# and Sampling Point properties (dic. of dics.)
#
#
#
self.SPForLS = {}
self.SPprops = {}
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# Monte Carlo simulation controls of each LIMIT STATE
# (dictionary of dictionaries)
#
# These have one entry for each limit state on each cycle
self.MCControl_LS = {}
# Failure count for each limit state on each cycle
self.MCControl_LS['Nfi'] = {}
# Square Failure count for each limit state on each cycle
self.MCControl_LS['Nfi2'] = {}
# Cumulative Pf of each limit state on each cycle
self.MCControl_LS['Pf'] = {}
# CV of cumulative Pf for each cycle (Pf)
self.MCControl_LS['CVPf'] = {}
# Mean values of LS g(x)
self.MCControl_LS['gMean'] = {}
# Std.Dev values of LS g(x)
self.MCControl_LS['gStd'] = {}
# S1 and S2 for means and std devs (internal use)
gS1 = {}
gS2 = {}
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# Monte Carlo simulation controls GENERAL
# (dictionary of arrays)
#
# These have one entry per cycle (all limit states together)
self.MCControl = {}
# Failure count on each cycle
self.MCControl['Nfi'] = []
# Square Failure count on each cycle
self.MCControl['Nfi2'] = []
# Cumulative Pf on each cycle
self.MCControl['Pf'] = []
# Reliability index Beta
self.MCControl['Beta'] = []
# CV of cumulative Pf for each cycle (Pf)
self.MCControl['CVPf'] = []
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# Run all limit states looking for settings and completing some
# dictionaries.
#
# Temp sum for verifying
sumNsi = 0
for eachLS in self.limstates:
# Start an empty dic.
self.SPForLS[eachLS] = {}
# Weight values on 'W' of the dict.
self.SPForLS[eachLS]['W'] = 0
# Point on Pt of dict.
self.SPForLS[eachLS]['pt'] = {}
# Weight zero for initial point
# Set Ns for each limit state on self.limstates[eachLS]
Nsi = round(self.limstates[eachLS][1]*Ns)
sumNsi += Nsi
self.limstates[eachLS][3] = Nsi
# Empty arrays
self.MCControl_LS['Nfi'][eachLS] = []
self.MCControl_LS['Nfi2'][eachLS] = []
self.MCControl_LS['Pf'][eachLS] = []
self.MCControl_LS['CVPf'][eachLS] = []
self.MCControl_LS['gMean'][eachLS] = []
self.MCControl_LS['gStd'][eachLS] = []
# Start empty!
gS1[eachLS] = 0
gS2[eachLS] = 0
# Run all variables declared on each limit state to get Sampling Point
for eachVar in self.samplingDistrib[eachLS]:
# SPForLS is a copy of the sampling distribution to keep values in case of adaptive sampling.
self.SPForLS[eachLS]['pt'][eachVar] = list.copy(self.samplingDistrib[eachLS][eachVar])
## self.samplingDistrib[limst][name] = [distrib, mean, std]
# If sumNsi < Ns add simulations to last LS
if sumNsi < Ns:
self.limstates[eachLS][3] = Ns - (sumNsi - Nsi)
print('It is not possible to split Ns correctly between the current limit states, '
+ 'so the last limit state received some extra simulations.')
# If sumNsi > Ns remove simulations from last LS
if sumNsi > Ns:
extra = sumNsi - Ns
self.limstates[eachLS][3] = self.limstates[eachLS][3] - extra
print('It is not possible to split Ns correctly between the current limit states, '
+ 'so the last limit state lost %d simulations.' % extra)
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# Variables and Constants lengths
NInRandVars = len(self.variableDistrib)
#NInConstVars = len(self.variableConst)
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# Equivalent Correlation matrix
# Create var id list
varId = {}
idx = 0
# For random variables
for eachVar in self.variableDistrib:
varId[eachVar] = idx
idx += 1
for eachVar in self.variableConst:
varId[eachVar] = idx
idx += 1
self.varId = varId
# Initial correlation Matrix
self.correlMat = np.eye(NInRandVars)
for each in self.corlist:
i = varId[each[0]]
j = varId[each[1]]
cor = self.corlist[each]
# Apply Nataf to transform the correlation
var1props = self.variableDistrib[each[0]]
var2props = self.variableDistrib[each[1]]
# Both are gauss
if (var1props[0] == 'gauss' and var2props[0] == 'gauss'):
cor = cor
# Both are LN
elif var1props[0] == 'logn' and var2props[0] == 'logn':
cv1 = var1props[3]
cv2 = var2props[3]
cor = cor*(math.log(1+cor*cv1*cv2) /
(cor*math.sqrt(math.log(1+cv1**2)*math.log(1+cv2**2))))
# Both are Gumbel
elif var1props[0] == 'gumbel' and var2props[0] == 'gumbel':
cor = cor*(1.064 - 0.069*cor + 0.005*cor**2)
# One is gauss and other is logn
elif (var1props[0] == 'gauss' and var2props[0] == 'logn') \
or (var2props[0] == 'gauss' and var1props[0] == 'logn'):
# who is logn?
if var1props[0] == 'logn':
cv = var1props[3]
else:
cv = var2props[3]
# cor is
cor = cor*cv/math.sqrt(math.log(1+cv**2))
# One is gauss and other is gumbel
elif (var1props[0] == 'gauss' and var2props[0] == 'gumbel') \
or (var2props[0] == 'gauss' and var1props[0] == 'gumbel'):
cor = 1.031*cor
# One is logn and other is gumbel
elif (var1props[0] == 'logn' and var2props[0] == 'gumbel') \
or (var2props[0] == 'logn' and var1props[0] == 'gumbel'):
# who is logn?
if var1props[0] == 'logn':
cv = var1props[3]
else:
cv = var2props[3]
# cor is
cor = cor*(1.029 + 0.001*cor + 0.014*cv + 0.004*cor**2 + 0.233*cv**2 - 0.197*cor*cv)
# Forbidden zone
else:
exception = Exception(('When applying Nataf on variables \"%s\" and \"%s\" this ' +
'combination of distributions is not supported.') % (each[0], each[1]))
raise exception
# Save it!
self.correlMat[i, j] = cor
self.correlMat[j, i] = cor
# Lower Cholesky matrix
matL = np.linalg.cholesky(self.correlMat)
matCorInv = np.linalg.inv(self.correlMat)
#-----------------------------------------------------------------------
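# Numeric sketch of the Nataf correction used above for two lognormal variables
# (illustrative; the values are arbitrary):
# >>> import math
# >>> cor, cv1, cv2 = 0.40, 0.125, 0.050
# >>> coreq = cor*(math.log(1 + cor*cv1*cv2) /
# ...              (cor*math.sqrt(math.log(1 + cv1**2)*math.log(1 + cv2**2))))
# 'coreq' is the equivalent normal-space correlation stored in self.correlMat before
# the Cholesky factor (matL) and the inverse (matCorInv) are computed.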
#-----------------------------------------------------------------------
#
# Cycle loop's
#
#
for cycle in range(1, 1+self.controls['Nmaxcycles']):
# 1+Ncycle because Python counts from 0
#-------------------------------------------------------------------
# Beginning of cycle
#
self._PrintR('\n---\nSimulation cycle %d of %d(cycle limit).'
% (cycle, self.controls['Nmaxcycles']))
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# Generate Random values
#
# Store variables Values and Weights
# (dictionary of dictionaries)
varsValues = {}
# Generate random normal matrix with all values
# One column per simulation in the cycle (Ns), matching the varsValues arrays below
normalValuesMatrix = np.random.normal(0.0, 1.0, (NInRandVars, Ns))
# Apply correlation with x_c=L.x
normalCorrelatedMatrix_h = matL.dot(normalValuesMatrix)
normalCorrelatedMatrix_f = normalCorrelatedMatrix_h.copy()
# For each variable run over each limit state; from the limit state get its
# size (Ns). If there is a sampling distribution, generate the values from
# it, else generate them from the random distribution.
for eachVar in self.variableDistrib:
eachVar = eachVar.lower()
# For each variable has values and weights
varsValues[eachVar] = {}
# All values are 0 in the beginning
varsValues[eachVar]['values'] = np.zeros(Ns)
# All weights are 1 in the beginning
varsValues[eachVar]['weights'] = 1+np.zeros(Ns)
# Initial position on values array (of limit state values)
posi = 0
# Final position on values array (of limit state values)
posf = 0
for eachLS in self.limstates:
# Get posf
Nsi = self.limstates[eachLS][3]
posf = posi + Nsi
# Get True Random Distribution
varDistrib = self.variableDistrib[eachVar]
# Verify the existence of SamplingDistrib/SPForLS, if exists get it
if eachVar in self.SPForLS[eachLS]['pt']:
# Have sampling distribution
sampDist = self.SPForLS[eachLS]['pt'][eachVar]
else:
# Have no sampling distribution
sampDist = False
# Call _GenRandomVW to get values and weights
[varsValues[eachVar]['values'][posi:posf], varsValues[eachVar]['weights'][posi:posf], \
normalCorrelatedMatrix_f[varId[eachVar], posi:posf]] = \
self._GenRandomVW(varDistrib, sampDist, normalCorrelatedMatrix_h[varId[eachVar], posi:posf])
# Set posf as next posi
posi = posf
# Joint distributions weights
varsValues['__joint_all_w__'] = 1+np.zeros(Ns)
# Ratio of joint normal distributions (fX/hX) for Nataf process - if not using importance sampling it will be 1 !
for each in range(Ns):
# matCorInv
Z_f = normalCorrelatedMatrix_f[:, each]
Z_h = normalCorrelatedMatrix_h[:, each]
varsValues['__joint_all_w__'][each] = math.exp(-1/2*(Z_f.T).dot(matCorInv.dot(Z_f))+1/2*(Z_h.T).dot(matCorInv.dot(Z_h)))
# Now for the constant variables
for eachVar in self.variableConst:
eachVar = eachVar.lower()
# For each variable has values and weights
varsValues[eachVar] = {}
# Values receive the constant value for every simulation
varsValues[eachVar]['values'] = Ns*[self.variableConst[eachVar]]
varsValues[eachVar]['weights'] = Ns*[1]
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# If ANSYS is being used:
# Set variables values;
# Run ANSYS;
# Import results;
# Put results on varsValues
#
if self._ANSYS:
# Initialize results dic.
ansysRes = {}
# Set weights to 1 so that the product of all weights of the
# ANSYS input variables can be taken next
ansysRes['weights'] = np.zeros(Ns)
ansysRes['weights'][:] = 1
# Runs ansys object looking for ANSYS variables
for eachVar in self.ansys.varInNames:
eachVar = eachVar.lower()
# Send value to ansys object
self.ansys.SetVarInValues(eachVar, varsValues[eachVar]['values'])
# Join the weight of variables to ansysRes['weights']
ansysRes['weights'] = ansysRes['weights'] * varsValues[eachVar]['weights']
# Run ANSYS
self.ansys.Run()
# Import results
ansysRes['values'] = self.ansys.GetVarOutValues()
# Put results and weights on varsValues
for eachVar in self.ansys.varOutNames:
#eachVar = eachVar.lower()
varsValues[eachVar.lower()] = {}
varsValues[eachVar.lower()]['values'] = ansysRes['values'][eachVar.upper()]
varsValues[eachVar.lower()]['weights'] = ansysRes['weights']
#-------------------------------------------------------------------
#print('ansysRes:')
#print(ansysRes)
#print('*varsValues:')
#print(varsValues)
#-------------------------------------------------------------------
# Eval each Limit State function
#
self._PrintR('Evaluating limit state functions.')
# Run all the limit states list (self.limstates[act] = [equat, weight, [], 0])
# Store positions of eachLS
posi = 0
posf = 0
# Vectors for store failures weights
# Igw: = 0, if not failed
# = weight, if failed
Igw = np.zeros(Ns)
for eachLS in self.limstates:
# Get range of LS
Nsi = self.limstates[eachLS][3]
posf = posi + Nsi
# For eachLS run eachSim(ulation) and evaluate equat from LS
for eachSim in range(posi, posf):
# Dictionary of values in this simulation and LS (used on eval)
varVal = {}
# Weight of the simulation (starts as 1)
#simWei = 1.00
simWei = varsValues['__joint_all_w__'][eachSim]
# Run all variables from variableDistrib; if a variable has a sampling
# distribution its value will be overwritten in the next step...
for eachVar in self.variableDistrib:
eachVar = eachVar.lower()
varVal[eachVar] = varsValues[eachVar]['values'][eachSim]
# Run all variables from ANSYSOut and the sampling distributions
# Run eachVar of eachLS in eachSim getting value and weight
#for eachVar in self.limstates[eachLS][2]:
for eachVar in self.samplingDistrib[eachLS]:
eachVar = eachVar.lower()
varVal[eachVar] = varsValues[eachVar]['values'][eachSim]
simWei = simWei * varsValues[eachVar]['weights'][eachSim]
for eachVar in self.variableConst:
eachVar = eachVar.lower()
varVal[eachVar] = varsValues[eachVar]['values'][eachSim]
#----------------------------------------------------------
# if running ansys:
# Attention here: the ANSYS output variables already carry the weight
# of all input variables, so their weights mustn't be used again; if they were,
# some weights would be applied twice!
#
if self._ANSYS:
for eachVar in self.ansys.varOutNames:
eachVar = eachVar.lower()
varVal[eachVar] = varsValues[eachVar]['values'][eachSim]
# DO NOT REMOVE NEXT COMMENT!
##simWei = simWei * varsValues[eachVar]['weights'][eachSim]
# Evaluate each simulation with its LS equation
#Igw[eachSim] = eval(self.limstates[eachLS][0], globals(), varVal)
# Old solution:
#curSLSvalue = eval(self.limstates[eachLS][0], globals(), varVal)
# Test if limstate is a string or a function
if(type(self.limstates[eachLS][0]) == str):
varVal['userf'] = self.limstates[eachLS][2]
curSLSvalue = eval(self.limstates[eachLS][0], globals(), varVal)
else:
curSLSvalue = self.limstates[eachLS][0](**varVal)
# Count failures
if curSLSvalue <= 0:
Igw[eachSim] = simWei
else:
Igw[eachSim] = 0
# add curSLSvalue to gS1 and gS2 for mean and std
gS1[eachLS] += curSLSvalue**2
gS2[eachLS] += curSLSvalue
# Determine current mean and std.dev for LS
self.MCControl_LS['gMean'][eachLS].append(gS2[eachLS]/(Nsi*cycle))
self.MCControl_LS['gStd'][eachLS].append(math.sqrt((Nsi*cycle*gS1[eachLS]-gS2[eachLS]**2)/(Nsi*cycle*(Nsi*cycle-1))))
# Convergence for each Limit State
# Nf and Nf**2 (weighted)
csIgw = sum(Igw[posi:posf])
self.MCControl_LS['Nfi'][eachLS].append(csIgw)
csIgw2 = sum(Igw[posi:posf]**2)
self.MCControl_LS['Nfi2'][eachLS].append(csIgw2)
# Current Pf of limit state
cPfi = sum(self.MCControl_LS['Nfi'][eachLS])/(cycle*Nsi)
self.MCControl_LS['Pf'][eachLS].append(cPfi)
# Current CVPf of Limit state
sumNfi = sum(self.MCControl_LS['Nfi'][eachLS])
sumNfi2 = sum(self.MCControl_LS['Nfi2'][eachLS])
cCVPf = 1/cPfi * 1/math.sqrt((cycle*Nsi)*(cycle*Nsi-1)) \
*(sumNfi2 - 1/(cycle*Nsi)*(sumNfi)**2)**0.50
self.MCControl_LS['CVPf'][eachLS].append(cCVPf)
#self._PrintR('**Pf=%3.8E; CVPf=%f\n' % (cPfi, cCVPf))
# Set next LS range
posi = posf
# Convergence for entire simulation
# Nf and Nf**2 (weighted)
csIgw = sum(Igw)
self.MCControl['Nfi'].append(csIgw)
csIgw2 = sum(Igw**2)
self.MCControl['Nfi2'].append(csIgw2)
# Current Pf of limit state
cPfi = sum(self.MCControl['Nfi'])/(cycle*Ns)
self.MCControl['Pf'].append(cPfi)
# Current Beta
self.MCControl['Beta'].append(-scipy.stats.norm.ppf(cPfi))
# Current CVPf of Limit state
sumNfi = sum(self.MCControl['Nfi'])
sumNfi2 = sum(self.MCControl['Nfi2'])
cCVPf = 1/cPfi * 1/math.sqrt((cycle*Ns)*(cycle*Ns-1)) \
*(sumNfi2 - 1/(cycle*Ns)*(sumNfi)**2)**0.50
self.MCControl['CVPf'].append(cCVPf)
########################################
# It was used for debug/control
#print('varsValues:')
#print(varsValues)
#print('Igw:')
#print(Igw)
#print('csIgw:')
#print(csIgw)
#print('csIgw2:')
#print(csIgw2)
#print('cPfi:')
#print(cPfi)
#print('cCVPf:')
#print(cCVPf)
#print('---------------')
# Print main results
self._PrintR('\nSolution on Cycle %d with %3.3E simulations:' % (cycle, cycle*Ns))
self._PrintR('Pf=%2.4E \nBeta=%2.3f \nCVPf=%2.3f \n' % (cPfi, -scipy.stats.norm.ppf(cPfi), cCVPf))
# Print limit states mean and std dev
self._PrintR('Limit states means and standard deviations:')
for eachLS in self.limstates:
self._PrintR(' Limit state %d: mean=%.4E, std.dev.=%.4E.' % (eachLS, self.MCControl_LS['gMean'][eachLS][-1], self.MCControl_LS['gStd'][eachLS][-1]))
# Verify the convergence criteria for CVPf after 3rd cycle
# Avoid CVPf < 1E-5!
if cCVPf <= self.controls['CVPf'] and cycle > 3 and cCVPf > 1E-5:
self._PrintR('CVPf convergence criteria reached on cycle %d with %3.3E simulations.' % (cycle, cycle*Ns))
self._PrintR('Finalizing process.')
# Set status as 0 (no problem)
stnumb = 0
break
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# Find new design point
#
if adapt == True:
self._PrintR('Evaluating new design point.')
# Initial Igw position
posi = 0
# Initial max relative error is 0; at the end it will hold the max
# value from the adaptation
maxrelerror = 0
# eachLS has its sampling distrib
for eachLS in self.limstates:
# Get range of LS
Nsi = self.limstates[eachLS][3]
# Final Igw position of LS
posf = posi + Nsi
# Copy of current sample variables (that will be the old sampling)
#oldSamp = {}
#oldSamp = self.SPForLS[eachLS]['pt']
# Old and current simulations weights
oldW = self.SPForLS[eachLS]['W']
curW = sum(Igw[posi:posf])
# Calibrate one-by-one each variable of each LS
self._PrintR('New design point for Limit State %d:' % eachLS)
for eachVar in self.SPForLS[eachLS]['pt']:
# Old and Current simulations point coord * simulations weights
oldPW = (self.SPForLS[eachLS]['pt'][eachVar][1] * self.SPForLS[eachLS]['W'])
curPW = sum(varsValues[eachVar]['values'][posi:posf] * Igw[posi:posf])
# New point
NP = (oldPW+curPW)/(oldW+curW)
# Print
self._PrintR(' %s = %3.5E' % (eachVar, NP))
# Relative error of current point abs(new-old)/new
relerror = abs(NP-self.SPForLS[eachLS]['pt'][eachVar][1])/NP
# The biggest will be maxrelerror
maxrelerror = max(maxrelerror, relerror)
# Save New Point
self.SPForLS[eachLS]['pt'][eachVar][1] = NP
# Future old weight
self.SPForLS[eachLS]['W'] = oldW + curW
# Print max point error of LS
self._PrintR('Max. relative error on sampling point search is %f.' % maxrelerror)
if maxrelerror <= self.controls['tolAdPt'] and cycle > 3:
self._PrintR('Sampling point search converged on cycle %d.' % cycle)
adapt = False
# Current LS Igw is Igw[posi:posf]
#Igw[posi:posf]
# loop on eachvar of eachlimstate
# X_i+1,j=sum(X_i,j*W_i)/sum(W_i,j)
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# After CYCLEs
#
timef = time.time()
# Save last cycles
self.cycles = cycle
self.MCControl['ElapsedTime'] = ((timef-timei)/60)
self._PrintR('\n\n=======================================================================\n')
# Verify if stopped with no convergence
if cycle == self.controls['Nmaxcycles']:
stnumb = 1
self._PrintR(' WARNING:')
self._PrintR(' The process was finished after reaching the limit of cycles without')
self._PrintR(' reaching the expected CVPf.\n')
self._PrintR(' Total of simulations: %3.3E' % (Ns*cycle))
self._PrintR(' CV of Prob. of failure (CVPf): %2.3f' % (self.MCControl['CVPf'][-1]))
self._PrintR(' Probability of failure (Pf): %2.4E' % (self.MCControl['Pf'][-1]))
self._PrintR(' Reliability index (Beta): %2.3f' % (self.MCControl['Beta'][-1]))
# Two-sided critical values: 10% signif. = 1.645 | 5% = 1.960 | 1% = 2.576
resBetaSup = -scipy.stats.norm.ppf(cPfi*(1-1.960*self.MCControl['CVPf'][-1]))
resBetaInf = -scipy.stats.norm.ppf(cPfi*(1+1.960*self.MCControl['CVPf'][-1]))
self._PrintR(' For a 95% confidence interval: {:.3f} <= Beta <= {:.3f}'.format(resBetaInf, resBetaSup))
self._PrintR(' Elapsed time: %f minutes.' % (self.MCControl['ElapsedTime']))
# Print limit states mean and std dev
self._PrintR(' Final Limit States means and standard deviations:')
for eachLS in self.limstates:
self._PrintR(' Limit state %d: mean=%.4E, std.dev.=%.4E.' % (eachLS, self.MCControl_LS['gMean'][eachLS][-1], self.MCControl_LS['gStd'][eachLS][-1]))
self._PrintR('\n=======================================================================\n\n')
#-------------------------------------------------------------------
#-----------------------------------------------------------------------
# Send the return
#
# rsampdict is the dictionary with sampling points in each limit state
rsampdict = {}
# distparms is the dictionary with g(X) mean and std dev for each ls
distparms = {}
for eachLS in self.limstates:
rsampdict[eachLS] = {}
for eachVar in self.SPForLS[eachLS]['pt']:
rsampdict[eachLS][eachVar] = self.SPForLS[eachLS]['pt'][eachVar][1]
distparms[eachLS] = {}
distparms[eachLS]['gMean'] = self.MCControl_LS['gMean'][eachLS][-1]
distparms[eachLS]['gStd'] = self.MCControl_LS['gStd'][eachLS][-1]
# put all in a dict
self._stnumb = stnumb
finret = {}
finret['status'] = self._stnumb
finret['Pf'] = self.MCControl['Pf'][-1]
finret['Beta'] = self.MCControl['Beta'][-1]
finret['CVPf'] = self.MCControl['CVPf'][-1]
finret['SamplingPoints'] = rsampdict
finret['cycles'] = cycle
finret['distparms'] = distparms
return finret
#-----------------------------------------------------------------------
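# Usage sketch of a complete run (illustrative; 'mc' and the numbers are hypothetical):
# >>> res = mc.Run(Ns=10000, Nmaxcycles=50, CVPf=0.05)
# >>> print(res['Pf'], res['Beta'], res['CVPf'], res['cycles'])
# >>> if res['status'] == 1:
# ...     print('Warning: cycle limit reached before CVPf convergence.')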
def GetSolutionControl(self, thing):
"""
Return values of Monte Carlo solution controllers.
Parameters
----------
thing: str, obligatory
Control that will be returned. Available things are listed below.
Available things
----------------
In function of N:
* 'N_Pf' = Probability of failure
* 'N_Beta' = Reliability index
* 'N_CVPf' = CV of Probability of failure
Returns
-------
2D numpy array of floats:
Each row has the cumulative number of simulations and the requested value at that point.
"""
# Number of simulations on each cycle and cycles
Ns = self.controls['Ns']
cycles = self.cycles
if thing == 'N_Pf':
# N vs Pf
result = np.zeros([cycles, 2])
result[:, 0] = range(Ns, Ns*(cycles+1), Ns)
result[:, 1] = self.MCControl['Pf']
elif thing == 'N_Beta':
# N vs Beta
result = np.zeros([cycles, 2])
result[:, 0] = range(Ns, Ns*(cycles+1), Ns)
result[:, 1] = self.MCControl['Beta']
elif thing == 'N_CVPf':
# N vs CVPf
result = np.zeros([cycles, 2])
result[:, 0] = range(Ns, Ns*(cycles+1), Ns)
result[:, 1] = self.MCControl['CVPf']
else:
exception = Exception('Error while getting values of Monte Carlo solution control. '+
'"%s" is not recognized as a valid "thing".' % (thing))
raise exception
# return
self._PrintR('Returning values of "%s".' % thing)
return result
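# Usage sketch (illustrative): each row pairs the cumulative number of simulations
# with the requested control value.
# >>> conv = mc.GetSolutionControl('N_Beta')
# >>> print(conv[-1, 0], conv[-1, 1])   # simulations and Beta at the last cycle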
def ExportDataCSV(self, filename, description=None):
"""
Exports Simulation data to a CSV file.
Parameters
----------
filename : str, obligatory
Name of the file that will receive the values; it doesn't need the
".csv" extension, it will be appended automatically.
description : str, optional
A string that will be written at the beginning of the file.
"""
# Open file
filename = filename+'.csv'
try:
f = open(filename, 'wt')
except:
exception = Exception('Unable to open the %s file for write data.' % filename)
raise exception
else:
# Starts with sep=, for Microsoft Excel
f.write('sep=,\n')
# Description
if description != None:
f.write('%s\n\n' % description)
f.write('Input data:\n')
# Simulation Controllers:
f.write('Simulation Controllers:\n')
f.write(',Ns/Cycle:,%d\n' % self.controls['Ns'])
f.write(',MaxCycles:,%d\n' % self.controls['Nmaxcycles'])
f.write(',CVPf target:,%2.4f\n' % self.controls['CVPf'])
f.write(',tol. Adapt.:,%s\n' % str(self.controls['tolAdPt']))
f.write('\n')
# ANSYS Properties
if self._ANSYS:
f.write('\n')
f.write('ANSYS Properties:\n')
f.write(',ANSYS Model:\n')
f.write(',,Model:,%s\n' % self.ansys.Model['inputname'])
f.write(',,Extra files:,%s\n' % self.ansys.Model['extrafiles'])
f.write(',,Input dir.:,%s\n' % self.ansys.Model['directory'])
f.write(',ANSYS Input variables:\n')
for eachVar in self.ansys.varInNames:
f.write(',,%s\n' % eachVar)
f.write(',ANSYS Output variables:\n')
for eachVar in self.ansys.varOutNames:
f.write(',,%s\n' % eachVar)
f.write('\n')
# Random variables
f.write('Random variables:\n')
f.write(',Name, Distribution, Mean, Standard Deviation, CV, Par1, Par2\n')
for eachVar in self.variableDistrib:
values = self.variableDistrib[eachVar]
cmd = ',%s,%s' % (eachVar, values[0])
for eachVal in values[1:]:
cmd = '%s,%f' % (cmd, eachVal)
f.write('%s\n' % cmd)
f.write('\n')
# Constant variables
f.write('Constant variables:\n')
f.write(',Name,Value\n')
for eachVar in self.variableConst:
cmd = ',%s,%8.5E' % (eachVar, self.variableConst[eachVar])
f.write('%s\n' % cmd)
f.write('\n')
# Correlation Matrix
f.write('Correlation matrix:\n')
# First line with Varnames:
cmd = ','
idx = 0
for eachVar in self.varId:
# Just random variables!
if idx >= len(self.variableDistrib):
break
cmd = '%s,%s' % (cmd, eachVar)
idx += 1
cmd = '%s\n' % cmd
f.write(cmd)
# Matrix lines with first column as Varname
idx = 0
for eachLine in self.correlMat:
# Prefix each matrix row with its variable name (varId keys in insertion order)
cmd = ',%s' % list(self.varId.keys())[idx]
for eachVal in eachLine:
cmd = '%s,%f' % (cmd, eachVal)
cmd = '%s\n' % cmd
f.write(cmd)
idx += 1
f.write('\n')
# Limit states and theirs Sampling Distributions
f.write('Limit States:\n')
for eachLS in self.limstates:
f.write(',Limit State %d:,"%s"\n' % (eachLS, self.limstates[eachLS][0]))
f.write(',,Weight:,%f\n' % self.limstates[eachLS][1])
f.write(',,Ns/Cycle:,%f\n' % self.limstates[eachLS][3])
f.write(',,Starting Sampling Distributions:\n')
f.write(',,,Name, Distribution, Mean, Standard Deviation, CV, Par1, Par2\n')
for eachVar in self.samplingDistrib[eachLS]:
values = self.samplingDistrib[eachLS][eachVar]
cmd = ',,,%s,%s' % (eachVar, values[0])
for eachVal in values[1:]:
cmd = '%s,%f' % (cmd, eachVal)
f.write('%s\n' % cmd)
f.write('\n')
# Results part
f.write('\nResults:\n')
# Final results
f.write('Monte Carlo results:\n')
f.write(',Exit status:,%d,\n' % self._stnumb)
f.write(',Total of simulations:,%3.3E\n' % (self.cycles*self.controls['Ns']))
f.write(',Probability of failure (Pf):,%2.4E\n' % self.MCControl['Pf'][-1])
f.write(',Reliability Index (Beta):,%2.3f\n' % self.MCControl['Beta'][-1])
f.write(',CV of Prob. of failure (CVPf):,%2.3f\n' % self.MCControl['CVPf'][-1])
f.write(',Elapsed time (minutes):,%4.3f\n' % self.MCControl['ElapsedTime'])
f.write('\n')
# Final Sampling Point
if self.controls['tolAdPt'] != False:
f.write('Final sampling point:\n')
for eachLS in self.SPForLS:
f.write(',Limit State %d:\n' % (eachLS))
for eachVar in self.SPForLS[eachLS]['pt']:
f.write(',,%s,%f\n' % (eachVar, self.SPForLS[eachLS]['pt'][eachVar][1]))
f.write('\n')
# Convergence on each Cycle
f.write('\nProcess Convergence:\n')
result = np.zeros([self.cycles, 4])
result[:, 0] = range(self.controls['Ns'], self.controls['Ns']*(self.cycles+1), self.controls['Ns'])
result[:, 1] = self.MCControl['Pf']
result[:, 2] = self.MCControl['Beta']
result[:, 3] = self.MCControl['CVPf']
f.write(',N,Pf,Beta,CVPf\n')
for eachLine in result:
f.write(',%d, %2.4E, %2.3f, %2.3f\n' % \
(eachLine[0],eachLine[1],eachLine[2],eachLine[3]))
# End
f.close()
self._PrintR('Simulation data exported to "%s".' % filename)
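# Usage sketch (illustrative; the file name and description are hypothetical):
# >>> mc.ExportDataCSV('results-run01', description='Cantilever beam, IS with adaptive point')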
def Graph(self, things, show=True, savefile=False):
"""
Generate graphics of Monte Carlo solution controllers.
Things can be a list of data that will be plotted in the same figure;
the figure doesn't need to be opened, it can be just saved, or just shown.
Parameters
----------
things : list of strings, obligatory
List of data that will be plotted in the same figure. Available things
are listed below.
show : bool, optional
Indicates whether the figure should be shown.
Defaults to True.
savefile : str/bool, optional
If it's False nothing is saved.
If it's a string it will be used as the directory+name of the saved figure;
it shouldn't have an extension, since it will be saved as SVG.
Defaults to False.
Available things
----------------
With N as horizontal axis:
* 'N_Pf' = Probability of failure
* 'N_Beta' = Reliability index
* 'N_CVPf' = CV of Probability of failure
"""
import matplotlib.pyplot as plt
# Verify if things is a list; if it isn't, put it in a list
if type(things) == list:
nplots = len(things)
things = things
else:
nplots = 1
things = [things]
idx = 0
# Create figure and plots
plots = plt.subplots(nplots, 1)
plt.figure(num=1, figsize=[4,8])
plt.subplots_adjust(hspace = 0.45, left = 0.15, right = 0.95, \
top = 0.95, bottom = 0.12)
# Now run all on the list
for eachPlot in things:
idx += 1
if eachPlot == 'N_CVPf':
xlab = 'Simulations'
ylab = 'CVPf'
elif eachPlot == 'N_Pf':
xlab = 'Simulations'
ylab = 'Probability of failure'
elif eachPlot == 'N_Beta':
xlab = 'Simulations'
ylab = r'Reliability index ($\beta$)'
else:
exception = Exception('Error while getting values of Monte Carlo solution control. '+
'"%s" is not recognized as a valid "thing".' % (eachPlot))
raise exception
if nplots > 1:
plots[1][idx-1].plot(self.GetSolutionControl(eachPlot)[:,0], self.GetSolutionControl(eachPlot)[:,1])
plots[1][idx-1].set_ylabel(ylab)
plots[1][idx-1].grid(True)
# Just last has xlabel
plots[1][-1].set_xlabel(xlab)
else:
plots[1].plot(self.GetSolutionControl(eachPlot)[:,0], self.GetSolutionControl(eachPlot)[:,1])
plots[1].set_ylabel(ylab)
plots[1].grid(True)
plots[1].set_xlabel(xlab)
# Save, or not
if savefile != False:
file = savefile+'.svg'
self._PrintR('Saving figure as "%s".' % file)
plt.savefig(file, transparent=True)
# Show, or not
if show == True:
plt.show()
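# Usage sketch (illustrative; the output name is hypothetical):
# >>> mc.Graph(['N_Pf', 'N_Beta', 'N_CVPf'], show=False, savefile='convergence')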
|
dutitello/parAnsys
|
examples/AngTang_FORM_610.py
|
<reponame>dutitello/parAnsys
"""
Running example 6.10 from Ang & Tang 1984
"""
import paransys
# Call ParAnsys
form = paransys.FORM()
# Console log On
form.Info(True)
# Create random variables
form.CreateVar('y', 'logn', 40, cv=0.125)
form.CreateVar('z', 'logn', 50, cv=0.050)
form.CreateVar('m', 'gumbel', 1000, cv=0.200)
# Set correlation
form.SetCorrel('y', 'z', 0.40)
# Create limit state
form.SetLimState('y*z-m')
# Run
values = form.Run(dh=0.001, meth='iHLRF', tolRel=1E-6)
form.ExportDataCSV('AngTang-FORM-610', 'Comments?')
|
dutitello/parAnsys
|
examples/ANSYS-Cantilever.py
|
<reponame>dutitello/parAnsys
"""
In this example we just connect ANSYS to Python through PARANSYS.
Here some parameters need to be set:
- paransys.ANSYS:
- exec_loc = ANSYS executable location
- run_location = where ANSYS will work (an empty dir)
- ans.SetModel:
- directory = where the file BeckCantileverAPDL.inp is located
"""
import paransys
import numpy as np
import pandas as pd
#===========================================================================================
# Analysis setup
#
# Call ANSYS
ans = paransys.ANSYS(exec_loc='C:\\Program Files\\ANSYS Inc\\v194\\ansys\\bin\\winx64\\ansys194.exe',
run_location='C:\\Temp\\wk',
jobname='file', nproc=4, override=True, cleardir=False, add_flags='')
# Activate the output
ans.Info(True)
# Set APDL model
ans.SetModel(inputname='BeckCantileverAPDL.inp', extrafiles=[], directory='.')
# Set input parameters
ans.CreateVarIn('q')
ans.CreateVarIn('l')
ans.CreateVarIn('b')
ans.CreateVarIn('h')
# Set output parameters
ans.CreateVarOut('stress')
# Set analysis length (number of tests)
ans.SetLength(3)
#===========================================================================================
# Set values and Run! If the model needs to run many times only this part needs to change!
#
# We are doing 3 analyses using 4 variables, so we need to create 4 NumPy arrays with the
# values that will be tested in each simulation, as in this table:
# ____________________________________________
# | Sim\Var | q | l | b | h |
# | Sim #1 | 1.15 | 60.0 | 4.0 | 1.0 |
# | Sim #2 | 1.15 | 62.0 | 4.0 | 1.0 |
# | Sim #3 | 1.15 | 60.0 | 4.0 | 1.2 |
# |__________|_______|________|_______|_______|
#
# Values at (Sim#1, Sim#2, Sim#3)
q = np.array([1.15, 1.15, 1.15])
l = np.array([60.0, 62.0, 60.0])
b = np.array([4.00, 4.00, 4.00])
h = np.array([1.00, 1.00, 1.20])
# Passing it to ANSYS
ans.SetVarInValues('q', q)
ans.SetVarInValues('l', l)
ans.SetVarInValues('b', b)
ans.SetVarInValues('h', h)
# Run baby, RUN!
ans.Run()
# Now we get the results in a dictionary
results = ans.GetVarOutValues()
# Print all results
print(results)
# The output parameter names always come in UPPER CASE, probably because ANSYS is written in FORTRAN77.
#
# NOW IT'S JUST FUN!
# Print just stresses
print(results['STRESS'])
# Put all in a Pandas DataFrame:
df = pd.DataFrame({'q': q, 'l': l, 'b': b, 'h': h, 'stress': results['STRESS']})
print(df)
# Derivatives of dStress/dl and dStress/dh:
dSdl = (df['stress'][1]-df['stress'][0])/(df['l'][1]-df['l'][0])
print(f'dStress/dl = {dSdl}')
dSdh = (df['stress'][2]-df['stress'][0])/(df['h'][2]-df['h'][0])
print(f'dStress/dh = {dSdh}')
#===========================================================================================
# Bonus:
# If you have to change variable names, the APDL model or anything besides the values
# you may have to use:
#
# To clear everything after paransys.ANSYS():
# It will erase the model, variables names and values.
#ans.ClearAll()
#
# To clear just the variables values:
#ans.ClearValues()
#
#===========================================================================================
|