Column schema (name: type, observed range):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 2 to 616)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 69)
- license_type: string (2 classes)
- repo_name: string (length 5 to 118)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (length 4 to 63)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (2.91k to 686M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (213 classes)
- src_encoding: string (30 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (2 to 10.3M)
- extension: string (246 classes)
- content: string (length 2 to 10.3M)
- authors: list (length 1)
- author_id: string (length 0 to 212)

| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3a8cc33cd2d2757f1c27da8c048ce5fa539d91f6
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_04_01/operations/_webhooks_operations.py
|
4ae54819faf2034d91ecade21e580da2c75800ef
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507
| 2020-09-04T15:48:52
| 2020-09-04T15:48:52
| 205,457,088
| 1
| 2
|
MIT
| 2020-06-16T16:38:15
| 2019-08-30T21:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 34,174
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class WebhooksOperations(object):
"""WebhooksOperations operations.
You should not instantiate this class directly; create a Client instance instead, which creates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The client API version. Constant value: "2017-10-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-10-01"
self.config = config
def get(
self, resource_group_name, registry_name, webhook_name, custom_headers=None, raw=False, **operation_config):
"""Gets the properties of the specified webhook.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param webhook_name: The name of the webhook.
:type webhook_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Webhook or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.containerregistry.v2019_04_01.models.Webhook or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'webhookName': self._serialize.url("webhook_name", webhook_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Webhook', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}'}
def _create_initial(
self, resource_group_name, registry_name, webhook_name, webhook_create_parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'webhookName': self._serialize.url("webhook_name", webhook_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(webhook_create_parameters, 'WebhookCreateParameters')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Webhook', response)
if response.status_code == 201:
deserialized = self._deserialize('Webhook', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, resource_group_name, registry_name, webhook_name, webhook_create_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates a webhook for a container registry with the specified
parameters.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param webhook_name: The name of the webhook.
:type webhook_name: str
:param webhook_create_parameters: The parameters for creating a
webhook.
:type webhook_create_parameters:
~azure.mgmt.containerregistry.v2019_04_01.models.WebhookCreateParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns Webhook or
ClientRawResponse<Webhook> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.containerregistry.v2019_04_01.models.Webhook]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.containerregistry.v2019_04_01.models.Webhook]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
webhook_name=webhook_name,
webhook_create_parameters=webhook_create_parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Webhook', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}'}
def _delete_initial(
self, resource_group_name, registry_name, webhook_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'webhookName': self._serialize.url("webhook_name", webhook_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, registry_name, webhook_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes a webhook from a container registry.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param webhook_name: The name of the webhook.
:type webhook_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
webhook_name=webhook_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}'}
def _update_initial(
self, resource_group_name, registry_name, webhook_name, webhook_update_parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'webhookName': self._serialize.url("webhook_name", webhook_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(webhook_update_parameters, 'WebhookUpdateParameters')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Webhook', response)
if response.status_code == 201:
deserialized = self._deserialize('Webhook', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, registry_name, webhook_name, webhook_update_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a webhook with the specified parameters.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param webhook_name: The name of the webhook.
:type webhook_name: str
:param webhook_update_parameters: The parameters for updating a
webhook.
:type webhook_update_parameters:
~azure.mgmt.containerregistry.v2019_04_01.models.WebhookUpdateParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns Webhook or
ClientRawResponse<Webhook> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.containerregistry.v2019_04_01.models.Webhook]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.containerregistry.v2019_04_01.models.Webhook]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
webhook_name=webhook_name,
webhook_update_parameters=webhook_update_parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Webhook', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}'}
def list(
self, resource_group_name, registry_name, custom_headers=None, raw=False, **operation_config):
"""Lists all the webhooks for the specified container registry.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Webhook
:rtype:
~azure.mgmt.containerregistry.v2019_04_01.models.WebhookPaged[~azure.mgmt.containerregistry.v2019_04_01.models.Webhook]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.WebhookPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks'}
def ping(
self, resource_group_name, registry_name, webhook_name, custom_headers=None, raw=False, **operation_config):
"""Triggers a ping event to be sent to the webhook.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param webhook_name: The name of the webhook.
:type webhook_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: EventInfo or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.containerregistry.v2019_04_01.models.EventInfo or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.ping.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'webhookName': self._serialize.url("webhook_name", webhook_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EventInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
ping.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}/ping'}
def get_callback_config(
self, resource_group_name, registry_name, webhook_name, custom_headers=None, raw=False, **operation_config):
"""Gets the configuration of service URI and custom headers for the
webhook.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param webhook_name: The name of the webhook.
:type webhook_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CallbackConfig or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.containerregistry.v2019_04_01.models.CallbackConfig or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_callback_config.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'webhookName': self._serialize.url("webhook_name", webhook_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CallbackConfig', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_callback_config.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}/getCallbackConfig'}
def list_events(
self, resource_group_name, registry_name, webhook_name, custom_headers=None, raw=False, **operation_config):
"""Lists recent events for the specified webhook.
:param resource_group_name: The name of the resource group to which
the container registry belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param webhook_name: The name of the webhook.
:type webhook_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Event
:rtype:
~azure.mgmt.containerregistry.v2019_04_01.models.EventPaged[~azure.mgmt.containerregistry.v2019_04_01.models.Event]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_events.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'registryName': self._serialize.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
'webhookName': self._serialize.url("webhook_name", webhook_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.EventPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_events.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}/listEvents'}
|
[
"laurent.mazuel@gmail.com"
] |
laurent.mazuel@gmail.com
|
58fc33a5d4efa84fc4525ebd70ca5e1600944b5a
|
a5d712b886ed70edc1790536be3552b90802d84a
|
/env/lib/python3.6/abc.py
|
155573ae0da6959cc99617821b93bc0980d37c4a
|
[] |
no_license
|
NehemiasEC/django_react_full_stack
|
21e1a390c9910c221869cd20f5e0f953e469e621
|
ac8db7d07a023362cdae070cc5ed6feeaedfc79f
|
refs/heads/master
| 2020-03-26T22:50:08.982883
| 2018-08-21T01:42:10
| 2018-08-21T01:42:10
| 145,490,603
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 47
|
py
|
/home/nehemiasec/anaconda3/lib/python3.6/abc.py
|
[
"neli92santscz@gmail.com"
] |
neli92santscz@gmail.com
|
dc16e1aa50408d04cc565b1a879caf73e058ce13
|
20adcce3dd974d9fe8270743aa0161a9d55fa9c7
|
/test.py
|
df32903df1fa6c4e3b4dba9352dddf361fc8d13c
|
[] |
no_license
|
AlexDel/clause_handler
|
3c23c4974be858ae65b06b251b613f617fb0e711
|
e7ea540f95680135f30cff14c768f0e1fbf35c68
|
refs/heads/master
| 2020-04-05T08:39:30.331361
| 2012-11-16T08:58:48
| 2012-11-16T08:58:48
| 156,723,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
# coding=utf-8
import csv  # csv.reader is used below but was not imported in the original snippet

def test():
    # read the test cases from a csv file
    tests = []
    for row in csv.reader(open('test_set/DB_1000.csv'), delimiter=';'):
        # convert the bytes from the cp1251 encoding
        tests.append(tuple([r.decode('utf8') for r in row]))
    return tests

# build the test set before iterating over it
tests = test()
o = 0
for t in tests[1:]:
    s1 = split_to_clauses(sentence=t[1], language='ru')
    s2 = split_to_clauses(sentence=t[0], language='en')
    if len(s2['clauses']) > 1:
        print str(len(s2['clauses'])) + str(s2['clauses'])
        for i, s in enumerate(s1['clauses']):
            print i
            print s.encode('utf8')
            print '\n'
        o += 1
print o
|
[
"verbalab@veralab-laptop"
] |
verbalab@veralab-laptop
|
69657370afa71482dad27666f26a61cd6136a9e6
|
4e7742153afc6695de1838e352f2ba99bccc9e2e
|
/genImg.py
|
8ed29169bde7e91312d3f573d83f49732cce94ca
|
[] |
no_license
|
gottfriede/SFM-simulation
|
77c523de2e652cdc3c5ac752bd65ad4361077468
|
d0bf10d8aab54c08b0497595cdb1954d4cf5d0ad
|
refs/heads/main
| 2023-01-30T23:45:28.093229
| 2020-12-01T03:56:56
| 2020-12-01T03:56:56
| 316,510,278
| 10
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
import openpyxl
from openpyxl import load_workbook
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

plt.rcParams['font.sans-serif'] = ['KaiTi']  # set the default font
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign '-' from being drawn as a box in saved images

if __name__ == '__main__':
    x = []
    y = []
    z = []
    for i in [10, 20, 30, 40]:
        for j in range(4):
            x.append(i)
    for i in range(4):
        for j in [1, 2, 3, 4]:
            y.append(j)
    workbook = load_workbook('仿真结果.xlsx')  # "simulation results.xlsx"
    worksheet = workbook['Sheet1']
    i = 10
    while i <= 37:
        z.append(float(worksheet.cell(i, 5).value))
        z.append(float(worksheet.cell(i + 1, 5).value))
        z.append(float(worksheet.cell(i + 2, 5).value))
        z.append(float(worksheet.cell(i + 3, 5).value))
        i = i + 8
    # print(x)
    # print(y)
    # print(z)
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.plot_trisurf(x, y, z, linewidth=0.2, antialiased=True)
    ax.set_xlabel('房间人数')  # "number of people in the room"
    ax.set_ylabel('出口宽度')  # "exit width"
    ax.set_zlabel('撤离时间')  # "evacuation time"
    plt.show()
|
[
"noreply@github.com"
] |
gottfriede.noreply@github.com
|
8b7fe62e7a52be37926c594a6e45bcb003570e69
|
44c4e5b10d7e1eb1d154bce602e2e863eb15ae14
|
/opensea.py
|
fc74e332d627dcef83a901db0f7a5a19c2dfc1da
|
[] |
no_license
|
holyverse/OpenSea-get-MetaData
|
d0b2639291233cf80a073f6cd12034c18994be4e
|
1f0c4afeb941992c54e45d77c5a2e7286908dec1
|
refs/heads/main
| 2023-07-24T22:54:20.674334
| 2021-09-05T07:34:46
| 2021-09-05T07:34:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,269
|
py
|
import requests
import csv
import time
def get_leaves(item, key=None):
if isinstance(item, dict):
leaves = []
for i in item.keys():
leaves.extend(get_leaves(item[i], i))
return leaves
elif isinstance(item, list):
leaves = []
for i in item:
leaves.extend(get_leaves(i, key))
return leaves
else:
return [(key, item)]
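# Worked example (illustrative): get_leaves({"a": {"b": 1}, "c": [2, 3]}) returns
# [("b", 1), ("c", 2), ("c", 3)]; nested dicts and lists are flattened into
# (innermost key, value) pairs, which main() below writes out as CSV columns.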
def main():
output_file = time.strftime("%Y%m%d-%H%M%S") + ".csv"
s = requests.Session()
HEADER = {
"Referer": "https://opensea.io/",
"sec-ch-ua": '"Chromium";v="92", " Not A;Brand";v="99", "Google Chrome";v="92"',
"sec-ch-ua-mobile": "?0",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
"X-API-KEY": "2f6f419a083c46de9d83ce3dbe7db601"
}
opensea_url = "https://api.opensea.io/api/v1/assets"
offset = 0
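# The loop below pages through the /assets endpoint 50 items at a time, bumping
# `offset` after each request, and stops on an empty JSON body or a non-200 status.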
try:
write_header = True
while True:
querystring = {
"asset_contract_address": "0x3bf2922f4520a8ba0c2efc3d2a1539678dad5e9d",
"order_direction": "desc",
"offset": offset, "limit": "50"}
r = s.get(opensea_url, headers=HEADER, params=querystring)
if r.status_code == 200:
result = r.json()
if result != {}:
last_results = []
data = result["assets"]
for asset in data:
metadata = {}
metadata["name"] = asset["name"]
metadata["image"] = asset["image_original_url"]
assets = sorted(asset["traits"], key=lambda k: k['trait_type'])
for idx, trait in enumerate(assets):
metadata["attributes" + str(idx)] = {}
metadata["attributes" + str(idx)]["trait_type"] = trait["trait_type"]
metadata["attributes" + str(idx)]["value"] = trait["value"]
try:
metadata["attributes" + str(idx)]["max_value"] = trait["max_value"]
except:
pass
# metadata["attributes"] = asset["traits"]
last_results.append(metadata)
with open(output_file, 'a', newline='', encoding="utf8") as f_output:
for result in last_results:
csv_output = csv.writer(f_output)
leaf_entries = get_leaves(result)
if write_header:
csv_output.writerow([k for k, v in leaf_entries])
write_header = False
csv_output.writerow([v for k, v in leaf_entries])
else:
break
offset = offset + 50
# print(result)
# print("done!")
else:
print("get error id")
break
except Exception as e:
print(str(e))
if __name__ == "__main__":
main()
|
[
"89365132+Artsiomliaver@users.noreply.github.com"
] |
89365132+Artsiomliaver@users.noreply.github.com
|
9fbc644b412de08113816ddaddca55d050d6e252
|
e5b7ec56dab01e7ac65694fb0b9f51004285479e
|
/got_backend/gotapp/models/uczestnictwo.py
|
02f7acc0186473d16c9d2ed428cfcaf92215dffb
|
[] |
no_license
|
MIKE432/project-po-backend
|
9a2c45a62c61e6fbd647702c005c86d320857482
|
a37972119d856eef89c5adccac891f35ebb8a140
|
refs/heads/master
| 2023-02-27T00:27:19.089258
| 2021-01-30T01:50:56
| 2021-01-30T01:50:56
| 330,274,405
| 0
| 0
| null | 2021-02-01T13:57:00
| 2021-01-16T23:01:01
|
Python
|
UTF-8
|
Python
| false
| false
| 626
|
py
|
from django.db import models
from rest_framework import serializers
class Uczestnictwo(models.Model):
turysta = models.ForeignKey('gotapp.Osoba', on_delete=models.CASCADE)
wycieczka = models.ForeignKey('gotapp.Wycieczka', on_delete=models.CASCADE)
opis = models.CharField(max_length=2000, blank=True, null=True)
class Meta:
unique_together = (('turysta', 'wycieczka'),)
def __str__(self) -> str:
return f'{str(self.turysta)} ---> {str(self.wycieczka)}'
class UczestnictwoSerializer(serializers.ModelSerializer):
class Meta:
model = Uczestnictwo
fields = '__all__'
|
[
"lokigameplayer@gmail.com"
] |
lokigameplayer@gmail.com
|
a4c53d61f3f8cec5029c106bb11270a3a472841b
|
1b6e73e154beed974e7611c177e057735b439f78
|
/prueba2/prueba2.py
|
552f1833b1d29b2a113c0fe164a15ec0ec20cd4e
|
[] |
no_license
|
crist-start/compilador-pseudo-c
|
d1f9dfe17cad1126724aea7a955f6cc945607e9f
|
473ee2947679d22bc4adf7ff2f90d6b6261b4730
|
refs/heads/master
| 2022-11-06T09:47:29.042724
| 2020-06-22T18:29:09
| 2020-06-22T18:29:09
| 270,763,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
def triangulo(a=0):
    for i in range(a):
        print("*" * (i + 1))
|
[
"cristart.cs@gmail.com"
] |
cristart.cs@gmail.com
|
195e826a50c2a37aca713f35e1a54d9456d070fa
|
07ea5504b7de8a67cb93eb7db1f8ea71a4642f37
|
/transform.py
|
77c6b7c1f971d6189f47312a8a3abc948f628943
|
[] |
no_license
|
allisterke/color-transform
|
9d84ef160e5e98960f3bec03b47643c249060de7
|
5527374f51e22a8fee9c086f854b9eb71b480f5c
|
refs/heads/master
| 2021-01-12T15:18:09.264698
| 2016-10-24T03:49:29
| 2016-10-24T03:49:29
| 71,749,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,900
|
py
|
from __future__ import division
import numpy as np
from scipy.misc import imread, imresize, imsave
from collections import OrderedDict
color_path = 'image2.jpeg'
content_path = 'image1.jpeg'
color_unit_pixel = 8
content_unit_pixel = 16
max_size = 1042
def channel_to_rgb(image, unit_pixel):
pixel_num = 256 // unit_pixel
height = image.shape[0]
width = image.shape[1]
channel_num = image.shape[2]
image_list = image.reshape([-1, channel_num])
image_rgb = np.array([0] * (height * width))
for idx in range(height * width):
channel = image_list[idx]
channel_r = int(channel[0]) // unit_pixel
channel_g = int(channel[1]) // unit_pixel
channel_b = int(channel[2]) // unit_pixel
rgb = channel_r * pixel_num * pixel_num + channel_g * pixel_num + channel_b
image_rgb[idx] = rgb
return image_rgb.reshape([height, width])
def rgb_to_channel(rgb, unit_pixel):
pixel_num = 256 // unit_pixel
channel_r = int(rgb) // (pixel_num * pixel_num)
channel_g = (int(rgb) - (channel_r * pixel_num * pixel_num)) // pixel_num
channel_b = int(rgb) - (channel_r * pixel_num * pixel_num) - (channel_g * pixel_num)
# image[idx] = np.array([channel_r * unit_pixel, channel_g * unit_pixel, channel_b * unit_pixel])
return np.array([channel_r, channel_g, channel_b])
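# Worked example (illustrative), with unit_pixel = 8 so pixel_num = 32: a pixel
# (200, 100, 50) quantizes to channel indices (25, 12, 6), packs to
# rgb = 25*32*32 + 12*32 + 6 = 25990, and rgb_to_channel(25990, 8) recovers the
# quantized indices [25, 12, 6] (note: the indices, not the original 0-255 values).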
def load_image(image_path, unit_pixel):
image = imread(image_path)
height = image.shape[0]
width = image.shape[1]
if height >= width:
width = int(float(width) / float(height) * max_size)
height = max_size
else:
height = int(float(height) / float(width) * max_size)
width = max_size
image = imresize(image, [height, width])
image_rgb = channel_to_rgb(image, unit_pixel)
color_dict = dict()
color_idx = dict()
color_idx_reverse = dict()
image_list = np.reshape(image_rgb, [image_rgb.size])
# print image_list[0]
for idx in range(image_list.size):
rgb = image_list[idx]
if rgb in color_dict.keys():
color_dict[rgb] += 1
else:
color_dict[rgb] = 1
if idx % 20000 == 0:
print "%d pixels complete. " % idx
color_dict = OrderedDict(sorted(color_dict.items(), key=lambda x: x[1], reverse=True))
count = 0
for rgb in color_dict:
# print rgb
color_idx[rgb] = count
color_idx_reverse[count] = rgb
count += 1
return image_rgb, color_idx, color_idx_reverse
def pixel_trans(image_rgb, image_idx, color_idx_reverse):
height = image_rgb.shape[0]
width = image_rgb.shape[1]
image_rgb = image_rgb.reshape([image_rgb.size])
generate_image = np.array([0, 0, 0] * image_rgb.size).reshape(image_rgb.size, 3)
image_num = len(image_idx)
color_num = len(color_idx_reverse)
num = min(image_num, color_num)
for idx in range(image_rgb.size):
rgb = int(image_rgb[idx])
index = image_idx[rgb]
if index >= num:
channel = rgb_to_channel(rgb, content_unit_pixel)
generate_image[idx] = channel
continue
channel = rgb_to_channel(color_idx_reverse[index], color_unit_pixel)
generate_image[idx] = channel
generate_image = np.reshape(generate_image, [height, width, 3])
return generate_image
def transform():
color_rgb, color_idx, color_idx_reverse = load_image(color_path, color_unit_pixel)
content_rgb, content_idx, content_idx_reverse = load_image(content_path, content_unit_pixel)
generate_image = pixel_trans(content_rgb, content_idx, color_idx_reverse)
# generate_image = rgb_to_channel(generate_image, 1)
imsave('./generate.jpg', generate_image)
def test():
a = np.array([[[1,2,3],[11, 12, 13]],[[21,22,23],[31,32,33]]])
b = channel_to_rgb(a, 1)
c = rgb_to_channel(b, 1)
print a
print b
print c
transform()
|
[
"allister@gmail.com"
] |
allister@gmail.com
|
c6ab7229a6adeb73948dc28f3e733ad9ee6ecaf1
|
0680e9c5c4849a6a6650b92a58fdb8335c892379
|
/nlc_model.py
|
6a25659478cde641d53898fc26625a5fdcef944a
|
[] |
no_license
|
wanjinchang/nlc
|
e62899d2e99d12f47bd6c39e62580963172801e2
|
04efb45b7c8a76f9fbbfa02c282166351a2198ee
|
refs/heads/master
| 2020-04-05T18:58:53.500314
| 2016-05-05T06:05:19
| 2016-05-05T06:05:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,815
|
py
|
# Copyright 2016 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
class GRUCellAttn(rnn_cell.GRUCell):
def __init__(self, num_units, encoder_output, scope=None):
self.hs = encoder_output
with vs.variable_scope(scope or type(self).__name__):
with vs.variable_scope("Attn1"):
hs2d = tf.reshape(self.hs, [-1, num_units])
phi_hs2d = tanh(rnn_cell.linear(hs2d, num_units, True, 1.0))
self.phi_hs = tf.reshape(phi_hs2d, tf.shape(self.hs))
super(GRUCellAttn, self).__init__(num_units)
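# Descriptive note (not in the original file): in __call__ below, each encoder state
# h_s is scored against the current decoder output as sum(tanh(W1 h_s) * tanh(W2 h_t)),
# the scores are softmax-normalized over the time axis, and the resulting context
# vector is combined with the GRU output through a ReLU layer.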
def __call__(self, inputs, state, scope=None):
gru_out, gru_state = super(GRUCellAttn, self).__call__(inputs, state, scope)
with vs.variable_scope(scope or type(self).__name__):
with vs.variable_scope("Attn2"):
gamma_h = tanh(rnn_cell.linear(gru_out, self._num_units, True, 1.0))
weights = tf.reduce_sum(self.phi_hs * gamma_h, reduction_indices=2, keep_dims=True)
weights = tf.exp(weights - tf.reduce_max(weights, reduction_indices=0, keep_dims=True))
weights = weights / (1e-6 + tf.reduce_sum(weights, reduction_indices=0, keep_dims=True))
context = tf.reduce_sum(self.hs * weights, reduction_indices=0)
with vs.variable_scope("AttnConcat"):
out = tf.nn.relu(rnn_cell.linear([context, gru_out], self._num_units, True, 1.0))
attn_map = tf.reduce_sum(tf.slice(weights, [0, 0, 0], [-1, -1, 1]), reduction_indices=2)
return (out, out)
class NLCModel(object):
def __init__(self, vocab_size, size, num_layers, max_gradient_norm, batch_size, learning_rate,
learning_rate_decay_factor, dropout, forward_only=False):
self.size = size
self.vocab_size = vocab_size
self.batch_size = batch_size
self.num_layers = num_layers
self.keep_prob = 1.0 - dropout
self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
self.learning_rate_decay_op = self.learning_rate.assign(self.learning_rate * learning_rate_decay_factor)
self.global_step = tf.Variable(0, trainable=False)
self.source_tokens = tf.placeholder(tf.int32, shape=[None, self.batch_size], name="source_tokens")
self.target_tokens = tf.placeholder(tf.int32, shape=[None, self.batch_size], name="target_tokens")
self.source_mask = tf.placeholder(tf.int32, shape=[None, self.batch_size], name="source_mask")
self.target_mask = tf.placeholder(tf.int32, shape=[None, self.batch_size], name="target_mask")
self.source_length = tf.reduce_sum(self.source_mask, reduction_indices=0)
self.target_length = tf.reduce_sum(self.target_mask, reduction_indices=0)
self.setup_embeddings()
self.setup_encoder()
self.setup_decoder()
self.setup_loss()
params = tf.trainable_variables()
if not forward_only:
opt = tf.train.AdamOptimizer(self.learning_rate)
gradients = tf.gradients(self.losses, params)
clipped_gradients, _ = tf.clip_by_global_norm(gradients, max_gradient_norm)
self.gradient_norm = tf.global_norm(clipped_gradients)
self.param_norm = tf.global_norm(params)
self.updates = opt.apply_gradients(
zip(clipped_gradients, params), global_step=self.global_step)
self.saver = tf.train.Saver(tf.all_variables())
def setup_embeddings(self):
with vs.variable_scope("embeddings"):
self.L_enc = tf.get_variable("L_enc", [self.vocab_size, self.size])
self.L_dec = self.L_enc #tf.get_variable("L_dec", [self.vocab_size, self.size])
self.encoder_inputs = embedding_ops.embedding_lookup(self.L_enc, self.source_tokens)
self.decoder_inputs = embedding_ops.embedding_lookup(self.L_dec, self.target_tokens)
def setup_encoder(self):
self.encoder_cell = rnn_cell.GRUCell(self.size)
with vs.variable_scope("PryamidEncoder"):
inp = self.encoder_inputs
mask = self.source_mask
out = None
for i in xrange(self.num_layers):
with vs.variable_scope("EncoderCell%d" % i) as scope:
srclen = tf.reduce_sum(mask, reduction_indices=0)
out, _ = self.bidirectional_rnn(self.encoder_cell, self.dropout(inp), srclen, scope=scope)
inp, mask = self.downscale(out, mask)
self.encoder_output = out
def setup_decoder(self):
if self.num_layers > 1:
self.decoder_cell = rnn_cell.GRUCell(self.size)
self.attn_cell = GRUCellAttn(self.size, self.encoder_output, scope="DecoderAttnCell")
out = self.decoder_inputs
with vs.variable_scope("Decoder"):
inp = self.decoder_inputs
for i in xrange(self.num_layers - 1):
with vs.variable_scope("DecoderCell%d" % i) as scope:
out, _ = rnn.dynamic_rnn(self.decoder_cell, self.dropout(inp), time_major=True,
dtype=dtypes.float32, sequence_length=self.target_length,
scope=scope)
inp = out
with vs.variable_scope("DecoderAttnCell") as scope:
out, _ = rnn.dynamic_rnn(self.attn_cell, self.dropout(inp), time_major=True,
dtype=dtypes.float32, sequence_length=self.target_length,
scope=scope)
self.decoder_output = out
def setup_loss(self):
with vs.variable_scope("Logistic"):
do2d = tf.reshape(self.decoder_output, [-1, self.size])
logits2d = rnn_cell.linear(do2d, self.vocab_size, True, 1.0)
outputs2d = tf.nn.softmax(logits2d)
self.outputs = tf.reshape(outputs2d, [-1, self.batch_size, self.vocab_size])
targets_no_GO = tf.slice(self.target_tokens, [1, 0], [-1, -1])
masks_no_GO = tf.slice(self.target_mask, [1, 0], [-1, -1])
# easier to pad target/mask than to split decoder input since tensorflow does not support negative indexing
labels1d = tf.reshape(tf.pad(targets_no_GO, [[0, 1], [0, 0]]), [-1])
mask1d = tf.reshape(tf.pad(masks_no_GO, [[0, 1], [0, 0]]), [-1])
losses1d = tf.nn.sparse_softmax_cross_entropy_with_logits(logits2d, labels1d) * tf.to_float(mask1d)
losses2d = tf.reshape(losses1d, [-1, self.batch_size])
self.losses = tf.reduce_sum(losses2d) / self.batch_size
def dropout(self, inp):
return tf.nn.dropout(inp, self.keep_prob)
def downscale(self, inp, mask):
with vs.variable_scope("Downscale"):
inp2d = tf.reshape(tf.transpose(inp, perm=[1, 0, 2]), [-1, 2 * self.size])
out2d = rnn_cell.linear(inp2d, self.size, True, 1.0)
out3d = tf.reshape(out2d, [self.batch_size, -1, self.size])
out3d = tf.transpose(out3d, perm=[1, 0, 2])
out = tanh(out3d)
mask = tf.transpose(mask)
mask = tf.reshape(mask, [-1, 2])
mask = tf.cast(mask, tf.bool)
mask = tf.reduce_any(mask, reduction_indices=1)
mask = tf.to_int32(mask)
mask = tf.reshape(mask, [self.batch_size, -1])
mask = tf.transpose(mask)
return out, mask
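# Descriptive note (not in the original file): downscale merges each pair of adjacent
# time steps (the reshape to [-1, 2 * size]) through a linear layer, so every encoder
# layer halves the sequence length; this is the "pyramid" in the encoder scope name.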
def bidirectional_rnn(self, cell, inputs, lengths, scope=None):
name = scope.name or "BiRNN"
# Forward direction
with vs.variable_scope(name + "_FW") as fw_scope:
output_fw, output_state_fw = rnn.dynamic_rnn(cell, inputs, time_major=True, dtype=dtypes.float32,
sequence_length=lengths, scope=fw_scope)
# Backward direction
with vs.variable_scope(name + "_BW") as bw_scope:
output_bw, output_state_bw = rnn.dynamic_rnn(cell, inputs, time_major=True, dtype=dtypes.float32,
sequence_length=lengths, scope=bw_scope)
output_bw = tf.reverse_sequence(output_bw, tf.to_int64(lengths), seq_dim=0, batch_dim=1)
outputs = output_fw + output_bw
output_state = output_state_fw + output_state_bw
return (outputs, output_state)
def train(self, session, source_tokens, source_mask, target_tokens, target_mask):
input_feed = {}
input_feed[self.source_tokens] = source_tokens
input_feed[self.target_tokens] = target_tokens
input_feed[self.source_mask] = source_mask
input_feed[self.target_mask] = target_mask
output_feed = [self.updates, self.gradient_norm, self.losses, self.param_norm]
outputs = session.run(output_feed, input_feed)
return outputs[1], outputs[2], outputs[3]
def test(self, session, source_tokens, source_mask, target_tokens, target_mask):
input_feed = {}
input_feed[self.source_tokens] = source_tokens
input_feed[self.target_tokens] = target_tokens
input_feed[self.source_mask] = source_mask
input_feed[self.target_mask] = target_mask
output_feed = [self.losses, self.outputs]
outputs = session.run(output_feed, input_feed)
return outputs[0], outputs[1]
|
[
"avati@cs.stanford.edu"
] |
avati@cs.stanford.edu
|
2bc47c390bbcd0f6fef0942aaca9a6b47da97c2f
|
09db1ddaaa1efea440537c39249bff411b977611
|
/GUI/deepsort_v5/track.py
|
6d67287ea73c7827ef92414d2b8b63b27a7ff4e4
|
[] |
no_license
|
lucas-korea/AI_Bigdata_11th_B1
|
32075ab2c236508c8b2de85b1ca983beb7d59324
|
03fb1335efb4a4cc8eee3299d11963cd610a1e98
|
refs/heads/master
| 2023-02-02T14:16:27.440491
| 2020-12-19T15:07:48
| 2020-12-19T15:07:48
| 290,676,858
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,011
|
py
|
from yolov5.utils.datasets import LoadImages, LoadStreams
from yolov5.utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, plot_one_box, strip_optimizer)
from yolov5.utils.torch_utils import select_device, load_classifier, time_synchronized
from deep_sort.utils.parser import get_config
from deep_sort.deep_sort import DeepSort
import argparse
import os
import platform
import shutil
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
# https://github.com/pytorch/pytorch/issues/3678
import sys
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
sys.path.insert(0, './yolov5')
palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
def bbox_rel(image_width, image_height, *xyxy):
"""" Calculates the relative bounding box from absolute pixel values. """
bbox_left = min([xyxy[0].item(), xyxy[2].item()])
bbox_top = min([xyxy[1].item(), xyxy[3].item()])
bbox_w = abs(xyxy[0].item() - xyxy[2].item())
bbox_h = abs(xyxy[1].item() - xyxy[3].item())
x_c = (bbox_left + bbox_w / 2)
y_c = (bbox_top + bbox_h / 2)
w = bbox_w
h = bbox_h
return x_c, y_c, w, h
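# Example (illustrative): for a box with corners (100, 50) and (300, 150), bbox_rel
# returns center (200, 100) with width 200 and height 100, i.e. the
# (x_center, y_center, w, h) layout passed to deepsort.update() below.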
def compute_color_for_labels(label):
"""
Simple function that adds fixed color depending on the class
"""
color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]
return tuple(color)
def draw_boxes(img, bbox, identities=None, offset=(0,0)):
for i, box in enumerate(bbox):
x1, y1, x2, y2 = [int(i) for i in box]
x1 += offset[0]
x2 += offset[0]
y1 += offset[1]
y2 += offset[1]
# box text and bar
id = int(identities[i]) if identities is not None else 0
color = compute_color_for_labels(id)
label = '{}{:d}'.format("", id)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2, 2)[0]
cv2.rectangle(img, (x1, y1), (x2, y2), color, 3)
cv2.rectangle(img, (x1, y1), (x1 + t_size[0] + 3, y1 + t_size[1] + 4), color, -1)
cv2.putText(img, label, (x1, y1 + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 2, [255, 255, 255], 2)
return img
def detect(opt, save_img=False):
out, source, weights, view_img, save_txt, imgsz = \
opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
# initialize deepsort
cfg = get_config()
cfg.merge_from_file(opt.config_deepsort)
deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
use_cuda=True)
# Initialize
device = select_device(opt.device)
if os.path.exists(out):
shutil.rmtree(out) # delete output folder
os.makedirs(out) # make new output folder
half = device.type != 'cpu' # half precision only supported on CUDA
# Load model
model = torch.load(weights, map_location=device)['model'].float() # load to FP32
model.to(device).eval()
if half:
model.half() # to FP16
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = True
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
else:
view_img = True
save_img = True
dataset = LoadImages(source, img_size=imgsz)
# Get names and colors
names = model.module.names if hasattr(model, 'module') else model.names
# Run inference
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
save_path = str(Path(out))
txt_path = str(Path(out)) + '/results.txt'
for frame_idx, (path, img, im0s, vid_cap) in enumerate(dataset):
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
# Apply NMS
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
t2 = time_synchronized()
# Process detections
for i, det in enumerate(pred): # detections per image
if webcam: # batch_size >= 1
p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
else:
p, s, im0 = path, '', im0s
s += '%gx%g ' % img.shape[2:] # print string
save_path = str(Path(out) / Path(p).name)
if det is not None and len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += '%g %ss, ' % (n, names[int(c)]) # add to string
bbox_xywh = []
confs = []
# Adapt detections to deep sort input format
for *xyxy, conf, cls in det:
img_h, img_w, _ = im0.shape
x_c, y_c, bbox_w, bbox_h = bbox_rel(img_w, img_h, *xyxy)
obj = [x_c, y_c, bbox_w, bbox_h]
bbox_xywh.append(obj)
confs.append([conf.item()])
xywhs = torch.Tensor(bbox_xywh)
confss = torch.Tensor(confs)
# Pass detections to deepsort
outputs = deepsort.update(xywhs, confss, im0)
# draw boxes for visualization
if len(outputs) > 0:
bbox_xyxy = outputs[:, :4]
identities = outputs[:, -1]
draw_boxes(im0, bbox_xyxy, identities)
# Write MOT compliant results to file
if save_txt and len(outputs) != 0:
for j, output in enumerate(outputs):
bbox_left = output[0]
bbox_top = output[1]
bbox_w = output[2]
bbox_h = output[3]
identity = output[-1]
with open(txt_path, 'a') as f:
f.write(('%g ' * 10 + '\n') % (frame_idx, identity, bbox_left,
bbox_top, bbox_w, bbox_h, -1, -1, -1, -1)) # label format
# Print time (inference + NMS)
print('%sDone. (%.3fs)' % (s, t2 - t1))
# Stream results
if view_img:
cv2.imshow(p, im0)
if cv2.waitKey(1) == ord('q'): # q to quit
raise StopIteration
# Save results (image with detections)
if save_img:
print('saving img!')
if dataset.mode == 'images':
cv2.imwrite(save_path, im0)
else:
print('saving video!')
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*opt.fourcc), fps, (w, h))
vid_writer.write(im0)
if save_txt or save_img:
print('Results saved to %s' % os.getcwd() + os.sep + out)
if platform == 'darwin': # MacOS
os.system('open ' + save_path)
print('Done. (%.3fs)' % (time.time() - t0))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default='yolov5/weights/yolov5s.pt', help='model.pt path')
parser.add_argument('--source', type=str, default='0', help='source') # file/folder, 0 for webcam
parser.add_argument('--output', type=str, default='inference/output', help='output folder') # output folder
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.4, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS')
parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
# class 0 is person
parser.add_argument('--classes', nargs='+', type=int, default=[0], help='filter by class')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument("--config_deepsort", type=str, default="deep_sort/configs/deep_sort.yaml")
args = parser.parse_args()
args.img_size = check_img_size(args.img_size)
print(args)
with torch.no_grad():
detect(args)
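# Example invocation (illustrative only; the script name and weight/config paths
# are assumptions, not taken from this record):
#   python track.py --source 0 --weights yolov5/weights/yolov5s.pt --view-img --save-txt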
|
[
"jcy37012@naver.com"
] |
jcy37012@naver.com
|
f6988437015968d9b95b5809d4ca55b49c3b756e
|
67336c27c00f6277a944c1179b77ec12c3b8ee70
|
/Clases_PDI/ScriptsEjemplosGDAL/modis_open.py
|
d2733c6252ef34e72bda8eff794b46791900b781
|
[] |
no_license
|
ramonapariciog/LIAPRE-Masters
|
7c93182398fdd1071ab5cdcb108d09a2ea0f76c0
|
1ac4f1c73ffaea9f16dfa29ad8e117e147446bf8
|
refs/heads/master
| 2023-05-26T09:11:58.210472
| 2023-05-18T04:40:58
| 2023-05-18T04:40:58
| 131,368,822
| 0
| 0
| null | 2018-04-28T03:27:45
| 2018-04-28T03:27:45
| null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
from osgeo import gdal
from osgeo import ogr
from osgeo import osr

# Open one subdataset of a MODIS L1B swath product
imag = gdal.Open('HDF4_EOS:EOS_SWATH:"MOD021KM.A2012111.1645.005.2012112014505.hdf":MODIS_SWATH_Type_L1B:EV_1KM_Emissive_Uncert_Indexes')
band = imag.GetRasterBand(1)
print(imag.GetGeoTransform())
print(imag.GetProjection())

# Geographic WGS84 coordinates
wgs84 = osr.SpatialReference()
wgs84.ImportFromEPSG(4326)

# MODIS sinusoidal projection
modis_sinu = osr.SpatialReference()
modis_sinu.ImportFromProj4("+proj=sinu +R=6371007.181 +nadgrids=@null +wktext")

# Transform a lon/lat pair into MODIS sinusoidal coordinates
tx = osr.CoordinateTransformation(wgs84, modis_sinu)
lon, lat = (-3.904, 50.58)
modis_x, modis_y, modis_z = tx.TransformPoint(lon, lat)
print(modis_x, modis_y, modis_z)
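# A possible sanity check (hedged: on GDAL 3+ the EPSG:4326 axis order defaults
# to lat/lon unless a traditional axis mapping strategy is set, so the call may
# need adjusting):
# tx_inv = osr.CoordinateTransformation(modis_sinu, wgs84)
# print(tx_inv.TransformPoint(modis_x, modis_y))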
|
[
"moncho_apa@hotmail.com"
] |
moncho_apa@hotmail.com
|
f9851548bffc16ba63bf28b707f73f90b8c199ab
|
e2590e0a78046a22131b69c76ebde21bf042cdd1
|
/ABC201_300/ABC245/A.py
|
391df3caa20a5d971169d88fe7fb49746b01f2c0
|
[] |
no_license
|
masato-sso/AtCoderProblems
|
b8e23941d11881860dcf2942a5002a2b19b1f0c8
|
fbc02e6b7f8c6583e5a4e5187463e0001fc5f4d8
|
refs/heads/main
| 2023-01-22T23:57:58.509585
| 2023-01-21T14:07:47
| 2023-01-21T14:07:47
| 170,867,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
A,B,C,D = map(int, input().split())
takahashi = A*3600 + B*60
aoki = C*3600 + D*60 + 1
if(takahashi < aoki):
print("Takahashi")
else:
print("Aoki")
|
[
"masato@seijinnoMacBook-Pro-2.local"
] |
masato@seijinnoMacBook-Pro-2.local
|
8890966b3630ad7736f037f9f507835ae70ccea5
|
a52c3e4dfd4ef3777c9a2d5da4e762ce68d272df
|
/paperserver/papers/models.py
|
2a66d39e54358f65180d13f693dfde6aefd32aad
|
[] |
no_license
|
kevinkarsch/research-paper-database
|
2b1117d639b0383a770ec7a73c046c846ccc4faa
|
cbb3aa154ea0993518920cd5b93f0817e0d76a0c
|
refs/heads/master
| 2020-05-27T13:04:56.348689
| 2019-05-28T05:16:39
| 2019-05-28T05:16:39
| 188,630,700
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,953
|
py
|
from django.db import models
import bibtexparser
from itertools import cycle
def rotate(lst): #Places the last element first
return lst[-1:]+lst[:-1]
def extractAuthors(bibtexEntry):
cleanAuthorList = [] #Create an author list as "First1 Last1, First2 Last2, etc"
authorList = bibtexEntry.get("author")
if authorList:
for author in authorList.split(" and "):
authorSplit = [v.strip() for v in author.split(",")] # split on "," and trim whitespace
authorSplit = rotate(authorSplit) #Put the first name first (if needed)
cleanAuthorList.append(" ".join(authorSplit))
return ", ".join(cleanAuthorList)
def extractVenue(bibtexEntry):
if "booktitle" in bibtexEntry:
return bibtexEntry["booktitle"]
elif "journal" in bibtexEntry:
return bibtexEntry["journal"]
elif "school" in bibtexEntry:
return bibtexEntry["school"]
else:
return ""
class Paper(models.Model):
bibtex = models.TextField("Bibtex")
link = models.URLField("Link", blank=True)
notes = models.TextField("Notes", blank=True)
def __str__(self):
bib = self.asDict()
return "[{}] {}, {}. {}, {}.".format(bib["bibtexId"], bib["authors"], bib["title"], bib["venue"], bib["year"])
def asDict(self):
bibtexParsed = bibtexparser.loads(self.bibtex)
bibtexEntry = bibtexParsed.entries[0] if len(bibtexParsed.entries) > 0 else {}
return {
"id": self.id,
"bibtex": self.bibtex,
"link": self.link,
"notes": self.notes if self.notes else "(None)",
"bibtexId": bibtexEntry["ID"] if "ID" in bibtexEntry else "",
"title": bibtexEntry["title"] if "title" in bibtexEntry else "",
"year": bibtexEntry["year"] if "year" in bibtexEntry else "",
"authors": extractAuthors(bibtexEntry),
"venue": extractVenue(bibtexEntry),
}
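# Rough usage sketch (the bibtex string below is illustrative, not from this repo):
# p = Paper(bibtex='@article{doe2020, title={A Title}, author={Doe, Jane and Roe, Richard}, journal={J. Ex.}, year={2020}}')
# p.asDict()['authors']  ->  'Jane Doe, Richard Roe'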
|
[
"kevin@lightform.com"
] |
kevin@lightform.com
|
ebe1091d558f273639c056783c9063c71c194a84
|
4c59f79e178c78790ea6e6947bf894c8246d626c
|
/start.py
|
62f851d982272fbd29a849c27cdffef895a7007c
|
[] |
no_license
|
shitalkallole/Projects
|
a1b08f0acdaace717587ffbb64ec1fb2dc50e76e
|
c2f8306589406d64becd2d156a2e8be69ae59053
|
refs/heads/master
| 2020-04-13T18:36:17.799229
| 2019-01-08T14:27:14
| 2019-01-08T14:27:14
| 163,379,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,978
|
py
|
import tkinter
import tkinter.ttk as ttk #to use advanced widget
from tkinter import messagebox #to use messagebox
from tkinter import filedialog #to use file dialog
#Definition of required function
def btn_Source_Function(arg=None):
filename = filedialog.askopenfilename(filetypes=[("JPEG file","*.jpg")])
lbl_Source_Setter.set(filename)
def btn_Dest_Function(arg=None):
dirname=filedialog.askdirectory()
lbl_Dest_Setter.set(dirname)
def btn_Scan_Function(arg=None):#when Enter pressed arg passed like -<KeyPress event state=Mod1 keysym=Return keycode=13 char='\r' x=69 y=24> (So we dont want to receive)
sourceData=lbl_Source_Setter.get()
destData=lbl_Dest_Setter.get()
if(sourceData!="" and destData!=""):
container.attributes("-disabled", 1)
prgbar_Status=ttk.Progressbar(container,length=400,mode="indeterminate",orient="horizontal")
prgbar_Status.place(x=100,y=200)
        prgbar_Status.start(80)
else:
if(sourceData=="" and destData==""):
messagebox.showinfo("Information","Please select Source Image and Scan Folder")
elif(sourceData==""):
messagebox.showinfo("Information","Please select Source Image")
else:
messagebox.showinfo("Information","Please select Scan Folder")
def btn_Cancel_Function(arg=None):
container.quit()
#Create Container to hold widget
container=tkinter.Tk()
w=container.winfo_screenwidth()//2 #take width of screen
h=container.winfo_screenheight()//2 #take height of screen
container.minsize(600,300) #window size
container.geometry("+{}+{}".format(w-300,h-150)) #left and upper space
container.resizable(0,0) #Remove maximize button
container.title("Image Scanner") #Set Title
#container.withdraw() for hiding purpose
#Configure Style
style=ttk.Style()
style.configure("TButton",width=25)
#Create Widget
btn_Source=ttk.Button(container,text="Browse Source Image",style="TButton",command=btn_Source_Function)
btn_Source.bind("<Return>",btn_Source_Function)
btn_Source.place(x=27,y=22)
lbl_Source_Setter=tkinter.StringVar()
lbl_Source=ttk.Label(container,textvariable=lbl_Source_Setter,width=60,background="white",anchor="center")
lbl_Source.place(x=197,y=22)
btn_Dest=ttk.Button(container,text="Browse Scan Folder",style="TButton",command=btn_Dest_Function)
btn_Dest.bind("<Return>",btn_Dest_Function)
btn_Dest.place(x=27,y=72)
lbl_Dest_Setter=tkinter.StringVar()
lbl_Dest=ttk.Label(container,textvariable=lbl_Dest_Setter,width=60,background="white",anchor="center")
lbl_Dest.place(x=197,y=72)
btn_Scan=ttk.Button(container,text="Scan",style="TButton",command=btn_Scan_Function)
btn_Scan.bind("<Return>",btn_Scan_Function)
btn_Scan.place(x=197,y=122)
btn_Cancel=ttk.Button(container,text="Cancel",style="TButton",command=btn_Cancel_Function)
btn_Cancel.bind("<Return>",btn_Cancel_Function)
btn_Cancel.place(x=403,y=122)
#Infinite loop to take action on event
container.mainloop()
|
[
"noreply@github.com"
] |
shitalkallole.noreply@github.com
|
804945d5c0cfcccdcae65df9e9c64532fcd33c8a
|
f947dead9b44f2a6b7d1f7de24c204d1c654d3e0
|
/networks/svhn/complicated_ensemble/submodel3.py
|
652d95f826868c8c0516ed60069f9841b3ea05a8
|
[] |
no_license
|
Adamantios/NN-Train
|
cf0b981eab806598a1b3d5961287c524d26472b4
|
160a5c7a3f06b27ee0e740f6dec940514af6bd2c
|
refs/heads/master
| 2023-04-03T11:50:41.301950
| 2021-04-08T15:12:59
| 2021-04-08T15:12:59
| 207,342,259
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,279
|
py
|
from typing import Union
from numpy.core.multiarray import ndarray
from numpy.ma import logical_or
from tensorflow.python.keras import Model
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization
from tensorflow.python.keras.regularizers import l2
from networks.tools import create_inputs, load_weights
def svhn_complicated_ensemble_submodel3(input_shape=None, input_tensor=None, n_classes=None,
weights_path: Union[None, str] = None) -> Model:
"""
Defines a svhn network.
:param n_classes: used in order to be compatible with the main script.
:param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
:param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
:param weights_path: a path to a trained custom network's weights.
:return: Keras functional API Model.
"""
inputs = create_inputs(input_shape, input_tensor)
# Define a weight decay for the regularisation.
weight_decay = 1e-4
x = Conv2D(64, (3, 3), padding='same', activation='elu', name='conv1', kernel_regularizer=l2(weight_decay))(inputs)
x = BatchNormalization(name='batch-norm')(x)
x = Conv2D(64, (3, 3), padding='same', activation='elu', name='conv2', kernel_regularizer=l2(weight_decay))(x)
x = MaxPooling2D(pool_size=(2, 2), name='pool')(x)
x = Dropout(0.3, name='dropout', seed=0)(x)
# Add top layers.
x = Flatten(name='flatten')(x)
outputs = Dense(n_classes, activation='softmax', name='softmax_outputs')(x)
# Create Submodel 3.
model = Model(inputs, outputs, name='svhn_complicated_ensemble_submodel3')
# Load weights, if they exist.
load_weights(weights_path, model)
return model
def svhn_complicated_ensemble_submodel3_labels_manipulation(labels_array: ndarray) -> int:
"""
The model's labels manipulator.
:param labels_array: the labels to manipulate.
:return: the number of classes predicted by the model.
"""
labels_array[logical_or(labels_array < 3, labels_array > 5)] = 0
labels_array[labels_array == 3] = 1
labels_array[labels_array == 4] = 2
labels_array[labels_array == 5] = 3
return 4
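# Minimal usage sketch (input shape and compile settings are assumptions, not from
# this repo; SVHN images are 32x32 RGB and the label mapping above yields 4 classes):
# model = svhn_complicated_ensemble_submodel3(input_shape=(32, 32, 3), n_classes=4)
# model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])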
|
[
"manoszaras94@gmail.com"
] |
manoszaras94@gmail.com
|
8ea951c1ee2703c52bb8b28fb2c185f677da2081
|
5e471e97ec337e4985a924cf31f2d5d9980de4a0
|
/oakland-scraper/legistar/base.py
|
915e5ff73e6e0a4b3dbbcaea6324aae35371e9a6
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
jberardini/oakland-councilmatic
|
e3fddcd92d2639c9c711ff2be34a281335878575
|
ffa6a5ea296cb993a8a5311622c3593ccfb4f939
|
refs/heads/master
| 2021-01-22T05:06:03.956618
| 2017-02-15T18:04:04
| 2017-02-15T18:04:04
| 81,616,351
| 0
| 1
| null | 2017-02-15T16:17:54
| 2017-02-10T23:30:38
|
Python
|
UTF-8
|
Python
| false
| false
| 7,318
|
py
|
# Scraper is provided by the third-party pupa package (imported below), not by this repo.
from pupa.scrape import Scraper
import lxml.html
import lxml.etree as etree
import traceback
import datetime
from collections import defaultdict, deque
import itertools
import pytz
import icalendar
import re
class LegistarScraper(Scraper):
date_format='%m/%d/%Y'
def __init__(self, *args, **kwargs) :
super(LegistarScraper, self).__init__(*args, **kwargs)
self.timeout = 600
def lxmlize(self, url, payload=None):
if payload :
entry = self.post(url, payload, verify=False).text
else :
entry = self.get(url, verify=False).text
page = lxml.html.fromstring(entry)
page.make_links_absolute(url)
return page
def pages(self, url, payload=None) :
page = self.lxmlize(url, payload)
yield page
next_page = page.xpath("//a[@class='rgCurrentPage']/following-sibling::a[1]")
if payload and 'ctl00$ContentPlaceHolder1$btnSearch' in payload:
del payload['ctl00$ContentPlaceHolder1$btnSearch']
while len(next_page) > 0 :
if payload is None:
payload = {}
payload.update(self.sessionSecrets(page))
event_target = next_page[0].attrib['href'].split("'")[1]
payload['__EVENTTARGET'] = event_target
page = self.lxmlize(url, payload)
yield page
next_page = page.xpath("//a[@class='rgCurrentPage']/following-sibling::a[1]")
def parseDetails(self, detail_div) :
"""
Parse the data in the top section of a detail page.
"""
detail_query = ".//*[starts-with(@id, 'ctl00_ContentPlaceHolder1_lbl')"\
" or starts-with(@id, 'ctl00_ContentPlaceHolder1_hyp')]"
fields = detail_div.xpath(detail_query)
details = {}
for field_key, field in itertools.groupby(fields,
fieldKey) :
field = list(field)
field_1, field_2 = field[0], field[-1]
key = field_1.text_content().replace(':', '').strip()
if field_2.find('.//a') is not None :
value = []
for link in field_2.xpath('.//a') :
value.append({'label' : link.text_content().strip(),
'url' : self._get_link_address(link)})
elif 'href' in field_2.attrib :
value = {'label' : field_2.text_content().strip(),
'url' : self._get_link_address(field_2)}
else :
value = field_2.text_content().strip()
details[key] = value
return details
def parseDataTable(self, table):
"""
Legistar uses the same kind of data table in a number of
places. This will return a list of dictionaries using the
table headers as keys.
"""
headers = table.xpath(".//th[starts-with(@class, 'rgHeader')]")
rows = table.xpath(".//tr[@class='rgRow' or @class='rgAltRow']")
keys = []
for header in headers :
text_content = header.text_content().replace(' ', ' ').strip()
if text_content :
keys.append(text_content)
else :
keys.append(header.xpath('.//input')[0].value)
for row in rows:
try:
data = defaultdict(lambda : None)
for key, field in zip(keys, row.xpath("./td")):
text_content = self._stringify(field)
if field.find('.//a') is not None :
address = self._get_link_address(field.find('.//a'))
if address :
if key == '' and 'View.ashx?M=IC' in address :
req = self.get(address, verify=False)
value = icalendar.Calendar.from_ical(req.text)
key = 'iCalendar'
else :
value = {'label': text_content,
'url': address}
else :
value = text_content
else :
value = text_content
data[key] = value
yield data, keys, row
except Exception as e:
print('Problem parsing row:')
print(etree.tostring(row))
print(traceback.format_exc())
raise e
def _get_link_address(self, link):
url = None
if 'onclick' in link.attrib:
onclick = link.attrib['onclick']
if (onclick is not None
and onclick.startswith(("radopen('",
"window.open",
"OpenTelerikWindow"))):
url = self.BASE_URL + onclick.split("'")[1]
elif 'href' in link.attrib :
url = link.attrib['href']
return url
def _stringify(self, field) :
for br in field.xpath("*//br"):
br.tail = "\n" + br.tail if br.tail else "\n"
for em in field.xpath("*//em"):
if em.text :
em.text = "--em--" + em.text + "--em--"
return field.text_content().replace(' ', ' ').strip()
def toTime(self, text) :
time = datetime.datetime.strptime(text, self.date_format)
time = pytz.timezone(self.TIMEZONE).localize(time)
return time
def toDate(self, text) :
return self.toTime(text).date().isoformat()
def now(self) :
return datetime.datetime.utcnow().replace(tzinfo = pytz.utc)
def mdY2Ymd(self, text) :
month, day, year = text.split('/')
return "%d-%02d-%02d" % (int(year), int(month), int(day))
def sessionSecrets(self, page) :
payload = {}
payload['__EVENTARGUMENT'] = None
payload['__VIEWSTATE'] = page.xpath("//input[@name='__VIEWSTATE']/@value")[0]
try :
payload['__EVENTVALIDATION'] = page.xpath("//input[@name='__EVENTVALIDATION']/@value")[0]
except IndexError :
pass
return(payload)
def fieldKey(x) :
field_id = x.attrib['id']
field = re.split(r'hyp|lbl', field_id)[-1]
field = field.split('Prompt')[0]
field = field.rstrip('X21')
return field
class LegistarAPIScraper(Scraper):
date_format = '%Y-%m-%dT%H:%M:%S'
def toTime(self, text) :
time = datetime.datetime.strptime(text, self.date_format)
time = pytz.timezone(self.TIMEZONE).localize(time)
return time
def pages(self, url, params=None, item_key=None):
if params is None:
params = {}
seen = deque([], maxlen=1000)
page_num = 0
while page_num == 0 or len(response.json()) == 1000 :
params['$skip'] = page_num * 1000
response = self.get(url, params=params)
for item in response.json() :
if item[item_key] not in seen :
yield item
seen.append(item[item_key])
page_num += 1
|
[
"jberardini@gmail.com"
] |
jberardini@gmail.com
|
83bf2988452341236593ec0b18f7c8fbb05f18af
|
9fcd6a91132fd12731d259fe7d709cdf222381bb
|
/aquaq/4/foo.py
|
49f7ccdf31a31dd12138a371284f70236a68ab78
|
[] |
no_license
|
protocol7/advent-of-code
|
f5bdb541d21414ba833760958a1b9d05fc26f84a
|
fa110cef83510d86e82cb5d02f6af5bb7016f2c7
|
refs/heads/master
| 2023-04-05T15:33:26.146031
| 2023-03-18T14:22:43
| 2023-03-18T14:22:43
| 159,989,507
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
import sys
from math import sqrt
def parse(line):
return line.strip()
xs = int(sys.stdin.read())
def factors(n):
j = 2
while n > 1:
for i in range(j, int(sqrt(n+0.05)) + 1):
if n % i == 0:
n //= i ; j = i
yield i
break
else:
if n > 1:
yield n; break
def coprime(a, b):
a = set(factors(a))
return not a & b
b = set(factors(xs))
cp = []
for i in range(1, xs):
if coprime(i, b):
cp.append(i)
print(sum(cp))
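# Sanity check: for n > 1 the integers in [1, n) coprime to n pair up as i and n - i,
# so their sum equals n * phi(n) / 2; the printed total should match that identity.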
|
[
"niklas@protocol7.com"
] |
niklas@protocol7.com
|
3d2b9c57cd9092a9f79ef33bebeab6f8683f6ab6
|
585bac463cb1919ac697391ff130bbced73d6307
|
/8_StringtoIntegerAtoi/solution_1.py
|
c3ead2e6e4d8c093d7690f134b94737d7b047caa
|
[] |
no_license
|
llgeek/leetcode
|
ce236cf3d3e3084933a7a4a5e8c7766f7f407285
|
4d340a45fb2e9459d47cbe179ebfa7a82e5f1b8c
|
refs/heads/master
| 2021-01-22T23:44:13.318127
| 2020-03-11T00:59:05
| 2020-03-11T00:59:05
| 85,667,214
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,120
|
py
|
class Solution:
def myAtoi(self, numstr):
"""
:type str: str
:rtype: int
"""
numchar = set('1234567890')
numval = dict(zip(numchar, map(int, numchar)))
numstr = numstr.strip(' ')
if not numstr or numstr[0] not in (numchar | {'+', '-'}):
return 0
idx = 0
flag = 1
if numstr[0] == '+':
flag = 1
idx += 1
if numstr[0] == '-':
flag = -1
idx += 1
val = 0
INTMAX = (1<<31)-1
INTMIN = - (1<<31)
while idx < len(numstr) and numstr[idx] in numchar:
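            # Clamp before overflowing: INT_MAX = 2147483647 ends in 7 and
            # INT_MIN = -2147483648 ends in 8, hence the different last-digit checks.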
if val > INTMAX//10 or (val == INTMAX//10 and (numval[numstr[idx]] > 7 if flag==1 else numval[numstr[idx]] > 8)):
return INTMAX if flag == 1 else INTMIN
val = val * 10 + numval[numstr[idx]]
idx += 1
return flag * val
if __name__ == "__main__":
# numstr = " -42 with words"
# numstr = "words and 987"
# numstr = "-91283472332"
numstr = "-3924x8fc"
sol = Solution()
print(sol.myAtoi(numstr))
|
[
"angleflycll@gmail.com"
] |
angleflycll@gmail.com
|
5df75cc146a550e84c4e6ca4ebff983f6ac56c8e
|
1499592f38adcb841e3606c3b5e414cee7200cef
|
/setup.py
|
e3beb0cccc14a0cc30ed39122cb3f74746ca3ee3
|
[] |
no_license
|
vardaofthevalier/coding_practice
|
8e3a8258674ed4551f4ac5193ad7eb4aaf4540f1
|
9b3162b07c9e47feb02938136adc4f0b0a90e053
|
refs/heads/master
| 2021-08-23T06:14:20.723957
| 2017-12-03T20:52:54
| 2017-12-03T20:52:54
| 110,907,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from distutils.core import setup
setup(
name = 'data_structures',
packages = [
'graph',
'heap',
'linked_list',
'queue',
'stack',
'tree',
'util'
],
version = '0.1'
)
|
[
"abbyhahn@level11.com"
] |
abbyhahn@level11.com
|
6067ab1f5fe512cb7ca63b9306da128f5e6197bc
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/cirq_new/cirq_program/startCirq_pragma419.py
|
1264ba91718f83a89f2ae4128fae4fe48b3de51f
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,626
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=16
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=13
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=14
c.append(cirq.H.on(input_qubit[1])) # number=15
c.append(cirq.X.on(input_qubit[1])) # number=11
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=12
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.rx(2.808583832309275).on(input_qubit[2])) # number=7
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[3])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=5
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=6
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma419.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
d2d13ed9606cb1b996bbda67450f74ea39612ca7
|
e3af62bb620675ff351a39d1c8662b60c87d23b2
|
/Cloud_Project_3.0/plots.py
|
4f418315d091f7fe4b33d7f2f45db24f4ce25cfe
|
[] |
no_license
|
shipra1101/twitter-sentiment-visualization
|
61212f9e87ea3bddecffd58bb70806ea655cebe0
|
b1ecbcd997119ed14f356528d8e3bb935ae3f5d6
|
refs/heads/master
| 2020-03-25T12:52:04.900449
| 2018-08-07T00:41:27
| 2018-08-07T00:41:27
| 143,797,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 22 22:51:57 2018
@author: deeks
"""
import matplotlib.pyplot as plt
import numpy as np
x = np.arange(5)
y = np.array([53,54,76,100,76])
plt.gca().set_prop_cycle(color=['red', 'green', 'yellow'])
plt.plot(x,y)
plt.plot(x, 2 * y)
plt.plot(x, 3 * y)
plt.legend(['Positive', 'Negative', 'Neutral'], loc='upper left')
plt.show()
from sqlalchemy import create_engine
engine = create_engine('mysql+pymysql://dk488492:sameera123@cloudupload.czlq7phghrul.us-east-2.rds.amazonaws.com/awsdb')
q = 'SELECT * FROM SEARCH_INFO WHERE SEARCH=\'' + "twitter" + "\'" + " ORDER BY ID ASC"
result = engine.execute(q).fetchall()
postive=[]
negative=[]
nuetral=[]
import numpy as np
for row in result:
postive.append(row[2])
negative.append(row[3])
nuetral.append(row[4])
pos=np.array(postive)
neg=np.array(negative)
neut=np.array(nuetral)
plt.gca().set_prop_cycle(color=['red', 'green', 'yellow'])
d = np.arange(len(pos))
e = np.arange(len(neg))
f = np.arange(len(neut))
plt.plot(d,pos)
plt.plot(e, neg)
plt.plot(f,neut)
plt.legend(['Positive', 'Negative', 'Neutral'], loc='upper left')
plt.show()
|
[
"noreply@github.com"
] |
shipra1101.noreply@github.com
|
431b16e4c7b6842e39ee6ee82c0aba8b8003552f
|
f928517af3ad26f4792929a9f5efb609469d40a2
|
/indeed.py
|
5c8b5cf665dda3a2d815d6e8bbb66217a9b90330
|
[] |
no_license
|
HBanana-1/jobs
|
d2b2b21849014a2ffd3e0e345a2a7a2d645f2f4f
|
b00b9f9bda748a3a32da2e5e2247fce7ca21ebd7
|
refs/heads/master
| 2023-04-08T22:49:45.312479
| 2021-03-25T03:33:20
| 2021-03-25T03:33:20
| 351,297,136
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,600
|
py
|
import requests
from bs4 import BeautifulSoup
LIMIT = 50
URL = f"https://www.indeed.com/jobs?q=python&limit={LIMIT}"
def get_last_page():
result = requests.get(URL)
soup = BeautifulSoup(result.text, "html.parser")
pagination = soup.find("div", {"class": "pagination"})
links = pagination.find_all('a')
pages = []
for link in links[:-1]:
pages.append(int(link.string))
max_page = pages[-1]
return max_page
def extract_job(html):
title = html.find("h2", {"class": "title"}).find("a")["title"]
company = html.find("span", {"class": "company"})
if company:
company_anchor = company.find("a")
if company_anchor is not None:
company = str(company_anchor.string)
else:
company = str(company.string)
company = company.strip()
else:
        company = None
location = html.find("div", {"class": "recJobLoc"})["data-rc-loc"]
job_id = html["data-jk"]
return {
'title': title,
'company': company,
'location': location,
"link": f"https://www.indeed.com/viewjob?jk={job_id} "
}
def extract_jobs(last_page):
jobs = []
for page in range(last_page):
print(f"Scrapping Indeed: Page: {page}")
result = requests.get(f"{URL}&start={page*LIMIT}")
soup = BeautifulSoup(result.text, "html.parser")
results = soup.find_all("div", {"class": "jobsearch-SerpJobCard"})
for result in results:
job = extract_job(result)
jobs.append(job)
return jobs
def get_jobs():
last_page = get_last_page()
jobs = extract_jobs(last_page)
return jobs
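if __name__ == "__main__":
    # Illustrative smoke test only: Indeed's markup changes frequently, so the
    # CSS classes targeted above may need updating before this prints anything.
    for found_job in get_jobs():
        print(found_job["title"], "|", found_job["company"], "|", found_job["location"])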
|
[
"64995628+HBanana-1@users.noreply.github.com"
] |
64995628+HBanana-1@users.noreply.github.com
|
b59f489dc8e9b4b348f02f911acb7201b91696eb
|
9238c5adf211d66cbe9bea5a89e97ca02c31da9a
|
/bin/.venv-ansible-venv/lib/python2.6/site-packages/ansible/runner/lookup_plugins/indexed_items.py
|
c1db1fdee2cf1ed8939d21ea5ff9034416395850
|
[
"MIT"
] |
permissive
|
marcusramberg/dotfiles
|
803d27fb88da8e46abb283b2e2987e51a83b08aa
|
413727089a15e775f532d2da363c03d9fb3fb90a
|
refs/heads/main
| 2023-03-04T17:08:40.123249
| 2023-03-01T07:46:51
| 2023-03-01T07:46:51
| 7,285,450
| 4
| 2
|
MIT
| 2022-12-22T14:39:35
| 2012-12-22T11:57:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,418
|
py
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.utils import safe_eval
import ansible.utils as utils
import ansible.errors as errors
def flatten(terms):
ret = []
for term in terms:
if isinstance(term, list):
ret.extend(term)
else:
ret.append(term)
return ret
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if not isinstance(terms, list):
raise errors.AnsibleError("with_indexed_items expects a list")
items = flatten(terms)
return zip(range(len(items)), items)
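# Example (matching the flatten + zip logic above): with_indexed_items over
# ['a', ['b', 'c']] flattens to ['a', 'b', 'c'] and yields
# [(0, 'a'), (1, 'b'), (2, 'c')].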
|
[
"marcus.ramberg@usit.uio.no"
] |
marcus.ramberg@usit.uio.no
|
dfbc874a52ba184f5c211419fb24206e136ef3cf
|
508c5e01aa7dce530093d5796250eff8d74ba06c
|
/code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/catalog_objects/columns/__init__.py
|
eebf05b48c6479d4365c76b52ad6186b9a6bcc35
|
[
"PostgreSQL",
"MIT"
] |
permissive
|
jhkuang11/UniTrade
|
f220b0d84db06ff17626b3daa18d4cb8b72a5d3f
|
5f68b853926e167936b58c8543b8f95ebd6f5211
|
refs/heads/master
| 2022-12-12T15:58:30.013516
| 2019-02-01T21:07:15
| 2019-02-01T21:07:15
| 166,479,655
| 0
| 0
|
MIT
| 2022-12-07T03:59:47
| 2019-01-18T22:19:45
|
Python
|
UTF-8
|
Python
| false
| false
| 10,760
|
py
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
""" Implements Columns Node (For Catalog objects) """
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template
from flask_babel import gettext
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response
from pgadmin.utils.driver import get_driver
from pgadmin.utils.ajax import gone
from pgadmin.utils.preferences import Preferences
from config import PG_DEFAULT_DRIVER
class CatalogObjectColumnsModule(CollectionNodeModule):
"""
    class CatalogObjectColumnsModule(CollectionNodeModule)
A module class for column node derived from CollectionNodeModule.
Methods:
-------
* __init__(*args, **kwargs)
- Method is used to initialize the column and it's base module.
* get_nodes(gid, sid, did, scid, coid)
- Method is used to generate the browser collection node.
* node_inode()
      - Method is overridden from its base class to make the node a leaf node.
* script_load()
- Load the module script for column, when any of the server node is
initialized.
"""
NODE_TYPE = 'catalog_object_column'
COLLECTION_LABEL = gettext("Columns")
def __init__(self, *args, **kwargs):
"""
Method is used to initialize the ColumnModule and it's base module.
Args:
*args:
**kwargs:
"""
super(CatalogObjectColumnsModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, coid):
"""
Generate the collection node
"""
yield self.generate_browser_collection_node(coid)
@property
def script_load(self):
"""
Load the module script for server, when any of the database node is
initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def node_inode(self):
"""
Load the module node as a leaf node
"""
return False
def register_preferences(self):
"""
Register preferences for this module.
"""
        # Add the node information for browser, not in respective
# node preferences
self.browser_preference = Preferences.module('browser')
self.pref_show_system_objects = self.browser_preference.preference(
'show_system_objects'
)
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
blueprint = CatalogObjectColumnsModule(__name__)
class CatalogObjectColumnsView(PGChildNodeView):
"""
This class is responsible for generating routes for column node
Methods:
-------
* __init__(**kwargs)
- Method is used to initialize the ColumnView and it's base view.
* check_precondition()
      - This function behaves as a decorator which checks the database
        connection before running the view; it also attaches the
        manager, conn & template_path properties to self
* list()
- Returns the properties of all the columns for the catalog object.
* nodes()
- Creates and returns all the children nodes of type - catalog object
column.
* properties(gid, sid, did, scid, coid, clid)
- Returns the properties of the given catalog-object column node.
* dependency(gid, sid, did, scid, coid, clid):
- Returns the dependencies list of the given node.
* dependent(gid, sid, did, scid, coid, clid):
- Returns the dependents list of the given node.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'coid'}
]
ids = [
{'type': 'int', 'id': 'clid'}
]
operations = dict({
'obj': [{'get': 'properties'}, {'get': 'list'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}]
})
def check_precondition(f):
"""
        This function behaves as a decorator which checks the database
        connection before running the view; it also attaches the
        manager, conn & template_path properties to self
"""
@wraps(f)
def wrap(*args, **kwargs):
# Here args[0] will hold self & kwargs will hold gid,sid,did
self = args[0]
self.manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(
kwargs['sid']
)
self.conn = self.manager.connection(did=kwargs['did'])
self.template_path = 'catalog_object_column/sql/#{0}#'.format(self.manager.version)
return f(*args, **kwargs)
return wrap
@check_precondition
def list(self, gid, sid, did, scid, coid):
"""
This function is used to list all the column
nodes within that collection.
Args:
gid: Server group ID
sid: Server ID
did: Database ID
scid: Schema ID
coid: Catalog objects ID
Returns:
JSON of available column nodes
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']), coid=coid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
return ajax_response(
response=res['rows'],
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, coid):
"""
        This function is used to create all the child nodes within that collection.
Here it will create all the column node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
coid: Catalog objects ID
Returns:
JSON of available column child nodes
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']), coid=coid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
res.append(
self.blueprint.generate_browser_node(
row['attnum'],
coid,
row['attname'],
icon="icon-catalog_object_column"
))
return make_json_response(
data=res,
status=200
)
@check_precondition
def properties(self, gid, sid, did, scid, coid, clid):
"""
This function will show the properties of the selected
column node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
coid: Catalog object ID
clid: Column ID
Returns:
JSON of selected column node
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']), coid=coid, clid=clid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(gettext("""Could not find the specified column."""))
return ajax_response(
response=res['rows'][0],
status=200
)
@check_precondition
def dependents(self, gid, sid, did, scid, coid, clid):
"""
This function get the dependents and return ajax response
for the column node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
coid: Catalog object ID
clid: Column ID
"""
# Specific condition for column which we need to append
where = "WHERE dep.refobjid={0}::OID AND dep.refobjsubid={1}".format(
coid, clid
)
dependents_result = self.get_dependents(
self.conn, clid, where=where
)
        # Specific SQL to run against the column to fetch dependents
SQL = render_template("/".join([self.template_path,
'depend.sql']), where=where)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
for row in res['rows']:
ref_name = row['refname']
if ref_name is None:
continue
dep_type = ''
dep_str = row['deptype']
if dep_str == 'a':
dep_type = 'auto'
elif dep_str == 'n':
dep_type = 'normal'
elif dep_str == 'i':
dep_type = 'internal'
dependents_result.append({'type': 'sequence', 'name': ref_name, 'field': dep_type})
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, coid, clid):
"""
This function get the dependencies and return ajax response
for the column node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
coid: Catalog objects ID
clid: Column ID
"""
# Specific condition for column which we need to append
where = "WHERE dep.objid={0}::OID AND dep.objsubid={1}".format(
coid, clid
)
dependencies_result = self.get_dependencies(
self.conn, clid, where=where
)
return ajax_response(
response=dependencies_result,
status=200
)
CatalogObjectColumnsView.register_node_view(blueprint)
|
[
"jhkuang11@gmail.com"
] |
jhkuang11@gmail.com
|
82c1555d18d78cdb7c440ced3af6c7392fc7de65
|
c5829a0e61f55cddb420f7a17f3e8ba847ab9fd5
|
/aoc/day10.py
|
7f760165922a1dfec6bf79cd2ddcaa57cabc6e00
|
[] |
no_license
|
jhultberg/AoC2020
|
c8590dcdb606cd298b88365a01b52f11d55c96f8
|
92863da772959be4c6e0211f818c021c83b0f66f
|
refs/heads/master
| 2023-02-01T01:01:37.735294
| 2020-12-20T19:54:26
| 2020-12-20T19:54:26
| 317,626,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,962
|
py
|
import os
current_dir = os.getcwd()
filename = "data/day10.txt"
#filename = "test.txt"
path = os.path.join(current_dir, filename)
all_jolts = []
with open(path) as f:
for line in f:
all_jolts.append(int(line.strip()))
all_jolts.sort()
def count_jolt_differences(jolts):
device_jolt = jolts[-1]
current_jolt = 0
not_connected = True
ones = 0
threes = 0
while not_connected:
if current_jolt == device_jolt:
not_connected = False
threes += 1
return ones * threes
if jolts[0] - current_jolt == 1:
ones += 1
current_jolt = jolts[0]
jolts = jolts[1:]
elif jolts[0] - current_jolt == 3:
threes += 1
current_jolt = jolts[0]
jolts = jolts[1:]
return 0
def find_sections(jolts):
sections = []
curr_section = []
for jolt in jolts:
if ((jolt+1 in jolts) != (jolt+2 in jolts)) != ((jolt+2 in jolts) != (jolt+3 in jolts)):
curr_section.append(jolt)
sections.append(curr_section)
print(curr_section)
curr_section = []
continue
curr_section.append(jolt)
sections.append(curr_section)
return sections
#def find_all_paths(jolts):
# no_paths = 1
# for jolt in jolts[1:]:
# local_paths = 0
# if jolt - 1 in jolts:
# local_paths += 1
# if jolt - 2 in jolts:
# local_paths += 1
# if jolt - 3 in jolts:
# local_paths += 1
# no_paths *= local_paths
# return no_paths
def find_all_paths(jolts):
ways_to = {0:1}
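    # ways_to[j] = number of distinct adapter chains that reach joltage j;
    # each jolt j is reachable from j-1, j-2 or j-3, so its count is the sum of theirs.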
for jolt in jolts:
local_paths = 0
for x in range(1,4):
if (jolt-x) in ways_to:
local_paths += ways_to[jolt-x]
ways_to[jolt] = local_paths
return ways_to[jolts[-1]]
### A
print(count_jolt_differences(all_jolts))
### B
print(find_all_paths(all_jolts))
|
[
"johanna.chultberg@gmail.com"
] |
johanna.chultberg@gmail.com
|
6c839eb2e83f8a53e15346b86d09b48d10b1a008
|
4500b0c36e268fd5ae2af55b594a799b2279301d
|
/onlinecart/wsgi.py
|
a70bb449889a91d5ea4cb7aa4cc4bf4c867888d8
|
[] |
no_license
|
abhishekbudruk007/online_vegetable_shopping
|
655242fc36365b892d2899ad779fa1e8822d72ef
|
6407b696d5fe968dad73e0395957915424de88d7
|
refs/heads/master
| 2023-07-11T16:00:15.457492
| 2021-08-21T11:17:34
| 2021-08-21T11:17:34
| 396,239,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for onlinecart project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'onlinecart.settings')
application = get_wsgi_application()
|
[
"Abhishek.Budruk@Cerner.com"
] |
Abhishek.Budruk@Cerner.com
|
1bbebeeb1e698b8dd505518cceb431440beb28b2
|
50ad61dbbef3a4bb4076da57038bf0b1118acca3
|
/source/sentdex_data_episode22.py
|
f2b1ba284c0a30983c72b64dd8619ddb24523750
|
[
"MIT"
] |
permissive
|
LuposX/sentdex_fixed_market_stock
|
3af98c06decfa5046252bf535b4742279f14111e
|
7a0d23702b8005002a0e767c4d7866e27da84b5b
|
refs/heads/master
| 2021-09-09T15:54:44.783281
| 2021-09-04T09:51:16
| 2021-09-04T09:51:16
| 201,447,377
| 2
| 1
|
MIT
| 2021-09-04T09:51:17
| 2019-08-09T10:36:17
|
Python
|
UTF-8
|
Python
| false
| false
| 9,029
|
py
|
# for episode 22
import pandas as pd
import numpy as np
import os
import re
path = "../datasets/intraQuarter"
def Forward(gather=["Total Debt/Equity",
'Trailing P/E',
'Price/Sales',
'Price/Book',
'Profit Margin',
'Operating Margin',
'Return on Assets',
'Return on Equity',
'Revenue Per Share',
'Market Cap',
'Enterprise Value',
'Forward P/E',
'PEG Ratio',
'Enterprise Value/Revenue',
'Enterprise Value/EBITDA',
'Revenue',
'Gross Profit',
'EBITDA',
'Net Income Avl to Common ',
'Diluted EPS',
'Earnings Growth',
'Revenue Growth',
'Total Cash',
'Total Cash Per Share',
'Total Debt',
'Current Ratio',
'Book Value Per Share',
'Cash Flow',
'Beta',
'Held by Insiders',
'Held by Institutions',
'Shares Short (as of',
'Short Ratio',
'Short % of Float',
'Shares Short (prior ']):
df = pd.DataFrame(columns = ['Date',
'Unix',
'Ticker',
'Price',
'stock_p_change',
'SP500',
'sp500_p_change',
'Difference',
##############
'DE Ratio',
'Trailing P/E',
'Price/Sales',
'Price/Book',
'Profit Margin',
'Operating Margin',
'Return on Assets',
'Return on Equity',
'Revenue Per Share',
'Market Cap',
'Enterprise Value',
'Forward P/E',
'PEG Ratio',
'Enterprise Value/Revenue',
'Enterprise Value/EBITDA',
'Revenue',
'Gross Profit',
'EBITDA',
'Net Income Avl to Common ',
'Diluted EPS',
'Earnings Growth',
'Revenue Growth',
'Total Cash',
'Total Cash Per Share',
'Total Debt',
'Current Ratio',
'Book Value Per Share',
'Cash Flow',
'Beta',
'Held by Insiders',
'Held by Institutions',
'Shares Short (as of',
'Short Ratio',
'Short % of Float',
'Shares Short (prior month)',
##############
'Status'])
file_list = os.listdir("../datasets/episode21/html")
for each_file in file_list[:]:
ticker = each_file.split(".html")[0]
        full_file_path = "../datasets/episode21/html/" + str(each_file)
source = open(full_file_path, "r").read()
try:
value_list = []
for each_data in gather:
try:
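                    # the pattern grabs the first decimal number (optionally suffixed
                    # with M or B) or "N/A" that follows the metric name, up to the closing </td>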
regex = re.escape(each_data) + r'.*?(\d{1,8}\.\d{1,8}M?B?|N/A)%?</td>'
value = re.search(regex, source)
value = (value.group(1))
if "B" in value:
value = float(value.replace("B",''))*1000000000
elif "M" in value:
value = float(value.replace("M",''))*1000000
value_list.append(value)
except:
value = np.nan
value_list.append(value)
# communication is the key
print("Last processed file: ", str(ticker) + ".html")
#----------------------------------------------------------------------------------------------------------------------------------------
            # only append rows that are not mostly missing values
            if value_list.count("N/A") > 20 or value_list.count(np.nan) > 20:
pass
else:
df = df.append({'Date':np.nan,
'Unix':np.nan,
'Ticker':ticker,
'Price':np.nan,
'stock_p_change':np.nan,
'SP500':np.nan,
'sp500_p_change':np.nan,
'Difference':np.nan,
'DE Ratio':value_list[0],
#'Market Cap':value_list[1],
'Trailing P/E':value_list[1],
'Price/Sales':value_list[2],
'Price/Book':value_list[3],
'Profit Margin':value_list[4],
'Operating Margin':value_list[5],
'Return on Assets':value_list[6],
'Return on Equity':value_list[7],
'Revenue Per Share':value_list[8],
'Market Cap':value_list[9],
'Enterprise Value':value_list[10],
'Forward P/E':value_list[11],
'PEG Ratio':value_list[12],
'Enterprise Value/Revenue':value_list[13],
'Enterprise Value/EBITDA':value_list[14],
'Revenue':value_list[15],
'Gross Profit':value_list[16],
'EBITDA':value_list[17],
'Net Income Avl to Common ':value_list[18],
'Diluted EPS':value_list[19],
'Earnings Growth':value_list[20],
'Revenue Growth':value_list[21],
'Total Cash':value_list[22],
'Total Cash Per Share':value_list[23],
'Total Debt':value_list[24],
'Current Ratio':value_list[25],
'Book Value Per Share':value_list[26],
'Cash Flow':value_list[27],
'Beta':value_list[28],
'Held by Insiders':value_list[29],
'Held by Institutions':value_list[30],
'Shares Short (as of':value_list[31],
'Short Ratio':value_list[32],
'Short % of Float':value_list[33],
'Shares Short (prior month)':value_list[34],
'Status':np.nan},
ignore_index=True)
except Exception as e6:
print("e6: ", e6)
break
#----------------------------------------------------------------------------------------------------------------------------------------
# saving the file with the right format
def save(file_format):
return "forward_sample_WITH_NA." + file_format
# df.to_excel(save("xlsx"), index=False)
df.to_csv(save("csv"), index=False)
Forward()
|
[
"noreply@github.com"
] |
LuposX.noreply@github.com
|
cbeaf9e337285b006d483a4735cb492f8bf665fe
|
11d75798b8c9ef545a898e84141f55f89b53154a
|
/node_modules/webpack-dev-server/node_modules/fsevents/build/config.gypi
|
fe1e3504113d780d265c036032cdb33652c49653
|
[
"MIT"
] |
permissive
|
famone/eatmeat.store
|
f2d4c59c4d934a380d8725cc27e69867dd41fe4c
|
e5d084d0f29cb02d9c00550bb0374a9e2b0350ac
|
refs/heads/main
| 2022-12-24T11:17:32.669998
| 2020-10-07T13:19:04
| 2020-10-07T13:19:04
| 301,674,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,671
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt66l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "66",
"is_debug": 0,
"llvm_version": "0.0",
"napi_build_version": "6",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/konstantintimofeev/Library/Caches/node-gyp/14.2.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/konstantintimofeev/.npm-init.js",
"userconfig": "/Users/konstantintimofeev/.npmrc",
"cidr": "",
"node_version": "14.2.0",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/konstantintimofeev/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.4 node/v14.2.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/5j/msm8k2fn22x_xmxzdkhflc7h0000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"kt@webink.site"
] |
kt@webink.site
|
829a63cc03a87abff3d906ce6795c429b94e5d15
|
888df2b6fcdd50442f7f174268617b3f161a3581
|
/fileReadTest.py
|
387dc4d95d6283b62c392b8bca6b247723febe25
|
[] |
no_license
|
onethousandth/MyTest
|
a85764353c6bf8bd8d39e727245ec55ed7bfc108
|
4397455ac200a7cb0289301e3baa12911ba77729
|
refs/heads/master
| 2020-12-27T01:16:30.005275
| 2020-04-30T06:28:50
| 2020-04-30T06:28:50
| 237,715,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
# -*- coding:utf-8 -*-
#
# Copyright (C) AddyXiao <addyxiao@msn.cn>
#
#
"A read file Test"
__author__ = "AddyXiao"
__author_email__ = "addyxiao@msn.cn"
fpath = r"D:\Documents\Python\MyTest\restrictClass.py"
with open(fpath, "r") as f:
s = f.read()
print(s)
|
[
"addyxiao@msn.cn"
] |
addyxiao@msn.cn
|
5e97869363d3fbbbca63f2748231cad5c5937b2f
|
5b4529582c9e1c6d832387672cc295e775cdd39a
|
/day014/person.py
|
b29afd74aa7f7b31c0bec51f3fef2ced814adb32
|
[] |
no_license
|
gitxf12/demotest
|
8ccbc7f050eaefb96a6399171e103495371f403a
|
5d4807d7e3a4e869038ceb1f08356d0caf40f6f5
|
refs/heads/master
| 2023-04-21T06:16:00.368012
| 2021-05-31T08:37:08
| 2021-05-31T08:37:08
| 359,756,091
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
class Person:
    def per(self, name, sex, age):
        print(name, sex, age)


class Worker(Person):
    def word(self):
        print("working")


class Student(Person):
    num = 0

    def study(self, num):
        print("studying, student number:", num)

    def sing(self, num):
        print("singing, student number:", num)


if __name__ == "__main__":
    person = Person()
    person.per(name="张三", sex="male", age=22)
    worker = Worker()
    worker.word()
    student = Student()
    student.study(num=1001)
    student.sing(num=1002)
|
[
"sxfeng92@163.com"
] |
sxfeng92@163.com
|
180a57840e3c3e6069fa2aac7dee3904a77be205
|
36e23fd7c98de454b1918f3eea4df7fc2472b00f
|
/createPowerProfile.py
|
1ca6f61b8db1193b5f66b3b2c2cdc3cd6a9f662b
|
[] |
no_license
|
gcbanevicius/barcelona
|
d3c647e58e4a028cc764ac7cdbd004fe12646edd
|
67093742085f2f0c22a28257e8933fa9659014c6
|
refs/heads/master
| 2021-01-25T04:01:52.622342
| 2015-05-04T16:16:32
| 2015-05-04T16:16:32
| 31,486,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,297
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import re
def adjustTime(time, startTime):
# get rid of decimal to avoid weird float precision issues
time = time * 10
startTime = startTime * 10
time = time - startTime
time = time/10
return time
def getMethodTuples(methodInputFileName):
startTime = -1L
tuples = []
with open(methodInputFileName, "r") as loggerFile:
for line in loggerFile.readlines():
line = line.strip()
match = re.search('(Enter|Exit)\s(\S+).*?(\d+$)', line)
direction = match.group(1)
name = match.group(2)
time = long(match.group(3))
# found starting time
if startTime < 0:
startTime = time
time = adjustTime(time, startTime)
logTuple = (direction, name, time)
tuples.append(logTuple)
callStack = []
methodTuples = []
lenTuples = len(tuples)
for i in range(0, lenTuples):
currDirection = tuples[i][0]
# if we are entering the function...
if currDirection == "Enter":
currName = tuples[i][1]
callStack.append(currName)
# check if last entry in log
if i == (lenTuples - 1):
#print("%s for ---" % (currName))
methodTup = (float(currTime), currName)
methodTuples.append(methodTup)
#print(methodTup)
continue
else:
currTime = tuples[i][2]
#nextTime = tuples[i+1][2]
#print("%s for %d" % (currName, nextTime-currTime))
#print("%s for %d" % (currName, currTime))
methodTup = (float(currTime), currName)
methodTuples.append(methodTup)
#print(methodTup)
# if we are exiting the function...
elif currDirection == "Exit":
callStack.pop()
if len(callStack) == 0:
currName = "---"
else:
currName = callStack[-1]
if i == (lenTuples - 1):
#print("%s for ---" % (currName))
methodTup = (float(currTime), currName)
methodTuples.append(methodTup)
#print(methodTup)
continue
else:
currTime = tuples[i][2]
#nextTime = tuples[i+1][2]
#print("%s for %d" % (currName, nextTime-currTime))
#print("%s for %d" % (currName, currTime))
methodTup = (float(currTime), currName)
methodTuples.append(methodTup)
#print(methodTup)
return methodTuples
def getPowerTuples(powerInputFileName):
powerTuples = []
startTime = -1L
with open(powerInputFileName, "r") as loggerFile:
for line in loggerFile.readlines()[1:]:
line = line.strip()
line = line.split(',')
time = float(line[0])
avgPower = float(line[2])
if startTime < 0:
startTime = time
time = adjustTime(time, startTime)
logTuple = (time, avgPower)
powerTuples.append(logTuple)
#print(logTuple)
return powerTuples
# remove methods with 0ms durations
def dedupMethodTuples(methodTuples):
newMethodTuples = []
for i in range(0, len(methodTuples)-1):
if methodTuples[i][0] != methodTuples[i+1][0]:
newMethodTuples.append(methodTuples[i])
newMethodTuples.append(methodTuples[-1])
return newMethodTuples
def combineMethodsWithPower(methodTuples, powerTuples):
# we know that len(power) > len(methods),
# since power is every 0.2ms, but
# methods is at MOST every ms
mi = 0
ml = len(methodTuples)
currName = methodTuples[mi][1]
if mi == ml:
nextTime = float("+inf")
else:
nextTime = methodTuples[mi+1][0]
for pi in range(0, len(powerTuples)):
if currName == '---':
break
currPowerTuple = powerTuples[pi]
currTime = currPowerTuple[0]
currAvgPower = currPowerTuple[1]
if currTime < nextTime:
print("%.4f,%.3f,%s" %(currTime, currAvgPower, currName))
else:
mi = mi+1
currName = methodTuples[mi][1]
if mi == ml:
nextTime = float("+inf")
else:
nextTime = methodTuples[mi+1][0]
# we don't want to print out null method, but we break at top of loop
if currName != '---':
print("%.4f,%.3f,%s" %(currTime, currAvgPower, currName))
return 0
def main():
if len(sys.argv) < 3:
print("Error, need 2 input files: [methodTrace] [powerTrace]", file=sys.stderr)
sys.exit(1)
methodInputFileName = sys.argv[1]
powerInputFileName = sys.argv[2]
methodTuples = getMethodTuples(methodInputFileName)
methodTuples = dedupMethodTuples(methodTuples)
#print(methodTuples)
powerTuples = getPowerTuples(powerInputFileName)
combineMethodsWithPower(methodTuples, powerTuples)
if __name__ == "__main__":
main()
|
[
"gcbanevicius@gmail.com"
] |
gcbanevicius@gmail.com
|
c8a04e2c59e4b1e0a88a4c90a5048e22cdd6370b
|
1d36543d7c137dbe839fd9f999905300a91ec7d5
|
/SortingAlgorithm/ShellsSort.py
|
ad518e39667e3737bbde87a73f2020808fae56e9
|
[
"Apache-2.0"
] |
permissive
|
weaponsX/SortingAlgorithmForPython3
|
be893b8b645f5e09cd47600cffe813ff8f0cbd5b
|
8b5e1dd3aaf30b74c53ff643a225024ef8fbac13
|
refs/heads/master
| 2020-03-20T19:06:12.048174
| 2018-06-17T01:37:33
| 2018-06-17T01:37:33
| 137,621,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,396
|
py
|
# Insertion sort - Shell sort
"""Shell sort was proposed by D.L. Shell in 1959 and is a substantial improvement over straight insertion sort; it is also called diminishing-increment sort.
The sequence to be sorted is first split into several subsequences, each sorted with straight insertion sort; once the records are "roughly ordered", a final straight insertion sort is applied to the whole sequence."""
"""The running time of Shell sort is hard to analyse: the number of key comparisons and record moves depends on the chosen gap sequence d, and can only be estimated precisely in particular cases.
No method for choosing the best gap sequence has been given so far.
Gap sequences can be chosen in many ways (odd numbers, primes, ...), but note: apart from 1 the gaps should share no common factor, and the last gap must be 1.
Shell sort is not a stable sorting algorithm."""
def shellInsertSort(numbers):
    # set the initial gap (step size)
step = int(len(numbers)/2)
while step>0:
for i in range(step, len(numbers)):
            # like insertion sort: compare the current value with the one `step` positions earlier and swap while out of order
while i>=step and numbers[i-step]>numbers[i]:
numbers[i], numbers[i-step] = numbers[i-step], numbers[i]
i -= step
step = int(step/2)
MyList = [3, 1, 5, 7, 2, 4, 9, 6]
shellInsertSort(MyList)
print(MyList)
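# Expected output for the list above: [1, 2, 3, 4, 5, 6, 7, 9]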
|
[
"houboye@outlook.com"
] |
houboye@outlook.com
|
53baf5384d16072d3e3c52ba7eeb141ee3e02581
|
559fe746028a02d7c5e661aa5b0e1c0a5d19f9a2
|
/home/consumers.py
|
14878bdbde10524260eea947d83af60325115970
|
[] |
no_license
|
Code-Institute-Submissions/crypthome
|
55ac3497234ad7c083b18f88c35ee9b69ec1e989
|
d65772a11c15197efd89000095e3b3c32a14ed2a
|
refs/heads/master
| 2023-07-04T14:07:45.957217
| 2021-08-14T09:14:22
| 2021-08-14T09:14:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
import json
from channels.generic.websocket import AsyncWebsocketConsumer
# THE CONSUMER PROCESS WAS STUDIED ON THE PYPLANE YOUTUBE CHANNEL
# Instance of AsyncWebsocketConsumer class.
# We are accepting the incoming Websocket connection.
# We want to add the incoming connection to a group of channels.
class HomeConsumer(AsyncWebsocketConsumer):
async def connect(self):
# Group_add takes two arguments, first the name of the group we are
# adding and then the channel name which is assigned automatically.
await self.channel_layer.group_add('home', self.channel_name)
await self.accept()
async def disconnect(self, code):
await self.channel_layer.group_discard('home', self.channel_name)
# This method handles the message, the event is the message that was
# sent by the get_crypto_data function.
# We are sending this message to all clients (front end)
# in the "home" group.
async def send_new_data(self, event):
new_data = event['text']
await self.send(json.dumps(new_data))
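# A hedged sketch (assumed, not part of this file) of the producer side that the
# comments above describe: a task such as get_crypto_data would push data to the
# "home" group through the channel layer, and the 'type' key routes the event to
# the send_new_data handler above.
#
#   from asgiref.sync import async_to_sync
#   from channels.layers import get_channel_layer
#
#   channel_layer = get_channel_layer()
#   async_to_sync(channel_layer.group_send)(
#       'home',
#       {'type': 'send_new_data', 'text': {'BTC': 43000.0}},
#   )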
|
[
"francescosaponaro5@gmail.com"
] |
francescosaponaro5@gmail.com
|
2c7757062a92f6ee32ae03bdb602c6c1dd8193da
|
f14003dabe97bf1a1d44592415fdf82b7d1fbd6c
|
/apps/idcs/views.py
|
8c3eee9b23045320da40f96729257f22554c964f
|
[] |
no_license
|
eternalstop/opsweb
|
45a879678f3180ce25f9cca2b35179f48149dd83
|
4cda7e33f45976a033f6fd11f0f36190894c1b0e
|
refs/heads/master
| 2020-03-19T09:48:27.985177
| 2018-09-08T07:42:43
| 2018-09-08T07:42:43
| 136,318,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
from .models import Idc
from .serializers import IdcsSerializers
from rest_framework import viewsets, generics
class IdcViewset(viewsets.ModelViewSet):
"""
    retrieve:
    Return the specified IDC record.
    list:
    Return the list of IDC records.
    update:
    Update an IDC record.
    destroy:
    Delete an IDC record.
    create:
    Create an IDC record.
    partial_update:
    Update selected fields of an IDC record.
"""
queryset = Idc.objects.all()
serializer_class = IdcsSerializers
# class IdcDetail(generics.RetrieveUpdateDestroyAPIView):
# queryset = Idc.objects.all()
# serializer_class = IdcsSerializers
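# A minimal sketch (assumed, not from this project) of how IdcViewset is usually
# wired up with DRF's DefaultRouter in the project's urls.py:
#
#   from rest_framework.routers import DefaultRouter
#   from idcs.views import IdcViewset
#
#   router = DefaultRouter()
#   router.register(r'idcs', IdcViewset)
#   urlpatterns = router.urls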
|
[
"18655660717@163.com"
] |
18655660717@163.com
|
848b1cb82de29908abfeed4866e0e6fdb3fa6e4f
|
f0a722b1e6ab4fb3387adcc93cb42df834b03785
|
/stonehenge_game.py
|
4082456961a33f21af21dc01d92180796b281c9a
|
[] |
no_license
|
sultansidhu/MiniMax-SubtractSquare
|
5a54d44cfe859f3b1772522dc0f79783dbbee6e7
|
9837ba229bbfdafbd2bf3f8609b215ffca8a19a3
|
refs/heads/master
| 2020-04-05T13:16:37.984612
| 2018-11-09T17:24:05
| 2018-11-09T17:24:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,288
|
py
|
"""A file representing the class Stonehenge, which is
subclass of Game, and the implementation of its functions.
"""
from game import Game
from stonehenge_state import StonehengeState
from typing import Any
class StonehengeGame(Game):
"""A class representing the game of Stonehenge, inheriting
from Game and implementing all its methods.
"""
def __init__(self, p1_starts):
"""Initializes a game of Stonehenge with side length
side_length.
@param 'StonehengeGame' self: The current game of Stonehenge
@param bool p1_starts: boolean dictating if p1 starts.
@rtype: None
"""
self.side_length = input("What would you like the length " +
"of your board to be?")
self.p1_starts = p1_starts
self.current_state = StonehengeState(int(self.side_length), p1_starts)
def get_instructions(self):
"""Returns a string containing the instructions on how to
play the game.
"""
return "The game is played by each of the two players claiming " \
"a cell, which then changes from representing an alphabet" \
"to representing the number of the player that captured it." \
"When more than half the cells in a line have been claimed" \
"by a single user, the ley-line is claimed by the " \
"user completely." \
"when more than half the ley-lines have been claimed by " \
"a player, the player wins."
def is_over(self, currentstate: 'GameState') -> bool:
"""Returns True iff according to the current_state, the game ends.
@param 'StonehengeGame' self: the current game of Stonehenge
@param Any currentstate: the current state of the game of Stonehenge
@rtype: bool
"""
leyline_list = [d.head for d in currentstate.board.leyline_tracker]
total_length = 3 * (int(self.side_length) + 1)
if leyline_list.count(1) >= total_length / 2:
return True
elif leyline_list.count(2) >= total_length / 2:
return True
elif '@' not in leyline_list:
return True
return False
def is_winner(self, player: str):
"""Returns True if the player player is the winner of the game.
@param 'StonehengeGame' self: The current game of Stonehenge
@param str player: the player being entered to check if
he/she is the winner.
@rtype: bool
"""
headlist = [c.head for c in self.current_state.board.leyline_tracker]
if player == 'p1':
if headlist.count(1) >= len(headlist)/2:
# checks if the number of leylines claimed by p1 is more than
# half the number of total leylines
return True
elif "@" not in headlist and headlist.count(1) > headlist.count(2):
# checks that if all leylines are claimed, then the ones claimed
# by p1 are more than the ones claimed by p2
return True
return False
elif player == 'p2':
if headlist.count(2) >= len(headlist)/2:
# checks if the number of leylines claimed by p2 is more than
# half the number of total leylines
return True
elif "@" not in headlist and headlist.count(2) > headlist.count(1):
# checks that if all leylines are claimed, then the ones claimed
# by p2 are more than the ones claimed by p1
return True
return False
return False
def str_to_move(self, move: str) -> Any:
"""Returns a valid move based on the inputted string. If the inputted
string represents an invalid move, return an invalid move.
@param 'StonehengeGame' self: The current game of Stonehenge
@param str move: The entered string representing a valid
or an invalid move
@rtype: Any
"""
if move.upper() in self.current_state.get_possible_moves():
return move.upper()
if __name__ == "__main__":
from python_ta import check_all
check_all(config="a2_pyta.txt")
|
[
"sultansinghsidhu@gmail.com"
] |
sultansinghsidhu@gmail.com
|
fb590462194433540bece6c1a53347a3460d4404
|
2e2444bf0322f084fad4a0699166479cbdfeaad6
|
/Turtle graphics/Kaleido-spiral/kaleido2.py
|
02885fddf14fd11acea2c9d55fbe40856fb2e908
|
[] |
no_license
|
Puneeth1996/Coding-Projects-in-Python
|
ca5583541b23cdd5e897e2b0f7b4d1ee9411a17c
|
a061610392132b8682e13aa5a7d9b9cfcfb3efa3
|
refs/heads/master
| 2022-04-05T14:27:05.071075
| 2020-02-17T12:23:17
| 2020-02-17T12:23:17
| 239,437,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
import turtle
from itertools import cycle
colors = cycle(['red', 'orange', 'yellow', 'green', 'blue', 'purple'])
def draw_shape(size, angle, shift, shape):
turtle.pencolor(next(colors))
next_shape = ''
if shape == 'circle':
turtle.circle(size)
next_shape = 'square'
elif shape == 'square':
for i in range(4):
turtle.forward(size * 2)
turtle.left(90)
next_shape = 'circle'
turtle.right(angle)
turtle.forward(shift)
draw_shape(size + 5, angle + 1, shift + 1, next_shape)
turtle.bgcolor('black')
turtle.speed('fast')
turtle.pensize(4)
draw_shape(30, 0, 1, 'circle')
|
[
"38997799+Puneeth1996@users.noreply.github.com"
] |
38997799+Puneeth1996@users.noreply.github.com
|
b44e11cad4abf27ea17279b0c0b81774cd2a3641
|
3e84230328962ca40349694c3e11c651f46c1396
|
/predict.py
|
13a6760820de59bb5f36e4795d8faeff275b1d49
|
[
"MIT"
] |
permissive
|
Cozyzheng/RS_DL_Junyi
|
578b9a968f762f50b85beeb5f8e154c74b68845a
|
c2c894ee9a2c6d4d5ffba6504c3c5f55ffda03d3
|
refs/heads/master
| 2022-12-28T13:16:00.946021
| 2020-10-13T14:45:58
| 2020-10-13T14:45:58
| 281,114,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,375
|
py
|
# The imports below are assumed: this snippet calls os, cv2, numpy, Keras'
# load_model/img_to_array and predict_classes without importing them, so it is
# most likely an excerpt from a larger (Python 2 / old Keras) script.
# TEST_SET, labelencoder and image_size are module-level globals defined elsewhere.
import os
import cv2
import numpy as np
from keras.models import load_model
from keras.preprocessing.image import img_to_array
def predict(args):
# load the trained convolutional neural network
print("[INFO] loading network...")
model = load_model(args["model"])
stride = args['stride']
for n in range(len(TEST_SET)):
path = TEST_SET[n]
#load the image
image = cv2.imread('./test/' + path)
# pre-process the image for classification
#image = image.astype("float") / 255.0
#image = img_to_array(image)
h,w,_ = image.shape
padding_h = (h//stride + 1) * stride
padding_w = (w//stride + 1) * stride
padding_img = np.zeros((padding_h,padding_w,3),dtype=np.uint8)
padding_img[0:h,0:w,:] = image[:,:,:]
padding_img = padding_img.astype("float") / 255.0
padding_img = img_to_array(padding_img)
print 'src:',padding_img.shape
mask_whole = np.zeros((padding_h,padding_w),dtype=np.uint8)
for i in range(padding_h//stride):
for j in range(padding_w//stride):
crop = padding_img[:3,i*stride:i*stride+image_size,j*stride:j*stride+image_size]
_,ch,cw = crop.shape
if ch != 256 or cw != 256:
print 'invalid size!'
continue
crop = np.expand_dims(crop, axis=0)
#print 'crop:',crop.shape
pred = model.predict_classes(crop,verbose=2)
pred = labelencoder.inverse_transform(pred[0])
#print (np.unique(pred))
pred = pred.reshape((256,256)).astype(np.uint8)
#print 'pred:',pred.shape
mask_whole[i*stride:i*stride+image_size,j*stride:j*stride+image_size] = pred[:,:]
cv2.imwrite('./predict/pre'+str(n+1)+'.png',mask_whole[0:h,0:w])
def saveResult(test_image_path, test_predict_path, model_predict, color_dict, output_size):
imageList = os.listdir(test_image_path)
for i, img in enumerate(model_predict):
channel_max = np.argmax(img, axis = -1)
img_out = np.uint8(color_dict[channel_max.astype(np.uint8)])
        # use nearest-neighbour interpolation when resizing
img_out = cv2.resize(img_out, (output_size[0], output_size[1]), interpolation = cv2.INTER_NEAREST)
        # save as a losslessly compressed PNG
cv2.imwrite(test_predict_path + "\\" + imageList[i][:-4] + ".png", img_out)
|
[
"45881976+Cozyzheng@users.noreply.github.com"
] |
45881976+Cozyzheng@users.noreply.github.com
|
994ea97237862eb028bec8858075b318c1940f77
|
7803d6689b89b8e903082ec7009ae9181c404422
|
/Array/XOR queries.py
|
0df7036c43ffef5e3f2f21d6e74186aad4026027
|
[] |
no_license
|
vivekpapnai/Python-DSA-Questions
|
96d0e23cd27bb4fda7fde56277215b9bd35e23b1
|
827a2199734476f3e74cc7bae281e74397e7cf47
|
refs/heads/master
| 2023-07-08T08:58:13.288292
| 2021-08-16T03:03:29
| 2021-08-16T03:03:29
| 396,594,002
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
"""https://www.codingninjas.com/codestudio/guided-paths/data-structures-algorithms/content/118820/offering/1381864?leftPanelTab=0
"""
def xorQuery(queries):
# Write your code here.
arr = []
xor_array = [0]*10001
for i in queries:
var = i[0]
value = i[1]
if var == 1:
arr.append(value)
else:
xor_array[i] ^= value
xor_array[len(queries)] ^= value
# here we are appending the values in xor_Array at index of len of array and then we are doing xor operation for
# before that index in array
for i in range(len(arr)):
if i == 0:
arr[i] ^= xor_array[i]
else:
xor_array[i] = xor_array[i] ^ xor_array[i-1]
arr[i] ^= xor_array[i]
return arr
def xorQueryBest(queries):
# Write your code here.
arr = []
flag = 0
for i in range(len(queries)):
var = queries[i][0]
value = queries[i][1]
if var == 1:
arr.append(value^flag)
else:
flag = flag ^ value
for i in range(len(arr)):
arr[i] = arr[i] ^ flag
return arr
|
[
"papnaivivek@gmail.com"
] |
papnaivivek@gmail.com
|
dd9e778e66687a02aa77859c092f93d9d7f1db24
|
4fe4f39238c0a3ca571ed4a986dbf60475a2cc34
|
/views/admin.py
|
8949d2aef8c5ceea0969a4cd075ea59bffcf59ef
|
[] |
no_license
|
Lucky13VW/HSTT
|
09af09a88b3dc716e744f2a8ec6cd45c7f362b75
|
b6bce1bb98d6d3d04c6e1dd5439d0223020506cb
|
refs/heads/master
| 2020-04-23T16:32:50.964973
| 2019-09-01T15:37:52
| 2019-09-01T15:37:52
| 171,301,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,569
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Admin
~~~~~~
A microblog example application written as Flask tutorial with
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from flask import Blueprint, request, session, redirect, url_for, abort, \
render_template, flash
from functools import wraps
import sys
sys.path.append('views/')
from common import connect_db, get_db, do_hash
from math import ceil
# create our blueprint :)
admin = Blueprint('admin', __name__)
WeekDay = { 1:'Mon', 2:'Tue', 3:'Wed', 4:'Thu', 5:'Fri', 6:'Sat', 7:'Sun' }
DaySlot = { 1:'1st', 2:'2nd',3:'3rd', 4:'4th', 5:'5th', 6:'6th', 7:'7th' }
PageSizeTimeTable = 20
def check_login(func):
@wraps(func)
def cl_wrapper(*args,**kwargs):
if not session.get('logged_in'):
return redirect(url_for('admin.login'))
return func(*args,**kwargs)
return cl_wrapper
@admin.route('/')
@admin.route('/ttshow', methods=['GET', 'POST'])
@check_login
def show_timetable():
# request.form.get("key") post/ request.args.get("key") get/ request.values.get("key") all para
db = get_db()
with db.cursor() as cursor:
sql_str_arr = []
sql_count_arr = []
sql_str_arr.append('select m.id,m.day,m.slot,c.sname,t.name,cr.name from timetable as m,teachers as t \
,courses as c,classrooms as cr where m.t_id = t.id and m.c_id = c.id and m.cr_id = cr.id')
sql_count_arr.append('select count(m.id) from timetable as m,teachers as t \
,courses as c,classrooms as cr where m.t_id = t.id and m.c_id = c.id and m.cr_id = cr.id')
if request.method == 'POST':
sea_day = request.form.get('SeaDay', type=int, default=0)
sea_slot = request.form.get('SeaSlot', type=int, default=0)
sea_cid = request.form.get('SeaCourse', type=int, default=0)
sea_tid = request.form.get('SeaTeacher', type=int, default=0)
sea_crid = request.form.get('SeaClassroom', type=int, default=0)
if sea_day>0 :
sql_str_arr.append(' and m.day=%d'%sea_day)
sql_count_arr.append(' and m.day=%d'%sea_day)
if sea_slot>0:
sql_str_arr.append(' and m.slot=%d'%sea_slot )
sql_count_arr.append(' and m.slot=%d'%sea_slot )
if sea_cid>0:
sql_str_arr.append(' and c.id=%d'%sea_cid )
sql_count_arr.append(' and c.id=%d'%sea_cid )
if sea_tid>0:
sql_str_arr.append(' and t.id=%d'%sea_tid )
sql_count_arr.append(' and t.id=%d'%sea_tid )
if sea_crid>0:
sql_str_arr.append(' and cr.id=%d'%sea_crid )
sql_count_arr.append(' and cr.id=%d'%sea_crid )
else:
sea_day = sea_slot = sea_cid = sea_tid = sea_crid = 0
# count page and select section
count_sql = ''.join(sql_count_arr)
cursor.execute(count_sql)
ret_count = cursor.fetchone()
total_count = int(ret_count[0])
#page_index_para = request.cookies.get('PageIndex')
page_index = request.form.get('PageIndex', type=int, default=0)
if page_index == 0 and total_count > 0:
page_index = 1
sel_begin = 0
page_count = ceil(total_count/PageSizeTimeTable)
if page_index > page_count:
page_index = page_count
sel_begin = (page_index-1)*PageSizeTimeTable
elif page_index>0:
sel_begin = (page_index-1)*PageSizeTimeTable
entries = []
if total_count > 0:
# control select section
sql_str_arr.append((' order by m.id desc limit %d,%d')%(sel_begin,PageSizeTimeTable))
sel_sql = ''.join(sql_str_arr)
cursor.execute(sel_sql)
entries = cursor.fetchall()
sea_par = dict(S_day=sea_day,S_slot=sea_slot,S_cid=sea_cid,S_tid=sea_tid,S_crid=sea_crid)
if(not session.get('tbl_opt_map')):
#print('Loading from db for opt map...')
cursor.execute('select id,name from classrooms order by name')
cr_tbl = cursor.fetchall()
cursor.execute('select id,sname from courses order by sname')
co_tbl = cursor.fetchall()
cursor.execute('select id,name from teachers where id<200')
te_tbl = cursor.fetchall()
session['tbl_opt_map'] = dict(CR=cr_tbl,CO=co_tbl,TE=te_tbl)
return render_template('show_timetable.html',entries=entries,
page_index=page_index,page_count=page_count,
sel_day=WeekDay,sel_slot=DaySlot,S_par=sea_par)
@admin.route('/ttadd', methods=['POST'])
@check_login
def add_timetable():
db = get_db()
with db.cursor() as cursor:
add_day = request.form.get('WeekDay', type=int,default=0)
add_slot = request.form.get('DaySlot', type=int,default=0)
add_slen = request.form.get('SlotLen', type=int, default=1)
add_tid = request.form.get('OptTeacher', type=int,default=0)
add_cid= request.form.get('OptCourse', type=int,default=0)
add_crid = request.form.get('OptClassroom',type=int,default=0)
session['add_par'] = dict(A_day=add_day,A_slot=add_slot,A_slen=add_slen,
A_tid=add_tid,A_cid=add_cid,A_crid=add_crid)
for n in range(0,add_slen):
cursor.execute('insert into timetable (day,slot,t_id,c_id,cr_id) values (%s,%s,%s,%s,%s)',
(add_day, add_slot+n, add_tid, add_cid, add_crid))
db.commit()
return redirect(url_for('admin.show_timetable'))
@admin.route('/ttdelete', methods=['POST'])
@check_login
def delete_timetable():
ttid = request.form.getlist('TTID')
if(not ttid):
return redirect(url_for('admin.show_timetable'))
db = get_db()
with db.cursor() as cursor:
id_list = ','.join(ttid)
del_str = 'delete from timetable where id in (%s)'%id_list
cursor.execute(del_str)
db.commit()
return redirect(url_for('admin.show_timetable'))
@admin.route('/ttpurge', methods=['GET','POST'])
@check_login
def purge_timetable():
db = get_db()
with db.cursor() as cursor:
cursor.execute('truncate timetable')
db.commit()
return redirect(url_for('admin.show_timetable'))
@admin.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
db = get_db()
with db.cursor() as cursor:
cursor.execute('select authority,status from users where username=%s and password=%s limit 1',
(request.form['username'],do_hash(request.form['password'])))
one_user = cursor.fetchone()
if(one_user == None):
error = 'Wrong username or password!'
elif(one_user[1]!=0):
error = 'User suspend! Contact Administrator.'
elif(one_user[0]>1):
error = 'Insufficient privilege! Contact Administrator'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('admin.show_timetable'))
return render_template('login.html', error=error)
@admin.route('/logout')
def logout():
session.pop('logged_in', None)
session.pop('tbl_opt_map',None)
session.pop('add_par',None)
flash('You were logged out')
return redirect(url_for('admin.login'))
|
[
"vw01eipi@outlook.com"
] |
vw01eipi@outlook.com
|
11639a83ed7e80e7cb095bcca8e092b57a505042
|
b9767a977f939e43dbddd750cf04bdc0719de85b
|
/tests/stores/genericstoretests/generic_store_object_tests.py
|
ea4eda5afeacdac8b3331d216679dda243634e7e
|
[] |
no_license
|
mididecouverte/backupserver
|
211e9a0489ba28aefd3b7d94646aa2e358daa24d
|
8658429287ee73d6d9588812b92928c9b6b5fb2d
|
refs/heads/master
| 2020-04-21T11:19:17.937340
| 2019-02-07T04:51:40
| 2019-02-07T04:51:40
| 169,520,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
def test_object(store):
test_store_object(store)
test_apps_object(store.apps)
def test_store_object(store):
assert store
assert store.apps
assert store.reset
assert store.clean
def test_apps_object(apps):
assert apps.create
assert apps.get_all
assert apps.get
assert apps.update
assert apps.delete
assert apps.reset
assert apps.clean
|
[
"frederic.jacob.78@gmail.com"
] |
frederic.jacob.78@gmail.com
|
9d802b5518f8b855cfddced0fee8fd0122af90e4
|
e3df0e321e8bcf6e7d70644dccf5ea4f109580e8
|
/byteprint/bp/contrib/filebrowser/functions.py
|
8175694b0b8e1da84183ad3d443df8038ec46234
|
[
"BSD-2-Clause",
"MIT"
] |
permissive
|
colingourlay/byteprint
|
65af7b9e01299c1e62f4cb03c5641e12fcf39860
|
184ddb5eac48a48507e20553f82b2f16c1a29fda
|
refs/heads/master
| 2021-01-13T02:25:56.834846
| 2010-05-20T14:47:53
| 2010-05-20T14:47:53
| 677,022
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,805
|
py
|
# coding: utf-8
# imports
import os, re, decimal
from time import gmtime, strftime, localtime, mktime, time
from urlparse import urlparse
# django imports
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.core.files import File
from django.core.files.storage import default_storage
# filebrowser imports
from bp.contrib.filebrowser.settings import *
# PIL import
if STRICT_PIL:
from PIL import Image
else:
try:
from PIL import Image
except ImportError:
import Image
def url_to_path(value):
"""
Change URL to PATH.
Value has to be an URL relative to MEDIA URL or a full URL (including MEDIA_URL).
Returns a PATH relative to MEDIA_ROOT.
"""
mediaurl_re = re.compile(r'^(%s)' % (MEDIA_URL))
value = mediaurl_re.sub('', value)
return value
def path_to_url(value):
"""
Change PATH to URL.
Value has to be a PATH relative to MEDIA_ROOT.
Return an URL relative to MEDIA_ROOT.
"""
mediaroot_re = re.compile(r'^(%s)' % (MEDIA_ROOT))
value = mediaroot_re.sub('', value)
return url_join(MEDIA_URL, value)
def dir_from_url(value):
"""
Get the relative server directory from a URL.
URL has to be an absolute URL including MEDIA_URL or
an URL relative to MEDIA_URL.
"""
mediaurl_re = re.compile(r'^(%s)' % (MEDIA_URL))
value = mediaurl_re.sub('', value)
directory_re = re.compile(r'^(%s)' % (DIRECTORY))
value = directory_re.sub('', value)
return os.path.split(value)[0]
def get_version_path(value, version_prefix):
"""
Construct the PATH to an Image version.
Value has to be server-path, relative to MEDIA_ROOT.
version_filename = filename + version_prefix + ext
Returns a path relative to MEDIA_ROOT.
"""
if os.path.isfile(os.path.join(MEDIA_ROOT, value)):
path, filename = os.path.split(value)
filename, ext = os.path.splitext(filename)
# check if this file is a version of an other file
# to return filename_<version>.ext instead of filename_<version>_<version>.ext
tmp = filename.split("_")
if tmp[len(tmp)-1] in ADMIN_VERSIONS:
# it seems like the "original" is actually a version of an other original
# so we strip the suffix (aka. version_perfix)
new_filename = filename.replace("_" + tmp[len(tmp)-1], "")
# check if the version exists when we use the new_filename
if os.path.isfile(os.path.join(MEDIA_ROOT, path, new_filename + "_" + version_prefix + ext)):
# our "original" filename seem to be filename_<version> construct
# so we replace it with the new_filename
filename = new_filename
# if a VERSIONS_BASEDIR is set we need to strip it from the path
# or we get a <VERSIONS_BASEDIR>/<VERSIONS_BASEDIR>/... construct
if VERSIONS_BASEDIR != "":
path = path.replace(VERSIONS_BASEDIR + "/", "")
version_filename = filename + "_" + version_prefix + ext
return os.path.join(VERSIONS_BASEDIR, path, version_filename)
else:
return None
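# Illustration (hedged): with VERSIONS_BASEDIR = "_versions" and an existing
# file "uploads/pic.jpg" under MEDIA_ROOT, get_version_path("uploads/pic.jpg",
# "thumbnail") returns "_versions/uploads/pic_thumbnail.jpg".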
def sort_by_attr(seq, attr):
"""
Sort the sequence of objects by object's attribute
Arguments:
seq - the list or any sequence (including immutable one) of objects to sort.
attr - the name of attribute to sort by
Returns:
the sorted list of objects.
"""
import operator
# Use the "Schwartzian transform"
# Create the auxiliary list of tuples where every i-th tuple has form
# (seq[i].attr, i, seq[i]) and sort it. The second item of tuple is needed not
# only to provide stable sorting, but mainly to eliminate comparison of objects
# (which can be expensive or prohibited) in case of equal attribute values.
intermed = map(None, map(getattr, seq, (attr,)*len(seq)), xrange(len(seq)), seq)
intermed.sort()
return map(operator.getitem, intermed, (-1,) * len(intermed))
def url_join(*args):
"""
URL join routine.
"""
if args[0].startswith("http://"):
url = "http://"
else:
url = "/"
for arg in args:
arg = unicode(arg).replace("\\", "/")
arg_split = arg.split("/")
for elem in arg_split:
if elem != "" and elem != "http:":
url = url + elem + "/"
# remove trailing slash for filenames
if os.path.splitext(args[-1])[1]:
url = url.rstrip("/")
return url
def get_path(path):
"""
Get Path.
"""
if path.startswith('.') or os.path.isabs(path) or not os.path.isdir(os.path.join(MEDIA_ROOT, DIRECTORY, path)):
return None
return path
def get_file(path, filename):
"""
Get File.
"""
if not os.path.isfile(os.path.join(MEDIA_ROOT, DIRECTORY, path, filename)) and not os.path.isdir(os.path.join(MEDIA_ROOT, DIRECTORY, path, filename)):
return None
return filename
def get_breadcrumbs(query, path):
"""
Get breadcrumbs.
"""
breadcrumbs = []
dir_query = ""
if path:
for item in path.split(os.sep):
dir_query = os.path.join(dir_query,item)
breadcrumbs.append([item,dir_query])
return breadcrumbs
def get_filterdate(filterDate, dateTime):
"""
Get filterdate.
"""
returnvalue = ''
dateYear = strftime("%Y", gmtime(dateTime))
dateMonth = strftime("%m", gmtime(dateTime))
dateDay = strftime("%d", gmtime(dateTime))
if filterDate == 'today' and int(dateYear) == int(localtime()[0]) and int(dateMonth) == int(localtime()[1]) and int(dateDay) == int(localtime()[2]): returnvalue = 'true'
elif filterDate == 'thismonth' and dateTime >= time()-2592000: returnvalue = 'true'
elif filterDate == 'thisyear' and int(dateYear) == int(localtime()[0]): returnvalue = 'true'
elif filterDate == 'past7days' and dateTime >= time()-604800: returnvalue = 'true'
elif filterDate == '': returnvalue = 'true'
return returnvalue
def get_settings_var():
"""
Get settings variables used for FileBrowser listing.
"""
settings_var = {}
# Main
settings_var['DEBUG'] = DEBUG
settings_var['MEDIA_ROOT'] = MEDIA_ROOT
settings_var['MEDIA_URL'] = MEDIA_URL
settings_var['DIRECTORY'] = DIRECTORY
# FileBrowser
settings_var['URL_FILEBROWSER_MEDIA'] = URL_FILEBROWSER_MEDIA
settings_var['PATH_FILEBROWSER_MEDIA'] = PATH_FILEBROWSER_MEDIA
# TinyMCE
settings_var['URL_TINYMCE'] = URL_TINYMCE
settings_var['PATH_TINYMCE'] = PATH_TINYMCE
# Extensions/Formats (for FileBrowseField)
settings_var['EXTENSIONS'] = EXTENSIONS
settings_var['SELECT_FORMATS'] = SELECT_FORMATS
# Versions
settings_var['VERSIONS_BASEDIR'] = VERSIONS_BASEDIR
settings_var['VERSIONS'] = VERSIONS
settings_var['ADMIN_VERSIONS'] = ADMIN_VERSIONS
settings_var['ADMIN_THUMBNAIL'] = ADMIN_THUMBNAIL
# FileBrowser Options
settings_var['MAX_UPLOAD_SIZE'] = MAX_UPLOAD_SIZE
# Convert Filenames
settings_var['CONVERT_FILENAME'] = CONVERT_FILENAME
return settings_var
def handle_file_upload(path, file):
"""
Handle File Upload.
"""
file_path = os.path.join(path, file.name)
uploadedfile = default_storage.save(file_path, file)
return uploadedfile
def get_file_type(filename):
"""
Get file type as defined in EXTENSIONS.
"""
file_extension = os.path.splitext(filename)[1].lower()
file_type = ''
for k,v in EXTENSIONS.iteritems():
for extension in v:
if file_extension == extension.lower():
file_type = k
return file_type
def is_selectable(filename, selecttype):
"""
Get select type as defined in FORMATS.
"""
file_extension = os.path.splitext(filename)[1].lower()
select_types = []
for k,v in SELECT_FORMATS.iteritems():
for extension in v:
if file_extension == extension.lower():
select_types.append(k)
return select_types
def version_generator(value, version_prefix, force=None):
"""
Generate Version for an Image.
value has to be a serverpath relative to MEDIA_ROOT.
"""
# PIL's Error "Suspension not allowed here" work around:
# s. http://mail.python.org/pipermail/image-sig/1999-August/000816.html
if STRICT_PIL:
from PIL import ImageFile
else:
try:
from PIL import ImageFile
except ImportError:
import ImageFile
ImageFile.MAXBLOCK = IMAGE_MAXBLOCK # default is 64k
try:
im = Image.open(os.path.join(MEDIA_ROOT, value))
version_path = get_version_path(value, version_prefix)
absolute_version_path = os.path.join(MEDIA_ROOT, version_path)
version_dir = os.path.split(absolute_version_path)[0]
if not os.path.isdir(version_dir):
os.makedirs(version_dir)
os.chmod(version_dir, 0775)
version = scale_and_crop(im, VERSIONS[version_prefix]['width'], VERSIONS[version_prefix]['height'], VERSIONS[version_prefix]['opts'])
try:
version.save(absolute_version_path, quality=90, optimize=(os.path.splitext(version_path)[1].lower() != '.gif'))
except IOError:
version.save(absolute_version_path, quality=90)
return version_path
except:
return None
def scale_and_crop(im, width, height, opts):
"""
Scale and Crop.
"""
x, y = [float(v) for v in im.size]
if width:
xr = float(width)
else:
xr = float(x*height/y)
if height:
yr = float(height)
else:
yr = float(y*width/x)
if 'crop' in opts:
r = max(xr/x, yr/y)
else:
r = min(xr/x, yr/y)
if r < 1.0 or (r > 1.0 and 'upscale' in opts):
im = im.resize((int(x*r), int(y*r)), resample=Image.ANTIALIAS)
if 'crop' in opts:
x, y = [float(v) for v in im.size]
ex, ey = (x-min(x, xr))/2, (y-min(y, yr))/2
if ex or ey:
im = im.crop((int(ex), int(ey), int(x-ex), int(y-ey)))
return im
# if 'crop' in opts:
# if 'top_left' in opts:
# #draw cropping box from upper left corner of image
# box = (0, 0, int(min(x, xr)), int(min(y, yr)))
# im = im.resize((int(x), int(y)), resample=Image.ANTIALIAS).crop(box)
# elif 'top_right' in opts:
# #draw cropping box from upper right corner of image
# box = (int(x-min(x, xr)), 0, int(x), int(min(y, yr)))
# im = im.resize((int(x), int(y)), resample=Image.ANTIALIAS).crop(box)
# elif 'bottom_left' in opts:
# #draw cropping box from lower left corner of image
# box = (0, int(y-min(y, yr)), int(xr), int(y))
# im = im.resize((int(x), int(y)), resample=Image.ANTIALIAS).crop(box)
# elif 'bottom_right' in opts:
# #draw cropping box from lower right corner of image
# (int(x-min(x, xr)), int(y-min(y, yr)), int(x), int(y))
# im = im.resize((int(x), int(y)), resample=Image.ANTIALIAS).crop(box)
# else:
# ex, ey = (x-min(x, xr))/2, (y-min(y, yr))/2
# if ex or ey:
# box = (int(ex), int(ey), int(x-ex), int(y-ey))
# im = im.resize((int(x), int(y)), resample=Image.ANTIALIAS).crop(box)
# return im
scale_and_crop.valid_options = ('crop', 'upscale')
def convert_filename(value):
"""
Convert Filename.
"""
if CONVERT_FILENAME:
return value.replace(" ", "_").lower()
else:
return value
|
[
"colin.j.gourlay@gmail.com"
] |
colin.j.gourlay@gmail.com
|
5735f84124f9710f4cc65bf8557a43eb141d1095
|
9519804cf9a0148ea357bcc26332389c6e006554
|
/byld/migrations/0010_team_atm_ongoing.py
|
72592ff16c30e0cf82b1d5205bff94403a5b9d78
|
[] |
no_license
|
darkryder/scripting-challenge-byld
|
ea8f619605d28fbfd016f87a1511350b877c1200
|
5c47eb00ca879a070d56316f2c3a3f1f13ab385d
|
refs/heads/master
| 2016-08-11T13:15:42.256291
| 2016-03-06T22:46:10
| 2016-03-06T22:46:10
| 43,915,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('byld', '0009_team_token'),
]
operations = [
migrations.AddField(
model_name='team',
name='ATM_ongoing',
field=models.BooleanField(default=False),
),
]
|
[
"sambhav13085@iiitd.ac.in"
] |
sambhav13085@iiitd.ac.in
|
744ff7cf7c01af7e919ca461dfa5ad71b2e664f8
|
0ed37e0f906981f93e210d32ab93735c31755c6e
|
/fib.py
|
930a51fbb2ba32dd43a7b98a5d0a379e681aabc6
|
[
"MIT"
] |
permissive
|
helmet33/python-intro
|
34541ab52d4faecb6cf6ed694b745603828dc51c
|
83c1805476c859fc089681c2bf9cef7c82c01bf3
|
refs/heads/master
| 2021-01-21T15:44:02.059195
| 2016-02-10T01:50:52
| 2016-02-10T01:50:52
| 49,538,182
| 0
| 0
| null | 2016-01-13T00:29:03
| 2016-01-13T00:29:03
| null |
UTF-8
|
Python
| false
| false
| 54
|
py
|
a, b = 0, 1
while b < 10:
print(b)
a, b = b, a+b
|
[
"garrettjor@gmail.com"
] |
garrettjor@gmail.com
|
a0f2431d4fbfc2daae25157ac2b08923e7b6e6bc
|
7ba48f82dac0c19d41d7da51cda3aef5173dd77c
|
/myvenv/Scripts/django-admin.py
|
cdd70cd34e3559582d8dce449f4794969a6c341a
|
[] |
no_license
|
saiful7/Betasmartz
|
09af8b11f816adf3c2dc41ad5a70f170d6dbb981
|
337a79b59498f42294f19e53eea9cd1c8019ee48
|
refs/heads/master
| 2022-05-10T04:14:31.382569
| 2018-11-23T06:50:45
| 2018-11-23T06:50:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
#!f:\myworks\django\betasmartz\myvenv\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"31435513+blueskaie@users.noreply.github.com"
] |
31435513+blueskaie@users.noreply.github.com
|
72809080d819eb5f8526eb2e26c9d06c758bdae5
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/galex_j11365+7506/sdB_GALEX_J11365+7506_coadd.py
|
86d194fd14c5f0af121d8195d953f4cb63043f28
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[174.140292,75.114933], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_GALEX_J11365+7506/sdB_GALEX_J11365+7506_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_GALEX_J11365+7506/sdB_GALEX_J11365+7506_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
0d6acf4cd93adc8b3392b2568ad6027f977073b8
|
e91991aa2c515d538a485bdf93b7c99f94ff7b31
|
/Exercise Files/dash_example.py
|
84b631ec4688c9339458caf26921512d6099d438
|
[] |
no_license
|
alpitabrol/learning_basic_python
|
a05a22047cc5c6b852c1dbca4afbe388da3849bb
|
caa87d0449a845fb517ee7fdad3ce45dab9c96ef
|
refs/heads/master
| 2022-09-23T01:32:05.226796
| 2020-06-05T03:41:52
| 2020-06-05T03:41:52
| 261,611,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
import dash
import dash_core_components as dcc
import dash_html_components as html
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
    html.H1(children='Weather Dashboard'),
    html.Div(children='''Dash: A web application framework for Python. '''),
    dcc.Graph(
        id='example-graph',
        figure={
            'data': [
                {'x': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 'y': [60.56, 150.43, 76.98, 80.49, 250.80, 110.56, 80.33,
                 20.44, 15.32, 90.11, 150.67, 150.55], 'type': 'bar', 'name': 'Expected Rain'},
                {'x': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 'y': [40.09, 128.82, 51.57, 88.53, 295.47, 47.94, 42.05,
                 3.41, 14.4, 113.65, 226.95, 142.51], 'type': 'bar', 'name': 'Actual Rain'},
            ],
            'layout': {
                'title': 'Rainfall in SL in 2016'
            }
        }
    )
])
if __name__ == '__main__':
    app.run_server(debug=True)
|
[
"apt.abl@gmail.com"
] |
apt.abl@gmail.com
|
50e6b9307c5b51c44370f095ddb4e531cd67c800
|
1a1a9e1712ae5fe7b68b6063215df97d150887e8
|
/venv/Practice.py
|
0a5739eb286285343e07ad619296af190ae2034e
|
[] |
no_license
|
YashaswiniPython/pythoncodepractice
|
159835856b0122f7f041c8547e2bafd00fea0c38
|
66842413d1250a88b36c715f3413a7a02a136b62
|
refs/heads/master
| 2022-10-20T14:43:53.778464
| 2020-07-17T12:30:03
| 2020-07-17T12:30:03
| 280,417,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,471
|
py
|
#Class example
'''class Student:
print("first class program")
def __init__(self):
self.name = "yashu"
self.age = 26
self.marks = 80
def talk(self):
print("Student name is:",self.name)
print("Student age is:", self.age)
print("Student marks is:", self.marks)
obj=Student()
obj.talk()#'''
'''class Animals:
print("second program on animals")
name_of_forest = "Bandipur"
no_of_Tigers = 12
no_of_lions = 20
def Tigers(self):
print("total no of tigers", self.no_of_Tigers)
def lions(self):
print("total no of lions", self.no_of_lions)
o=Animals()
o.Tigers()
o.lions()'''
'''class Student:
def __init__(self,name,age,marks):
print("Program with constructor")
self.name=name
self.age=age
self.marks=marks
def report(self):
print("my name is",self.name)
print("my age is",self.age)
print("my marks is",self.marks)
obj=Student("Anu",25,100)
obj.report()
obj2=Student("Yashu",27,100)
obj2.report()'''
#constructor example
'''class Student:
def __init__(self,name,age): # self variable will be declared by python itself
self.name=name # instance variable
self.age=age # instance variable
print("my name is", self.name)
print("my age :", self.age)
s=Student("teju",10)
print("my name is",s.name) # we cannot call self outside the class so we use obj for reference
print("my age :",s.age)'''
'''class Test:
def __init__(self):
print("constructor overloading is not possible")
def __init__(self,x):
print("construtor o is possible")
o=Test(10)'''
# Hierarcheal Inheritance
'''class A:
print("inheritance example")
def Tigers(self):
print("carnivoros")
class B(A):
def Deer(self):
print("Herbivoros")
class C(A):
def Zebra(self):
print("i am also herbivoros")
obj=C()
obj.Tigers()'''
#Multi level inheritance
'''class A:
print("inheritance example")
def Tigers(self):
print("carnivoros")
class B(A):
def Deer(self):
print("Herbivoros")
class C(B):
def Zebra(self):
print("i am also herbivoros")
obj=C()
obj.Tigers()'''
#Multiple inheritance.
'''class A:
print("inheritance example")
def Tigers(self):
print("carnivoros")
class B:
def Tigers(self):
print("Herbivoros")
class C(A,B):
pass
#def Zebra(self):
#print("i am also herbivoros")
obj=C()
obj.Tigers()'''
#function example
'''def calculate(a,b):
print("addition is ",a+b)
print("differnece is ", a-b)
print("Multiplication is ", a*b)
calculate(20,10)
calculate(200,100)'''
#Factorial
'''import math
print(math.factorial(5))'''
'''a=11
if (a==12):
print("True")
elif (a==11):
print("exactly")
else:
print("Exit")'''
'''a=12
b=20
def addition():
print("addition of : a+b", a+b)
addition()'''
#Exception
'''a=(int(input("value of a")))
b=(int(input("value of b")))
try:
c=a/b
print("result",c)
except ZeroDivisionError:
print("dont divide by 0")
finally:
print("Program executed")'''
a=10
b=120
c=1
print(min(a,b,c))
'''if (a>b>c):
print("a is greater")
if(a<b>c):
print("b is greater")
if(a<b<c):
print("c is greater")
else:
print("exit from program")'''
f = open("yashu.txt","r")
f.read()
print(f.readline(1))
#def addition():
#a=10
#b=12
#print("Sum is",a+b)
#addition()
|
[
"yashaswinicr@gmail.com"
] |
yashaswinicr@gmail.com
|
997575f07ab4df3c266c0a346dd7d37fef237fa3
|
1d996e00faa7f8fd7d0e6d1a6abfa6cba74c6f25
|
/wizard/__init__.py
|
2e344373032fadf72651b7961c67a20a1bf84398
|
[] |
no_license
|
Adiba07/Gestion-des-Formations
|
cc3cb28af56da5556ff6e5c9e008e4f66644edd1
|
0db85ad5ccb3d42e2d6b15daf1bda0d4e31625d5
|
refs/heads/master
| 2020-05-01T11:59:22.159990
| 2019-03-24T19:04:18
| 2019-03-24T19:04:18
| 177,456,626
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32
|
py
|
from . import wizard_attestation
|
[
"boufeldjaadiba7@gmail.com"
] |
boufeldjaadiba7@gmail.com
|
95b8a2659fbccbdff8f71b7c5cbd59c09e079c30
|
5f830b9a4dddac8067224982d03c4fb67a175441
|
/geodata/estat.py
|
df0a7469e9bbf0a0260f314a2317fa9ab3501246
|
[] |
no_license
|
weatherbox/warning-area
|
741d7d75f44a21d24075d992c8214dcd7e6c1fb5
|
44b57d3752da53900cc070351cd79f97fbf88c4b
|
refs/heads/master
| 2021-07-01T08:58:40.211262
| 2021-06-24T06:49:08
| 2021-06-24T06:49:08
| 91,368,122
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,946
|
py
|
# -*- coding: utf-8 -*-
import requests
import json
import zipfile
import re
import os, sys
list_url = 'http://e-stat.go.jp/SG2/eStatGIS/Service.asmx/GetDownloadStep4ListTokeiTag'
file_url = 'http://e-stat.go.jp/SG2/eStatGIS/downloadfile.ashx'
def get_geojson(code):
filename = 'data/census-2010-' + code + '.zip'
if not os.path.exists(filename):
id = get_id(code)
download_file(code, id)
path = unzip(filename)
geojson = convert_geojson(path, code)
return geojson
def download_file(code, id):
payload = {
'state': '',
'pdf': '0',
'id': id,
'cmd': 'D001',
'type': '5',
'tcode': 'A002005212010',
'acode': '',
'ccode': code
}
req = requests.post(file_url, data=payload, stream=True)
filename = 'data/census-2010-' + code + '.zip'
print 'download', filename
with open(filename, 'wb') as f:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
return filename
def get_id(code):
payload = {
'censusId': 'A002005212010',
'statIds': 'T000572',
'cityIds': code,
'forHyou': False
}
req = requests.post(list_url, data=payload)
for match in re.finditer(r"dodownload\(0,'(\d+)','(\d)',.*?\)", req.text):
id = match.group(1)
type = match.group(2)
if type == '5':
return id
def unzip(filename):
path = filename[:-4]
if not os.path.exists(path):
os.mkdir(path)
zfile = zipfile.ZipFile(filename)
zfile.extractall(path)
return path
def convert_geojson(path, code):
output = path + '/' + code + '.json'
input = path + '/h22ka' + code + '.shp'
os.system('ogr2ogr -f GeoJSON ' + output + ' ' + input)
print 'geojson', output
return output
if __name__ == '__main__':
get_geojson(sys.argv[1])
|
[
"yuta.tatti@gmail.com"
] |
yuta.tatti@gmail.com
|
7794eae49f53c875ee5461f8a2f73095afbad8cb
|
485ebc2821699db03fcdbf4e323c3f8a213427c0
|
/catkin_ws/src/depth_controller/nodes/pressureToDepth.py
|
3b25270df523eaec86fc7d106fcdf369c01f9c31
|
[] |
no_license
|
CarlGiest/FormulasAndVehicles
|
5cf9039f72a8205142139bd3ddcc3c55270a8760
|
f14daa2d07fa7abbf360ec7a045b2539c6d4ccf0
|
refs/heads/main
| 2023-02-26T00:12:08.794670
| 2021-02-03T21:12:54
| 2021-02-03T21:12:54
| 333,060,933
| 2
| 1
| null | 2021-02-01T19:00:24
| 2021-01-26T11:17:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float64
from sensor_msgs.msg import FluidPressure
depth_buf = [-0.5] * 5
depth_buf_len = 5
def pressure_callback(msg, depth_pub):
pascal_per_meter = 9.81*1000.0
depth_tmp = - (msg.fluid_pressure - 101325) / pascal_per_meter
depth_buf.append(depth_tmp)
if len(depth_buf) > depth_buf_len:
depth_buf.pop(0)
depth = sum(depth_buf) / len(depth_buf)
depth_msg = Float64()
# depth_raw_msg = Float64()
# depth_msg.data = depth
depth_msg.data = depth # offset, water level is zero
# depth_raw_msg.data = depth_tmp + 10
depth_pub.publish(depth_msg)
# depth_raw_pub.publish(depth_raw_msg)
def main():
rospy.init_node("depth_calculator")
depth_pub = rospy.Publisher("depth", Float64, queue_size=1)
# depth_raw_pub = rospy.Publisher("depth_raw", Float64, queue_size=1)
rospy.Subscriber("pressure", FluidPressure,
pressure_callback,
(depth_pub))
rospy.spin()
if __name__ == "__main__":
main()
|
[
"carl.giest@tuhh.de"
] |
carl.giest@tuhh.de
|
853246bda598836b4a320111ce029dcc3fb723ae
|
583eb4f8331fbc6ab99b194602aa4bf7da766c68
|
/tests/array_in_struct.py
|
5048c82cf62aeeeb7a99e74bc1a43fff406e0a65
|
[] |
no_license
|
stephenrkell/pycallocs
|
127442140cbb815c419a3147357b316d26f03997
|
12439216bb7e774003657c61f19f4dcca45f89cc
|
refs/heads/master
| 2023-03-16T14:51:14.377589
| 2019-07-26T13:01:45
| 2019-07-26T13:01:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
import elflib
elflib.__path__.append("libs/")
from elflib import arrays as m
print(m.make_empty())
na = m.make_named_array(2)
frodo = na[0].first_name
frodo.__init__("Frodo")
na[0].last_name.__init__("Baggins")
na[1].first_name.__init__("Samsaget")
na[1].last_name.__init__("Gamgie")
print(na)
|
[
"guillaume.bertholon@ens.fr"
] |
guillaume.bertholon@ens.fr
|
78f942ff68dd68fe6a0c09e93d4bcc23a038b2ac
|
86f21b10e09eed254bc3b32f6ff00b7ba1544c0f
|
/4_labelGenes/read_gene_differences.py
|
e68a3257ee31fd61446af8bf0fce8c51cf14aace
|
[] |
no_license
|
friesen-labmember/transcriptome-qsubs
|
b68acff9fdf3509a5def83d48f34561d859316d5
|
6e3f12378dd885ebd8d24835579a69e0d2e4573a
|
refs/heads/master
| 2021-01-10T11:06:45.364224
| 2015-05-27T17:27:13
| 2015-05-27T17:27:13
| 36,379,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,952
|
py
|
id_dict = {}
for num in ['3', '5', '6', '7', '9', '10', '15', '17', '18']:
print(num + " running")
labelname = "/mnt/lustre_scratch_2012/keithzac/Tf_Analysis/GeneLabelOutput/Tf_" + num + "label"
genefile = open(labelname, 'r')
for line in genefile:
linesplit = line.split(';')
if len(linesplit) == 2:
ID = linesplit[0]
Note = linesplit[1]
if ID not in id_dict:
id_dict[ID] = (Note, [num])
elif num not in id_dict[ID][1]:
id_dict[ID][1].append(num)
outfile = open ("genedict", 'w')
for key in id_dict:
outfile.write(str(key) + " ; " + str(id_dict[key][0]).rstrip() + " ; " + str(id_dict[key][1]) + "\n")
onlyOne = {}
onlyEight = {}
onlyGroupA = {}
onlyNotGroupA = {}
onlyGroupB = {}
onlyNotGroupB = {}
onlyGroupC = {}
onlyNotGroupC = {}
GroupA = ['3', '5', '6']
GroupB = ['7', '9', '10']
GroupC = ['15', '17', '18']
for key in id_dict:
count = len(id_dict[key][1])
if count == 1:
onlyOne[key] = id_dict[key]
elif count == 8:
onlyEight[key] = id_dict[key]
elif count == 3:
if '3' in id_dict[key][1] and '5' in id_dict[key][1] and '6' in id_dict[key][1]:
onlyGroupA[key] = id_dict[key]
if '7' in id_dict[key][1] and '9' in id_dict[key][1] and '10' in id_dict[key][1]:
onlyGroupB[key] = id_dict[key]
if '15' in id_dict[key][1] and '17' in id_dict[key][1] and '18' in id_dict[key][1]:
onlyGroupC[key] = id_dict[key]
elif count == 6:
if '3' not in id_dict[key][1] and '5' not in id_dict[key][1] and '6' not in id_dict[key][1]:
onlyNotGroupA[key] = id_dict[key]
if '7' not in id_dict[key][1] and '9' not in id_dict[key][1] and '10' not in id_dict[key][1]:
onlyNotGroupB[key] = id_dict[key]
if '15' not in id_dict[key][1] and '17' not in id_dict[key][1] and '18' not in id_dict[key][1]:
onlyNotGroupC[key] = id_dict[key]
differencefile = open("gene_sample_differences.txt", 'w')
differencefile.write("Genes found only in group A (3, 5, 6):\n")
for key in onlyGroupA:
differencefile.write(str(key) + " ; " + str(onlyGroupA[key][0]).rstrip() + " ; " + str(onlyGroupA[key][1]) + "\n")
differencefile.write("\n")
differencefile.write("Genes found only in group B (7, 9, 10):\n")
for key in onlyGroupB:
differencefile.write(str(key) + " ; " + str(onlyGroupB[key][0]).rstrip() + " ; " + str(onlyGroupB[key][1]) + "\n")
differencefile.write("\n")
differencefile.write("Genes found only in group C (15, 17, 18):\n")
for key in onlyGroupC:
differencefile.write(str(key) + " ; " + str(onlyGroupC[key][0]).rstrip() + " ; " + str(onlyGroupC[key][1]) + "\n")
differencefile.write("\n")
differencefile.write("Genes only missing from group A (3, 5, 6):\n")
for key in onlyNotGroupA:
differencefile.write(str(key) + " ; " + str(onlyNotGroupA[key][0]).rstrip() + " ; " + str(onlyNotGroupA[key][1]) + "\n")
differencefile.write("\n")
differencefile.write("Genes only missing from group B (7, 9, 10):\n")
for key in onlyNotGroupB:
differencefile.write(str(key) + " ; " + str(onlyNotGroupB[key][0]).rstrip() + " ; " + str(onlyNotGroupB[key][1]) + "\n")
differencefile.write("\n")
differencefile.write("Genes only missing from group C (15, 17, 18):\n")
for key in onlyNotGroupC:
differencefile.write(str(key) + " ; " + str(onlyNotGroupC[key][0]).rstrip() + " ; " + str(onlyNotGroupC[key][1]) + "\n")
differencefile.write("\n")
differencefile.write("Genes found only in one sample:\n")
for key in onlyOne:
differencefile.write(str(key) + " ; " + str(onlyOne[key][0]).rstrip() + " ; " + str(onlyOne[key][1]) + "\n")
differencefile.write("\n")
differencefile.write("Genes found in all but one one sample:\n")
for key in onlyEight:
differencefile.write(str(key) + " ; " + str(onlyEight[key][0]).rstrip() + " ; " + str(onlyEight[key][1]) + "\n")
differencefile.write("\n")
|
[
"zkeith.inbox@gmail.com"
] |
zkeith.inbox@gmail.com
|
7bd59a5ba13b776902a6a545c67280b487cac7a0
|
66e4e9c80c10acbf1ef3de7f9fbda7ef9bf26cb1
|
/1/1.py
|
f2418d6e28683a553212deebe59ee952048e8b22
|
[] |
no_license
|
skabber/Euler
|
4cdf8d8e8876be1e3f0219dc3223c3baa23b6b12
|
7ef9ce27ea7826e1d77645a322abe3332172be54
|
refs/heads/master
| 2020-12-25T19:15:01.953835
| 2013-11-23T00:04:18
| 2013-11-23T00:04:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 77
|
py
|
i = 0
for x in range(1000):
if x % 3 == 0 or x % 5 == 0:
i = i + x
print i
|
[
"jay@doubleencore.com"
] |
jay@doubleencore.com
|
318c7af82b32580b12738438640323a737c89b19
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02756/s138002025.py
|
247e02cd309a3761321cc5397960055d992aff09
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
import sys
from collections import deque
sys.setrecursionlimit(10**7)
input = sys.stdin.readline
s = input().rstrip()
q = int(input())
d = deque()
d.append(s)
flag = False
for i in range(q):
qq = input().split()
if qq[0] == '1':
flag = not(flag)
else:
if flag:
if qq[1] == '1':
d.append(qq[2])
else:
d.appendleft(qq[2])
else:
if qq[1] == '1':
d.appendleft(qq[2])
else:
d.append(qq[2])
ans = ''.join(list(d))
if flag:
print(ans[::-1])
else:
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c9309b6d0c534a669353c5daea9eb7db054ea482
|
6f530dae1cd9a1f978658c8a12ed391c922c61d4
|
/genetic.py
|
0672e45671503dba127b7d813e5180db9a8ef8b7
|
[] |
no_license
|
hoang6k/Evolutionary_stochastic_optimization_portfolio_selection
|
ce3a7c129b9ea0483dd72483f145217b369bd6e3
|
b5e97d597f38877903b008a1fc538175ce7c7894
|
refs/heads/master
| 2022-11-04T18:39:17.310278
| 2020-06-22T17:48:57
| 2020-06-22T17:48:57
| 243,721,536
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,488
|
py
|
import numpy as np
import pandas as pd
import copy
from time import time
import matplotlib.pyplot as plt
class Chromosome:
def __init__(self, weight):
if weight is None:
weight = []
self._weight = weight
self.calculate_fitness()
def calculate_fitness(self):
if self._method == 'VaR':
return self.calculate_VaR_fitness()
elif self._method == 'VaRp':
return self.calculate_VaRp_fitness()
elif self._method == 'markovitz':
return self.calculate_markovitz_fitness()
elif self._method == 'markovitz_sqrt':
return self.calculate_markovitz_fitness_sqrt()
elif self._method == 'sharp_coef':
return self.calculate_sharp_coef_fitness()
elif self._method == 'sharp_coef_sqrt':
return self.calculate_sharp_coef_fitness_sqrt()
else:
exit()
def calculate_VaR_fitness(self):
value_ptf = self._pct_change * self._weight * 1e6
value_ptf['Value of Portfolio'] = value_ptf.sum(axis=1)
ptf_percentage = value_ptf['Value of Portfolio']
ptf_percentage = ptf_percentage.sort_values(axis=0, ascending=True)
_VaR = np.percentile(ptf_percentage, self._z_score)
self._fitness = -_VaR
return self._fitness
def calculate_VaRp_fitness(self):
port_variance = np.dot(self._weight, np.dot(self._annual_cov_matrix, self._weight.T))
port_standard_devitation = np.sqrt(port_variance)
port_returns_expected = np.sum(self._weight * self._annual_returns)
self._fitness = (- port_returns_expected + 2.33 * port_standard_devitation) * 1e6
if self._fitness < 0:
print('unexpected fitness < 0')
exit()
return self._fitness
def calculate_markovitz_fitness(self):
_lambda = self._lambda
port_variance = np.dot(self._weight, np.dot(self._annual_cov_matrix, self._weight.T))
port_standard_devitation = np.sqrt(port_variance)
port_returns_expected = np.sum(self._weight * self._annual_returns)
self._fitness = (_lambda * port_standard_devitation - (1 - _lambda) * port_returns_expected) * 1e6
return self._fitness
def calculate_markovitz_fitness_sqrt(self):
_lambda = self._lambda
port_variance = np.dot(self._weight, np.dot(self._annual_cov_matrix, self._weight.T))
port_standard_devitation = np.sqrt(port_variance)
port_returns_expected = np.sum(self._weight * self._annual_returns)
self._fitness = (_lambda * np.sqrt(port_standard_devitation) - (1 - _lambda) * port_returns_expected) * 1e6
return self._fitness
def calculate_sharp_coef_fitness(self):
port_variance = np.dot(self._weight, np.dot(self._annual_cov_matrix, self._weight.T))
port_standard_devitation = np.sqrt(port_variance)
port_returns_expected = np.sum(self._weight * self._annual_returns)
self._fitness = - port_returns_expected / port_standard_devitation * 1e6
return self._fitness
def calculate_sharp_coef_fitness_sqrt(self):
port_variance = np.dot(self._weight, np.dot(self._annual_cov_matrix, self._weight.T))
port_standard_devitation = np.sqrt(port_variance)
port_returns_expected = np.sum(self._weight * self._annual_returns)
self._fitness = - port_returns_expected / np.sqrt(port_standard_devitation) * 1e6
return self._fitness
@classmethod
def calculate_sd_e(cls, df, z_score, _lambda, optimize):
df.drop(['DTYYYYMMDD'], axis=1, inplace=True)
cls._method = optimize
cls._z_score = z_score
cls._lambda = _lambda
cls._pct_change = df.pct_change()
cls._annual_returns = cls._pct_change.mean()
cls._annual_cov_matrix = cls._pct_change.cov()
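# Usage sketch (hedged; the file name and parameter values are assumptions, not
# from the original code): the class-level statistics must be prepared before any
# Chromosome is constructed, because __init__ immediately evaluates the fitness.
#
#   prices = pd.read_csv('prices.csv')   # must contain a DTYYYYMMDD column
#   Chromosome.calculate_sd_e(prices, z_score=1, _lambda=0.5, optimize='markovitz')
#   n_assets = prices.shape[1]           # DTYYYYMMDD was dropped in place
#   chromo = Chromosome(np.full(n_assets, 1.0 / n_assets))
#   print(chromo._fitness)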
class Population:
def __init__(self, first_population: list = None):
if first_population is None:
first_population = []
self._population_size = len(first_population)
self._generations = [first_population]
_fitness = [chromo._fitness for chromo in first_population]
self._best_fitness = np.min(np.asarray(_fitness))
self._all_best_fitness = [self._best_fitness]
self._generations_solution = [first_population[np.argmin(_fitness)]]
self._best_solution = self._generations_solution[-1]
self.verbose = False
def mutation(self, children):
new_children = []
chosen_indexes = np.random.choice(len(children), size=self._mutation_size, replace=False)
for i in range(len(children)):
if i not in chosen_indexes:
new_children.append(copy.deepcopy(children[i]))
continue
chromosome = children[i]
if self.verbose > 1:
print('\t\tStarting mutation {}th child'.format(len(new_children) + 1))
_mutation = bool(np.random.rand(1) <= self._mutation_probability)
if _mutation:
mutation_genes_indexes = np.random.choice(
np.arange(len(chromosome._weight)),
size=np.random.randint(len(chromosome._weight)),
replace=False)
sorted_indexes = np.sort(mutation_genes_indexes)
new_weight = np.array(chromosome._weight)
new_weight[sorted_indexes] = chromosome._weight[mutation_genes_indexes]
new_children.append(Chromosome(new_weight))
return new_children
def crossover_all(self, parents, alpha=None):
pass
def crossover_random(self, parents, alpha=None):
genes_number = len(parents[0]._weight)
children = []
for i in range(int(self._offspring_number / 2)):
if self.verbose > 1:
print('\t\t{}_th 2 childs'.format(i + 1))
_crossover = bool(np.random.rand(1) <= self._crossover_probability)
if _crossover:
index = np.random.randint(len(parents))
father = parents.pop(index)._weight
index = np.random.randint(len(parents))
mother = parents.pop(index)._weight
crossover_genes_indexes = np.random.choice(
np.arange(genes_number),
size=np.random.randint(genes_number),
replace=False)
sorted_indexes = np.sort(crossover_genes_indexes)
_cs_genes_father = father[sorted_indexes]
_cs_genes_mother = mother[sorted_indexes]
cs_genes_father = _cs_genes_father * np.sum(_cs_genes_mother) / np.sum(_cs_genes_father)
cs_genes_mother = _cs_genes_mother * np.sum(_cs_genes_father) / np.sum(_cs_genes_mother)
father[sorted_indexes] = cs_genes_mother
mother[sorted_indexes] = cs_genes_father
weight_1 = father
weight_2 = mother
overweight_1 = np.where(weight_1 > 0.4)
if len(overweight_1[0]) == 1:
weight_1 += (weight_1[overweight_1[0][0]] - 0.4) / (len(weight_1) - 1)
weight_1[overweight_1[0][0]] = 0.4
elif len(overweight_1[0]) == 2:
weight_1 += (weight_1[overweight_1[0][0]] + weight_1[overweight_1[0][1]] - 0.8) / (len(weight_1) - 2)
weight_1[overweight_1[0][0]] = 0.4
weight_1[overweight_1[0][1]] = 0.4
overweight_2 = np.where(weight_2 > 0.4)
if len(overweight_2[0]) == 1:
weight_2 += (weight_2[overweight_2[0][0]] - 0.4) / (len(weight_2) - 1)
weight_2[overweight_2[0][0]] = 0.4
elif len(overweight_2[0]) == 2:
weight_2 += (weight_2[overweight_2[0][0]] + weight_2[overweight_2[0][1]] - 0.8) / (len(weight_2) - 2)
weight_2[overweight_2[0][0]] = 0.4
weight_2[overweight_2[0][1]] = 0.4
children.append(Chromosome(weight_1))
children.append(Chromosome(weight_2))
return children
def crossover_2points(self, parents, alpha=None):
genes_number = len(parents[0]._weight)
children = []
for i in range(int(self._offspring_number / 2)):
if self.verbose > 1:
print('\t\t{}_th 2 childs'.format(i + 1))
_crossover = bool(np.random.rand(1) <= self._crossover_probability)
if _crossover:
index = np.random.randint(len(parents))
father = parents.pop(index)._weight
index = np.random.randint(len(parents))
mother = parents.pop(index)._weight
two_points = np.random.choice(np.arange(genes_number), size=2, replace=False)
two_points.sort()
_cs_genes_father = father[two_points[0]:two_points[1] + 1]
_cs_genes_mother = mother[two_points[0]:two_points[1] + 1]
cs_genes_father = _cs_genes_father * np.sum(_cs_genes_mother) / np.sum(_cs_genes_father)
cs_genes_mother = _cs_genes_mother * np.sum(_cs_genes_father) / np.sum(_cs_genes_mother)
try:
weight_1 = np.concatenate((father[:two_points[0]], cs_genes_mother, father[two_points[1] + 1:]))
weight_2 = np.concatenate((mother[:two_points[0]], cs_genes_father, mother[two_points[1] + 1:]))
except IndexError:
weight_1 = np.concatenate((father[:two_points[0]], cs_genes_mother))
weight_2 = np.concatenate((mother[:two_points[0]], cs_genes_father))
overweight_1 = np.where(weight_1 > 0.4)
if len(overweight_1[0]) == 1:
weight_1 += (weight_1[overweight_1[0][0]] - 0.4) / (len(weight_1) - 1)
weight_1[overweight_1[0][0]] = 0.4
elif len(overweight_1[0]) == 2:
weight_1 += (weight_1[overweight_1[0][0]] + weight_1[overweight_1[0][1]] - 0.8) / (len(weight_1) - 2)
weight_1[overweight_1[0][0]] = 0.4
weight_1[overweight_1[0][1]] = 0.4
overweight_2 = np.where(weight_2 > 0.4)
if len(overweight_2[0]) == 1:
weight_2 += (weight_2[overweight_2[0][0]] - 0.4) / (len(weight_2) - 1)
weight_2[overweight_2[0][0]] = 0.4
elif len(overweight_2[0]) == 2:
weight_2 += (weight_2[overweight_2[0][0]] + weight_2[overweight_2[0][1]] - 0.8) / (len(weight_2) - 2)
weight_2[overweight_2[0][0]] = 0.4
weight_2[overweight_2[0][1]] = 0.4
children.append(Chromosome(weight_1))
children.append(Chromosome(weight_2))
return children
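    # Added worked sketch of the segment swap used by crossover_random and crossover_2points:
    # the exchanged genes are rescaled so each child keeps the weight mass of the segment it
    # gives away. For example, a father's segment [0.1, 0.3] (sum 0.4) is rescaled to
    # [0.05, 0.15] before being written into the mother, while the mother's [0.1, 0.1]
    # (sum 0.2) becomes [0.2, 0.2] inside the father, so both children still sum to 1.
    # Any weight pushed above the 0.4 cap is then clamped to 0.4 and the excess spread over
    # the remaining genes (handled for at most two overweight genes).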
def crossover_1point(self, parents, alpha=None):
children = []
for i in range(int(self._offspring_number / 2)):
if self.verbose > 1:
print('\t\t{}_th 2 childs'.format(i + 1))
_crossover = bool(np.random.rand(1) <= self._crossover_probability)
if _crossover:
index = np.random.randint(len(parents))
father = parents.pop(index)
index = np.random.randint(len(parents))
mother = parents.pop(index)
if alpha is None:
alpha = father._fitness / (father._fitness + mother._fitness)
child_1 = Chromosome((1 - alpha) * father._weight + alpha * mother._weight)
child_2 = Chromosome(alpha * father._weight + (1 - alpha) * mother._weight)
children.append(child_1)
children.append(child_2)
return children
def roulette_wheel_selection(self, generation, k=5):
fitness = np.asarray([chromo._fitness for chromo in generation])
if Chromosome._method in ['VaR', 'VaRp']:
fitness = 1 - fitness / np.sum(fitness)
else:
# min-max scaling
_min = np.min(fitness)
_max = np.max(fitness)
fitness = (_max - fitness + 0.1) / (_max - _min + 0.2)
fitness /= np.sum(fitness)
parents = []
for _ in range(self._offspring_number):
chosen_indexes = np.random.choice(np.arange(len(fitness)), size=k, replace=False, p=fitness)
best_index = chosen_indexes[np.argmax(fitness[chosen_indexes])]
parents.append(copy.deepcopy(generation[best_index]))
return parents
def tournament_selection(self, generation, k):
fitness = np.asarray([chromo._fitness for chromo in generation])
parents = []
for _ in range(self._offspring_number):
chosen_indexes = np.random.choice(self._population_size, size=k, replace=False)
best_index = chosen_indexes[np.argmin(fitness[chosen_indexes])]
parents.append(copy.deepcopy(generation[best_index]))
return parents
def rank_selection(self, generation, k):
pass
def boltzmann_selection(self, generation, k):
pass
def elitism_selection(self, generation, k):
pass
def generate_next_population(self):
if self.verbose > 0:
print('\nIteration {}'.format(len(self._all_best_fitness)))
# print('\nIteration {} - Record {}'.format(len(self._all_best_fitness), self._best_solution._fitness))
generation = self._generations[-1]
np.random.shuffle(generation)
generation_fitness = np.asarray([chromo._fitness for chromo in generation])
# selection phase
selection_switcher = {
'roulette_wheel': self.roulette_wheel_selection,
'tournament': self.tournament_selection,
'rank': self.rank_selection,
'boltzmann': self.boltzmann_selection,
'elitism': self.elitism_selection
}
parents = selection_switcher.get(
self._selection_method['type'],
lambda: 'Invalid selection method')(
generation,
self._selection_method['k']
)
# cross-over phase
if self.verbose > 1:
print('----CROSS-OVER PHASE')
start_time = time()
crossover_switcher = {
'1point': self.crossover_1point,
'2points': self.crossover_2points,
'random': self.crossover_random
}
children = crossover_switcher.get(
self._crossover_method['type'],
lambda: 'Invalid crossover method')(
parents,
self._crossover_method['parameters']
)
best_fitness = np.min([chromo._fitness for chromo in children])
if self.verbose > 0:
if self.verbose > 1:
print('Time of cross-over: {} seconds'.format(time() - start_time))
print('\tCROSS-OVER best fitness: {}'.format(best_fitness))
# mutation phase
if self.verbose > 1:
print('****MUTATION PHASE')
start_time = time()
new_children = self.mutation(children)
best_fitness = np.min(np.asarray([chromo._fitness for chromo in new_children]))
if self.verbose > 0:
if self.verbose > 1:
print('Time of mutation: {} seconds'.format(time() - start_time))
print('\tMUTATION best fitness: {}'.format(best_fitness))
# replace worst chromosomes
sorted_indexes = np.argsort(generation_fitness)
worst_indexes = sorted_indexes[-self._chromosomes_replace:]
worst_indexes.sort()
worst_indexes = np.flip(worst_indexes)
for idx in worst_indexes:
generation.pop(idx)
new_generation = generation + new_children
new_generation_fitness = np.asarray([chromo._fitness for chromo in new_generation])
self._generations.append(new_generation)
self._all_best_fitness.append(np.min(new_generation_fitness))
self._generations_solution.append(new_generation[np.argmin(new_generation_fitness)])
# if self._all_best_fitness[-1] < self._best_fitness:
# self._best_solution = self._generations_solution[-1]
# self._best_fitness = self._all_best_fitness[-1]
return self._all_best_fitness[-1]
def print(self):
print('Population size: ' + str(self._population_size))
print('Offspring number: ' + str(self._offspring_number))
print('Selection type: ' + self._selection_method['type'].capitalize())
print('Crossover method: ' + self._crossover_method['type'].capitalize())
print('Crossover probability: ' + str(self._crossover_probability))
print('Mutation probability: ' + str(self._mutation_probability))
print('Mutation size: ' + str(self._mutation_size))
print('Max generations number: ' + str(self._generations_number))
print('Stop criterion depth: ' + str(self._stop_criterion_depth), end='\n\n')
def generate_populations(self, config: dict, verbose=False):
self._offspring_number = int(self._population_size * config['offspring_ratio'])
if self._offspring_number % 2 == 1:
self._offspring_number += 1
self._crossover_probability = config['crossover_probability']
self._selection_method = config['selection_method']
self._crossover_method = config['crossover_method']
self._mutation_probability = config['mutation_probability']
self._mutation_size = int(self._offspring_number * config['mutation_ratio'])
self._chromosomes_replace = self._offspring_number
self._generations_number = config['generations_number']
self._stop_criterion_depth = config['stop_criterion_depth']
self.verbose = verbose
self.print()
print('Initial fitness: {}'.format(self._best_fitness))
depth = 0
for epoch in range(self._generations_number):
new_best_fitness = self.generate_next_population()
print('Generation {}: fitness {}'.format(epoch + 1, new_best_fitness))
if new_best_fitness >= self._best_fitness:
depth += 1
if self.verbose > 0:
print('\tFitness not improved for {} generations'.format(depth))
if depth > self._stop_criterion_depth:
if self.verbose > 0:
print('**********STOP CRITERION DEPTH REACHED**********')
break
elif self._best_fitness - new_best_fitness < 1e-5:
self._best_solution = self._generations_solution[-1]
self._best_fitness = self._all_best_fitness[-1]
depth += 1
if self.verbose > 0:
print('\tFitness improved a little for {} generations'.format(depth))
if depth > self._stop_criterion_depth:
if self.verbose > 0:
print('**********STOP CRITERION DEPTH REACHED**********')
break
else:
self._best_solution = self._generations_solution[-1]
self._best_fitness = self._all_best_fitness[-1]
depth = 0
if self.verbose > 0:
print('\tFitness improved')
return self._best_solution, self._best_fitness, self._all_best_fitness
@classmethod
def population_initialization(cls, df, z_score: float = 1.0, _lambda=0.4, optimize='VaR',
population_size=100, genes_number: int = None):
Chromosome.calculate_sd_e(df, z_score, _lambda, optimize)
new_population = np.random.dirichlet(np.ones(genes_number), size=population_size)
return cls([Chromosome(chromo) for chromo in new_population])
if __name__ == '__main__':
np.random.seed(0)
#optimize function: VaR, VaRp, markovitz, markovitz_sqrt, sharp_coef, sharp_coef_sqrt
config = {'optimize_function': 'VaR',
'population_size': 500, 'offspring_ratio': 0.5,
'crossover_probability': 1.0,
'selection_method': {'type': 'roulette_wheel', 'k': 25},
'crossover_method': {'type': 'random', 'parameters': None},
'mutation_probability': 1.0, 'mutation_ratio': 0.1,
'generations_number': 1000, 'stop_criterion_depth': 100}
# path = 'data/data_concat.csv'
path = 'data/dulieudetai.csv'
df = pd.read_csv(path)
genes_number = len(df.columns) - 1
z_score = 1.0
_lambda = 0.4
if config['optimize_function'] not in ['markovitz', 'markovitz_sqrt']:
population = Population.population_initialization(df, z_score, _lambda,
optimize=config['optimize_function'],
population_size=config['population_size'],
genes_number=genes_number)
solution, fitness, all_best_fitness = population.generate_populations(config=config, verbose=1)
# draw fitness improving line
epochs = np.arange(len(all_best_fitness))
plt.plot(epochs, all_best_fitness)
plt.xlabel('Epoch')
plt.ylabel('Best fitness')
plt.title('Genetic algorithm')
plt.show()
cs_type = 'rd' if config['crossover_method']['type'] == 'random' else '2p'
print(solution._weight)
print(fitness)
if config['optimize_function'] in ['sharp_coef', 'sharp_coef_sqrt']:
fitness = -fitness
fitness = np.asarray([fitness])
solution = np.reshape(solution._weight, (genes_number))
data = np.reshape(np.concatenate([fitness, solution]), (1,-1))
result = pd.DataFrame(data, columns=[config['optimize_function']] + list(df))
result.to_csv('result/result_' + path[path.rfind('/') + 1:-4] + '_' + config['optimize_function'] + '_' +
cs_type + str(config['population_size']) + '.csv', index=False)
else:
pass
|
[
"daohoang.hust@gmail.com"
] |
daohoang.hust@gmail.com
|
3799ecc3442c8d26890be986edb656236b1a0acd
|
5d61acc1f9595047861f76b916cd28a167496f9e
|
/Configuration/GenProduction/python/ThirteenTeV/GMSB/GMSB_L150TeV_Ctau200cm_Pythia8_13TeV_cff.py
|
d64917c620c1efc06a244556425f385a3c34f2ef
|
[
"Apache-2.0"
] |
permissive
|
zhangzc11/cms-gmsb-sps8-configs
|
1bbd3cf2a45ee187f3e41ff51c409976fd59f586
|
838e6aac1d13251e050c0ee8c4ed26ca0c6cef7e
|
refs/heads/master
| 2020-06-24T05:28:46.872990
| 2019-09-24T20:05:33
| 2019-09-24T20:05:33
| 198,862,590
| 0
| 0
|
Apache-2.0
| 2019-07-25T16:03:09
| 2019-07-25T16:03:08
| null |
UTF-8
|
Python
| false
| false
| 58,252
|
py
|
SLHA_TABLE = '''
# ISAJET SUSY parameters in SUSY Les Houches Accord 2 format
# Created by ISALHA 2.0 Last revision: H Baer 27 May 2014
Block SPINFO # Program information
1 ISASUGRA/ISASUSY from ISAJET # Spectrum Calculator
2 7.87 18-JUL-2017 13:55:55 # Version number
Block MODSEL # Model selection
1 2 # Minimal gauge mediated (GMSB) model
Block SMINPUTS # Standard Model inputs
1 1.28000000E+02 # alpha_em^(-1)
2 1.16570000E-05 # G_Fermi
3 1.19999997E-01 # alpha_s(M_Z)
4 9.11699982E+01 # m_{Z}(pole)
5 4.19999981E+00 # m_{b}(m_{b})
6 1.73100006E+02 # m_{top}(pole)
7 1.77699995E+00 # m_{tau}(pole)
Block MINPAR # SUSY breaking input parameters
1 1.50000000E+05 # Lambda scale of soft SSB
2 3.00000000E+05 # M_mess overall messenger scale
3 1.50000000E+01 # tan(beta)
4 1.00000000E+00 # sign(mu)
5 1.00000000E+00 # N_5 messenger index
6 1.93076996E+02 # c_grav gravitino mass factor
51 1.00000000E+00 # N5_1 U(1)_Y messenger index
52 1.00000000E+00 # N5_2 SU(2)_L messenger index
53 1.00000000E+00 # N5_3 SU(3)_C messenger index
101 1.00000000E+00 # Rsl
102 0.00000000E+00 # dmH_d^2
103 0.00000000E+00 # dmH_u^2
104 0.00000000E+00 # d_Y
Block MASS # Scalar and gaugino mass spectrum
# PDG code mass particle
6 1.73100006E+02 # top
24 8.04229965E+01 # W^+
25 1.15257454E+02 # h^0
35 7.75676636E+02 # H^0
36 7.70498718E+02 # A^0
37 7.79765015E+02 # H^+
1000001 1.63067773E+03 # dnl
1000002 1.62869592E+03 # upl
1000003 1.63067773E+03 # stl
1000004 1.62869641E+03 # chl
1000005 1.54123206E+03 # b1
1000006 1.41675330E+03 # t1
1000011 5.33721985E+02 # el-
1000012 5.24308228E+02 # nuel
1000013 5.33721985E+02 # mul-
1000014 5.24308228E+02 # numl
1000015 2.60203400E+02 # tau1
1000016 5.21300720E+02 # nutl
1000021 1.20669397E+03 # glss
1000022 2.12120834E+02 # z1ss
1000023 4.04282532E+02 # z2ss
1000024 4.04834717E+02 # w1ss
1000025 -5.88877930E+02 # z3ss
1000035 6.06467773E+02 # z4ss
1000037 6.06392151E+02 # w2ss
1000039 2.09011978E-06 # gvss
2000001 1.55092615E+03 # dnr
2000002 1.55760840E+03 # upr
2000003 1.55092615E+03 # str
2000004 1.55760913E+03 # chr
2000005 1.56845886E+03 # b2
2000006 1.58141382E+03 # t2
2000011 2.60437805E+02 # er-
2000013 2.60437805E+02 # mur-
2000015 5.31612732E+02 # tau2
Block ALPHA # Effective Higgs mixing parameter
-6.89145699E-02 # alpha
Block STOPMIX # stop mixing matrix
1 1 7.31927305E-02 # O_{11}
1 2 9.97317791E-01 # O_{12}
2 1 -9.97317791E-01 # O_{21}
2 2 7.31927305E-02 # O_{22}
Block SBOTMIX # sbottom mixing matrix
1 1 2.91983813E-01 # O_{11}
1 2 9.56423283E-01 # O_{12}
2 1 -9.56423283E-01 # O_{21}
2 2 2.91983813E-01 # O_{22}
Block STAUMIX # stau mixing matrix
1 1 6.82063177E-02 # O_{11}
1 2 9.97671247E-01 # O_{12}
2 1 -9.97671247E-01 # O_{21}
2 2 6.82063177E-02 # O_{22}
Block NMIX # neutralino mixing matrix
1 1 9.95301366E-01 #
1 2 -1.65267047E-02 #
1 3 8.81438702E-02 #
1 4 -3.65044884E-02 #
2 1 4.38650511E-02 #
2 2 9.52582121E-01 #
2 3 -2.43164480E-01 #
2 4 1.77579150E-01 #
3 1 3.54977734E-02 #
3 2 -4.91524190E-02 #
3 3 -7.03363895E-01 #
3 4 -7.08239377E-01 #
4 1 7.86817744E-02 #
4 2 -2.99830496E-01 #
4 3 -6.62103355E-01 #
4 4 6.82297468E-01 #
Block UMIX # chargino U mixing matrix
1 1 -9.40547407E-01 # U_{11}
1 2 3.39662373E-01 # U_{12}
2 1 -3.39662373E-01 # U_{21}
2 2 -9.40547407E-01 # U_{22}
Block VMIX # chargino V mixing matrix
1 1 -9.69057024E-01 # V_{11}
1 2 2.46836960E-01 # V_{12}
2 1 -2.46836960E-01 # V_{21}
2 2 -9.69057024E-01 # V_{22}
Block GAUGE Q= 1.44477710E+03 #
1 3.57524991E-01 # g`
2 6.52378619E-01 # g_2
3 1.21928000E+00 # g_3
Block YU Q= 1.44477710E+03 #
3 3 8.56151879E-01 # y_t
Block YD Q= 1.44477710E+03 #
3 3 1.93365723E-01 # y_b
Block YE Q= 1.44477710E+03 #
3 3 1.52594313E-01 # y_tau
Block HMIX Q= 1.44477710E+03 # Higgs mixing parameters
1 5.80108521E+02 # mu(Q)
2 1.44371462E+01 # tan(beta)(Q)
3 2.51120468E+02 # Higgs vev at Q
4 5.93668250E+05 # m_A^2(Q)
Block MSOFT Q= 1.44477710E+03 # DRbar SUSY breaking parameters
1 2.17775024E+02 # M_1(Q)
2 4.05397003E+02 # M_2(Q)
3 1.09064551E+03 # M_3(Q)
21 2.45626734E+05 # MHd^2(Q)
22 -3.02578062E+05 # MHu^2(Q)
31 5.27866882E+02 # MeL(Q)
32 5.27866882E+02 # MmuL(Q)
33 5.24929504E+02 # MtauL(Q)
34 2.57610229E+02 # MeR(Q)
35 2.57610229E+02 # MmuR(Q)
36 2.53839172E+02 # MtauR(Q)
41 1.57540186E+03 # MqL1(Q)
42 1.57540186E+03 # MqL2(Q)
43 1.51619727E+03 # MqL3(Q)
44 1.50283862E+03 # MuR(Q)
45 1.50283862E+03 # McR(Q)
46 1.37672119E+03 # MtR(Q)
47 1.49542053E+03 # MdR(Q)
48 1.49542053E+03 # MsR(Q)
49 1.48986389E+03 # MbR(Q)
Block AU Q= 1.44477710E+03 #
1 1 -3.30153137E+02 # A_u
2 2 -3.30153137E+02 # A_c
3 3 -3.30153137E+02 # A_t
Block AD Q= 1.44477710E+03 #
1 1 -3.69239410E+02 # A_d
2 2 -3.69239410E+02 # A_s
3 3 -3.69239410E+02 # A_b
Block AE Q= 1.44477710E+03 #
1 1 -3.95861778E+01 # A_e
2 2 -3.95861778E+01 # A_mu
3 3 -3.95861778E+01 # A_tau
# ISAJET decay tables in SUSY Les Houches accord format
# Created by ISALHD. Last revision: C. Balazs, 2005 May 25
Block DCINFO # Program information
1 ISASUGRA from ISAJET # Spectrum Calculator
2 7.87 18-JUL-2017 13:55:55 # Version number
# PDG Width
DECAY 6 1.48575687E+00 # TP decays
# BR NDA ID1 ID2 ID3 ID4
3.33333313E-01 3 2 -1 5 # TP --> UP DB BT
3.33333313E-01 3 4 -3 5 # TP --> CH SB BT
1.11111097E-01 3 -11 12 5 # TP --> E+ NUE BT
1.11111097E-01 3 -13 14 5 # TP --> MU+ NUM BT
1.11111097E-01 3 -15 16 5 # TP --> TAU+ NUT BT
# PDG Width
DECAY 1000021 2.89846212E-02 # GLSS decays
# BR NDA ID1 ID2 ID3 ID4
6.81992620E-02 3 1000024 1 -2 # GLSS --> W1SS+ DN UB
6.81992620E-02 3 -1000024 2 -1 # GLSS --> W1SS- UP DB
6.81992024E-02 3 1000024 3 -4 # GLSS --> W1SS+ ST CB
6.81992024E-02 3 -1000024 4 -3 # GLSS --> W1SS- CH SB
5.41609712E-02 3 1000024 5 -6 # GLSS --> W1SS+ BT TB
5.41609712E-02 3 -1000024 6 -5 # GLSS --> W1SS- TP BB
2.79660104E-03 3 1000037 1 -2 # GLSS --> W2SS+ DN UB
2.79660104E-03 3 -1000037 2 -1 # GLSS --> W2SS- UP DB
2.79659918E-03 3 1000037 3 -4 # GLSS --> W2SS+ ST CB
2.79659918E-03 3 -1000037 4 -3 # GLSS --> W2SS- CH SB
7.54439682E-02 3 1000037 5 -6 # GLSS --> W2SS+ BT TB
7.54439682E-02 3 -1000037 6 -5 # GLSS --> W2SS- TP BB
3.43112970E-06 2 1000022 21 # GLSS --> Z1SS GL
3.98591757E-02 3 1000022 2 -2 # GLSS --> Z1SS UP UB
1.20224962E-02 3 1000022 1 -1 # GLSS --> Z1SS DN DB
1.20224962E-02 3 1000022 3 -3 # GLSS --> Z1SS ST SB
3.98590676E-02 3 1000022 4 -4 # GLSS --> Z1SS CH CB
1.31045561E-02 3 1000022 5 -5 # GLSS --> Z1SS BT BB
3.79431993E-02 3 1000022 6 -6 # GLSS --> Z1SS TP TB
6.08985778E-04 2 1000023 21 # GLSS --> Z2SS GL
3.47379856E-02 3 1000023 2 -2 # GLSS --> Z2SS UP UB
3.33178937E-02 3 1000023 1 -1 # GLSS --> Z2SS DN DB
3.33178937E-02 3 1000023 3 -3 # GLSS --> Z2SS ST SB
3.47379297E-02 3 1000023 4 -4 # GLSS --> Z2SS CH CB
4.32726182E-02 3 1000023 5 -5 # GLSS --> Z2SS BT BB
1.50960684E-02 3 1000023 6 -6 # GLSS --> Z2SS TP TB
4.99100797E-03 2 1000025 21 # GLSS --> Z3SS GL
2.02833580E-05 3 1000025 2 -2 # GLSS --> Z3SS UP UB
2.45927531E-05 3 1000025 1 -1 # GLSS --> Z3SS DN DB
2.45927531E-05 3 1000025 3 -3 # GLSS --> Z3SS ST SB
2.02833071E-05 3 1000025 4 -4 # GLSS --> Z3SS CH CB
4.56616702E-03 3 1000025 5 -5 # GLSS --> Z3SS BT BB
2.90887393E-02 3 1000025 6 -6 # GLSS --> Z3SS TP TB
4.40470735E-03 2 1000035 21 # GLSS --> Z4SS GL
1.38745841E-03 3 1000035 2 -2 # GLSS --> Z4SS UP UB
1.60228217E-03 3 1000035 1 -1 # GLSS --> Z4SS DN DB
1.60228217E-03 3 1000035 3 -3 # GLSS --> Z4SS ST SB
1.38745573E-03 3 1000035 4 -4 # GLSS --> Z4SS CH CB
5.76041080E-03 3 1000035 5 -5 # GLSS --> Z4SS BT BB
5.20226397E-02 3 1000035 6 -6 # GLSS --> Z4SS TP TB
2.32628083E-11 2 1000039 21 # GLSS --> GVSS GL
# PDG Width
DECAY 1000002 3.75090065E+01 # UPL decays
# BR NDA ID1 ID2 ID3 ID4
4.85655060E-03 2 1000022 2 # UPL --> Z1SS UP
1.48518354E-01 2 1000023 2 # UPL --> Z2SS UP
2.51214457E-04 2 1000025 2 # UPL --> Z3SS UP
1.10465297E-02 2 1000035 2 # UPL --> Z4SS UP
5.16632020E-01 2 1000021 2 # UPL --> GLSS UP
3.02169770E-01 2 1000024 1 # UPL --> W1SS+ DN
1.65255554E-02 2 1000037 1 # UPL --> W2SS+ DN
# PDG Width
DECAY 1000001 3.75598946E+01 # DNL decays
# BR NDA ID1 ID2 ID3 ID4
6.98527927E-03 2 1000022 1 # DNL --> Z1SS DN
1.43615767E-01 2 1000023 1 # DNL --> Z2SS DN
4.28061001E-04 2 1000025 1 # DNL --> Z3SS DN
1.33994827E-02 2 1000035 1 # DNL --> Z4SS DN
5.19556582E-01 2 1000021 1 # DNL --> GLSS DN
2.84702957E-01 2 -1000024 2 # DNL --> W1SS- UP
3.13118994E-02 2 -1000037 2 # DNL --> W2SS- UP
# PDG Width
DECAY 1000003 3.75598755E+01 # STL decays
# BR NDA ID1 ID2 ID3 ID4
6.98528299E-03 2 1000022 3 # STL --> Z1SS ST
1.43615842E-01 2 1000023 3 # STL --> Z2SS ST
4.28061234E-04 2 1000025 3 # STL --> Z3SS ST
1.33994892E-02 2 1000035 3 # STL --> Z4SS ST
5.19556820E-01 2 1000021 3 # STL --> GLSS ST
2.84702629E-01 2 -1000024 4 # STL --> W1SS- CH
3.13118584E-02 2 -1000037 4 # STL --> W2SS- CH
# PDG Width
DECAY 1000004 3.75088997E+01 # CHL decays
# BR NDA ID1 ID2 ID3 ID4
4.85655898E-03 2 1000022 4 # CHL --> Z1SS CH
1.48518592E-01 2 1000023 4 # CHL --> Z2SS CH
2.51214835E-04 2 1000025 4 # CHL --> Z3SS CH
1.10465456E-02 2 1000035 4 # CHL --> Z4SS CH
5.16630769E-01 2 1000021 4 # CHL --> GLSS CH
3.02170724E-01 2 1000024 3 # CHL --> W1SS+ ST
1.65256113E-02 2 1000037 3 # CHL --> W2SS+ ST
# PDG Width
DECAY 1000005 1.87745609E+01 # BT1 decays
# BR NDA ID1 ID2 ID3 ID4
4.59881872E-02 2 1000022 5 # BT1 --> Z1SS BT
4.22851183E-02 2 1000023 5 # BT1 --> Z2SS BT
2.00922079E-02 2 1000025 5 # BT1 --> Z3SS BT
8.68590642E-03 2 1000035 5 # BT1 --> Z4SS BT
7.28064835E-01 2 1000021 5 # BT1 --> GLSS BT
8.33717659E-02 2 -1000024 6 # BT1 --> W1SS- TP
7.14160725E-02 2 -1000037 6 # BT1 --> W2SS- TP
9.58872697E-05 2 -24 1000006 # BT1 --> W- TP1
# PDG Width
DECAY 1000006 3.33991432E+01 # TP1 decays
# BR NDA ID1 ID2 ID3 ID4
1.13187879E-01 2 1000021 6 # TP1 --> GLSS TP
8.86589140E-02 2 1000022 6 # TP1 --> Z1SS TP
2.25969758E-02 2 1000023 6 # TP1 --> Z2SS TP
2.01451853E-01 2 1000025 6 # TP1 --> Z3SS TP
1.68596715E-01 2 1000035 6 # TP1 --> Z4SS TP
4.54972908E-02 2 1000024 5 # TP1 --> W1SS+ BT
3.60010356E-01 2 1000037 5 # TP1 --> W2SS+ BT
# PDG Width
DECAY 2000002 1.80091648E+01 # UPR decays
# BR NDA ID1 ID2 ID3 ID4
1.86564982E-01 2 1000022 2 # UPR --> Z1SS UP
3.27219284E-04 2 1000023 2 # UPR --> Z2SS UP
1.80972347E-04 2 1000025 2 # UPR --> Z3SS UP
8.71225609E-04 2 1000035 2 # UPR --> Z4SS UP
8.12055588E-01 2 1000021 2 # UPR --> GLSS UP
# PDG Width
DECAY 2000001 1.50348740E+01 # DNR decays
# BR NDA ID1 ID2 ID3 ID4
5.56102730E-02 2 1000022 1 # DNR --> Z1SS DN
9.74459035E-05 2 1000023 1 # DNR --> Z2SS DN
5.38055538E-05 2 1000025 1 # DNR --> Z3SS DN
2.58973829E-04 2 1000035 1 # DNR --> Z4SS DN
9.43979502E-01 2 1000021 1 # DNR --> GLSS DN
# PDG Width
DECAY 2000003 1.50348740E+01 # STR decays
# BR NDA ID1 ID2 ID3 ID4
5.56102730E-02 2 1000022 3 # STR --> Z1SS ST
9.74459035E-05 2 1000023 3 # STR --> Z2SS ST
5.38055538E-05 2 1000025 3 # STR --> Z3SS ST
2.58973829E-04 2 1000035 3 # STR --> Z4SS ST
9.43979502E-01 2 1000021 3 # STR --> GLSS ST
# PDG Width
DECAY 2000004 1.80090694E+01 # CHR decays
# BR NDA ID1 ID2 ID3 ID4
1.86565772E-01 2 1000022 4 # CHR --> Z1SS CH
3.27220652E-04 2 1000023 4 # CHR --> Z2SS CH
1.80973060E-04 2 1000025 4 # CHR --> Z3SS CH
8.71229102E-04 2 1000035 4 # CHR --> Z4SS CH
8.12054813E-01 2 1000021 4 # CHR --> GLSS CH
# PDG Width
DECAY 2000005 4.63343010E+01 # BT2 decays
# BR NDA ID1 ID2 ID3 ID4
5.09223854E-03 2 1000022 5 # BT2 --> Z1SS BT
9.59487781E-02 2 1000023 5 # BT2 --> Z2SS BT
1.10309739E-02 2 1000025 5 # BT2 --> Z3SS BT
2.32683122E-02 2 1000035 5 # BT2 --> Z4SS BT
3.28632861E-01 2 1000021 5 # BT2 --> GLSS BT
1.96992010E-01 2 -1000024 6 # BT2 --> W1SS- TP
3.38025898E-01 2 -1000037 6 # BT2 --> W2SS- TP
1.00899034E-03 2 -24 1000006 # BT2 --> W- TP1
# PDG Width
DECAY 2000006 4.73359070E+01 # TP2 decays
# BR NDA ID1 ID2 ID3 ID4
2.76327282E-01 2 1000021 6 # TP2 --> GLSS TP
2.20960647E-01 2 1000024 5 # TP2 --> W1SS+ BT
3.99595797E-02 2 1000037 5 # TP2 --> W2SS+ BT
6.51078648E-04 2 23 1000006 # TP2 --> Z0 TP1
2.47832318E-03 2 25 1000006 # TP2 --> HL0 TP1
3.87403346E-03 2 1000022 6 # TP2 --> Z1SS TP
1.12571657E-01 2 1000023 6 # TP2 --> Z2SS TP
1.66292906E-01 2 1000025 6 # TP2 --> Z3SS TP
1.76884547E-01 2 1000035 6 # TP2 --> Z4SS TP
# PDG Width
DECAY 1000011 1.55445719E+00 # EL- decays
# BR NDA ID1 ID2 ID3 ID4
2.88408488E-01 2 1000022 11 # EL- --> Z1SS E-
2.50464320E-01 2 1000023 11 # EL- --> Z2SS E-
4.61126924E-01 2 -1000024 12 # EL- --> W1SS- NUE
1.41428131E-07 3 1000015 11 -15 # EL- --> TAU1- E- TAU+
1.52477625E-07 3 -1000015 11 15 # EL- --> TAU1+ E- TAU-
7.34244581E-15 2 11 1000039 # EL- --> E- GVSS
# PDG Width
DECAY 1000013 1.55445707E+00 # MUL- decays
# BR NDA ID1 ID2 ID3 ID4
2.88408488E-01 2 1000022 13 # MUL- --> Z1SS MU-
2.50464261E-01 2 1000023 13 # MUL- --> Z2SS MU-
4.61126953E-01 2 -1000024 14 # MUL- --> W1SS- NUM
1.41428131E-07 3 1000015 13 -15 # MUL- --> TAU1- MU- TAU+
1.52477639E-07 3 -1000015 13 15 # MUL- --> TAU1+ MU- TAU-
7.34244666E-15 2 13 1000039 # MUL- --> MU- GVSS
# PDG Width
DECAY 1000015 1.47312775E-01 # TAU1- decays
# BR NDA ID1 ID2 ID3 ID4
1.00000000E+00 2 1000022 15 # TAU1- --> Z1SS TAU-
2.13347525E-15 2 15 1000039 # TAU1- --> TAU- GVSS
# PDG Width
DECAY 1000012 1.47892344E+00 # NUEL decays
# BR NDA ID1 ID2 ID3 ID4
3.31502914E-01 2 1000022 12 # NUEL --> Z1SS NUE
2.11466834E-01 2 1000023 12 # NUEL --> Z2SS NUE
4.57028717E-01 2 1000024 11 # NUEL --> W1SS+ E-
2.48563623E-07 3 1000015 12 -15 # NUEL --> TAU1- NUE TAU+
1.87748796E-07 3 -1000015 12 15 # NUEL --> TAU1+ NUE TAU-
1.13648616E-06 3 -1000015 11 16 # NUEL --> TAU1+ E- NUT
7.06043932E-15 2 12 1000039 # NUEL --> NUE GVSS
# PDG Width
DECAY 1000014 1.47892320E+00 # NUML decays
# BR NDA ID1 ID2 ID3 ID4
3.31502974E-01 2 1000022 14 # NUML --> Z1SS NUM
2.11466864E-01 2 1000023 14 # NUML --> Z2SS NUM
4.57028598E-01 2 1000024 13 # NUML --> W1SS+ MU-
2.48563680E-07 3 1000015 14 -15 # NUML --> TAU1- NUM TAU+
1.87748824E-07 3 -1000015 14 15 # NUML --> TAU1+ NUM TAU-
1.13722251E-06 3 -1000015 13 16 # NUML --> TAU1+ MU- NUT
7.06044059E-15 2 14 1000039 # NUML --> NUM GVSS
# PDG Width
DECAY 1000016 1.59188509E+00 # NUTL decays
# BR NDA ID1 ID2 ID3 ID4
3.04827154E-01 2 1000022 16 # NUTL --> Z1SS NUT
1.88760757E-01 2 1000023 16 # NUTL --> Z2SS NUT
4.09750283E-01 2 1000024 15 # NUTL --> W1SS+ TAU-
9.66611281E-02 2 24 1000015 # NUTL --> W+ TAU1-
4.12666878E-07 3 -1000015 16 15 # NUTL --> TAU1+ NUT TAU-
1.64333656E-07 3 1000015 16 -15 # NUTL --> TAU1- NUT TAU+
6.37344054E-15 2 16 1000039 # NUTL --> NUT GVSS
# PDG Width
DECAY 2000011 1.48698553E-01 # ER- decays
# BR NDA ID1 ID2 ID3 ID4
1.00000000E+00 2 1000022 11 # ER- --> Z1SS E-
5.60248591E-28 3 1000015 12 -16 # ER- --> TAU1- NUE ANUT
2.12352557E-15 2 11 1000039 # ER- --> E- GVSS
# PDG Width
DECAY 2000013 1.48698121E-01 # MUR- decays
# BR NDA ID1 ID2 ID3 ID4
1.00000000E+00 2 1000022 13 # MUR- --> Z1SS MU-
2.38478316E-23 3 1000015 14 -16 # MUR- --> TAU1- NUM ANUT
2.12353087E-15 2 13 1000039 # MUR- --> MU- GVSS
# PDG Width
DECAY 2000015 1.69303417E+00 # TAU2- decays
# BR NDA ID1 ID2 ID3 ID4
2.65493393E-01 2 1000022 15 # TAU2- --> Z1SS TAU-
2.21787930E-01 2 1000023 15 # TAU2- --> Z2SS TAU-
4.05920714E-01 2 -1000024 16 # TAU2- --> W1SS- NUT
4.87918109E-02 2 23 1000015 # TAU2- --> Z0 TAU1-
5.80061525E-02 2 25 1000015 # TAU2- --> HL0 TAU1-
# PDG Width
DECAY 1000022 9.86077310E-17 # Z1SS decays
# BR NDA ID1 ID2 ID3 ID4
8.57347310E-01 2 1000039 22 # Z1SS --> GVSS GM
1.83872022E-02 3 1000039 11 -11 # Z1SS --> GVSS E- E+
1.24135055E-01 2 1000039 23 # Z1SS --> GVSS Z0
1.30394241E-04 2 1000039 25 # Z1SS --> GVSS HL0
# PDG Width
DECAY 1000023 4.29373942E-02 # Z2SS decays
# BR NDA ID1 ID2 ID3 ID4
2.12917098E-06 2 1000022 22 # Z2SS --> Z1SS GM
5.33094369E-02 2 1000022 23 # Z2SS --> Z1SS Z0
2.71946504E-07 3 1000022 2 -2 # Z2SS --> Z1SS UP UB
3.67156133E-07 3 1000022 1 -1 # Z2SS --> Z1SS DN DB
3.67156133E-07 3 1000022 3 -3 # Z2SS --> Z1SS ST SB
2.71946107E-07 3 1000022 4 -4 # Z2SS --> Z1SS CH CB
1.90382138E-06 3 1000022 5 -5 # Z2SS --> Z1SS BT BB
1.80427989E-04 3 1000022 11 -11 # Z2SS --> Z1SS E- E+
1.80427989E-04 3 1000022 13 -13 # Z2SS --> Z1SS MU- MU+
1.82706077E-04 3 1000022 15 -15 # Z2SS --> Z1SS TAU- TAU+
2.06208715E-04 3 1000022 12 -12 # Z2SS --> Z1SS NUE ANUE
2.06208715E-04 3 1000022 14 -14 # Z2SS --> Z1SS NUM ANUM
2.14074622E-04 3 1000022 16 -16 # Z2SS --> Z1SS NUT ANUT
5.60157299E-01 2 1000022 25 # Z2SS --> Z1SS HL0
1.57681685E-02 2 2000011 -11 # Z2SS --> ER- E+
1.57681685E-02 2 -2000011 11 # Z2SS --> ER+ E-
1.57681685E-02 2 2000013 -13 # Z2SS --> MUR- MU+
1.57681685E-02 2 -2000013 13 # Z2SS --> MUR+ MU-
1.61142647E-01 2 1000015 -15 # Z2SS --> TAU1- TAU+
1.61142647E-01 2 -1000015 15 # Z2SS --> TAU1+ TAU-
1.63911802E-14 2 1000039 22 # Z2SS --> GVSS GM
3.69061758E-16 3 1000039 11 -11 # Z2SS --> GVSS E- E+
3.66231873E-14 2 1000039 23 # Z2SS --> GVSS Z0
6.07606220E-16 2 1000039 25 # Z2SS --> GVSS HL0
# PDG Width
DECAY 1000025 2.36257219E+00 # Z3SS decays
# BR NDA ID1 ID2 ID3 ID4
4.90372010E-08 2 1000022 22 # Z3SS --> Z1SS GM
4.26831633E-07 2 1000023 22 # Z3SS --> Z2SS GM
2.73928016E-01 2 1000024 -24 # Z3SS --> W1SS+ W-
2.73928016E-01 2 -1000024 24 # Z3SS --> W1SS- W+
1.34958193E-01 2 1000022 23 # Z3SS --> Z1SS Z0
2.46930555E-01 2 1000023 23 # Z3SS --> Z2SS Z0
7.98537458E-10 3 1000022 2 -2 # Z3SS --> Z1SS UP UB
2.45507031E-10 3 1000022 1 -1 # Z3SS --> Z1SS DN DB
2.45507031E-10 3 1000022 3 -3 # Z3SS --> Z1SS ST SB
7.98535960E-10 3 1000022 4 -4 # Z3SS --> Z1SS CH CB
1.95150960E-06 3 1000022 5 -5 # Z3SS --> Z1SS BT BB
3.20959487E-07 3 1000022 15 -15 # Z3SS --> Z1SS TAU- TAU+
1.45638751E-10 3 1000023 2 -2 # Z3SS --> Z2SS UP UB
2.38382453E-10 3 1000023 1 -1 # Z3SS --> Z2SS DN DB
2.38382453E-10 3 1000023 3 -3 # Z3SS --> Z2SS ST SB
1.45638571E-10 3 1000023 4 -4 # Z3SS --> Z2SS CH CB
2.55241190E-07 3 1000023 5 -5 # Z3SS --> Z2SS BT BB
3.88988397E-08 3 1000023 15 -15 # Z3SS --> Z2SS TAU- TAU+
2.22993959E-02 2 1000022 25 # Z3SS --> Z1SS HL0
5.02910931E-03 2 1000023 25 # Z3SS --> Z2SS HL0
1.46951388E-05 2 1000011 -11 # Z3SS --> EL- E+
1.46951388E-05 2 -1000011 11 # Z3SS --> EL+ E-
1.46951143E-05 2 1000013 -13 # Z3SS --> MUL- MU+
1.46951143E-05 2 -1000013 13 # Z3SS --> MUL+ MU-
5.16845612E-04 2 2000011 -11 # Z3SS --> ER- E+
5.16845612E-04 2 -2000011 11 # Z3SS --> ER+ E-
5.16845612E-04 2 2000013 -13 # Z3SS --> MUR- MU+
5.16845612E-04 2 -2000013 13 # Z3SS --> MUR+ MU-
1.89872906E-02 2 1000015 -15 # Z3SS --> TAU1- TAU+
1.89872906E-02 2 -1000015 15 # Z3SS --> TAU1+ TAU-
1.08305865E-03 2 2000015 -15 # Z3SS --> TAU2- TAU+
1.08305865E-03 2 -2000015 15 # Z3SS --> TAU2+ TAU-
1.06256768E-04 2 1000012 -12 # Z3SS --> NUEL ANUE
1.06256768E-04 2 -1000012 12 # Z3SS --> ANUEL NUE
1.06256768E-04 2 1000014 -14 # Z3SS --> NUML ANUM
1.06256768E-04 2 -1000014 14 # Z3SS --> ANUML NUM
1.15757677E-04 2 1000016 -16 # Z3SS --> NUTL ANUT
1.15757677E-04 2 -1000016 16 # Z3SS --> ANUTL NUT
4.36517271E-19 2 1000039 22 # Z3SS --> GVSS GM
1.01007501E-20 3 1000039 11 -11 # Z3SS --> GVSS E- E+
1.58674534E-15 2 1000039 23 # Z3SS --> GVSS Z0
1.92568049E-15 2 1000039 25 # Z3SS --> GVSS HL0
# PDG Width
DECAY 1000035 2.90244341E+00 # Z4SS decays
# BR NDA ID1 ID2 ID3 ID4
1.62100182E-08 2 1000022 22 # Z4SS --> Z1SS GM
1.17369737E-07 2 1000023 22 # Z4SS --> Z2SS GM
2.00787897E-09 2 1000025 22 # Z4SS --> Z3SS GM
2.99590379E-01 2 1000024 -24 # Z4SS --> W1SS+ W-
2.99590379E-01 2 -1000024 24 # Z4SS --> W1SS- W+
2.02624910E-02 2 1000022 23 # Z4SS --> Z1SS Z0
8.55493546E-03 2 1000023 23 # Z4SS --> Z2SS Z0
1.64892597E-08 3 1000022 2 -2 # Z4SS --> Z1SS UP UB
1.51271422E-08 3 1000022 1 -1 # Z4SS --> Z1SS DN DB
1.51271422E-08 3 1000022 3 -3 # Z4SS --> Z1SS ST SB
1.64892313E-08 3 1000022 4 -4 # Z4SS --> Z1SS CH CB
1.50134576E-06 3 1000022 5 -5 # Z4SS --> Z1SS BT BB
2.45239505E-07 3 1000022 15 -15 # Z4SS --> Z1SS TAU- TAU+
2.32405206E-08 3 1000023 2 -2 # Z4SS --> Z2SS UP UB
2.70900067E-08 3 1000023 1 -1 # Z4SS --> Z2SS DN DB
2.70900067E-08 3 1000023 3 -3 # Z4SS --> Z2SS ST SB
2.32404886E-08 3 1000023 4 -4 # Z4SS --> Z2SS CH CB
2.47776029E-07 3 1000023 5 -5 # Z4SS --> Z2SS BT BB
3.27777769E-08 3 1000023 15 -15 # Z4SS --> Z2SS TAU- TAU+
6.47571170E-08 3 1000025 2 -2 # Z4SS --> Z3SS UP UB
8.35220035E-08 3 1000025 1 -1 # Z4SS --> Z3SS DN DB
8.35220035E-08 3 1000025 3 -3 # Z4SS --> Z3SS ST SB
6.47571170E-08 3 1000025 4 -4 # Z4SS --> Z3SS CH CB
3.27969190E-08 3 1000025 5 -5 # Z4SS --> Z3SS BT BB
1.89447196E-08 3 1000025 11 -11 # Z4SS --> Z3SS E- E+
1.89447196E-08 3 1000025 13 -13 # Z4SS --> Z3SS MU- MU+
1.65280110E-08 3 1000025 15 -15 # Z4SS --> Z3SS TAU- TAU+
3.76940363E-08 3 1000025 12 -12 # Z4SS --> Z3SS NUE ANUE
3.76940363E-08 3 1000025 14 -14 # Z4SS --> Z3SS NUM ANUM
3.76940363E-08 3 1000025 16 -16 # Z4SS --> Z3SS NUT ANUT
1.01677276E-01 2 1000022 25 # Z4SS --> Z1SS HL0
2.01991379E-01 2 1000023 25 # Z4SS --> Z2SS HL0
1.47241028E-03 2 1000011 -11 # Z4SS --> EL- E+
1.47241028E-03 2 -1000011 11 # Z4SS --> EL+ E-
1.47240877E-03 2 1000013 -13 # Z4SS --> MUL- MU+
1.47240877E-03 2 -1000013 13 # Z4SS --> MUL+ MU-
2.18827138E-03 2 2000011 -11 # Z4SS --> ER- E+
2.18827138E-03 2 -2000011 11 # Z4SS --> ER+ E-
2.18827138E-03 2 2000013 -13 # Z4SS --> MUR- MU+
2.18827138E-03 2 -2000013 13 # Z4SS --> MUR+ MU-
1.38089955E-02 2 1000015 -15 # Z4SS --> TAU1- TAU+
1.38089955E-02 2 -1000015 15 # Z4SS --> TAU1+ TAU-
2.89983954E-03 2 2000015 -15 # Z4SS --> TAU2- TAU+
2.89983954E-03 2 -2000015 15 # Z4SS --> TAU2+ TAU-
3.30250943E-03 2 1000012 -12 # Z4SS --> NUEL ANUE
3.30250943E-03 2 -1000012 12 # Z4SS --> ANUEL NUE
3.30250943E-03 2 1000014 -14 # Z4SS --> NUML ANUM
3.30250943E-03 2 -1000014 14 # Z4SS --> ANUML NUM
3.52986553E-03 2 1000016 -16 # Z4SS --> NUTL ANUT
3.52986553E-03 2 -1000016 16 # Z4SS --> ANUTL NUT
4.24229867E-17 2 1000039 22 # Z4SS --> GVSS GM
9.83712653E-19 3 1000039 11 -11 # Z4SS --> GVSS E- E+
2.40041577E-15 2 1000039 23 # Z4SS --> GVSS Z0
1.29672814E-15 2 1000039 25 # Z4SS --> GVSS HL0
# PDG Width
DECAY 1000024 3.65882814E-02 # W1SS+ decays
# BR NDA ID1 ID2 ID3 ID4
7.50197671E-07 3 1000022 2 -1 # W1SS+ --> Z1SS UP DB
7.50197216E-07 3 1000022 4 -3 # W1SS+ --> Z1SS CH SB
4.65550664E-04 3 1000022 -11 12 # W1SS+ --> Z1SS E+ NUE
4.65550664E-04 3 1000022 -13 14 # W1SS+ --> Z1SS MU+ NUM
4.78234637E-04 3 1000022 -15 16 # W1SS+ --> Z1SS TAU+ NUT
6.60680115E-01 2 1000022 24 # W1SS+ --> Z1SS W+
7.23206217E-13 3 1000023 -11 12 # W1SS+ --> Z2SS E+ NUE
7.23206217E-13 3 1000023 -13 14 # W1SS+ --> Z2SS MU+ NUM
3.37909043E-01 2 -1000015 16 # W1SS+ --> TAU1+ NUT
# PDG Width
DECAY 1000037 2.59318376E+00 # W2SS+ decays
# BR NDA ID1 ID2 ID3 ID4
2.53174282E-08 3 1000022 2 -1 # W2SS+ --> Z1SS UP DB
2.53174175E-08 3 1000022 4 -3 # W2SS+ --> Z1SS CH SB
2.37249651E-07 3 1000022 -15 16 # W2SS+ --> Z1SS TAU+ NUT
1.18757665E-01 2 1000022 24 # W2SS+ --> Z1SS W+
2.00113295E-08 3 1000023 2 -1 # W2SS+ --> Z2SS UP DB
2.00113188E-08 3 1000023 4 -3 # W2SS+ --> Z2SS CH SB
6.94330566E-08 3 1000023 -15 16 # W2SS+ --> Z2SS TAU+ NUT
2.96759427E-01 2 1000023 24 # W2SS+ --> Z2SS W+
2.47575201E-07 3 1000025 2 -1 # W2SS+ --> Z3SS UP DB
2.47575201E-07 3 1000025 4 -3 # W2SS+ --> Z3SS CH SB
8.25228241E-08 3 1000025 -11 12 # W2SS+ --> Z3SS E+ NUE
8.25228241E-08 3 1000025 -13 14 # W2SS+ --> Z3SS MU+ NUM
8.25228739E-08 3 1000025 -15 16 # W2SS+ --> Z3SS TAU+ NUT
3.82075901E-03 2 1000012 -11 # W2SS+ --> NUEL E+
3.82075645E-03 2 1000014 -13 # W2SS+ --> NUML MU+
7.30222231E-03 2 1000016 -15 # W2SS+ --> NUTL TAU+
5.76530071E-03 2 -1000011 12 # W2SS+ --> EL+ NUE
5.76530071E-03 2 -1000013 14 # W2SS+ --> MUL+ NUM
2.64926776E-02 2 -1000015 16 # W2SS+ --> TAU1+ NUT
6.61414070E-03 2 -2000015 16 # W2SS+ --> TAU2+ NUT
2.80126721E-01 2 1000024 23 # W2SS+ --> W1SS+ Z0
2.63940692E-08 3 1000024 1 -1 # W2SS+ --> W1SS+ DN DB
2.63940390E-08 3 1000024 3 -3 # W2SS+ --> W1SS+ ST SB
4.68273704E-08 3 1000024 2 -2 # W2SS+ --> W1SS+ UP UB
4.68273704E-08 3 1000024 4 -4 # W2SS+ --> W1SS+ CH CB
2.44773641E-01 2 1000024 25 # W2SS+ --> W1SS+ HL0
# PDG Width
DECAY 25 3.10336798E-03 # HL0 decays
# BR NDA ID1 ID2 ID3 ID4
6.76349821E-09 2 11 -11 # HL0 --> E- E+
2.85565649E-04 2 13 -13 # HL0 --> MU- MU+
8.16741660E-02 2 15 -15 # HL0 --> TAU- TAU+
8.48861418E-06 2 1 -1 # HL0 --> DN DB
3.42982542E-03 2 3 -3 # HL0 --> ST SB
7.34183073E-01 2 5 -5 # HL0 --> BT BB
2.53373923E-06 2 2 -2 # HL0 --> UP UB
4.76811305E-02 2 4 -4 # HL0 --> CH CB
2.42134673E-03 2 22 22 # HL0 --> GM GM
5.16557023E-02 2 21 21 # HL0 --> GL GL
4.06437693E-03 3 24 11 -12 # HL0 --> W+ E- ANUE
4.06437693E-03 3 24 13 -14 # HL0 --> W+ MU- ANUM
4.06437693E-03 3 24 15 -16 # HL0 --> W+ TAU- ANUT
1.21931303E-02 3 24 -2 1 # HL0 --> W+ UB DN
1.21931303E-02 3 24 -4 3 # HL0 --> W+ CB ST
4.06437693E-03 3 -24 -11 12 # HL0 --> W- E+ NUE
4.06437693E-03 3 -24 -13 14 # HL0 --> W- MU+ NUM
4.06437693E-03 3 -24 -15 16 # HL0 --> W- TAU+ NUT
1.21931303E-02 3 -24 2 -1 # HL0 --> W- UP DB
1.21931303E-02 3 -24 4 -3 # HL0 --> W- CH SB
3.76073236E-04 3 23 12 -12 # HL0 --> Z0 NUE ANUE
3.76073236E-04 3 23 14 -14 # HL0 --> Z0 NUM ANUM
3.76073236E-04 3 23 16 -16 # HL0 --> Z0 NUT ANUT
1.89273866E-04 3 23 11 -11 # HL0 --> Z0 E- E+
1.89273866E-04 3 23 13 -13 # HL0 --> Z0 MU- MU+
1.89273866E-04 3 23 15 -15 # HL0 --> Z0 TAU- TAU+
6.48436253E-04 3 23 2 -2 # HL0 --> Z0 UP UB
6.48436253E-04 3 23 4 -4 # HL0 --> Z0 CH CB
8.35345592E-04 3 23 1 -1 # HL0 --> Z0 DN DB
8.35345592E-04 3 23 3 -3 # HL0 --> Z0 ST SB
8.35345592E-04 3 23 5 -5 # HL0 --> Z0 BT BB
# PDG Width
DECAY 35 2.82916927E+00 # HH0 decays
# BR NDA ID1 ID2 ID3 ID4
1.04799387E-08 2 11 -11 # HH0 --> E- E+
4.42481862E-04 2 13 -13 # HH0 --> MU- MU+
1.26729608E-01 2 15 -15 # HH0 --> TAU- TAU+
1.28664215E-05 2 1 -1 # HH0 --> DN DB
5.19868592E-03 2 3 -3 # HH0 --> ST SB
8.00406814E-01 2 5 -5 # HH0 --> BT BB
8.71726441E-11 2 2 -2 # HH0 --> UP UB
1.26653276E-06 2 4 -4 # HH0 --> CH CB
5.16276322E-02 2 6 -6 # HH0 --> TP TB
3.22989422E-07 2 22 22 # HH0 --> GM GM
4.18609161E-05 2 21 21 # HH0 --> GL GL
2.77070299E-04 2 24 -24 # HH0 --> W+ W-
1.39628508E-04 2 23 23 # HH0 --> Z0 Z0
3.13858525E-03 2 1000022 1000022 # HH0 --> Z1SS Z1SS
1.09394677E-02 2 1000022 1000023 # HH0 --> Z1SS Z2SS
9.61998187E-04 2 25 25 # HH0 --> HL0 HL0
3.06083748E-05 2 2000011 -2000011 # HH0 --> ER- ER+
3.05700305E-05 2 2000013 -2000013 # HH0 --> MUR- MUR+
2.05029100E-05 2 1000015 -1000015 # HH0 --> TAU1- TAU1+
# PDG Width
DECAY 36 2.89333510E+00 # HA0 decays
# BR NDA ID1 ID2 ID3 ID4
1.01823590E-08 2 11 -11 # HA0 --> E- E+
4.29917563E-04 2 13 -13 # HA0 --> MU- MU+
1.23133682E-01 2 15 -15 # HA0 --> TAU- TAU+
1.25018096E-05 2 1 -1 # HA0 --> DN DB
5.05136419E-03 2 3 -3 # HA0 --> ST SB
7.77785242E-01 2 5 -5 # HA0 --> BT BB
7.90159535E-11 2 2 -2 # HA0 --> UP UB
1.14896170E-06 2 4 -4 # HA0 --> CH CB
5.68091162E-02 2 6 -6 # HA0 --> TP TB
8.12995609E-07 2 22 22 # HA0 --> GM GM
1.06506835E-04 2 21 21 # HA0 --> GL GL
4.85492684E-03 2 1000022 1000022 # HA0 --> Z1SS Z1SS
3.15583833E-02 2 1000022 1000023 # HA0 --> Z1SS Z2SS
2.56249274E-04 2 25 23 # HA0 --> HL0 Z0
# PDG Width
DECAY 37 2.52115512E+00 # H+ decays
# BR NDA ID1 ID2 ID3 ID4
1.18260424E-08 2 12 -11 # H+ --> NUE E+
4.99316782E-04 2 14 -13 # H+ --> NUM MU+
1.43010512E-01 2 16 -15 # H+ --> NUT TAU+
1.33165595E-05 2 2 -1 # H+ --> UP DB
5.38171874E-03 2 4 -3 # H+ --> CH SB
8.04553807E-01 2 6 -5 # H+ --> TP BB
4.62366156E-02 2 1000024 1000022 # H+ --> W1SS+ Z1SS
3.04667803E-04 2 25 24 # H+ --> HL0 W+
'''
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
from Configuration.Generator.PSweightsPythia.PythiaPSweightsSettings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP5SettingsBlock,
pythia8PSweightsSettingsBlock,
processParameters = cms.vstring(
'ParticleDecays:limitTau0 = off',
'ParticleDecays:tau0Max = 10000000',
'SUSY:all on',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP5Settings',
'pythia8PSweightsSettings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"zzhang2@caltech.edu"
] |
zzhang2@caltech.edu
|
b88fa8018053bde432a54f39bb27e9597983acb1
|
6d6996ac49c162217dfaf177d7c403402ebc312c
|
/Part3_Classification/Section17_Kernel_SVM/kernel_svm.py
|
54949b6ff7e9b5ff6a6d0302064f55733ca04251
|
[] |
no_license
|
skols/machine-learning-a-to-z
|
8b8c777f53a7d04ad672dd676e0527472c5c48ba
|
524de4c65240f4a1ad961377d10623bf4023e1cb
|
refs/heads/master
| 2021-07-12T16:33:51.608303
| 2017-10-15T15:57:45
| 2017-10-15T15:57:45
| 103,762,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,465
|
py
|
# Kernel SVM
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
# from sklearn.cross_validation import train_test_split # deprecated
from sklearn.model_selection import train_test_split # splitting the dataset
from sklearn.preprocessing import StandardScaler # feature scaling
from sklearn.metrics import confusion_matrix
from matplotlib.colors import ListedColormap # prediction regions plot
from sklearn.svm import SVC
# Importing the dataset
os.chdir("C:/Development/Courses/Kirill Eremenko Data Science Courses/\
Machine_Learning_A-Z/Part3_Classification/Section17_Kernel_SVM")
dataset = pd.read_csv("Social_Network_Ads.csv")
# Create a matrix of features with Age and EstimatedSalary
X = dataset.iloc[:, [2, 3]].values
# Create the dependent variable vector; last column only
y = dataset.iloc[:, 4].values
# To see the full array, run the following
# np.set_printoptions(threshold=np.nan)
# Splitting the dataset into Training set and Test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
random_state=0)
# random_state set to 0 so we all get the same result
# 42 is a good choice for random_state otherwise
# Feature Scaling
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Fitting Kernel SVM to the Training set
classifier = SVC(kernel="rbf", random_state=0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
cm = confusion_matrix(y_test, y_pred)
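# Added illustration: overall accuracy can be read straight off the confusion matrix, since
# correct predictions sit on its diagonal (the original script only builds the matrix).
print(cm)
print("Test accuracy: {:.3f}".format(np.trace(cm) / np.sum(cm)))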
# Visualising the Training set results
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1,
stop=X_set[:, 0].max() + 1,
step = 0.01),
np.arange(start=X_set[:, 1].min() - 1,
stop=X_set[:, 1].max() + 1,
step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(),X2.ravel()]).T).
reshape(X1.shape), alpha=0.75, cmap=ListedColormap(("red",
"green")))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title("Kernel SVM (Training Set)")
plt.xlabel("Age")
plt.ylabel("Estimated Salary")
plt.legend()
plt.show()
# Visualising the Test set results
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1,
stop=X_set[:, 0].max() + 1,
step = 0.01),
np.arange(start=X_set[:, 1].min() - 1,
stop=X_set[:, 1].max() + 1,
step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(),X2.ravel()]).T).
reshape(X1.shape), alpha=0.75, cmap=ListedColormap(("red",
"green")))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title("Kernel SVM (Test Set)")
plt.xlabel("Age")
plt.ylabel("Estimated Salary")
plt.legend()
plt.show()
|
[
"mikeskolnik75@gmail.com"
] |
mikeskolnik75@gmail.com
|
17614446989bb73801e73c432e30df3513ccf45e
|
b21967bb5cf52d766b0c0b5e42581e7c6480f8ae
|
/Python/No_Idea/main.py
|
7fa6352088b63c2915545caae8d4effd025ec081
|
[] |
no_license
|
pawel200020/Hackerrank
|
39e70dd0f6d57bdef699541688e913b66e798e0c
|
5dccafcc57a276408734b1e7e0065cc89ffaf574
|
refs/heads/master
| 2023-07-04T23:03:54.676525
| 2021-08-28T21:48:59
| 2021-08-28T21:48:59
| 349,766,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
def is_in_set(search, set, size):
for i in range(size):
if set[i] == search:
return True
return False
def happiness(data, aLike, bDislike):
happyscore = 0
for i in data:
if i in aLike:
happyscore += 1
if i in bDislike:
happyscore -= 1
print(happyscore)
if __name__ == '__main__':
dataQuantity = list(map(int, input().split()))
n = dataQuantity[0]
m = dataQuantity[1]
data = map(int, input().split())
aLike = set(map(int, input().split())) #Sets are used to store multiple items in a single variable.
bDislike = set(map(int, input().split()))
    happiness(data, aLike, bDislike)
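# Added illustration of the expected stdin (a hedged sketch matching the classic
# HackerRank "No Idea!" sample):
#   3 2
#   1 5 3
#   3 1
#   5 7
# prints 1, because 1 and 3 are in the liked set (+2) and 5 is in the disliked set (-1).
# Note that is_in_set() above is never called; membership is checked with `in` on the sets.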
|
[
"38167430+pawel200020@users.noreply.github.com"
] |
38167430+pawel200020@users.noreply.github.com
|
15469192d2d585dadbad141e9287142563e3abb2
|
b10ecb3464ae7270e6e522c0ae912d35c1be73cd
|
/src/Tensorflow Example/main.py
|
cacc13bbbd0bd73d98c31e9bbd092d7e641a01f1
|
[] |
no_license
|
LucDupuy/MachineLearningCourseProject
|
c5ad7254890af32924dde29cb1dd4c0b4a0eef24
|
c4742848d6c5de6727bf01fd53b59d211c347e36
|
refs/heads/main
| 2023-03-18T00:37:30.311880
| 2020-12-15T11:36:38
| 2020-12-15T11:36:38
| 345,754,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 996
|
py
|
import train
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras import datasets, models
def main():
(x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()
x_train, x_test = x_train / 255, x_test / 255
    # Order predefined by the existing CIFAR-10 dataset
classification_arr = ["Plane", "Car", "Bird", "Cat", "Deer", "Dog", "Frog", "Horse", "Ship", "Truck"]
# train_model(x_train, y_train, x_test, y_test)
prediction = predict()
print(f'Prediction is {classification_arr[prediction]}')
def train_model(x_train, y_train, x_test, y_test):
train.train_model(x_train, y_train, x_test, y_test)
def predict():
model = models.load_model('image_classifier')
    img = cv.imread("car.jpg")
    img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    # the CIFAR-10 model expects 32x32 RGB input, so resize before predicting
    img = cv.resize(img, (32, 32))
    plt.imshow(img, cmap=plt.cm.binary)
    # model.predict expects a batch, so add a leading batch dimension: shape (1, 32, 32, 3)
    prediction = model.predict(np.expand_dims(img / 255, axis=0))
index = np.argmax(prediction)
return index
if __name__ == '__main__':
main()
|
[
"luc.dupuy@ryerson.ca"
] |
luc.dupuy@ryerson.ca
|
f1b59d3b5c0669abd07c3ab4a3957fc93ca318c2
|
1f4f8484fb2cf8c9833e20da37556741d18decf3
|
/networks/facenet_resnet50/facenet.py
|
37a7f550b9b5d5cf01081b3cdefbae7a6673fe6a
|
[
"MIT"
] |
permissive
|
liumusicforever/tf_models_develop_framework
|
c0d58a3d0860dff7967dac8f9c4e2c573d82f9f6
|
3ce5f02ca61547c1daac8cdacf459411e26cd81b
|
refs/heads/master
| 2020-03-28T02:46:50.419651
| 2018-10-19T12:30:51
| 2018-10-19T12:30:51
| 147,595,921
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,139
|
py
|
"""Functions for building the face recognition network.
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from subprocess import Popen, PIPE
import tensorflow as tf
import numpy as np
from scipy import misc
from sklearn.model_selection import KFold
from scipy import interpolate
from tensorflow.python.training import training
import random
import re
from tensorflow.python.platform import gfile
import math
from six import iteritems
def triplet_loss(anchor, positive, negative, alpha):
"""Calculate the triplet loss according to the FaceNet paper
Args:
anchor: the embeddings for the anchor images.
positive: the embeddings for the positive images.
negative: the embeddings for the negative images.
Returns:
the triplet loss according to the FaceNet paper as a float tensor.
"""
with tf.variable_scope('triplet_loss'):
pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
return loss
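# Added numpy sketch (for clarity only; not used in training): the loss above is
# mean(max(||a - p||^2 - ||a - n||^2 + alpha, 0)) over the batch.
def triplet_loss_numpy_sketch(anchor, positive, negative, alpha):
    pos_dist = np.sum(np.square(anchor - positive), axis=1)
    neg_dist = np.sum(np.square(anchor - negative), axis=1)
    return np.mean(np.maximum(pos_dist - neg_dist + alpha, 0.0))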
def center_loss(features, label, alfa, nrof_classes):
"""Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
(http://ydwen.github.io/papers/WenECCV16.pdf)
"""
nrof_features = features.get_shape()[1]
centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
initializer=tf.constant_initializer(0), trainable=False)
label = tf.reshape(label, [-1])
centers_batch = tf.gather(centers, label)
diff = (1 - alfa) * (centers_batch - features)
centers = tf.scatter_sub(centers, label, diff)
with tf.control_dependencies([centers]):
loss = tf.reduce_mean(tf.square(features - centers_batch))
return loss, centers
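# Added note: the scatter_sub above moves each class centre towards its batch features,
# c <- c - (1 - alfa) * (c - x), so `alfa` close to 1 updates the centres slowly while
# `alfa` close to 0 lets them track the current batch closely; the loss itself is the mean
# squared distance between features and their class centres.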
def get_image_paths_and_labels(dataset):
image_paths_flat = []
labels_flat = []
for i in range(len(dataset)):
image_paths_flat += dataset[i].image_paths
labels_flat += [i] * len(dataset[i].image_paths)
return image_paths_flat, labels_flat
def shuffle_examples(image_paths, labels):
shuffle_list = list(zip(image_paths, labels))
random.shuffle(shuffle_list)
image_paths_shuff, labels_shuff = zip(*shuffle_list)
return image_paths_shuff, labels_shuff
def random_rotate_image(image):
angle = np.random.uniform(low=-10.0, high=10.0)
return misc.imrotate(image, angle, 'bicubic')
# 1: Random rotate 2: Random crop 4: Random flip 8: Fixed image standardization 16: Flip
RANDOM_ROTATE = 1
RANDOM_CROP = 2
RANDOM_FLIP = 4
FIXED_STANDARDIZATION = 8
FLIP = 16
def create_input_pipeline(input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder):
images_and_labels_list = []
for _ in range(nrof_preprocess_threads):
filenames, label, control = input_queue.dequeue()
images = []
for filename in tf.unstack(filenames):
file_contents = tf.read_file(filename)
image = tf.image.decode_image(file_contents, 3)
image = tf.cond(get_control_flag(control[0], RANDOM_ROTATE),
lambda:tf.py_func(random_rotate_image, [image], tf.uint8),
lambda:tf.identity(image))
image = tf.cond(get_control_flag(control[0], RANDOM_CROP),
lambda:tf.random_crop(image, image_size + (3,)),
lambda:tf.image.resize_image_with_crop_or_pad(image, image_size[0], image_size[1]))
image = tf.cond(get_control_flag(control[0], RANDOM_FLIP),
lambda:tf.image.random_flip_left_right(image),
lambda:tf.identity(image))
image = tf.cond(get_control_flag(control[0], FIXED_STANDARDIZATION),
lambda:(tf.cast(image, tf.float32) - 127.5)/128.0,
lambda:tf.image.per_image_standardization(image))
image = tf.cond(get_control_flag(control[0], FLIP),
lambda:tf.image.flip_left_right(image),
lambda:tf.identity(image))
#pylint: disable=no-member
image.set_shape(image_size + (3,))
images.append(image)
images_and_labels_list.append([images, label])
image_batch, label_batch = tf.train.batch_join(
images_and_labels_list, batch_size=batch_size_placeholder,
shapes=[image_size + (3,), ()], enqueue_many=True,
capacity=4 * nrof_preprocess_threads * 100,
allow_smaller_final_batch=True)
return image_batch, label_batch
def get_control_flag(control, field):
return tf.equal(tf.mod(tf.floor_div(control, field), 2), 1)
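# Added illustration: the per-batch `control` value is a bit mask built from the flags above,
# and get_control_flag() tests a single bit. For example (hedged sketch):
#   control = RANDOM_CROP + RANDOM_FLIP            # == 6: random crop and flip, no rotation
#   control = FIXED_STANDARDIZATION + FLIP         # == 24: fixed scaling plus a deterministic flip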
def _add_loss_summaries(total_loss):
"""Add summaries for losses.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summmary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name +' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step, optimizer, learning_rate, moving_average_decay, update_gradient_vars, log_histograms=True):
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
if optimizer=='ADAGRAD':
opt = tf.train.AdagradOptimizer(learning_rate)
elif optimizer=='ADADELTA':
opt = tf.train.AdadeltaOptimizer(learning_rate, rho=0.9, epsilon=1e-6)
elif optimizer=='ADAM':
opt_backbone = tf.train.AdamOptimizer(learning_rate * 0.1, beta1=0.9, beta2=0.999, epsilon=0.1)
opt_lastlayer = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1)
elif optimizer=='RMSPROP':
opt = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)
elif optimizer=='MOM':
opt_backbone = tf.train.MomentumOptimizer(learning_rate * 0.1, 0.9, use_nesterov=True)
opt_lastlayer = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
else:
raise ValueError('Invalid optimization algorithm')
# print (update_gradient_vars[0])
backbone_vars = [i for i in update_gradient_vars if 'resnet_v2_50/' in i.name]
last_vars = [i for i in update_gradient_vars if 'resnet_v2_50/' not in i.name]
backbone_grads = opt_backbone.compute_gradients(total_loss, backbone_vars)
lastlayer_grads = opt_lastlayer.compute_gradients(total_loss, last_vars)
# Apply gradients.
op_backbone = opt_backbone.apply_gradients(backbone_grads, global_step=global_step)
op_lastlayer = opt_lastlayer.apply_gradients(lastlayer_grads, global_step=global_step)
apply_gradient_op = tf.group(op_backbone, op_lastlayer)
# Add histograms for trainable variables.
if log_histograms:
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
if log_histograms:
for grad, var in backbone_grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
moving_average_decay, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies([apply_gradient_op, variables_averages_op]+update_ops):
train_op = tf.no_op(name='train')
return train_op
def prewhiten(x):
mean = np.mean(x)
std = np.std(x)
std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
y = np.multiply(np.subtract(x, mean), 1/std_adj)
return y
def crop(image, random_crop, image_size):
if image.shape[1]>image_size:
sz1 = int(image.shape[1]//2)
sz2 = int(image_size//2)
if random_crop:
diff = sz1-sz2
(h, v) = (np.random.randint(-diff, diff+1), np.random.randint(-diff, diff+1))
else:
(h, v) = (0,0)
image = image[(sz1-sz2+v):(sz1+sz2+v),(sz1-sz2+h):(sz1+sz2+h),:]
return image
def flip(image, random_flip):
if random_flip and np.random.choice([True, False]):
image = np.fliplr(image)
return image
def to_rgb(img):
w, h = img.shape
ret = np.empty((w, h, 3), dtype=np.uint8)
ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img
return ret
def load_data(image_paths, do_random_crop, do_random_flip, image_size, do_prewhiten=True):
nrof_samples = len(image_paths)
images = np.zeros((nrof_samples, image_size, image_size, 3))
for i in range(nrof_samples):
img = misc.imread(image_paths[i])
if img.ndim == 2:
img = to_rgb(img)
if do_prewhiten:
img = prewhiten(img)
img = crop(img, do_random_crop, image_size)
img = flip(img, do_random_flip)
images[i,:,:,:] = img
return images
def get_label_batch(label_data, batch_size, batch_index):
nrof_examples = np.size(label_data, 0)
j = batch_index*batch_size % nrof_examples
if j+batch_size<=nrof_examples:
batch = label_data[j:j+batch_size]
else:
x1 = label_data[j:nrof_examples]
x2 = label_data[0:nrof_examples-j]
batch = np.vstack([x1,x2])
batch_int = batch.astype(np.int64)
return batch_int
def get_batch(image_data, batch_size, batch_index):
nrof_examples = np.size(image_data, 0)
j = batch_index*batch_size % nrof_examples
if j+batch_size<=nrof_examples:
batch = image_data[j:j+batch_size,:,:,:]
else:
x1 = image_data[j:nrof_examples,:,:,:]
x2 = image_data[0:nrof_examples-j,:,:,:]
batch = np.vstack([x1,x2])
batch_float = batch.astype(np.float32)
return batch_float
def get_triplet_batch(triplets, batch_index, batch_size):
ax, px, nx = triplets
a = get_batch(ax, int(batch_size/3), batch_index)
p = get_batch(px, int(batch_size/3), batch_index)
n = get_batch(nx, int(batch_size/3), batch_index)
batch = np.vstack([a, p, n])
return batch
def get_learning_rate_from_file(filename, epoch):
with open(filename, 'r') as f:
for line in f.readlines():
line = line.split('#', 1)[0]
if line:
par = line.strip().split(':')
e = int(par[0])
if par[1]=='-':
lr = -1
else:
lr = float(par[1])
if e <= epoch:
learning_rate = lr
else:
return learning_rate
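# Example of the schedule file parsed above (illustrative only; the epoch
# boundaries and rates are made-up values).  Lines are '<epoch>: <lr>', '#'
# starts a comment, and '-' maps to -1 so the caller can apply its own
# convention for that epoch range:
#
#   0: 0.05
#   60: 0.005
#   80: 0.0005
#   90: -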
class ImageClass():
"Stores the paths to images for a given class"
def __init__(self, name, image_paths):
self.name = name
self.image_paths = image_paths
def __str__(self):
return self.name + ', ' + str(len(self.image_paths)) + ' images'
def __len__(self):
return len(self.image_paths)
def get_dataset(path, has_class_directories=True):
dataset = []
path_exp = os.path.expanduser(path)
classes = [path for path in os.listdir(path_exp) \
if os.path.isdir(os.path.join(path_exp, path))]
classes.sort()
nrof_classes = len(classes)
for i in range(nrof_classes):
class_name = classes[i]
facedir = os.path.join(path_exp, class_name)
image_paths = get_image_paths(facedir)
dataset.append(ImageClass(class_name, image_paths))
return dataset
def get_image_paths(facedir):
image_paths = []
if os.path.isdir(facedir):
images = os.listdir(facedir)
image_paths = [os.path.join(facedir,img) for img in images]
return image_paths
def split_dataset(dataset, split_ratio, min_nrof_images_per_class, mode):
if mode=='SPLIT_CLASSES':
nrof_classes = len(dataset)
class_indices = np.arange(nrof_classes)
np.random.shuffle(class_indices)
split = int(round(nrof_classes*(1-split_ratio)))
train_set = [dataset[i] for i in class_indices[0:split]]
        test_set = [dataset[i] for i in class_indices[split:]]
elif mode=='SPLIT_IMAGES':
train_set = []
test_set = []
for cls in dataset:
paths = cls.image_paths
np.random.shuffle(paths)
nrof_images_in_class = len(paths)
split = int(math.floor(nrof_images_in_class*(1-split_ratio)))
if split==nrof_images_in_class:
split = nrof_images_in_class-1
if split>=min_nrof_images_per_class and nrof_images_in_class-split>=1:
train_set.append(ImageClass(cls.name, paths[:split]))
test_set.append(ImageClass(cls.name, paths[split:]))
else:
raise ValueError('Invalid train/test split mode "%s"' % mode)
return train_set, test_set
def load_model(model, input_map=None):
# Check if the model is a model directory (containing a metagraph and a checkpoint file)
# or if it is a protobuf file with a frozen graph
model_exp = os.path.expanduser(model)
if (os.path.isfile(model_exp)):
print('Model filename: %s' % model_exp)
with gfile.FastGFile(model_exp,'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, input_map=input_map, name='')
else:
print('Model directory: %s' % model_exp)
meta_file, ckpt_file = get_model_filenames(model_exp)
print('Metagraph file: %s' % meta_file)
print('Checkpoint file: %s' % ckpt_file)
saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file), input_map=input_map)
saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
def get_model_filenames(model_dir):
files = os.listdir(model_dir)
meta_files = [s for s in files if s.endswith('.meta')]
if len(meta_files)==0:
raise ValueError('No meta file found in the model directory (%s)' % model_dir)
elif len(meta_files)>1:
raise ValueError('There should not be more than one meta file in the model directory (%s)' % model_dir)
meta_file = meta_files[0]
ckpt = tf.train.get_checkpoint_state(model_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_file = os.path.basename(ckpt.model_checkpoint_path)
return meta_file, ckpt_file
meta_files = [s for s in files if '.ckpt' in s]
max_step = -1
for f in files:
step_str = re.match(r'(^model-[\w\- ]+.ckpt-(\d+))', f)
if step_str is not None and len(step_str.groups())>=2:
step = int(step_str.groups()[1])
if step > max_step:
max_step = step
ckpt_file = step_str.groups()[0]
return meta_file, ckpt_file
def distance(embeddings1, embeddings2, distance_metric=0):
if distance_metric==0:
        # Euclidean distance
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff),1)
elif distance_metric==1:
# Distance based on cosine similarity
dot = np.sum(np.multiply(embeddings1, embeddings2), axis=1)
norm = np.linalg.norm(embeddings1, axis=1) * np.linalg.norm(embeddings2, axis=1)
similarity = dot / norm
dist = np.arccos(similarity) / math.pi
else:
        raise ValueError('Undefined distance metric %d' % distance_metric)
return dist
def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10, distance_metric=0, subtract_mean=False):
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds,nrof_thresholds))
fprs = np.zeros((nrof_folds,nrof_thresholds))
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
if subtract_mean:
mean = np.mean(np.concatenate([embeddings1[train_set], embeddings2[train_set]]), axis=0)
else:
mean = 0.0
dist = distance(embeddings1-mean, embeddings2-mean, distance_metric)
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])
tpr = np.mean(tprs,0)
fpr = np.mean(fprs,0)
return tpr, fpr, accuracy
def calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp+fn==0) else float(tp) / float(tp+fn)
fpr = 0 if (fp+tn==0) else float(fp) / float(fp+tn)
acc = float(tp+tn)/dist.size
return tpr, fpr, acc
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10, distance_metric=0, subtract_mean=False):
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
if subtract_mean:
mean = np.mean(np.concatenate([embeddings1[train_set], embeddings2[train_set]]), axis=0)
else:
mean = 0.0
dist = distance(embeddings1-mean, embeddings2-mean, distance_metric)
# Find the threshold that gives FAR = far_target
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
if np.max(far_train)>=far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return val_mean, val_std, far_mean
def calculate_val_far(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
n_same = np.sum(actual_issame)
n_diff = np.sum(np.logical_not(actual_issame))
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
return val, far
def store_revision_info(src_path, output_dir, arg_string):
try:
# Get git hash
cmd = ['git', 'rev-parse', 'HEAD']
gitproc = Popen(cmd, stdout = PIPE, cwd=src_path)
(stdout, _) = gitproc.communicate()
git_hash = stdout.strip()
except OSError as e:
git_hash = ' '.join(cmd) + ': ' + e.strerror
try:
# Get local changes
cmd = ['git', 'diff', 'HEAD']
gitproc = Popen(cmd, stdout = PIPE, cwd=src_path)
(stdout, _) = gitproc.communicate()
git_diff = stdout.strip()
except OSError as e:
git_diff = ' '.join(cmd) + ': ' + e.strerror
# Store a text file in the log directory
rev_info_filename = os.path.join(output_dir, 'revision_info.txt')
with open(rev_info_filename, "w") as text_file:
text_file.write('arguments: %s\n--------------------\n' % arg_string)
text_file.write('tensorflow version: %s\n--------------------\n' % tf.__version__) # @UndefinedVariable
text_file.write('git hash: %s\n--------------------\n' % git_hash)
text_file.write('%s' % git_diff)
def list_variables(filename):
reader = training.NewCheckpointReader(filename)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
return names
def put_images_on_grid(images, shape=(16,8)):
nrof_images = images.shape[0]
img_size = images.shape[1]
bw = 3
img = np.zeros((shape[1]*(img_size+bw)+bw, shape[0]*(img_size+bw)+bw, 3), np.float32)
for i in range(shape[1]):
x_start = i*(img_size+bw)+bw
for j in range(shape[0]):
img_index = i*shape[0]+j
if img_index>=nrof_images:
break
y_start = j*(img_size+bw)+bw
img[x_start:x_start+img_size, y_start:y_start+img_size, :] = images[img_index, :, :, :]
if img_index>=nrof_images:
break
return img
def write_arguments_to_file(args, filename):
with open(filename, 'w') as f:
for key, value in iteritems(vars(args)):
f.write('%s: %s\n' % (key, str(value)))
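# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the evaluation
# helpers above are typically combined.  `embeddings` is assumed to be a
# (2N, D) array of embeddings where consecutive rows form a pair, and
# `actual_issame` a length-N boolean sequence saying whether each pair shows
# the same identity; both names are placeholders.
# ---------------------------------------------------------------------------
def example_evaluate_pairs(embeddings, actual_issame, nrof_folds=10):
    """Hypothetical usage example for calculate_roc/calculate_val."""
    embeddings1 = embeddings[0::2]
    embeddings2 = embeddings[1::2]
    issame = np.asarray(actual_issame)
    # ROC and accuracy over a sweep of distance thresholds
    tpr, fpr, accuracy = calculate_roc(np.arange(0, 4, 0.01),
                                       embeddings1, embeddings2, issame,
                                       nrof_folds=nrof_folds)
    # Validation rate at a fixed false-accept-rate target
    val, val_std, far = calculate_val(np.arange(0, 4, 0.001),
                                      embeddings1, embeddings2, issame,
                                      far_target=1e-3, nrof_folds=nrof_folds)
    return tpr, fpr, accuracy, val, val_std, far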
|
[
"liumusicforever@gmail.com"
] |
liumusicforever@gmail.com
|
764d6d15c14ad873f0234ccb4ffb024f415c6235
|
d5c6dc4ecb02ab945f645321ea2e751fa76e11ac
|
/RL_AtoB.py
|
e120c83f069ca09183d2a6dfc0f4b2a9c115fcf9
|
[] |
no_license
|
7enTropy7/Indoor_Nav_Paradigm
|
87ccb26b587fc6405277cd93c77dbda3ee3e098b
|
84049bddd11671eab5f0060eb62385a6271a9148
|
refs/heads/master
| 2020-08-22T12:12:09.059482
| 2019-10-22T06:51:59
| 2019-10-22T06:51:59
| 216,391,963
| 1
| 1
| null | 2019-10-21T03:49:20
| 2019-10-20T16:28:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,426
|
py
|
import random
def optimized_Q_route(R, source, destination):
n = len(R)
iterations=10000
lr=0.8
Q = [[0 for x in range(n)] for y in range(n)]
dest = ord(destination)-65
R[dest][dest] = 100
for i in range(n):
if R[i][dest]==0:
R[i][dest]=100
#Training begins
for s in range(0,iterations):
starter=[]
for i in range(0,n):
starter.append(chr(i+65))
start=random.choice(starter)
k=ord(start)-65
randomizer_array=[]
for j in range(0,n):
if R[k][j]>-1:
randomizer_array.append(j)
next=random.choice(randomizer_array)
largest=[]
for x in range(0,n):
if R[next][x]>-1:
largest.append(Q[next][x])
p=max(largest)
Q[k][next]=R[k][next]+lr*p
k=next
for i in range(0, n):
for j in range(0, n):
Q[i][j]=int(Q[i][j])
#Testing
    seq = [source]
    u=ord(source)-65
    while(u!=dest):
        track=[]  # reset the candidate Q-values for the current node on every step
        for j in range(0, n):
            if Q[u][j]>0:
                track.append(Q[u][j])
        t=max(track)
tx=[]
for y in range(0,n):
if Q[u][y]==t:
tx.append(y)
tind=random.choice(tx)
seq.append(ord(chr(tind+65))-65)
u=tind
if u==dest:
break
return seq
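# Illustrative usage sketch (not part of the original file).  The reward
# matrix below is a made-up 4-node map: R[i][j] = 0 means nodes i and j are
# connected, -1 means they are not; nodes are labelled 'A'..'D'.
if __name__ == '__main__':
    R_example = [
        [-1,  0, -1, -1],
        [ 0, -1,  0,  0],
        [-1,  0, -1,  0],
        [-1,  0,  0, -1],
    ]
    # Expected route from 'A' to 'D' goes through node 1 ('B'), e.g. ['A', 1, 3]
    print(optimized_Q_route(R_example, 'A', 'D'))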
|
[
"menon.uk1998@gmail.com"
] |
menon.uk1998@gmail.com
|
b88fb3856c8f966d03bf464ecd5d95f0239f2d37
|
46318cd429ffeea8200636126e0d1d5a2afb1cb6
|
/leapp/tool/commands/new-project.py
|
69fcd31c71ddbca1616ef520a98077eecbdffd5f
|
[
"Apache-2.0"
] |
permissive
|
shaded-enmity/leapp-actors-stdlib
|
724852cdf012a7c7579ba8391cb78902b14eafb2
|
c5803d193ede0921fc5685b07cfbe9f346c41fc1
|
refs/heads/master
| 2021-04-27T05:01:15.800353
| 2018-02-21T21:15:22
| 2018-02-21T21:15:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
import json
import os
import click
@click.command('new-project')
@click.argument('name')
def cli(name):
basedir = os.path.join('.', name)
if not os.path.isdir(basedir):
os.mkdir(basedir)
project_dir = os.path.join(basedir, '.leapp')
os.mkdir(project_dir)
with open(os.path.join(project_dir, 'info'), 'w') as f:
json.dump({
'name': name,
'channel_data': {}
}, f)
    print("New project {} has been created in {}".format(name, os.path.realpath(name)))
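# Illustrative sketch (not part of the original command): the click command
# above can be exercised with click's test runner.  'demo-project' is a
# made-up project name.
def _example_invoke():
    from click.testing import CliRunner
    runner = CliRunner()
    with runner.isolated_filesystem():
        result = runner.invoke(cli, ['demo-project'])
        return result.exit_code, result.output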
|
[
"vfeenstr@redhat.com"
] |
vfeenstr@redhat.com
|
6a04f79c41b23e87813a590c81d246aec9c7629c
|
9d3121d96cac1614466f3e4962306841583c4e7b
|
/project/trails/pl2.py
|
86e0c6695d9c11d6e7719c9be8211611321d9a32
|
[
"Apache-2.0"
] |
permissive
|
shivanshseth/Digit-Addition
|
8953ad4fc8b1f2104395fd32c27cf79189c54f0b
|
f4b3fd936f0bc4d208fb7ea460a05afa37911379
|
refs/heads/master
| 2023-07-18T04:35:06.580121
| 2021-09-04T13:25:46
| 2021-09-04T13:25:46
| 332,754,328
| 0
| 0
|
Apache-2.0
| 2021-09-04T13:27:13
| 2021-01-25T13:22:00
|
Python
|
UTF-8
|
Python
| false
| false
| 7,321
|
py
|
# -*- coding: utf-8 -*-
# Rapid prototyping notebook
# Commented out IPython magic to ensure Python compatibility.
# %%capture
# ! pip install pytorch-lightning
# ! pip install pytorch-lightning-bolts
import pytorch_lightning as pl
import torch
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
import torch.nn.functional as F
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
X = np.load('../Data/data0.npy')
y = np.load('../Data/lab0.npy')
for i in [1]:
Xt = np.load('../Data/data' + str(i) + '.npy')
yt = np.load('../Data/lab' + str(i) + '.npy')
X = np.concatenate((X, Xt))
y = np.concatenate((y, yt))
Xt = np.load('../Data/data2.npy')[:5000]
yt = np.load('../Data/lab2.npy')[:5000]
X = np.concatenate((X, Xt))
y = np.concatenate((y, yt))
class DigitAdditionDataset(Dataset):
def __init__(self, X, y):
self.x = X
self.n_samples = X.shape[0]
self.y = torch.Tensor(y).long()
def __getitem__(self, index):
return self.x[index], self.y[index]
def __len__(self):
return self.n_samples
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True)
X_train = torch.Tensor([[i] for i in X_train])
X_test = torch.Tensor([[i] for i in X_test])
batch_size = 300
traindataset = DigitAdditionDataset(X_train, y_train)
valdataset = DigitAdditionDataset(X_test, y_test)
#train = DataLoader(dataset=traindataset, batch_size=batch_size, shuffle=True, num_workers=40)
#val = DataLoader(dataset=valdataset, batch_size=batch_size, num_workers=40)
class Data(pl.LightningDataModule):
def __init__(self, batch_size = 300):
super().__init__()
self.batch_size = batch_size
def setup(self, stage = None):
self.train_data = traindataset
self.val_data = valdataset
def train_dataloader(self):
return DataLoader(dataset=traindataset, batch_size=batch_size, shuffle=True, num_workers=40)
def val_dataloader(self):
return DataLoader(dataset=valdataset, batch_size=batch_size, num_workers=40)
"""---
## Model
"""
class Lulz(pl.LightningModule):
def __init__(self):
        super().__init__()
        self.ep_num = 0
self.criterion = nn.CrossEntropyLoss()
self.layerR1 = nn.Sequential(
nn.Conv2d(1, 48, kernel_size=5, stride=1, padding=2),
nn.ReLU())
#self.layerR2 = nn.Sequential(
# nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
# nn.ReLU()
# )
#self.layerR3 = nn.Sequential(
# nn.Conv2d(64, 64, kernel_size=5, stride=1, padding=2),
# nn.ReLU()
# )
# (32, 40 , 168) -> (4, 40, 84)
self.layerR4 = nn.Sequential(
nn.Conv2d(48, 64, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(1,2), stride=(1,2)))
# (4, 40, 84) -> (48, 40, 42)
self.layer1 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(1,2), stride=(1,2)))
# (48, 40, 42) -> (128, 22, 22)
self.layer2 = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=(4,3)),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
# (128, 22, 22) -> (192, 11, 11)
self.layer3 = nn.Sequential(
nn.Conv2d(128, 192, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
# (192, 11, 11) -> (192, 12, 12)
self.layer4 = nn.Sequential(
nn.Conv2d(192, 192, kernel_size=4, stride=1, padding=2),
nn.ReLU())
# (192, 12, 12) -> (128, 6, 6)
self.layer5 = nn.Sequential(
nn.Conv2d(192, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.fc1 = nn.Linear(128*6*6, 128*6*6)
self.drop1 = nn.Dropout(p=0.5)
self.fc2 = nn.Linear(128*6*6, 2000)
self.drop2 = nn.Dropout(p=0.5)
self.fc3 = nn.Linear(2000, 37)
#self.res1 = nn.Linear(2000, 10)
#self.res2 = nn.Linear(2000, 10)
#self.res3 = nn.Linear(2000, 10)
#self.res4 = nn.Linear(2000, 10)
## (10, 1, 4) -> (50, 1, 1)
#self.lconv = nn.Conv2d(10, 50, kernel_size=(4,1),stride=1,padding=0)
def forward(self, x):
out = self.layerR1(x)
#out = self.layerR2(out)
#out = self.layerR3(out)
out = self.layerR4(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.layer5(out)
out = out.reshape(out.size(0), -1)
#print(out.shape)
out = F.relu(self.fc1(out))
#print(out.shape)
out = self.drop1(out)
out = F.relu(self.fc2(out))
out = self.fc3(out)
return out
def training_step(self, batch, batch_idx):
# --------------------------
images, label = batch
outputs = self(images)
loss = self.criterion(outputs, label)
total = label.size(0)
_, predicted = torch.max(outputs.data, 1)
#print(predicted)
correct = (predicted == label).sum().item()
accuracy = correct/total * 100
self.log('train_loss', loss)
self.log('train_accuracy', accuracy)
#print("Training: loss:", loss.item(), "accuracy:", accuracy)
#return {'loss': loss, 'accuracy': accuracy}
return loss
# --------------------------
def validation_step(self, batch, batch_idx):
# --------------------------
images, label = batch
outputs = self(images)
loss = self.criterion(outputs, label)
total = label.size(0)
_, predicted = torch.max(outputs.data, 1)
correct = (predicted == label).sum().item()
accuracy = correct/total * 100
self.log('val_loss', loss)
self.log('val_accuracy', accuracy)
print("Validation", "accuracy:", accuracy)
#return {'loss': loss, 'accuracy': accuracy}
#return loss, accuracy
# ---------#-----------------
def training_epoch_end(self, outs):
print("Epoch:", self.ep_num, "Training: loss:", outs[0])
self.ep_num+=1
# #print("Training:", outs)
#def validation_epoch_end(self, outs):
# for out in outs:
# pass
# print("Validation: loss:", outs[0]['loss'].item(), "accuracy:", outs[0]['accuracy'])
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=0.001)
return optimizer
"""---
## Train
NOTE: in colab, set progress_bar_refresh_rate high or the screen will freeze because of the rapid tqdm update speed.
"""
if __name__ == '__main__':
# init model
ae = Lulz()
digits = Data()
# Initialize a trainer
trainer = pl.Trainer(progress_bar_refresh_rate=0, gpus=4, max_epochs=100, distributed_backend='ddp')
# Train the model ⚡
trainer.fit(ae, digits)
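# Illustrative sketch (not part of the original script): loading a checkpoint
# written by the Trainer above and running a forward pass.  The checkpoint
# path passed in is a placeholder chosen by the caller.
def example_load_checkpoint(ckpt_path):
    model = Lulz.load_from_checkpoint(ckpt_path)
    model.eval()
    with torch.no_grad():
        logits = model(X_test[:4])      # (4, 37) class scores
    return logits.argmax(dim=1)         # predicted digit sums per image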
|
[
"shivansh.seth@research.iiit.ac.in"
] |
shivansh.seth@research.iiit.ac.in
|
ced87177d99551888e0c399e44434bd6c0a51f74
|
1193248ad31f7bf7d05669b001a0bdfac2fc68c6
|
/Algorithm_3D.py
|
4a794f2e3d487a9a8585ac8442d4df2b884a89f2
|
[] |
no_license
|
alina-rubina/MAV-based-2D-Localization-Simulator
|
a7e8e76e52e8c044698429347a4a011673d0d35c
|
d23f6f2079fc71b1f86548a8521949cdb1fcf5d2
|
refs/heads/master
| 2020-07-01T17:45:55.792269
| 2019-08-08T11:30:57
| 2019-08-08T11:30:57
| 201,243,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,025
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 14:06:04 2015
@author: AHMAD
"""
line_colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'];
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
import numpy as np
from At_UAV_3D import *
from At_Node_3D import *
from localization import *
from Dist_to_ss import dist_to_ss
from ss_to_dist import *
from pylab import *
from Setup import *
from rwalk_3D import random_walk
from rwalk_3D import circle_walk
from rwalk_3D_node import random_walk_node
from operator import itemgetter
from scipy.spatial import distance
import time
from triangle import triangle #Trajectory 1
from Doublescan import DoubleScan #Trajectory 2
from circle import circle
final_conclusions_length_of_loc_nodes = []
final_conclusions_Average_loc_error = []
final_conclusions_total_route_length = []
conclusions = []
nodes_real_for_each_step = []
nodes_estim_for_each_step = []
def next_copter_coordinates(come_uav_current_coord, come_max_x, come_max_y, come_max_z, come_step, come_copter_trajectory):
if come_copter_trajectory == 0: # random walk
return random_walk(come_uav_current_coord, come_max_x, come_max_y, come_max_z, come_step);
if come_copter_trajectory == 1: # circle
return circle (come_uav_current_coord, come_max_x, come_max_y, come_max_z, come_step);
if come_copter_trajectory == 2: # triangle
return triangle (come_uav_current_coord, come_max_x, come_max_y, come_max_z, come_step);
if come_copter_trajectory == 3: # circle_walk
return circle_walk (come_uav_current_coord, come_max_x, come_max_y, come_max_z, come_step);
if come_copter_trajectory == 4: # double_scan
return DoubleScan (come_uav_current_coord, come_max_x, come_max_y, come_max_z, come_step);
if come_copter_trajectory == 5: # another_random
return DoubleScan (come_uav_current_coord, come_max_x, come_max_y, come_max_z, come_step);
if come_copter_trajectory == 6: # hybrid-circle
return DoubleScan (come_uav_current_coord, come_max_x, come_max_y, come_max_z, come_step);
# Setup.main(uavs, nodes, n_uavs, n_nodes, max_x, max_y, max_z, step, steps, max_distance, track_uavs, track_uav, track_nodes, average_nr_neigh, line_colors);
def main(uavs, nodes, n_uavs, n_nodes, max_x, max_y, max_z, threshold, step, steps, max_distance, track_uavs, track_uav,
track_nodes, average_nr_neigh, localised_unlocalised_ngr, step_for_node, build = 0, localisation_method = 0, localisation_filtering = 0, copter_trajectory = 0):
#create and fill in with initial data: uavId, step, steps and x, y, z coordinates of UAV
#max_distance = ss_to_dist.ss_to_dist(threshold);
#start here
global max_range; #for hybrid-circle
max_range = ss_to_dist(threshold); #for hybrid-circle
#print max_range
global temp;
global R
    global shai; #updates its value by one for every new cycle
shai=0;
R=ss_to_dist(threshold);
#----------------------------------------starting coordinates selection for trajectories--------------
temp=[ step , 0, max_z]
    if copter_trajectory == 2: # for triangle
        temp=[-max_x, -max_y, max_z]; #Provides the first point, which is needed to obtain the next points in the triangle trajectory
    if copter_trajectory == 4: # for double_scan
        temp=[-max_x, -max_y, max_z]; #Provides the first point, which is needed to obtain the next points in the DoubleScan trajectory
    if copter_trajectory == 1: #for circle
        temp=[ step , 0, max_z]; #Provides the first point, which is needed to obtain the next points in the circle trajectory
#stop here
uavs = [mUav(0, step, steps, 0, 0, 0) for count in range(0, n_uavs)]
for uav in range(len(uavs)):
track_uav.append([])
uavs[uav].uavId = uav;
#uavs[uav].uav_current_coord = [0, 0, 0];
if copter_trajectory == 5 or copter_trajectory ==0:
uavs[uav].uav_current_coord = [-max_x, -max_y, max_z];
else:
uavs[uav].uav_current_coord = [-max_x,-max_y, 0];
track_uavs.append(uavs[uav].uav_current_coord);
nodes = [mNode(0, 0, 0, 0) for count1 in range(0, n_nodes)]
#print len(nodes)
#--------------------------------------------modif according to original map
for node in range(len(nodes)):
nodes[node].myId = node;
#nodes[node].x_y_real=[np.random.uniform(-500,500), np.random.uniform(-500,500)];
# nodes[node].x_y_zreal=[np.random.uniform(-1*max_x,max_x), np.random.uniform(-1*max_y,max_y),0];
#mixed
# if (node <= int(0.25 * n_nodes)):
# nodes[node].x_y_zreal = [np.random.uniform(-0.4 * max_x, -0.8 * max_x),
# np.random.uniform(-0.8*max_y, -0.4 * max_y),
# np.random.uniform(0,0)]; # idmt
#
#
# if (node > int(0.25 * n_nodes) and node <= int(0.5 * n_nodes)):
# nodes[node].x_y_zreal = [np.random.uniform(-0.4 * max_x, -0.8 * max_x), np.random.uniform(0.4*max_y, 0.8 * max_y),
# np.random.uniform(0, 0)]; # usz
# if (node > int(0.5 * n_nodes) and node <= int(0.75 * n_nodes)):
# nodes[node].x_y_zreal = [np.random.uniform(0 * max_x, 1* max_x),
# np.random.uniform(-1*max_y, 0 * max_y),
# #np.random.uniform(-0.0 * max_z, 0.8 * max_z)]; # haus H
# np.random.uniform(0, 0)]; # haus H
##
# if (node > int(0.75 * n_nodes) and node <= int( n_nodes)):
# nodes[node].x_y_zreal = [np.random.uniform(0 * max_x, 1 * max_x),
# np.random.uniform(0*max_y, 1 * max_y),
# #np.random.uniform(0.28264 * max_z, 0.41864 * max_z)]; # haus I
# np.random.uniform(0, 0)]; # haus I
# #clusters
if (node <= int(0.25 * n_nodes)):
nodes[node].x_y_zreal = [np.random.uniform(-0.2 * max_x, -0.7 * max_x),
np.random.uniform(-0.7*max_y, -0.2 * max_y),
np.random.uniform(0, 0.6666 * max_z)]; # idmt
if (node > int(0.25 * n_nodes) and node <= int(0.5 * n_nodes)):
nodes[node].x_y_zreal = [np.random.uniform(-0.2 * max_x, -0.7 * max_x), np.random.uniform(0.2*max_y, 0.7 * max_y),
np.random.uniform(0.4 * max_z, 0.7 * max_z)]; # usz
if (node > int(0.5 * n_nodes) and node <= int(0.75 * n_nodes)):
nodes[node].x_y_zreal = [np.random.uniform(0.2 * max_x, 0.7* max_x),
np.random.uniform(-0.7*max_y, -0.2 * max_y),
#np.random.uniform(-0.0 * max_z, 0.8 * max_z)]; # haus H
np.random.uniform(-0.3 * max_z, -0.8 * max_z)]; # haus H
if (node > int(0.75 * n_nodes) and node <= int( n_nodes)):
nodes[node].x_y_zreal = [np.random.uniform(0.2 * max_x, 0.7 * max_x),
np.random.uniform(0.2*max_y, 0.7 * max_y),
#np.random.uniform(0.28264 * max_z, 0.41864 * max_z)]; # haus I
np.random.uniform(-0.28264 * max_z, -0.41864 * max_z)]; # haus I
#
track_nodes.append(nodes[node].x_y_zreal);
# print 'bla' , nodes[node].x_y_zreal
dist_inter = []
# print test.size
#create_list_of_poten_neighb()
for i in range(len(nodes)):
nodes[i].NULoN = 0; # clean the local number of unlocalized neighbors
if (step_for_node == 0):
        # creating list of potential neighbors
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
dist = distance.euclidean(nodes[i].x_y_zreal, nodes[j].x_y_zreal);
# test[i]=dist.append(i)
dist_inter=np.append(dist_inter,dist)
# print test
                if dist < max_distance * 1.5: # we take 50% more than the max possible distance for the current threshold
ss = dist_to_ss(dist);
if (ss > threshold): #if within range , receive packets. Threshold = -100dBm
if (nodes[i].mystatus == 0):
nodes[j].NULoN += 1;
if (nodes[j].mystatus == 0):
nodes[i].NULoN += 1;
nodes[i].visible_neigbords.append(j);
nodes[j].visible_neigbords.append(i);
# print 'test',test.size
# dist_inter_mean=np.mean(dist_inter)
# dist_inter_std=np.std(dist_inter)
## test=np.sum(dist_inter)/len(dist_inter)
# print max_distance
#simulation for N steps for each UAV
finished_flag = 0; # will become TRUE when all nodes are detected
j = 0;
for s in range(0, steps): #main simulation loop
#print" step ",s," started-----------------------------------------------" #trace
for uav in range(len(uavs)):
#print" step ",s," uav ",uav #trace
uavs[uav].unlocalized_nodes = []
if (s == 1):
for node in range(len(nodes)):
average_nr_neigh += nodes[node].NULoN;
#step 1: receive at UAV
for node in range(len(nodes)):
dist = distance.euclidean(uavs[uav].uav_current_coord, nodes[node].x_y_zreal);
ss = dist_to_ss(dist);
if (ss > threshold): #if within range , receive packets. Threshold = -100dBm
RX_packet = nodes[node].packet_transmit(); #receive the packet transmitted by the node
nodes[node].update_database(RX_packet, ss, uavs[
uav].uav_current_coord); # store the packet and ss, uav coordinates in the object nodes directly
#uavs[uav].update_database(RX_packet,ss,uavs[uav].uav_current_coord); #store packet also inside UAV,
nodes[node].NULoN_UAV_view = RX_packet[3]; # how many unlocalized nodes around node
uavs[uav].save_node_NULON(node, RX_packet[3]); #add node number and how many unlocalized nodes around it to UAV DB
#step 2: Perform localisation and Update of localised_nodes, since new data arrieved
#localisation_method, localisation_filtering are coming from simulation via main parameters
res = localization(node, nodes, localisation_method, localisation_filtering);
if (res != 0):#Localization was successful: update the local view of the UAV and inform the node
#print" localized node ", node , " neighb here", nodes[node].my_neigbords, " known ", nodes[node].mystatus
nodes[node].mystatus = 1;
nodes[node].x_y_zestim = res;
            #step 3: UAV moves to a new position. The decision is based on its local view of the network: the UAV moves towards the node with the largest number of unlocalized neighbours, or does a random walk if that number is 0
localised_nodes = [];
loc_nodes = 0;
for i in range(len(nodes)):
if (nodes[i].mystatus == 1):
loc_nodes += 1;
            if (len(nodes)==loc_nodes): # all nodes are localized (loc_nodes counted above)
# if(steps==20):
finished_flag = 1;
break;
for i in range(len(uavs[uav].unlocalized_nodes)):
if ((nodes[uavs[uav].unlocalized_nodes[i][0]].mystatus == 1) & (uavs[uav].unlocalized_nodes[i][1] > 0)):
                    # UAV received packet from localized node and node has unlocalized members
localised_nodes.append([uavs[uav].unlocalized_nodes[i][0], uavs[uav].unlocalized_nodes[i][1]]);
localised_nodes.sort(key=itemgetter(1)); #sorting according to localised nodes with highest NULoN(Number of Unlocalised Neighbours)
localised_nodes.reverse();
#localised to unlocalised neighbors
localised_unlocalised_ngr.append(['Number of loc nodes with unloc neighbors =', len(localised_nodes)]);
if (len(localised_nodes) > 0):
#print "that is what first node know "," node number ", nodes[localised_nodes[0][0]].myId , " estim", nodes[localised_nodes[0][0]].x_y_zestim, " real ", nodes[localised_nodes[0][0]].x_y_zreal
i = 0;
j += 1;
if (distance.euclidean(uavs[uav].uav_current_coord, nodes[localised_nodes[i][0]].x_y_zestim) < step / 4):
# this limits small movement
if i + 1 < len(localised_nodes):
#print "that is what second node know "," node number ", nodes[localised_nodes[1][0]].myId , " neighboard", nodes[localised_nodes[1][0]].my_neigbords , " estim", nodes[localised_nodes[1][0]].x_y_zestim, " real ", nodes[localised_nodes[1][0]].x_y_zreal
#uavs[uav].uav_current_coord = nodes[localised_nodes[i+1][0]].x_y_zestim;#we go to next localized node if we already in place of node with max unlocal. memebers
if copter_trajectory == 5:
uavs[uav].uav_current_coord = circle_walk(uavs[uav].uav_current_coord, max_x, max_y, max_z,step, j);
else:
uavs[uav].uav_current_coord = temp;
temp = next_copter_coordinates(uavs[uav].uav_current_coord, max_x, max_y, max_z, step, copter_trajectory);
else:
if copter_trajectory == 5:
uavs[uav].uav_current_coord = circle_walk(uavs[uav].uav_current_coord, max_x, max_y, max_z,
step, j);
else:
uavs[uav].uav_current_coord = temp;
temp = next_copter_coordinates(uavs[uav].uav_current_coord, max_x, max_y, max_z, step, copter_trajectory);
#print "made random walk bcs don't have more unknown", uavs[uav].uav_current_coord #trace
else:
if copter_trajectory == 0 or copter_trajectory == 5: # moving to estimated position of node when random_walk is used, next_copter_coordinates can not do it
uavs[uav].uav_current_coord = nodes[localised_nodes[i][0]].x_y_zestim;
temp = next_copter_coordinates(uavs[uav].uav_current_coord, max_x, max_y, max_z, step, copter_trajectory);
else:
uavs[uav].uav_current_coord = temp;
temp = next_copter_coordinates(uavs[uav].uav_current_coord, max_x, max_y, max_z, step, copter_trajectory);
#print "moved to node with max unknown",uavs[uav].uav_current_coord #trace
            else: # we don't have any visible nodes with known unlocalized neighbors, do random walk
if copter_trajectory == 5:
uavs[uav].uav_current_coord = random_walk(uavs[uav].uav_current_coord, max_x, max_y, max_z, step);
j = 0;
else:
uavs[uav].uav_current_coord = temp; #DOuble scan trajectory
temp = next_copter_coordinates(uavs[uav].uav_current_coord, max_x, max_y, max_z, step, copter_trajectory);
#print "made random walk to " ,uavs[uav].uav_current_coord #trace
#track_uav.insert(uav+1, uavs[uav].uav_current_coord); #original code
if copter_trajectory!=5:
if(uavs[uav].uav_current_coord == [-max_x,-max_y, 0]):
shai=shai+1;
track_uav[uav].append(uavs[uav].uav_current_coord);
#print 'UAV coordinates log ',track_uav[uav]; #trace
#print ' Step 4 is here uav= ',uav, ' step = ', s ;
#step 4: nodes update their local view receiving neighbors beacons
for i in range(len(nodes)):
nodes[i].NULoN = 0; # clean the local number of unlocalized neighbors
nodes[i].my_neigbords = [] #used for debugging
for i in range(len(nodes)):
if (step_for_node > 0):
#print "full scan executed" #trace
for j in range(i + 1, len(nodes)):
dist = distance.euclidean(nodes[i].x_y_zreal, nodes[j].x_y_zreal);
ss = dist_to_ss(dist);
if (ss > threshold): #if within range , receive packets. Threshold = -100dBm
if (nodes[i].mystatus == 0):
nodes[j].NULoN += 1;
nodes[j].my_neigbords.append(i)
if (nodes[j].mystatus == 0):
nodes[i].NULoN += 1;
nodes[i].my_neigbords.append(j)
else:
#print "only through visible neighbords scan";
for j in range(len(nodes[i].visible_neigbords)):
dist = distance.euclidean(nodes[i].x_y_zreal, nodes[nodes[i].visible_neigbords[j]].x_y_zreal);
#print " node i is ", i, " node j is ",nodes[i].visible_neigbords[j]
ss = dist_to_ss(dist);
if (ss > threshold): #if within range , receive packets. Threshold = -100dBm
if (nodes[i].mystatus == 0):
nodes[nodes[i].visible_neigbords[j]].NULoN += 1;
if (nodes[nodes[i].visible_neigbords[j]].my_neigbords.count(i) == 0):
nodes[nodes[i].visible_neigbords[j]].my_neigbords.append(i)
if (nodes[nodes[i].visible_neigbords[j]].mystatus == 0):
nodes[i].NULoN += 1;
if (nodes[i].my_neigbords.count(j) == 0):
nodes[i].my_neigbords.append(j)
#step 5: nodes update their location
nodes_real_for_each_step.append([])
nodes_estim_for_each_step.append([])
for i_node in range(len(nodes)): #logging data
nodes_real_for_each_step[s].append(nodes[i_node].x_y_zreal)
if (step_for_node > 0):
#nodes[i_node].x_y_zreal = nodes[i_node].x_y_zreal
nodes[i_node].x_y_zreal = random_walk_node(nodes[i_node].x_y_zreal, max_x, max_y, max_z, step_for_node);
if (nodes[i_node].mystatus == 1):
nodes_estim_for_each_step[s].append(nodes[i_node].x_y_zestim)
#print" step ",s," finished-----------------------------------------------"
if (finished_flag or shai==2): #all nodes already localized not 5
#if (finished_flag): #all nodes already localized
break;
#print time.clock() - start_time_cycle, " for 1 step seconds"
return [uavs, nodes, track_uavs, track_uav, track_nodes, average_nr_neigh, max_distance, localised_unlocalised_ngr,
nodes_real_for_each_step, nodes_estim_for_each_step]
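# Illustrative sketch (not part of the original simulator): the radio model
# used throughout main() is the dist_to_ss()/ss_to_dist() pair, and a node is
# considered "in range" when its received signal strength exceeds the
# threshold.  The -100 dBm default below is an assumed example value.
def example_in_range(node_coord, uav_coord, threshold=-100):
    """Return True if a node at node_coord would hear a UAV at uav_coord."""
    dist = distance.euclidean(node_coord, uav_coord)
    return dist_to_ss(dist) > threshold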
|
[
"noreply@github.com"
] |
alina-rubina.noreply@github.com
|
23b48d1161fc731e0cd5b5ae57b36e873bdba44d
|
1f5a52f905629e5be80ed3511eb9e82a20bfed36
|
/research/deeplab/deeplab_demo.py
|
d58ee576e204d008a2cad19e363121be269d8b30
|
[
"Apache-2.0"
] |
permissive
|
ytusdc/models
|
69d0e500c658cb9426b0a5539ada900d9c40073f
|
6f448355ed3bd82a3d29a65d20c30124da71fc6d
|
refs/heads/master
| 2020-07-09T06:03:08.573446
| 2019-09-23T06:22:21
| 2019-09-23T06:22:21
| 203,901,352
| 0
| 0
|
Apache-2.0
| 2019-09-21T12:33:42
| 2019-08-23T01:32:05
|
Python
|
UTF-8
|
Python
| false
| false
| 8,730
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # Overview
#
# This colab demonstrates the steps to use the DeepLab model to perform semantic segmentation on a sample input image. Expected outputs are semantic labels overlayed on the sample image.
#
# ### About DeepLab
# The models used in this colab perform semantic segmentation. Semantic segmentation models focus on assigning semantic labels, such as sky, person, or car, to multiple objects and stuff in a single image.
# # Instructions
# <h3><a href="https://cloud.google.com/tpu/"><img valign="middle" src="https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/tpu-hexagon.png" width="50"></a> Use a free TPU device</h3>
#
# 1. On the main menu, click Runtime and select **Change runtime type**. Set "TPU" as the hardware accelerator.
# 1. Click Runtime again and select **Runtime > Run All**. You can also run the cells manually with Shift-ENTER.
# ## Import Libraries
# In[ ]:
import os
from io import BytesIO
import tarfile
import tempfile
from six.moves import urllib
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
import tensorflow as tf
# ## Import helper methods
# These methods help us perform the following tasks:
# * Load the latest version of the pretrained DeepLab model
# * Load the colormap from the PASCAL VOC dataset
# * Adds colors to various labels, such as "pink" for people, "green" for bicycle and more
# * Visualize an image, and add an overlay of colors on various regions
# In[ ]:
class DeepLabModel(object):
"""Class to load deeplab model and run inference."""
INPUT_TENSOR_NAME = 'ImageTensor:0'
OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'
INPUT_SIZE = 513
FROZEN_GRAPH_NAME = 'frozen_inference_graph'
def __init__(self, tarball_path):
"""Creates and loads pretrained deeplab model."""
self.graph = tf.Graph()
graph_def = None
# Extract frozen graph from tar archive.
tar_file = tarfile.open(tarball_path)
for tar_info in tar_file.getmembers():
if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):
file_handle = tar_file.extractfile(tar_info)
graph_def = tf.GraphDef.FromString(file_handle.read())
break
tar_file.close()
if graph_def is None:
raise RuntimeError('Cannot find inference graph in tar archive.')
with self.graph.as_default():
tf.import_graph_def(graph_def, name='')
self.sess = tf.Session(graph=self.graph)
def run(self, image):
"""Runs inference on a single image.
Args:
image: A PIL.Image object, raw input image.
Returns:
resized_image: RGB image resized from original input image.
seg_map: Segmentation map of `resized_image`.
"""
width, height = image.size
resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)
target_size = (int(resize_ratio * width), int(resize_ratio * height))
resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)
batch_seg_map = self.sess.run(
self.OUTPUT_TENSOR_NAME,
feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})
seg_map = batch_seg_map[0]
return resized_image, seg_map
def create_pascal_label_colormap():
"""Creates a label colormap used in PASCAL VOC segmentation benchmark.
Returns:
A Colormap for visualizing segmentation results.
"""
colormap = np.zeros((256, 3), dtype=int)
ind = np.arange(256, dtype=int)
for shift in reversed(range(8)):
for channel in range(3):
colormap[:, channel] |= ((ind >> channel) & 1) << shift
ind >>= 3
return colormap
def label_to_color_image(label):
"""Adds color defined by the dataset colormap to the label.
Args:
label: A 2D array with integer type, storing the segmentation label.
Returns:
result: A 2D array with floating type. The element of the array
is the color indexed by the corresponding element in the input label
to the PASCAL color map.
Raises:
ValueError: If label is not of rank 2 or its value is larger than color
map maximum entry.
"""
if label.ndim != 2:
raise ValueError('Expect 2-D input label')
colormap = create_pascal_label_colormap()
if np.max(label) >= len(colormap):
raise ValueError('label value too large.')
return colormap[label]
def vis_segmentation(image, seg_map):
"""Visualizes input image, segmentation map and overlay view."""
plt.figure(figsize=(15, 5))
grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])
plt.subplot(grid_spec[0])
plt.imshow(image)
plt.axis('off')
plt.title('input image')
plt.subplot(grid_spec[1])
seg_image = label_to_color_image(seg_map).astype(np.uint8)
plt.imshow(seg_image)
plt.axis('off')
plt.title('segmentation map')
plt.subplot(grid_spec[2])
plt.imshow(image)
plt.imshow(seg_image, alpha=0.7)
plt.axis('off')
plt.title('segmentation overlay')
unique_labels = np.unique(seg_map)
ax = plt.subplot(grid_spec[3])
plt.imshow(
FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')
ax.yaxis.tick_right()
plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
plt.xticks([], [])
ax.tick_params(width=0.0)
plt.grid('off')
plt.show()
LABEL_NAMES = np.asarray([
'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tv'
])
FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
# ## Select a pretrained model
# We have trained the DeepLab model using various backbone networks. Select one from the MODEL_NAME list.
# In[ ]:
MODEL_NAME = 'mobilenetv2_coco_voctrainaug' # @param ['mobilenetv2_coco_voctrainaug', 'mobilenetv2_coco_voctrainval', 'xception_coco_voctrainaug', 'xception_coco_voctrainval']
_DOWNLOAD_URL_PREFIX = 'http://download.tensorflow.org/models/'
_MODEL_URLS = {
'mobilenetv2_coco_voctrainaug':
'deeplabv3_mnv2_pascal_train_aug_2018_01_29.tar.gz',
'mobilenetv2_coco_voctrainval':
'deeplabv3_mnv2_pascal_trainval_2018_01_29.tar.gz',
'xception_coco_voctrainaug':
'deeplabv3_pascal_train_aug_2018_01_04.tar.gz',
'xception_coco_voctrainval':
'deeplabv3_pascal_trainval_2018_01_04.tar.gz',
}
_TARBALL_NAME = 'deeplab_model.tar.gz'
model_dir = tempfile.mkdtemp()
tf.gfile.MakeDirs(model_dir)
download_path = os.path.join(model_dir, _TARBALL_NAME)
print('downloading model, this might take a while...')
urllib.request.urlretrieve(_DOWNLOAD_URL_PREFIX + _MODEL_URLS[MODEL_NAME],
download_path)
print('download completed! loading DeepLab model...')
MODEL = DeepLabModel(download_path)
print('model loaded successfully!')
# ## Run on sample images
#
# Select one of the sample images (leave `IMAGE_URL` empty) or feed any internet image
# URL for inference.
#
# Note that this colab uses single scale inference for fast computation,
# so the results may slightly differ from the visualizations in the
# [README](https://github.com/tensorflow/models/blob/master/research/deeplab/README.md) file,
# which uses multi-scale and left-right flipped inputs.
# In[ ]:
SAMPLE_IMAGE = 'image1' # @param ['image1', 'image2', 'image3']
IMAGE_URL = '' #@param {type:"string"}
_SAMPLE_URL = ('https://github.com/tensorflow/models/blob/master/research/'
'deeplab/g3doc/img/%s.jpg?raw=true')
def run_visualization(url):
"""Inferences DeepLab model and visualizes result."""
try:
f = urllib.request.urlopen(url)
jpeg_str = f.read()
original_im = Image.open(BytesIO(jpeg_str))
except IOError:
print('Cannot retrieve image. Please check url: ' + url)
return
print('running deeplab on image %s...' % url)
resized_im, seg_map = MODEL.run(original_im)
vis_segmentation(resized_im, seg_map)
image_url = IMAGE_URL or _SAMPLE_URL % SAMPLE_IMAGE
run_visualization(image_url)
# ## What's next
#
# * Learn about [Cloud TPUs](https://cloud.google.com/tpu/docs) that Google designed and optimized specifically to speed up and scale up ML workloads for training and inference and to enable ML engineers and researchers to iterate more quickly.
# * Explore the range of [Cloud TPU tutorials and Colabs](https://cloud.google.com/tpu/docs/tutorials) to find other examples that can be used when implementing your ML project.
# * For more information on running the DeepLab model on Cloud TPUs, see the [DeepLab tutorial](https://cloud.google.com/tpu/docs/tutorials/deeplab).
#
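# Illustrative addition (not part of the original demo): running the already
# loaded MODEL on a local image file instead of a URL.  The path argument is
# a placeholder supplied by the caller.
def run_local_visualization(image_path):
    """Segment a local image with MODEL and show the overlay."""
    original_im = Image.open(image_path)
    resized_im, seg_map = MODEL.run(original_im)
    vis_segmentation(resized_im, seg_map)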
|
[
"ytusdc@126.com"
] |
ytusdc@126.com
|
3f6c643a24b78dc5927f748ae6aa4fd814e71808
|
6206ad73052b5ff1b6690c225f000f9c31aa4ff7
|
/Code/Largest Color Value in a Directed Graph.py
|
56a05033e7f54f9136ec0df4e44763603d24a482
|
[] |
no_license
|
mws19901118/Leetcode
|
7f9e3694cb8f0937d82b6e1e12127ce5073f4df0
|
752ac00bea40be1e3794d80aa7b2be58c0a548f6
|
refs/heads/master
| 2023-09-01T10:35:52.389899
| 2023-09-01T03:37:22
| 2023-09-01T03:37:22
| 21,467,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,734
|
py
|
from collections import Counter
from typing import List, Set


class Solution:
def largestPathValue(self, colors: str, edges: List[List[int]]) -> int:
n = len(colors) #Get n.
adjacentList = [[] for _ in range(n)] #Build adjacent list.
for x, y in edges:
adjacentList[x].append(y)
        cache = {}  #Cache intermediate results.
def DFS(node: int, visited: Set[int]) -> Counter: #DFS.
if node in cache: #If node is in cache, return cache[node].
return cache[node]
count = Counter() #Initialize a counter to store the max count of each color starting from current node.
for x in adjacentList[node]: #Traverse each neighbor of node.
if x in visited: #If x is visited, return none because there is a cycle.
return None
visited.add(x) #Add x to visited.
next_count = DFS(x, visited) #Get the counter from x.
if next_count == None: #If it is none, there is a cycle, return none as well.
return None
for y in next_count: #For each color in next_count, update its max value in count.
count[y] = max(count[y], next_count[y])
visited.remove(x) #Remove x from visited.
count[colors[node]] += 1 #Increase the count of current node color.
cache[node] = count #Set count in cache for current node.
return count #Return count.
result = -1 #Initialize result.
for x in range(n): #DFS from each node.
count = DFS(x, set([x]))
if not count: #If there is a cycle, return -1.
return -1
result = max(result, max(count.values())) #Update result if the longest color from current DFS is longer.
return result #Return result.
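# Illustrative usage sketch (not part of the original solution): the graph
# below follows the problem's first example, where the best path 0->2->3->4
# contains three 'a' nodes.
if __name__ == '__main__':
    print(Solution().largestPathValue("abaca", [[0, 1], [0, 2], [2, 3], [3, 4]]))  # expected 3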
|
[
"noreply@github.com"
] |
mws19901118.noreply@github.com
|
720ff468060f91a29093f7fde50ff2efc68544b7
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Cyrl.TUK/Mono_16/pdf_to_json_test_Cyrl.TUK_Mono_16.py
|
36dffeac7c5530245ad098709b2f4432a643d279
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Cyrl.TUK/Mono_16/udhr_Cyrl.TUK_Mono_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
003468457e12873c6f587e20be65f2e64a7329ee
|
54277288865f738e44d7be1d6b41b19c63af267e
|
/tools/test_recognizer.py
|
56c7f0585fbb0b4f4fc07e33968e2d9425667338
|
[] |
no_license
|
scenarios/SR-SVRL
|
7b41d29e16cff3020f333efc28a624d85bba4537
|
26e89ecb29355635b10a355f2f16f1b5db9c4e9b
|
refs/heads/master
| 2023-02-26T06:16:13.314491
| 2021-01-30T16:30:57
| 2021-01-30T16:30:57
| 307,295,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,285
|
py
|
import _init_paths
import os
import argparse
from pyvrl.builder import build_model, build_dataset
from pyvrl.apis import test_recognizer, get_root_logger
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(description='Evaluation an action recognizer')
parser.add_argument('--cfg', default='', type=str, help='config file path')
parser.add_argument('--work_dir', help='the dir to save logs and models')
parser.add_argument('--data_dir', default='data/', type=str, help='the dir that save training data')
parser.add_argument('--checkpoint', help='the checkpoint file to resume from')
parser.add_argument('--gpus', type=int, default=1,
help='number of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--batchsize', type=int, default=6)
parser.add_argument('--seed', type=int, default=7, help='random seed')
parser.add_argument('--progress', action='store_true')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
cfg = Config.fromfile(args.cfg)
# update configs according to CLI args
cfg.gpus = args.gpus
cfg.data.videos_per_gpu = args.batchsize
if 'pretrained' in cfg['model']['backbone']:
cfg['model']['backbone']['pretrained'] = None
if args.work_dir is not None:
cfg.work_dir = args.work_dir
if args.data_dir is not None:
if 'test' in cfg.data:
cfg.data.test.root_dir = args.data_dir
if args.checkpoint is not None:
chkpt_list = [args.checkpoint]
else:
chkpt_list = [os.path.join(cfg.work_dir, fn)
for fn in os.listdir(cfg.work_dir) if fn.endswith('.pth')]
# init logger before other steps
logger = get_root_logger(log_level=cfg.log_level)
# build a dataloader
model = build_model(cfg.model, default_args=dict(train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg))
dataset = build_dataset(cfg.data.test)
results = test_recognizer(model,
dataset,
cfg,
chkpt_list,
logger=logger,
progress=args.progress)
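# Illustrative only (not part of the original script): a typical invocation of
# this evaluation entry point.  The config, work_dir and data_dir values are
# made-up placeholders; only the flags themselves come from the parser above.
#
#   python tools/test_recognizer.py \
#       --cfg configs/example_cfg.py \
#       --work_dir work_dirs/example \
#       --data_dir data/ \
#       --gpus 1 --batchsize 6 --progress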
|
[
"zyz0205@hotmail.com"
] |
zyz0205@hotmail.com
|
ce84046a999e5bdece12262d2999942bb929bf77
|
9f3eb4bda3264e89b85c09503d974e1896d919b1
|
/omnisound/src/modifier/meter.py
|
019fe67c7ae7336014366d187dfecb248463b6a2
|
[] |
no_license
|
marksweiss/omnisound
|
13a12c3e8dd5adc5dd47ca684f91bcb6fef2ecc5
|
f3cd6540780f204d2b2a0971cb5a83c6e28b723b
|
refs/heads/main
| 2022-07-04T05:30:17.910008
| 2022-07-01T03:35:56
| 2022-07-01T03:35:56
| 158,024,007
| 0
| 1
| null | 2020-12-24T23:17:20
| 2018-11-17T20:52:34
|
Python
|
UTF-8
|
Python
| false
| false
| 12,360
|
py
|
# Copyright 2018 Mark S. Weiss
from bisect import bisect_left
from enum import Enum
from typing import Union
import pytest
from omnisound.src.container.note_sequence import NoteSequence
from omnisound.src.utils.validation_utils import validate_optional_types, validate_type, validate_type_choice
class InvalidMeterStringException(Exception):
pass
class NoteDur(Enum):
_1_0 = 1.0
_0_5 = 0.5
_0_25 = 0.25
_0_125 = 0.125
_0_0625 = 0.0625
_0_03125 = 0.03125
_0_015625 = 0.015625
WHL = _1_0
WHOLE = _1_0
HLF = _0_5
HALF = _0_5
QRTR = _0_25
QUARTER = _0_25
EITH = _0_125
EIGHTH = _0_125
SXTNTH = _0_0625
SIXTEENTH = _0_0625
THRTYSCND = _0_03125
THIRTYSECOND = _0_03125
SXTYFRTH = _0_015625
SIXTYFOURTH = _0_015625
METER_BEATS_NOTE_DUR_MAP = {
1: NoteDur.WHOLE,
2: NoteDur.HALF,
4: NoteDur.QUARTER,
8: NoteDur.EIGHTH,
16: NoteDur.SIXTEENTH,
32: NoteDur.THIRTYSECOND,
64: NoteDur.SIXTYFOURTH
}
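# Illustrative sketch (not part of the original module): METER_BEATS_NOTE_DUR_MAP
# turns the denominator of a meter string into a NoteDur, and
# Meter.get_bpm_and_duration_from_meter_string (defined below) wraps that lookup:
#
#   beats, dur = Meter.get_bpm_and_duration_from_meter_string('6/8')
#   assert beats == 6 and dur == NoteDur.EIGHTH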
class InvalidQuantizationDurationException(Exception):
pass
# TODO THIS NEEDS A NOTION OF TEMPO TO MAKE beat_start_times and quantizing valid
class Meter:
"""Class to represent and manage Meter in a musical Measure/Bar. Offers facilities for representing and
calculating meter using traditional units or floating point values. Can also apply quantizing to a
NoteSequence to either fit the notes with the same ration of separation between them but scaled to the
duration of the Measure, or to fit notes to the closest beat in the Measure. str displays information about
the configuration of the object, but repr displays the meter in traditional notation.
`beat_duration` - the duration of one beat
    `beats_per_measure` - the number of beats per measure
For example: 4
4
- `beats_per_measure` is an integer, 4
- `beat_duration` argument is the duration of a beat in the measure, NoteDur.QUARTER
In 4/4, there are 4 beats per measure and each beat is a quarter note.
For example: 6
8
    - `beats_per_measure` is an integer, 6 - `beat_duration` argument is a NoteDur, e.g. NoteDur.EIGHTH
In 6/8, there are 6 beats per measure and each beat is an eighth note
"""
DEFAULT_QUARTER_NOTES_PER_MINUTE = 60
SECS_PER_MINUTE = 60
QUARTER_NOTE_DUR: float = NoteDur.QUARTER.value
def __init__(self, beats_per_measure: int = None, beat_note_dur: NoteDur = None,
tempo: int = None, quantizing: bool = True):
validate_optional_types(('beats_per_measure', beats_per_measure, int), ('beat_dur', beat_note_dur, NoteDur),
('tempo', tempo, int), ('quantizing', quantizing, bool))
self.quantizing = quantizing
# Meter notation
# Numerator of meter
self.beats_per_measure = beats_per_measure
# Inverse of denominator of meter, e.g. 4/4 is quarter note is 1 beat
self.beat_note_dur = beat_note_dur
# Meter in musical notation as a tuple, e.g. (4, 4)
# noinspection PyTypeChecker
self.meter_notation = (self.beats_per_measure, int(1 / self.beat_note_dur.value))
# Each note is some fraction of a quarter note. So for N / 4 meters, this ratio is 1.
        # For N / 8 meters, e.g. 6 / 8, this ratio is 0.5. This ratio multiplied by the actual time duration
# of a quarter note, derived from the tempo in qpm, is the duration of a note
# noinspection PyTypeChecker
        self.quarter_notes_per_beat_note = self.beat_note_dur.value / Meter.QUARTER_NOTE_DUR  # keep fractional; int() would truncate 0.5 to 0 for x/8 meters
# Actual note duration
# Map note durations from meter, which are unitless, to time, using tempo, which is a ratio of
# quarter-note beats to time. qpm == quarter notes per minute
self._set_tempo_attributes(tempo)
def _set_tempo_attributes(self, tempo: int):
self.tempo_qpm = tempo or Meter.DEFAULT_QUARTER_NOTES_PER_MINUTE
self.quarter_note_dur_secs = Meter.SECS_PER_MINUTE / self.tempo_qpm
self.beat_note_dur_secs = self.quarter_notes_per_beat_note * self.quarter_note_dur_secs
self.measure_dur_secs = self.beat_note_dur_secs * self.beats_per_measure
self.beat_start_times_secs = [self.beat_note_dur_secs * i for i in range(self.beats_per_measure)]
def _get_tempo(self):
return self.tempo_qpm
def _set_tempo(self, tempo: int):
self._set_tempo_attributes(tempo)
tempo = property(_get_tempo, _set_tempo)
def get_secs_for_note_time(self, note_time_val: Union[float, int, NoteDur]):
"""Helper to convert a note time in NoteDur or float that represents either a note start_time or
note duration within a measure in the measure's meter into an absolute floating point value in
seconds.
"""
validate_type_choice('note_time_val', note_time_val, (float, int, NoteDur))
dur = note_time_val
if not isinstance(note_time_val, float) \
and not isinstance(note_time_val, int) \
and note_time_val in NoteDur:
dur = note_time_val.value
# noinspection PyTypeChecker
return self.beat_note_dur_secs * dur
@staticmethod
def get_bpm_and_duration_from_meter_string(meter_string: str):
if '/' not in meter_string:
raise InvalidMeterStringException('Meter string must be in the form \\d+/\\d+')
try:
beats_per_measure, beat_note_duration = meter_string.split('/')
return int(beats_per_measure), METER_BEATS_NOTE_DUR_MAP[int(beat_note_duration)]
except Exception:
raise InvalidMeterStringException('Meter string must be in the form \\d+/\\d+')
def is_quantizing(self):
return self.quantizing
def quantizing_on(self):
self.quantizing = True
def quantizing_off(self):
self.quantizing = False
def quantize(self, note_sequence: NoteSequence):
"""
        The degree of quantization is determined as follows:
- `notes_duration` = `max(note.start + note.dur) for note in note_sequence`
-- if the notes_duration matches the measure duration, then no quantization needed, return
-- if notes run too long they must be shortened and have their start times made earlier
        -- if the notes don't run long enough, they must be lengthened and have their start times made later
- `total_adjustment` = `measure_duration - notes_duration`
-- negative adjustment if notes_duration too long, positive adjustment if notes_duration not long enough
-- total_adjustment must be < a whole note, i.e. < 1.0
- Duration adjustment
-- Each note.dur adjusted by `note.dur += (note.dur * total_adjustment)`
- Start adjustment
-- `start_adjustment = total_adjustment / len(note_sequence)`
-- `for i, note in enumerate(note_sequence):
note.start += start_adjustment * i`
Example: Notes run longer than the duration of the measure
measure ------------------------*
0 0.25 0.50 0.75 1.00 1.25
********* *********************
n0 n1
notes_duration = 1.25
total_adjustment = 1.0 - 1.25 = -0.25
start_adjustment = -0.25 / 2 = -0.125
n0.dur += (0.25 * -0.25) = 0.25 -= 0.0625 = 0.1875
n1.dur += (0.50 * -0.25) = 0.50 -= 0.125 = 0.375
n0 index = 0, n0.start += 0.0
n1 index = 1, n1.start += 1 * -0.125 = 0.75 -= 0.125 = 0.625
measure ------------------------*
0 0.25 0.50 0.75 1.00 1.25
**** **************
(0.0, 0.1875) (0.625, 0.375)
n0 n1
"""
validate_type('note_sequence', note_sequence, NoteSequence)
# noinspection SpellCheckingInspection
if self.quantizing:
notes_dur = max(note.start + note.duration for note in note_sequence)
if notes_dur == self.measure_dur_secs:
return
total_adjustment = self.measure_dur_secs - notes_dur
# if abs(total_adjustment) > 1.0:
# raise InvalidQuantizationDurationException((f'quantization adjustment value of {total_adjustment} '
# '> than maximum allowed adjustment of 1.0'))
for note in note_sequence:
dur_adjustment = note.duration * total_adjustment
# Normalize duration adjustment by duration of note, because whole note == 1 and that is the entire
# duration of a measure and the max adjustment, so every note adjusts as a ratio of its duration
# to the total adjustment needed
note.duration += dur_adjustment
# Each note that doesn't start at 0 exactly adjusts forward/back by the amount its duration adjusted
start_adjustment = total_adjustment - dur_adjustment
if round(note.start, 1) > 0.0:
note.start += start_adjustment
# Note can't adjust to < 0.0 or > 1.0
if round(note.start, 1) == pytest.approx(0.0):
note.start = 0.0
elif round(note.start, 1) == pytest.approx(1.0):
note.start = 1.0 - note.duration
def quantize_to_beat(self, note_sequence: NoteSequence):
# sourcery skip: assign-if-exp
"""Adjusts each note start_time to the closest beat time, so that each note will start on a beat.
"""
validate_type('note_sequence', note_sequence, NoteSequence)
if self.quantizing:
# First quantize() to make sure notes in NoteSequence are scaled to duration of Measure
self.quantize(note_sequence)
# Then adjust note start times to closest beat
# Algorithm:
# - self.beat_start_times is sorted, so use binary search strategy to find closest note in O(logN) time
# - call bisect to find insertion point for note in the sequence of start times, the cases are:
# - insertion point i == 0, we are done, the note quantizes to the first beat
            # - insertion point i > 0, then beat_start_times[i - 1] <= note.start < beat_start_times[i]
# - in this case test distance of each beat_start_time to note.start and pick the closest one
# Append measure end time to beat_start_times as a sentinel value for bisect()
beat_start_times = self.beat_start_times_secs + [self.measure_dur_secs]
for note in note_sequence:
i = bisect_left(beat_start_times, note.start)
# Note maps to 0th beat
if i == 0:
note.start = 0.0
continue
# Note starts after last beat, so maps to last beat
elif i == len(beat_start_times):
note.start = self.beat_start_times_secs[-1]
continue
# Else note.start is between two beats in the range 1..len(beat_start_times) - 1
# The note is either closest to beat_start_times[i - 1] or beat_start_times[i]
prev_start = beat_start_times[i - 1]
next_start = beat_start_times[i]
prev_gap = note.start - prev_start
next_gap = next_start - note.start
if prev_gap <= next_gap:
note.start = prev_start
else:
note.start = next_start
def __str__(self):
return (f'beats_per_measure: {self.beats_per_measure} beat_dur: {self.beat_note_dur} '
f'quantizing: {self.quantizing}')
# noinspection PyTypeChecker
def __repr__(self):
dur = self.beat_note_dur.value
return f'{self.beats_per_measure} / {int(1 / dur)}'
def __eq__(self, other: 'Meter') -> bool:
return self.beats_per_measure == other.beats_per_measure and \
self.beat_note_dur == other.beat_note_dur and \
self.quantizing == other.quantizing
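# --- Illustrative sketch (not part of the original module) ---
# Reproduces, with plain (start, dur) tuples, the arithmetic worked through in the
# quantize() docstring example: two notes overrunning a measure of length 1.0.
# It does not touch Meter, NoteSequence or NoteDur.
def _quantize_sketch(notes, measure_dur=1.0):
    notes_dur = max(start + dur for start, dur in notes)
    total_adjustment = measure_dur - notes_dur
    adjusted = []
    for start, dur in notes:
        dur_adjustment = dur * total_adjustment
        new_dur = dur + dur_adjustment
        new_start = start + (total_adjustment - dur_adjustment) if start > 0.0 else start
        adjusted.append((new_start, new_dur))
    return adjusted
# _quantize_sketch([(0.0, 0.25), (0.75, 0.5)]) -> [(0.0, 0.1875), (0.625, 0.375)]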
|
[
"marksimonweiss@gmail.com"
] |
marksimonweiss@gmail.com
|
e7ad8972acc472e17ae998243663c3795f7d553a
|
7cdbe088c4955839f9ab1a9b6d9cbc66df7ea7f9
|
/practice/threshold.py
|
e413a3e303178c1ef666da785775e119bbc5b2fe
|
[] |
no_license
|
RathanakSreang/OpenCVLearning
|
0c4b7bf42ea31e249ec6db157e175c5f0eb42386
|
ff450c8f704d0c6d90fe140040008a885c56db1d
|
refs/heads/master
| 2021-01-19T05:48:15.647187
| 2017-06-18T15:29:15
| 2017-06-18T15:29:15
| 87,451,804
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,063
|
py
|
import cv2
import argparse
import imutils
import numpy as np
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="The Image path")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
image = imutils.resize(image, height=400)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(image, (5,5), 0)
cv2.imshow("Image", image)
# cv2.threshold(src, thresh, maxval, type): with THRESH_BINARY, pixels above `thresh` are set to `maxval`, the rest to 0
(T, thresh) = cv2.threshold(blurred, 100, 255, cv2.THRESH_BINARY)
cv2.imshow("Threshold binary", thresh)
(T, threshInv) = cv2.threshold(blurred, 100, 255, cv2.THRESH_BINARY_INV)
cv2.imshow("Threshold binary inv", threshInv)
thresh = cv2.adaptiveThreshold(blurred, 255,
cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 21, 4)
cv2.imshow("Adptive thresh MEAN", thresh)
thresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 15,3)
cv2.imshow("Adptive thresh Gaussian", thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"sreang.rathanak@framgia.com"
] |
sreang.rathanak@framgia.com
|
416c1f6281e257452e1326147dd19dd16b3801b0
|
c548f3ff2e03d325c4381b0da103aae443cc323e
|
/installer/core/providers/aws/__init__.py
|
5d62551f669a8256bb7119d0a9d96e8e71f5c10d
|
[
"Apache-2.0"
] |
permissive
|
rluta/pacbot
|
ed88f9bfe7b65d903134a91beef4bd45a0cdbbec
|
ac5b15bba3b1d9fe88740661b12390d7eb18a4c3
|
refs/heads/master
| 2020-04-21T02:56:01.409744
| 2019-02-01T22:02:52
| 2019-02-01T22:02:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,573
|
py
|
from core.terraform.utils import get_terraform_provider_file
from core.mixins import MsgMixin
from core.terraform import PyTerraform
from core.terraform.resources import TerraformResource
from core import constants as K
from core.config import Settings
import inspect
import json
import os
class BaseAction(MsgMixin):
check_dependent_resources = True
def __init__(self, input=None):
self.input = input
self.tf_outputs = PyTerraform.load_terraform_output_from_json_file()
def _create_terraform_provider_file(self):
terraform_provider_file = get_terraform_provider_file()
provider_script = {
'provider': {
'aws': {
'access_key': self.input.aws_access_key,
'secret_key': self.input.aws_secret_key,
'region': self.input.aws_region
}
}
}
with open(terraform_provider_file, "w") as jsonfile:
json.dump(provider_script, jsonfile, indent=4)
def _delete_terraform_provider_file(self):
terraform_provider_file = get_terraform_provider_file()
if os.path.isfile(terraform_provider_file):
os.remove(terraform_provider_file)
def _delete_all_terraform_files(self):
for file in os.listdir(Settings.TERRAFORM_DIR):
if file.endswith(".tf"):
file_abs_path = os.path.join(Settings.TERRAFORM_DIR, file)
os.remove(file_abs_path)
def validate_resources(self, resources):
return self.validate_resource_existence(resources)
def validate_resource_existence(self, resources):
can_continue_installation = True
if not Settings.get('SKIP_RESOURCE_EXISTENCE_CHECK', False):
self.show_step_heading(K.RESOURCE_EXISTS_CHECK_STARTED)
for resource in resources:
resource_class = resource.__class__
if TerraformResource not in inspect.getmro(resource_class):
continue # This means resource is a Variable or Data and not TF Resource
self.show_progress_start_message("Checking resource existence for %s" % resource_class.__name__)
exists, checked_details = resource.check_exists_before(self.input, self.tf_outputs)
self.erase_printed_line()
if exists:
can_continue_installation = False
resource_name = resource.resource_instance_name.replace("_", " ").title()
message = "Resource: %s, %s: `%s`" % (resource_name, checked_details['attr'], checked_details['value'])
self.show_step_inner_messaage(message, K.EXISTS)
if can_continue_installation:
self.show_step_finish(K.RESOURCE_EXISTS_CHECK_COMPLETED, color=self.GREEN_ANSI)
else:
self.show_step_finish(K.RESOURCE_EXISTS_CHECK_FAILED, color=self.ERROR_ANSI)
self.stdout_flush()
return can_continue_installation
def validate_arguments(self, resources, terraform_with_targets):
key_msg = {}
if not terraform_with_targets:
resource_id_with_depends_on = {}
for resource in resources:
resource_id_with_depends_on[self._get_depends_key(resource)] = resource.DEPENDS_ON
success, msg_list = resource.validate_input_args()
if not success:
key_msg[resource.__class__.__name__] = msg_list
key_msg = self.validate_depends_on_resources(
resource_id_with_depends_on, key_msg)
return key_msg
def validate_depends_on_resources(self, resource_id_with_depends_on, key_msg):
if self.check_dependent_resources:
install_resource_keys = resource_id_with_depends_on.keys()
for key, resource_classes in resource_id_with_depends_on.items():
for resource_class in resource_classes:
if self._get_depends_key(resource_class) in install_resource_keys:
continue
if key in key_msg:
key_msg[key].append(
"Depends on resource is not found: %s" % resource_class.__name__)
else:
key_msg[key] = ["Depends on resource is not found: %s" %
resource_class.__name__]
return key_msg
def _get_depends_key(self, resource):
return str(resource.get_resource_id())
|
[
"sanjnur@gmail.com"
] |
sanjnur@gmail.com
|
9ff010773078c5ed523f2cfb8cdd669a00732bd2
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/leap/c1e83307ed6d44728dcd9c47381a6456.py
|
0464da11ccfbee758dc34237c7e4052fcd43f586
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
def is_leap_year(year):
return year % 400 == 0 \
or (year % 4 == 0 and year % 100 != 0)
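# --- Illustrative sketch (not part of the original solution) ---
# A few spot checks of the rule above.
if __name__ == "__main__":
    assert is_leap_year(2000)       # divisible by 400
    assert not is_leap_year(1900)   # divisible by 100 but not by 400
    assert is_leap_year(2016)       # divisible by 4 but not by 100
    assert not is_leap_year(2015)   # not divisible by 4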
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
0aec6636a6758d7a6a62ca2119618cd6b8e95290
|
51c7a0adbffb3628f89fc8a9e9374239c3d5878e
|
/3DHMCrunningcalc.py
|
74fb4e29db4c4ec19a314ca55f77fe3b6ef72735
|
[] |
no_license
|
aczejdo/bosons
|
733342d7344fb5fbbf9c7b80860feab0852aa1f8
|
32ae9e39627e9e5789c6cfcc718cfa69e3f06cc7
|
refs/heads/master
| 2020-03-21T09:57:23.834532
| 2019-07-05T09:57:09
| 2019-07-05T09:57:09
| 138,426,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,742
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 31 13:24:40 2018
@author: Aleks
for things that need updates or answers
ctrl-f [#]
"""
import numpy as np
import matplotlib.pyplot as plt
import os
def corr2point(phi):
#dependent on dimensions of phi
#phi comes in with size (N+2)^d (ghost cells for BC)
Pair=np.zeros((N,N,N))
Pair=phi[1,1,1]*phi[1:N+1,1:N+1,1:N+1] #trim BC cells
#create 2layer flat array (really 2 columns)
#N^2 = 2D
#Top (left) layer = distances
#bottom (right) layer = field amplitudes
arr1d = np.zeros((N**3,2))
arr1d[:,1] = np.reshape(Pair,(N**3))
distances=np.zeros((N,N,N))
#fill in distances
for i in range(0,N):
for j in range(0,N):
for k in range(0,N):
distances[i,j,k]=np.sqrt(i*i + j*j + k*k)
arr1d[:,0] = np.reshape(distances,(N**3)) #flatten distance array
global Sarr #Sorted array variable for averaging alg checknext
Sarr = np.zeros((N**3,2)) #init Sorting array
Sarr = arr1d[ np.argsort(arr1d[:,0])] #sorting arr1d based on dist, and sending to Sarr
#Sarr = np.abs(Sarr) # [#]
    uniquedist=np.max(np.shape(np.unique(arr1d[:,0]))) #count of unique distances - this is the length of the correlation vector to plot
dist = np.zeros((uniquedist)) #array for storing radial distances
    corr = np.zeros((uniquedist)) #array for storing phi_0*phi_r
#corr[0]=Sarr[0,1] #set 000
corr[-1]=Sarr[-1,1] #set n,n,n
i=1 #uniqdist index
sarri=0 #sorting array index
global di #index for translating between sarri and i
di=0
while(i<uniquedist):
sumcorr,null1=checknext(1,0,0,sarri)
corr[i-1]=(1/(uniquedist*kappa))*(sumcorr)/(di) #mean
sarri=sarri+di #next nonunique distance
dist[i]=Sarr[sarri,0] #save
#print(di)
di=0 #reset di
i=i+1 #move to next uniquedist
global AnalyticCorrNoninteracting #what it says
AnalyticCorrNoninteracting=np.zeros((uniquedist))
    eigfuncvec=np.zeros((uniquedist)) #separate var for organization
#From both ch24 of Boudreau/ my own calculation
#i will move this out of here and use returned vars soon
#ku=(np.pi/N)*np.arange(0,N)
for k in range(0,uniquedist):
A=np.cos(dist[k]*np.pi*2/dist[-1])
#A=1
eigfuncvec[k] = A/ ( ((2-4*g)/kappa -(2*d)) + 4*(np.sin(dist[k]*np.pi/dist[-1]))**2)
AnalyticCorrNoninteracting=eigfuncvec*(1/(uniquedist*kappa)) #this is finessed [#]
UNQD[0]=uniquedist+0. #global uniquedist var for plotting
return(corr,dist,uniquedist)
def checknext(level,bottom,runsum,ind):
#this function is independent of dimensions as Sarr is flattened
#recursively avgs over amplitudes with the same radial distances
if (Sarr[ind,0]==Sarr[ind+1,0]): #if next value is the same
runsum,bottom=checknext(level+1,bottom,runsum,ind+1) #recurse with levl and ind+1
runsum+=Sarr[ind,1]
else:
runsum+=Sarr[ind,1] #add to sum for previous layer
bottom=ind #bottom is number of layers
if level==1: #if we are at the top
global di #this is sloppy, but it measures the unique dist index vs the Sarr index
di=bottom-ind +1
return(runsum,bottom)
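# --- Illustrative sketch (not part of the original run) ---
# The per-distance averaging that corr2point performs via the recursive checknext
# can also be written with numpy.unique; shown here only to clarify the reduction.
# It assumes rows of (distance, amplitude) as built in corr2point; the
# 1/(uniquedist*kappa) normalisation applied there is omitted.
def average_by_distance(dist_amp_rows):
    dists, inverse = np.unique(dist_amp_rows[:, 0], return_inverse=True)
    sums = np.bincount(inverse, weights=dist_amp_rows[:, 1])
    counts = np.bincount(inverse)
    return dists, sums / counts  # mean amplitude per unique radial distance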
def hybrid_mc():
#this function is independent of dimensions
#Initialization
#===================================
pi_0 = heatbath_pi()
phi_0 = np.zeros(pi_0.shape)
#a_t = np.append(N_saves,np.array(phi_0.shape)) #creating array for N_saves field configs
#Saves=np.zeros(a_t)
dE=np.zeros(N_saves) #for tracking energy change
#===================================
H0=H_MD(phi_0,pi_0)
print(H0, 'H0')
rej=0
temprej=0
i=0
#note
#This section is for thermalizing code at the start of a run for a specific number of steps
#I included this for measuring ergodicity
#Normally users should set N_therm to 0
while (i<N_therm):
phi_new,pi_new = leapfrog(phi_0,pi_0,Tmax_MD)
H_new = H_MD(phi_0,pi_0)
deltaH = H_new - H0
P_acc = np.exp(-deltaH)
if (np.random.rand()<=P_acc):
#print(H_new,'H_new',P_acc,'exp dH','ACCEPTED SAVE %.3f'%(i/N_saves),iii)
H0 = H_new
phi_0 = phi_new
temprej=0
i+=1
else:
#print(H_new,'H_new',P_acc,'exp dH','REJECTED SAVE')
temprej+=1
if temprej>rejmax:
                raise SystemExit('too many consecutive rejections')  # os.exit() does not exist
pi_0 = heatbath_pi()
#----------------------------------------------
#print('saving',iii)
i=0
while (i<N_saves):
#Thermalizing
phi_0,pi_0,H_0=thermalize(phi_0,pi_0,H0,T_therm)
#---------------------------------------
#now saving
#---------------------------------------------
phi_new,pi_new = leapfrog(phi_0,pi_0,Tmax_MD)
H_new = H_MD(phi_0,pi_0)
deltaH = H_new - H0
P_acc = np.exp(-deltaH)
if (np.random.rand()<=P_acc):
if VERBOSE==1:
print(H_new,'H_new',P_acc,'exp dH','ACCEPTED SAVE %.3f'%(i/N_saves),iii)
H0 = H_new +0.
phi_0 = phi_new +0.
#Saves[i]=phi_new +0.
dE[i] = P_acc + 0.
Analysis(phi_new,i)
temprej=0
i+=1
else:
if VERBOSE==1:
print(H_new,'H_new',P_acc,'exp dH','REJECTED SAVE')
temprej+=1
rej +=1
if temprej>rejmax:
                raise SystemExit('too many consecutive rejections')  # os.exit() does not exist
pi_0 = heatbath_pi()
#----------------------------------------------
rate = (N_saves/(rej+N_saves))
return(rate,dE)
def thermalize(phi,pi,H0,T_max):
#this function is independent of dimensions
#--------------------------------------
phi_new,pi_new = leapfrog(phi,pi,T_max)
H_new = H_MD(phi,pi)
deltaH = H_new - H0
P_acc = np.exp(-deltaH)
if (np.random.rand()<=P_acc):
H_0 = H_new
if VERBOSE==1:
print(H_new,'H_new',P_acc,'exp dH','ACCEPTED THERM')
phi_0 = phi_new
pi_0=pi_new
else:
if VERBOSE==1:
print(H_new,'H_new',P_acc,'exp dH','REJECTED THERM')
pi_new = heatbath_pi()
phi_0,pi_0,H_0=thermalize(phi_new,pi_new,H0,T_max)
return(phi_0,pi_0,H_0)
def leapfrog(phi,pi,T_max):
#this function is independent of dimensions
#leapfrog integrator
phi=phi+0.0
pi=pi+0.0
#initial timestep gives phi[dT],pi[dT/2]
pi_ev = pi - d_action(phi)* dT_MD*0.5
phi_ev = phi
t=0
while t<T_max:
phi_ev = phi_ev + pi_ev*dT_MD
dS=d_action(phi_ev)
#print(np.sum(dS),dS.shape,'sum,shape ds')
pi_ev = pi_ev - dS*dT_MD
t += dT_MD
t=np.round(t,6)
#final step brings pi to [T0]
pi_ev=pi_ev-(d_action(phi_ev))*dT_MD*0.5
return(phi_ev,pi_ev)
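# --- Illustrative sketch (not part of the original simulation) ---
# Minimal, self-contained leapfrog demo on a 1D harmonic oscillator
# (H = 0.5*p**2 + 0.5*x**2); the step size and trajectory length are
# arbitrary illustration values, unrelated to dT_MD/Tmax_MD above.
def leapfrog_demo(x=1.0, p=0.0, dt=0.05, n_steps=20):
    p -= 0.5 * dt * x            # opening half-step kick (dH/dx = x here)
    for _ in range(n_steps - 1):
        x += dt * p              # drift
        p -= dt * x              # full-step kick
    x += dt * p                  # final drift
    p -= 0.5 * dt * x            # closing half-step kick
    return x, p, 0.5 * p ** 2 + 0.5 * x ** 2   # energy stays close to 0.5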
def H_MD(phi,pi):
pi=fBNC(pi)
p_term = 0.5*np.sum( pi[xi,yi,zi]**2)
s_term = Hamiltonian(phi)
H = p_term + s_term
return(H/(N**3))
def d_action(phi):
phi=fBNC(phi)
force=np.zeros((np.array(phi.shape)))
#from ch24
Jx = phi[xi+1,yi,zi] + phi[xi,yi+1,zi] + phi[xi,yi,zi+1] +\
phi[xi-1,yi,zi] + phi[xi,yi-1,zi] + phi[xi,yi,zi-1]
#hmctut
"""
force[1:N+1,1:N+1,1:N+1]= -2*kappa * Jx +\
2*phi[xi,yi,zi] + \
4*g*phi[xi,yi,zi]*(phi[xi,yi,zi]**2 - 1)
#ch24
"""
force[1:N+1,1:N+1,1:N+1]= a*(-kappa * Jx + \
2 * (1 - 2*g) * phi[xi,yi,zi] + \
4 * g * phi[xi,yi,zi]**3 ) #+\
#(1 - 2*g + 6*g*phi[xi,yi,zi]**2 )*a +\ small higher order terms
#4*g*phi[xi,yi,zi] *a**2 +\
#g*a**3)
return(force)
def Hamiltonian(phi):
Js = (phi[xi+1,yi,zi] + phi[xi,yi+1,zi] + phi[xi,yi,zi+1]) *phi[xi,yi,zi]
#print(np.sum(Js),'js')
#hmctut
#S= -2*kappa*Js + phi[xi,yi,zi]**2 + g * (phi[xi,yi,zi]**2 - 1)**2
#ch24
S = -kappa*Js + (1-2*g)*phi[xi,yi,zi]**2 + g * phi[xi,yi,zi]**4
H=np.mean(S[1:N+1,1:N+1,1:N+1])
return(H)
def plotter(name):
fig = plt.figure()
plt.plot(np.arange(0,name.shape[0]),name)
plt.show()
return(np.mean(name), '+-' , np.std(name))
def heatbath_pi():
pi_rand = np.random.normal(0,size=(N,N,N))
pi = np.zeros((N+2,N+2,N+2))
pi[1:N+1,1:N+1,1:N+1] = pi_rand
pi = fBNC(pi)
return(pi)
def bnc_periodic(A):
#N+2 by N+2 in
J=A.shape[0]-1
#K=A.shape[1]-1
A[0,:,:] = A[J-1,:,:]
A[J,:,:] = A[1,:,:]
A[:,0,:] = A[:,J-1,:]
A[:,J,:] = A[:,1,:]
A[:,:,0] =A[:,:,J-1]
A[:,:,J] =A[:,:,1]
return(A)
def main():
rate,dE=hybrid_mc()
print(rate*100,'% acceptance')
if (PLOTS==1):
plotter(dE)
print('dE per step')
plotter(mag)
print('magnetism')
plotter(magsqr)
print('magnetism2')
plotter(Bcmt)
print('Bindercmt')
if (CORR==1):
corravgd=np.sum(Corr2pt,axis=0)/N_saves
fig = plt.figure()
plt.plot(distances,corravgd,distances,AnalyticCorrNoninteracting)
plt.show()
print('Phiphi corr')
return(np.sum(magsqr)/N_saves,np.std(magsqr))
def Analysis(phiconfig,ind):
#done at each saved configuration
magsqr[ind] = np.mean(np.dot(phiconfig,phiconfig))
mag[ind] = np.mean(phiconfig)
Bcmt[ind] = np.mean(phiconfig**4)/((np.mean(phiconfig**2))**2)
Energy[ind] = Hamiltonian(phiconfig)
if (CORR==1):
if (ind<N_saves-1):
Corr2pt[ind],null,null=corr2point(phiconfig)
else:
global distances
Corr2pt[ind],distances,null=corr2point(phiconfig)
#print(distances.shape,'distshape328')
return()
#globals
#==============================================================================
#========
#Lattice
#========
N=8
a = 1 #lattice spacing
d=3
xm=np.arange(0,N,dtype=int)+1
xi,yi,zi=np.meshgrid(xm,xm,xm,indexing='ij')
#======
#MC
#======
Tmax_MD = 1.0 #MD time
dT_MD = 0.05 #timestep in MD time
N_saves = 5000 #number of saved field configs
N_therm = 0 #initial thermalization see note in hybrid_mc
T_therm = 5 #therm between measurements
rejmax=20 #max consecutive rejections
#==========
#Physics
#=========
fBNC = bnc_periodic #the most important, shoutout to Joaquin
g=0 #coupling
kappa=.3 #hopping parameter
#observables
Energy = np.zeros((N_saves))
mag = np.zeros((N_saves))
magsqr = np.zeros((N_saves))
Bcmt = np.zeros((N_saves))
Cp = np.zeros((N_saves))
Corr2pt = np.zeros((N_saves),dtype='object')
global UNQD
UNQD=np.zeros((1))
#============
#organization
#=============
iii=0 #current 1d run for many main runs
jjj=0 #current 2d run for many main runs
RUNTYPE=0 #see below
VERBOSE=1 #whether or not to print individual Met-Hast steps 1 is on
PLOTS=1 #plot observables like magnetism or correlations
CORR=1
#==============================================================================
if RUNTYPE==0:
#single run
main()
if RUNTYPE==1:
nplot=8
plotvec=np.zeros((nplot,2))
varvec=np.linspace(0.15,.24,nplot)
#phase transition in coupling
for iii in range(0,nplot):
print(iii,varvec[iii])
kappa=varvec[iii]
plotvec[iii]=main()
fig=plt.figure()
#plt.plot(varvec,plotvec[0],'ob')
err=plotvec[:,1]
plt.errorbar(varvec[:],plotvec[:,0],yerr=err,fmt='o')
plt.title('Mean Magnitude N=12')
plt.show()
if RUNTYPE==2:
nplot=15
plotvec=np.zeros((nplot,2))
varvec=np.linspace(0.15,.24,nplot)
#phase transition in coupling
for iii in range(0,nplot):
print(iii,varvec[iii])
kappa=varvec[iii]
plotvec[iii]=main()
fig=plt.figure()
#plt.plot(varvec,plotvec[0],'ob')
err=plotvec[:,1]
plt.errorbar(varvec[:],plotvec[:,0],yerr=err,fmt='o')
plt.title('Mean Magnitude')
    plt.xlabel('kappa')
plt.show()
if RUNTYPE==3:
#phase transition in two variables
nlam=5
nmam=5
lamvec=np.linspace(0.01,0.1,nlam)
mamvec=np.linspace(0.01,0.1,nmam)
lv,mv=np.meshgrid(lamvec,mamvec,indexing='ij')
plotmat=np.zeros((nlam,nmam))
for iii in range(0,nlam):
g=lamvec[iii]
for jjj in range(0,nmam):
k=mamvec[jjj]
plotmat[iii,jjj]=main()
fig=plt.figure()
ax=fig.gca(projection='3d')
ax.plot_surface(lv,mv,plotmat, cmap='bone')
ax.set_ylabel('lam1')
ax.set_xlabel('lam2')
ax.set_zlabel('amp')
plt.title('phi')
plt.show()
"""if RUNTYPE==3:
#parallel!
if __name__ == '__main__':
nplot=16
varvec=np.linspace(4,10,nplot)
plotvec=np.zeros((nplot))
agents = 4
chunksize = 4
with Pool(processes=agents) as pool:
plotvec = pool.map(main, m=varvec, chunksize)
plt.plot(varvec,plotvec,'ob')
plt.show()
"""
|
[
"noreply@github.com"
] |
aczejdo.noreply@github.com
|
3c1c6afaf6887cd65e37ce4c202b59569e42751e
|
056ef2e008849515aa81487614259280f7f179c1
|
/vrt/vfin/dain/my_package/FlowProjection/__init__.py
|
0c0fbad8ee44eb0e86a3d5e6792a7af425a059fe
|
[
"MIT"
] |
permissive
|
darktohka/OpenVideoEnhance
|
5361c2af9b091ba3a80e4fc3e3225acbf551e180
|
f31746b6b2ad510e18d6343304646bd8fb50a390
|
refs/heads/develop
| 2023-03-11T20:31:23.258900
| 2021-02-27T09:23:03
| 2021-02-27T09:23:03
| 344,285,821
| 1
| 0
|
MIT
| 2021-03-03T22:53:05
| 2021-03-03T22:53:04
| null |
UTF-8
|
Python
| false
| false
| 36
|
py
|
from .FlowProjectionModule import *
|
[
"iBobbyTS@gmail.com"
] |
iBobbyTS@gmail.com
|
506089e8625d3b65a3e62516f0a74ab2b42214b3
|
f8b252ca4c381db6457cf24f88acd52fdcda2f48
|
/sparrow/spitfire/compiler/ast.py
|
3c3f46910c89130ba16355699fb4bead9b7004c7
|
[
"BSD-3-Clause"
] |
permissive
|
msolo/msolo
|
694a294e87f9c992300256db693b856a52a09817
|
8e73575d332614fdb52525b318aba8bb03bf1be3
|
refs/heads/master
| 2022-12-15T15:00:42.447984
| 2020-09-12T22:52:57
| 2020-09-12T22:52:57
| 58,083,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,621
|
py
|
import copy
# this is a horrible hack to let the tree modify itself during conversion
class EatPrevious(object):
pass
class ASTNode(object):
def __init__(self, name=''):
self.name = name
self.value = None
self.parent = None
self.child_nodes = NodeList()
# optimization annotations
self.hint_map = {}
def __str__(self):
if self.value:
return '%s %s %r' % (self.__class__.__name__, self.name, self.value)
return '%s %s' % (self.__class__.__name__, self.name)
def __repr__(self):
return self.__str__()
def __eq__(self, node):
return bool(type(self) == type(node) and
self.name == node.name and
self.value == node.value and
self.child_nodes == node.child_nodes)
def __hash__(self):
return hash('%s%s%s%s' %
(type(self), self.name, self.value, hash(tuple(self.child_nodes))))
def getChildNodes(self):
return [n for n in self.child_nodes if isinstance(n, ASTNode)]
def append(self, node):
if isinstance(node, list):
self.extend(node)
else:
if type(node) is EatPrevious:
del self.child_nodes[-1]
else:
self.child_nodes.append(node)
def prepend(self, node):
if isinstance(node, list):
for n in reversed(node):
self.child_nodes.insert(0, n)
else:
self.child_nodes.insert(0, node)
# some classes override append() so just call down to that for now
def extend(self, node_list):
for n in node_list:
self.append(n)
def insert_before(self, marker_node, insert_node):
idx = self.child_nodes.index(marker_node)
self.child_nodes.insert(idx, insert_node)
def replace(self, marker_node, insert_node_list):
print "replace", type(self)
idx = self.child_nodes.index(marker_node)
try:
for n in reversed(insert_node_list):
self.child_nodes.insert(idx, n)
except TypeError:
self.child_nodes.insert(idx, insert_node_list)
self.child_nodes.remove(marker_node)
def copy(self, copy_children=True):
node = copy.deepcopy(self)
if not copy_children:
node.child_nodes = NodeList()
return node
class NodeList(list):
def append(self, node):
if isinstance(node, list):
self.extend(node)
else:
list.append(self, node)
class _ListNode(ASTNode):
def __init__(self, parg_list=None, karg_list=None):
ASTNode.__init__(self)
if parg_list:
self.extend(parg_list)
if karg_list:
self.extend(karg_list)
def __iter__(self):
return iter(self.child_nodes)
def __str__(self):
return '%s %s' % (ASTNode.__str__(self),
', '.join(str(n) for n in self.child_nodes))
class ArgListNode(_ListNode):
pass
class BinOpNode(ASTNode):
def __init__(self, operator, left, right):
ASTNode.__init__(self)
self.operator = operator
self.left = left
self.right = right
def replace(self, node, replacement_node):
if self.left is node:
self.left = replacement_node
elif self.right is node:
self.right = replacement_node
else:
raise Exception("neither left nor right expression matches target")
def __str__(self):
return '%s (%s %s %s)' % (
self.__class__.__name__, self.left, self.operator, self.right)
class BinOpExpressionNode(BinOpNode):
pass
class AssignNode(BinOpNode):
def __init__(self, left, right):
BinOpNode.__init__(self, '=', left, right)
class BreakNode(ASTNode):
pass
class CallFunctionNode(ASTNode):
def __init__(self, expression=None, arg_list=None):
ASTNode.__init__(self)
self.expression = expression
if arg_list:
self.arg_list = arg_list
else:
self.arg_list = ArgListNode()
def replace(self, node, replacement_node):
if self.expression is node:
self.expression = replacement_node
else:
raise Exception("expression doesn't mactch replacement")
def __str__(self):
return '%s expr:%s arg_list:%s' % (
self.__class__.__name__, self.expression, self.arg_list)
class CommentNode(ASTNode):
pass
class ContinueNode(ASTNode):
pass
class DefNode(ASTNode):
def __init__(self, *pargs, **kargs):
ASTNode.__init__(self, *pargs, **kargs)
self.parameter_list = ParameterListNode()
def __str__(self):
return '%s name:%s parameter_list:%s' % (
self.__class__.__name__, self.name, self.parameter_list)
class BlockNode(DefNode):
pass
class ExpressionListNode(_ListNode):
pass
class ForNode(ASTNode):
def __init__(self, target_list=None, expression_list=None):
ASTNode.__init__(self)
if target_list:
self.target_list = target_list
else:
self.target_list = TargetListNode()
if expression_list:
self.expression_list = expression_list
else:
self.expression_list = ExpressionListNode()
def __str__(self):
return ('%s target_list:%s expr_list:%s' %
(self.__class__.__name__, self.target_list, self.expression_list))
# fixme: why is this necessary?
class FunctionInitNode(ASTNode):
pass
class FunctionNode(ASTNode):
def __init__(self, *pargs, **kargs):
ASTNode.__init__(self, *pargs, **kargs)
new_buffer = CallFunctionNode(
GetAttrNode(IdentifierNode('self'), 'new_buffer'))
self.child_nodes = [
AssignNode(
AssignIdentifierNode('buffer'),
new_buffer),
ReturnNode(
CallFunctionNode(GetAttrNode(IdentifierNode('buffer'), 'getvalue'))),
]
self.parameter_list = ParameterListNode()
def append(self, node):
self.child_nodes.insert(-1, node)
def __str__(self):
return '%s parameter_list:%r' % (
self.__class__.__name__, self.parameter_list)
class GetAttrNode(ASTNode):
def __init__(self, expression, name):
ASTNode.__init__(self)
self.expression = expression
self.name = name
def __eq__(self, node):
return bool(type(self) == type(node) and
self.name == node.name and
self.expression == node.expression)
def __hash__(self):
return hash('%s%s%s' %
(type(self), self.name, self.expression))
def __str__(self):
return '%s expr:%s . name:%s' % (
self.__class__.__name__, self.expression, self.name)
def getChildNodes(self):
child_nodes = self.expression.getChildNodes()
child_nodes.append(self.expression)
if isinstance(self.name, ASTNode):
child_nodes.append(self.name)
return child_nodes
def replace(self, node, replacement_node):
if self.expression is node:
self.expression = replacement_node
else:
raise Exception("expression doesn't mactch replacement")
class GetUDNNode(GetAttrNode):
pass
class IdentifierNode(ASTNode):
# all subclasses of IdentifierNode should be treated as equivalent
def __eq__(self, node):
return bool(isinstance(node, IdentifierNode) and
self.name == node.name)
def __hash__(self):
return hash(self.name)
class AssignIdentifierNode(IdentifierNode):
pass
class IfNode(ASTNode):
def __init__(self, test_expression=None):
ASTNode.__init__(self)
self.test_expression = test_expression
self.else_ = NodeList()
def replace(self, node, replacement_node):
if self.test_expression is node:
self.test_expression = replacement_node
else:
ASTNode.replace(self, node, replacement_node)
def __str__(self):
return '%s test_expr:%s\nelse:\n %s' % (
self.__class__.__name__, self.test_expression, self.else_)
class ImplementsNode(ASTNode):
pass
class ImportNode(ASTNode):
def __init__(self, module_name_list):
ASTNode.__init__(self)
self.module_name_list = module_name_list
def __str__(self):
return ('%s module_name_list:%r' %
(self.__class__.__name__, self.module_name_list))
# alpha break
class ExtendsNode(ImportNode):
pass
class FromNode(ImportNode):
def __init__(self, module_name_list, identifier):
ImportNode.__init__(self, module_name_list)
self.identifier = identifier
def __str__(self):
return ('%s module_name_list:%r identifier:%s' %
(self.__class__.__name__, self.module_name_list,
self.identifier))
class ListLiteralNode(ASTNode):
def __str__(self):
return '%s nodes:%r' % (self.__class__.__name__, self.child_nodes)
class LiteralNode(ASTNode):
def __init__(self, value):
ASTNode.__init__(self)
self.value = value
def __str__(self):
return '%s value:%r' % (self.__class__.__name__, self.value)
class ParameterNode(ASTNode):
def __init__(self, name, default=None):
ASTNode.__init__(self, name)
self.default = default
def __str__(self):
return '%s %s' % (ASTNode.__str__(self), self.default)
class AttributeNode(ParameterNode):
pass
class ParameterListNode(_ListNode):
pass
class PlaceholderNode(ASTNode):
pass
class PlaceholderSubstitutionNode(ASTNode):
def __init__(self, expression):
ASTNode.__init__(self)
self.expression = expression
def __str__(self):
return '%s expr:%r' % (self.__class__.__name__, self.expression)
class ReturnNode(ASTNode):
def __init__(self, expression):
ASTNode.__init__(self)
self.expression = expression
def __str__(self):
return '%s expr:%r' % (self.__class__.__name__, self.expression)
class SliceNode(ASTNode):
def __init__(self, expression, slice_expression):
ASTNode.__init__(self)
self.expression = expression
self.slice_expression = slice_expression
def __str__(self):
return ('%s expr:%s [ %s ]' %
(self.__class__.__name__, self.expression, self.slice_expression))
class TargetNode(IdentifierNode):
pass
class TargetListNode(_ListNode):
pass
class TextNode(ASTNode):
def __init__(self, value):
ASTNode.__init__(self)
self.value = value
def append_text_node(self, node):
if not isinstance(node, TextNode):
raise Exception('node type mismatch')
self.value += node.value
class NewlineNode(TextNode):
pass
class WhitespaceNode(TextNode):
def make_optional(self):
return OptionalWhitespaceNode(self.value)
class OptionalWhitespaceNode(TextNode):
pass
class TemplateNode(ASTNode):
library = False
def __init__(self, classname=None, **kargs):
ASTNode.__init__(self, **kargs)
# fixme: need to get the classname from somewhere else
self.classname = classname
self.main_function = FunctionNode(name='main')
self.main_function.parameter_list = ParameterListNode()
self.main_function.parameter_list.append(ParameterNode(name='self'))
self.encoding = 'utf-8'
self.extends_nodes = NodeList()
self.import_nodes = NodeList()
self.from_nodes = NodeList()
self.attr_nodes = NodeList()
def __str__(self):
return '%s\nimport:%s\nfrom:%s\nextends:%s\nmain:%s' % (
self.__class__.__name__,
self.import_nodes,
self.from_nodes,
self.extends_nodes,
self.main_function)
class TupleLiteralNode(ASTNode):
pass
class UnaryOpNode(ASTNode):
def __init__(self, operator, expression):
ASTNode.__init__(self)
self.operator = operator
self.expression = expression
# this is sort of a hack to support optional white space nodes inside the
# parse tree. the reality is that this probably requires a more complex
# parser, but we can get away with examining the node stack to fake it for now.
def make_optional(node_list):
try:
if type(node_list[-1]) == WhitespaceNode:
if len(node_list) == 1 or type(node_list[-2]) == NewlineNode:
node_list[-1] = OptionalWhitespaceNode(node_list[-1].value)
except IndexError:
pass
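# --- Illustrative sketch (not part of the original module) ---
# Shows what make_optional does with the node classes defined above: a trailing
# WhitespaceNode that follows a NewlineNode is swapped for an OptionalWhitespaceNode.
if __name__ == '__main__':
  nodes = [NewlineNode('\n'), WhitespaceNode('    ')]
  make_optional(nodes)
  print type(nodes[-1]).__name__  # OptionalWhitespaceNode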
|
[
"msolo@bf66012e-f438-0410-a2e0-91e2d5152d13"
] |
msolo@bf66012e-f438-0410-a2e0-91e2d5152d13
|
a3650f851e9c6b8820406b1d16728a7e21173a82
|
1b2d4375184338d1d6ff9b485ed1467a87c99631
|
/pic_matching/utils.py
|
48c4959f8d3ed2b9068acdcd366df188f43c4cb0
|
[] |
no_license
|
yimingliu123/siftF
|
db7efab36658865711b7c7373eb4348eec5b665a
|
5d230c3086b59708e65edcac1adc58bd9f7791f1
|
refs/heads/master
| 2021-09-24T00:15:00.964565
| 2018-09-30T03:08:46
| 2018-09-30T03:08:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
# -*- coding: gb18030 -*-
import pickle as pkl
import cv2
#import sift_controller
def parse_pkl(dic):
return dic["id"], dic["des"]
def pickleloader(pklfile):
try:
while True:
yield pkl.load(pklfile)
except EOFError:
pass
def parse_glob(path):
return path.split("/")[2]
def get_top_k_result(match_list=None, k=10):
result = (sorted(match_list, key=lambda l: l[1], reverse=True))
return result[:k]
'''
def prefetching(query_path):
pkl_file = open("siftdump.pkl", "rb")
sift = sift_controller.SIFT()
query_img = cv2.imread(query_path, 0)
query_des = sift.extract(query_img)
input_list = []
for idx, contents in enumerate(pickleloader(pkl_file)):
id, indexed_des = parse_pkl(contents)
if (indexed_des.all()) == None:
continue
input_list.append([query_des, id, indexed_des])
del sift
pkl_file.close()
return input_list
'''
|
[
"noreply@github.com"
] |
yimingliu123.noreply@github.com
|
9e3f029b9795f6e18cd25c1004ef8c9cee136368
|
4e4a99d420b3ae13dd35bada87a8e851c903a737
|
/web/resume/urls.py
|
c00f290c87a9c8de270a0168b3e14ec24199ee62
|
[] |
no_license
|
wellstseng/XStocker
|
5870c2423f82814f14990103a347e1850299ab13
|
e29e37709ae30229b3ffdd39a934e58311d81198
|
refs/heads/master
| 2020-03-26T09:02:30.606669
| 2019-02-19T15:06:23
| 2019-02-19T15:06:23
| 144,732,658
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
from django.urls import path
from . import views
from django.conf.urls import url
from django.views.generic import TemplateView
app_name = "resume"
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='resume/index.html')),
]
|
[
"wells.tseng@gmail.com"
] |
wells.tseng@gmail.com
|
835cbfc16a8832c429cb99044691572b2bf9f192
|
10e278e496e2605de987e3c30dfd46eda231bcdd
|
/tests/models.py
|
d96084e79becdbe37567393834ebce9a4ab3bf4f
|
[
"MIT"
] |
permissive
|
tuffnatty/drf-proxy-pagination
|
211d82a17dfcea816626596c7f3d9f3e423ca6af
|
564bc9cf961cf6888570d222bcaa11ce00b963ac
|
refs/heads/master
| 2023-03-19T02:15:08.543795
| 2022-12-26T21:00:51
| 2022-12-26T21:00:51
| 71,636,006
| 11
| 4
|
MIT
| 2023-07-22T04:30:16
| 2016-10-22T11:53:54
|
Python
|
UTF-8
|
Python
| false
| false
| 176
|
py
|
from django.db import models
class TestModel(models.Model):
n = models.IntegerField("An integer")
created = models.DateTimeField("ordering field", auto_now_add=True)
|
[
"phil.krylov@gmail.com"
] |
phil.krylov@gmail.com
|
f745cd32977f7f828231017c6ec843de458466f5
|
3dd4de13396eb1f13cb94765cad076db8ece8a87
|
/api/apps/expenses/serializers/installment.py
|
af15ed245f1b199e3ea4c2989b883f98ae6616ee
|
[] |
no_license
|
allenjoseph/PettyCash
|
cdfc9592f09ccdeeca2803def8fab783d37aa772
|
a8dbc772d0b9d90e7eb164f084ecc53a833e91cd
|
refs/heads/master
| 2021-01-10T14:30:29.591242
| 2016-09-17T22:16:56
| 2016-09-17T22:16:56
| 45,518,385
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
from rest_framework import serializers
from ..models.installment import Installment
class InstallmentSerializer(serializers.ModelSerializer):
class Meta:
model = Installment
fields = (
'id',
'expense',
'month',
'amount',
'rate',
)
|
[
"allen.joseph.v.a@gmail.com"
] |
allen.joseph.v.a@gmail.com
|
1b9b454c0c8fe6a60c79d537dc40b7ce8ccbb791
|
8c51aff248eb6f463d62e934213660437c3a107b
|
/Lecture_note_ML/1_basic/numpy/numpy_03.py
|
091cb725597ad84c341c412482914cd55667fa94
|
[] |
no_license
|
wonjun0901/WJ_Develop_Individually
|
5f839932c189adf2b2b34f7dadbdeaa8744f8d0e
|
e0402f5dbdda8ae8292cace124d381e29f707183
|
refs/heads/master
| 2021-01-02T00:13:38.851832
| 2020-02-18T01:10:15
| 2020-02-18T01:10:15
| 239,406,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,922
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
# Create a Python list
# - can be built conveniently with the range function
# - create a list holding the integers 1 through 10
python_list = list(range(1, 11))
# Create a numpy array
numpy_array_1 = np.array(python_list)
print(f'numpy_array_1.shape : {numpy_array_1.shape}')
print(f'numpy_array_1 : \n{numpy_array_1}')
# The reshape() method changes the shape of an array
# usage: numpy_array.reshape(target shape)
numpy_array_2 = numpy_array_1.reshape(2, 5)
print(f'numpy_array_2.shape : {numpy_array_2.shape}')
print(f'numpy_array_2 : \n{numpy_array_2}')
# Caution when using reshape():
# - the number of elements in the original array must match
#   the number of elements implied by the target shape.
# Reshaping 10 elements to (3, 5) violates this and raises ValueError,
# so the call is wrapped here to let the rest of the examples run.
try:
    numpy_array_3 = numpy_array_1.reshape(3, 5)
    print(f'numpy_array_3.shape : {numpy_array_3.shape}')
    print(f'numpy_array_3 : \n{numpy_array_3}')
except ValueError as error:
    print(f'reshape(3, 5) failed : {error}')
# When -1 is used for a dimension, its size is computed automatically
# from the remaining dimension(s).
# (In the example below the number of rows is fixed at 2,
#  so the number of columns works out to 5.)
numpy_array_4 = numpy_array_1.reshape(2, -1)
print(f'numpy_array_4.shape : {numpy_array_4.shape}')
print(f'numpy_array_4 : \n{numpy_array_4}')
# (In the example below the number of columns is fixed at 2,
#  so the number of rows works out to 5.)
numpy_array_5 = numpy_array_1.reshape(-1, 2)
print(f'numpy_array_5.shape : {numpy_array_5.shape}')
print(f'numpy_array_5 : \n{numpy_array_5}')
# Reshape a multi-dimensional array into a 1-D array
numpy_array_6 = numpy_array_5.reshape(10)
print(f'numpy_array_6.shape : {numpy_array_6.shape}')
print(f'numpy_array_6 : \n{numpy_array_6}')
numpy_array_7 = numpy_array_5.reshape(-1)
print(f'numpy_array_7.shape : {numpy_array_7.shape}')
print(f'numpy_array_7 : \n{numpy_array_7}')
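# --- Illustrative sketch (not part of the original lecture) ---
# Only one dimension may be given as -1; asking numpy to infer two raises ValueError.
try:
    numpy_array_1.reshape(-1, -1)
except ValueError as error:
    print(f'reshape(-1, -1) failed : {error}')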
|
[
"wonjun0901@gmail.com"
] |
wonjun0901@gmail.com
|
f73b24eaa3d1041274abb4dc7cf9292cea797e9d
|
e78c11d91d9d9308057b717055b9a31e2992d00c
|
/users/migrations/0001_initial.py
|
ee0cb49f52729d5674576ac7ce514b08eb25f2b3
|
[] |
no_license
|
jsobb/AJAX
|
b5658d0bd1a8e2ddb685a71301137c3446ab8e75
|
502cc298253de2ff7e568fd85186ab5f741d1a2a
|
refs/heads/main
| 2023-07-16T20:51:51.471212
| 2021-08-17T13:34:10
| 2021-08-17T13:34:10
| 397,269,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
# Generated by Django 3.2.3 on 2021-06-02 19:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"dlaehdus99@naver.com"
] |
dlaehdus99@naver.com
|
c95f81ce161d4a64c796c90eb6a808748f4bdcda
|
88f7951c82ca2b49b9edab26ee50be3eb3fca5b5
|
/handlers/handlers/pattern.py
|
c18d448498ffb930a204f549cef680923ded1b98
|
[
"BSD-3-Clause"
] |
permissive
|
rapidsms/rapidsms-contrib-apps-dev
|
64ad9758edabf1956dbb870f760c4752c2076c14
|
9f3f2bf1e01d76abc808d79e963fe0cdf7d2d898
|
refs/heads/master
| 2021-06-01T16:00:20.200879
| 2010-08-27T19:41:01
| 2010-08-27T19:41:01
| 547,248
| 0
| 2
|
NOASSERTION
| 2020-09-30T20:28:27
| 2010-03-04T19:32:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,957
|
py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import re
from .base import BaseHandler
class PatternHandler(BaseHandler):
"""
This handler type can be subclassed to create simple pattern-based
handlers. This isn't usually a good idea -- it's cumbersome to write
patterns with enough flexibility to be used in the real world -- but
it's very handy for prototyping, and can easily be upgraded later.
When a message is received, it is matched against the mandatory
``pattern`` attribute (a regular expression). If the pattern is
matched, the ``handle`` method is called with the captures as
arguments. For example::
>>> class SumHandler(PatternHandler):
... pattern = r'^(\d+) plus (\d+)$'
...
... def handle(self, a, b):
... a, b = int(a), int(b)
... total = a + b
...
... self.respond(
... "%d+%d = %d" %
... (a, b, total))
>>> SumHandler.test("1 plus 2")
['1+2 = 3']
Note that the pattern is not mangled for flexibility (as it was in
previous versions of RapidSMS), so if you choose to deploy pattern
handlers, your incoming messages must match *precisely*. Perhaps
obviously, this won't work because of the trailing whitespace::
>>> SumHandler.test("1 plus 2 ")
False
All non-matching messages are silently ignored (as usual), to allow
other apps or handlers to catch them.
"""
@classmethod
def _pattern(cls):
if hasattr(cls, "pattern"):
return re.compile(cls.pattern, re.IGNORECASE)
@classmethod
def dispatch(cls, router, msg):
pattern = cls._pattern()
if pattern is None:
return False
match = pattern.match(msg.text)
if match is None:
return False
cls(router, msg).handle(*match.groups())
return True
|
[
"adam.mckaig@gmail.com"
] |
adam.mckaig@gmail.com
|
c42ab779bf9b7e6f09e90920b6c9a45605706ec6
|
b6759626cd51b4ad3d61ed05f3aeb03b80bc0157
|
/config.py
|
cb986ee99cf048f76dc9e2f363cefb3ebd545681
|
[] |
no_license
|
Demi-Ad/db_archive
|
c3bdd09d4462fbf26ffedfedbba331e7a57d9119
|
a13fde42597c92a5b08b9db07a7fd224375a224f
|
refs/heads/main
| 2023-03-24T09:45:35.091090
| 2021-03-21T07:25:02
| 2021-03-21T07:25:02
| 349,341,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 59
|
py
|
from typing import Final
db_path: Final = 'DB\\DB.sqlite'
|
[
"votm777@naver.com"
] |
votm777@naver.com
|
11930c8b40e8ee781b7e28498f363266fc6d29f9
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/eventgrid/v20200601/get_event_subscription_full_url.py
|
82af64797d876ce0e32691c6741c44024652c874
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 2,984
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetEventSubscriptionFullUrlResult',
'AwaitableGetEventSubscriptionFullUrlResult',
'get_event_subscription_full_url',
]
@pulumi.output_type
class GetEventSubscriptionFullUrlResult:
"""
Full endpoint url of an event subscription
"""
def __init__(__self__, endpoint_url=None):
if endpoint_url and not isinstance(endpoint_url, str):
raise TypeError("Expected argument 'endpoint_url' to be a str")
pulumi.set(__self__, "endpoint_url", endpoint_url)
@property
@pulumi.getter(name="endpointUrl")
def endpoint_url(self) -> Optional[str]:
"""
The URL that represents the endpoint of the destination of an event subscription.
"""
return pulumi.get(self, "endpoint_url")
class AwaitableGetEventSubscriptionFullUrlResult(GetEventSubscriptionFullUrlResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetEventSubscriptionFullUrlResult(
endpoint_url=self.endpoint_url)
def get_event_subscription_full_url(event_subscription_name: Optional[str] = None,
scope: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventSubscriptionFullUrlResult:
"""
Full endpoint url of an event subscription
:param str event_subscription_name: Name of the event subscription.
:param str scope: The scope of the event subscription. The scope can be a subscription, or a resource group, or a top level resource belonging to a resource provider namespace, or an EventGrid topic. For example, use '/subscriptions/{subscriptionId}/' for a subscription, '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for a resource group, and '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}' for a resource, and '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}' for an EventGrid topic.
"""
__args__ = dict()
__args__['eventSubscriptionName'] = event_subscription_name
__args__['scope'] = scope
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:eventgrid/v20200601:getEventSubscriptionFullUrl', __args__, opts=opts, typ=GetEventSubscriptionFullUrlResult).value
return AwaitableGetEventSubscriptionFullUrlResult(
endpoint_url=__ret__.endpoint_url)
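# --- Illustrative usage sketch (not part of the generated SDK) ---
# Shape of a call to the function above; the subscription name and scope are
# placeholders, and running it requires a configured Pulumi program and stack.
# result = get_event_subscription_full_url(
#     event_subscription_name="example-subscription",
#     scope="/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}")
# pulumi.export("endpointUrl", result.endpoint_url)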
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
a514019918db3ce54edd21489fdead2cf18dd6fd
|
b7b3244fc7e3a0970b6c75baf1aa21db732abccd
|
/venv/Scripts/easy_uninstall-script.py
|
6be2ab08f70cc72664720c611946f6b7c5d97e82
|
[] |
no_license
|
AjayTomar3342/Movie_Recommendation_System_Python
|
ce5f973be86cf8b4072fd8153845b3d9161e075c
|
b53e8cbe1e0d693d3e12901e49d077232c1bdcfd
|
refs/heads/master
| 2023-07-15T17:10:37.012388
| 2021-08-27T14:59:23
| 2021-08-27T14:59:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
#!c:\users\ajay\pycharmprojects\collaborative_based_recommendation_system\venv\scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pi==0.1.2','console_scripts','easy_uninstall'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'pi==0.1.2'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('pi==0.1.2', 'console_scripts', 'easy_uninstall')())
|
[
"ajaytomar6639@gmail.com"
] |
ajaytomar6639@gmail.com
|
d3246a5dfed956b098a2d681102dd4c853bc08b8
|
d5c4c35a18fa1470a596c60a27dff7177f78ebd3
|
/mysite/settings.py
|
f75c501a756955ecc651d129bc4f9cf9c83da9ca
|
[] |
no_license
|
thebestjsyu/my-first-blog
|
90bdbe3a77eaca84661605abba7a086d95493bec
|
b219580029efb9fcd0a7d07435b185e7d4603bbf
|
refs/heads/master
| 2020-04-26T12:09:14.876596
| 2019-03-03T12:40:36
| 2019-03-03T12:40:36
| 173,515,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,214
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jb!#6%5wu7^4h1gj6^ygi3_jly(8xu5xgnm=2tu@ncvu3*r6(d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
#ALLOWED_HOSTS = []
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Australia/Perth'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"jisooyu603@gmail.com"
] |
jisooyu603@gmail.com
|
1209b2c0e68114b4e7c69e3f4e46b299ef1294d0
|
b311517a63eaf846c75fa590a2aee079788cc524
|
/test.py
|
a1bce0b30411f785087704b7c21ed677d837186f
|
[] |
no_license
|
Super-JK/projet-bd1
|
6f63147618656a84de335da6f9ff36484e99ecaa
|
d74f9da01586a667d098be45f4e5ed3d1ee9f1bf
|
refs/heads/master
| 2023-02-02T00:20:40.407469
| 2020-12-17T16:30:26
| 2020-12-17T16:30:26
| 320,002,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
from engine import *
def main():
select = Selection(Leq('sal', Const(1000)), 'emp')
proj = Projection(['ename', 'empno', 'sal', 'job'], select)
join = Join('stocks', proj)
rename = Rename('sal', 'lol', proj)
union = Union(join, 'stocks')
diff = Difference(union, 'stocks')
expr = rename
req = Request('TestTAbles.db', expr)
print(req.translate())
req.print_result()
if __name__ == '__main__':
main()
|
[
"vandechat96@gmail.com"
] |
vandechat96@gmail.com
|
c3f7fd09d46f1a540ef20d86f014b2ab85142efd
|
dd75f0a8f6305e528b26de29fa71ca5d6a8ee7e8
|
/aiida_structure/data/test_kinds.py
|
0afaf40ada611e40564a02d738519301b77b74ce
|
[
"MIT"
] |
permissive
|
chrisjsewell/aiida-structure
|
daf495060ed2304896b2b350300eca3035f221c2
|
2266736bbb729f7a87381c38379c7cf068ccca9c
|
refs/heads/master
| 2020-05-01T13:13:58.199635
| 2019-03-25T03:05:59
| 2019-03-25T03:05:59
| 177,486,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
def test_basic(new_database):
from aiida_structure.data.kinds import KindData
node = KindData()
data = {
"kind_names": ["A", "B", "C"],
"field1": [1, 2, 3]
}
node.set_data(data)
assert node.data == data
assert node.kind_dict == {
"A": {"field1": 1},
"B": {"field1": 2},
"C": {"field1": 3}
}
assert node.field_dict == {
"field1": {
"A": 1,
"B": 2,
"C": 3
}
}
|
[
"chrisj_sewell@hotmail.com"
] |
chrisj_sewell@hotmail.com
|
46de272c750a3d407d5c65911f3d378e608d1409
|
e328200261a62b00385785a42d30b8671a9c0cfb
|
/dsproman/database_test.py
|
f69fb14e7aef9ab2ac0ba1310e56e95e712836cb
|
[] |
no_license
|
gonzaponte/dsproman
|
2e6f26684802e206c3ad9bca665c14e0ed8deb1c
|
b8070f7c9c14bc6f144f749951a6a34313d8f654
|
refs/heads/master
| 2021-07-20T23:06:20.775042
| 2021-04-11T10:37:05
| 2021-04-11T13:21:43
| 246,654,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
from pytest import mark
from . database import crystal_types
from . database import signals
from . database import excitations
from . database import emissions
def test_signals_are_iteratable():
for crystal in signals.values():
assert len(crystal) >= 1
for emissions in crystal:
assert len(emissions) == 2
@mark.parametrize("data", (excitations, emissions))
def test_excitations_emissions_are_iteratable(data):
for crystal in data.values():
assert len(crystal) >= 1
for emission in crystal:
assert isinstance(emission, int)
@mark.parametrize("data", (signals, excitations, emissions))
def test_data_contains_all_crystals(data):
for crystal in crystal_types:
assert crystal in data
|
[
"gonzaponte@gmail.com"
] |
gonzaponte@gmail.com
|
714d3799dec3aa79cdfa0663df21a1afaf726d38
|
1f7f3df7cb28e371d4e863d27f2342951c0e1239
|
/differentiation.py
|
b80b90dd2325089cd6d6d0f6838e2dfd0bb5a653
|
[] |
no_license
|
mohammnas/Numerical_Analysis
|
e4b34f3256efe30cde2e75930e7379f5e890bbcc
|
12c62da0610aaa2501ffa075fc1436abbd7488a6
|
refs/heads/master
| 2021-03-20T03:46:52.717401
| 2020-03-14T17:16:17
| 2020-03-14T17:16:17
| 247,172,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
import numpy as np
h = [1,0.5,0.25,0.125,0.0625,0.03125]
def f(x):
return np.log(x)
def fp(f,x,h):
z = f(x+(2*h))-f(x-h)
return z/(3*h)
print(h)
for i in h:
print(fp(f,2,i))
print('\n')
print('error')
for i in h:
e = 1/2 - fp(f,2,i)
print(e)
print('\n')
print('e/h')
for i in h:
e = 1/2 - fp(f,2,i)
print(e/i)
print('\n')
print('e/h^2')
for i in h:
e = 1/2 - fp(f,2,i)
print(e/(i**2))
print('\n')
print('e/h^3')
for i in h:
e = 1/2 - fp(f,2,i)
print(e/(i**3))
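# --- Illustrative sketch (not part of the original script) ---
# For comparison, the standard central difference (f(x+h) - f(x-h)) / (2*h)
# is second-order accurate, so its error shrinks roughly like h**2.
def fp_central(f, x, h):
    return (f(x + h) - f(x - h)) / (2 * h)
print('\n')
print('central difference error')
for i in h:
    print(1/2 - fp_central(f, 2, i))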
|
[
"nassermohammed@nassers-mbp.home"
] |
nassermohammed@nassers-mbp.home
|
fc66e4be4d638f48574cc7916b5b2cb7f1eb4e8e
|
9a671c2427e125cc933c88a910447e3897e49ded
|
/Assignment1/neural_test.py
|
5d7acc2c59724d73f4366760bcad0b02500adc41
|
[] |
no_license
|
leopardary/CS7641
|
69dd53f6d25493851ea89e90f5f53a1b11a2bfb9
|
245a9fc07839fdd12d318f0890c6b503b221719c
|
refs/heads/master
| 2021-01-15T18:26:35.156514
| 2016-03-12T08:46:04
| 2016-03-12T08:46:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,614
|
py
|
import io
import pydotplus
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler, OneHotEncoder, Imputer
#from sklearn.metrics import accuracy_score
from plot_curves import *
class rb_neural_test:
def __init__(self, x_train, x_test, y_train, y_test, x_col_names, data_label, cv):
self.x_train = x_train
self.x_test = x_test
self.y_train = y_train
self.y_test = y_test
self.x_col_names = x_col_names
self.data_label = data_label
self.cv = cv
def run_cv_model(self, alpha=0.0001, batch_size=200, learning_rate_init=0.001, power_t=0.5, max_iter=200, momentum=0.9, beta_1=0.9, beta_2=0.999, hidden_layer_sizes=(100,), do_plot=True):
# use k-fold cross validation
# we need to standardize the data for the KNN learner
pipe_clf = Pipeline([ ('scl', StandardScaler() ),
('clf', MLPClassifier(alpha=alpha,
batch_size=batch_size,
learning_rate_init=learning_rate_init,
power_t=power_t,
max_iter=max_iter,
momentum=momentum,
beta_1=beta_1,
beta_2=beta_2,
hidden_layer_sizes=hidden_layer_sizes))])
# resample the test data without replacement. This means that each data point is part of a test and
# training set only once (paraphrased from Raschka p.176). In StratifiedKFold, the class labels are
# evenly distributed so that each test and training set is an accurate representation of the whole
# this is the 0.17 version
#kfold = StratifiedKFold(y=self.y_train, n_folds=self.cv, random_state=0)
# this is the 0.18dev version
skf = StratifiedKFold(n_splits=self.cv, random_state=0)
# do the cross validation
train_scores = []
test_scores = []
#for k, (train, test) in enumerate(kfold):
for k, (train, test) in enumerate(skf.split(X=self.x_train, y=self.y_train)):
# run the learning algorithm
pipe_clf.fit(self.x_train[train], self.y_train[train])
train_score = pipe_clf.score(self.x_train[test], self.y_train[test])
train_scores.append(train_score)
test_score = pipe_clf.score(self.x_test, self.y_test)
test_scores.append(test_score)
print('Fold:', k+1, ', Training score:', train_score, ', Test score:', test_score)
train_score = np.mean(train_scores)
print('Training score is', train_score)
test_score = np.mean(test_scores)
print('Test score is', test_score)
if do_plot:
self.__plot_learning_curve(pipe_clf)
return train_score, test_score
def run_model(self, alpha=0.0001, batch_size=200, learning_rate_init=0.001, power_t=0.5, max_iter=200, momentum=0.9, beta_1=0.9, beta_2=0.999, hidden_layer_sizes=(100,), do_plot=True):
# we need to standardize the data for the learner
pipe_clf = Pipeline([ ('scl', StandardScaler() ),
('clf', MLPClassifier(alpha=alpha,
batch_size=batch_size,
learning_rate_init=learning_rate_init,
power_t=power_t,
max_iter=max_iter,
momentum=momentum,
beta_1=beta_1,
beta_2=beta_2,
hidden_layer_sizes=hidden_layer_sizes))])
# test it: this should match the non-pipelined call
pipe_clf.fit(self.x_train, self.y_train)
# check model accuracy
train_score = pipe_clf.score(self.x_train, self.y_train)
print('Training score is', train_score)
test_score = pipe_clf.score(self.x_test, self.y_test)
print('Test score is', test_score)
if do_plot:
self.__plot_learning_curve(pipe_clf)
return train_score, test_score
def __plot_learning_curve(self, estimator):
plc = rb_plot_curves()
plc.plot_learning_curve(estimator, self.x_train, self.y_train, self.cv, self.data_label)
def plot_validation_curve(self, alpha=0.0001, batch_size=200, learning_rate_init=0.001, power_t=0.5, max_iter=200, momentum=0.9, beta_1=0.9, beta_2=0.999, hidden_layer_sizes=(100,)):
estimator = Pipeline([ ('scl', StandardScaler() ),
('clf', MLPClassifier(alpha=alpha,
batch_size=batch_size,
learning_rate_init=learning_rate_init,
power_t=power_t,
max_iter=max_iter,
momentum=momentum,
beta_1=beta_1,
beta_2=beta_2,
hidden_layer_sizes=hidden_layer_sizes))])
param_names = ['clf__batch_size', 'clf__learning_rate_init', 'clf__power_t', 'clf__max_iter']
param_ranges = [np.arange(50,500,10), np.arange(0.001,0.1,0.01), np.arange(0.01,0.1,0.01), np.arange(50, 1000, 10)]
data_label = self.data_label
plc = rb_plot_curves()
for i in range(len(param_names)):
param_name = param_names[i]
param_range = param_ranges[i]
plc.plot_validation_curve(estimator, self.x_train, self.y_train,
self.cv, data_label,
param_range, param_name, n_jobs=-1)
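# --- Hedged aside (not part of the original assignment code): a minimal,
# self-contained sketch of the StratifiedKFold + Pipeline cross-validation
# pattern that the comments above describe, written against the
# scikit-learn >= 0.18 API (n_splits constructor argument, skf.split(X, y)).
# The synthetic data and variable names below are illustrative assumptions.
if __name__ == "__main__":
    from sklearn.datasets import make_classification

    X_demo, y_demo = make_classification(n_samples=200, n_features=10, random_state=0)
    demo_pipe = Pipeline([('scl', StandardScaler()),
                          ('clf', MLPClassifier(hidden_layer_sizes=(100,), max_iter=300))])
    demo_skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
    demo_scores = []
    for tr, te in demo_skf.split(X_demo, y_demo):
        demo_pipe.fit(X_demo[tr], y_demo[tr])                        # fit on the training folds
        demo_scores.append(demo_pipe.score(X_demo[te], y_demo[te]))  # score the held-out fold
    print('mean CV accuracy:', np.mean(demo_scores))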
|
[
"="
] |
=
|
469775e2bd8d58aceac9dbb2ec9b8ca2bc005667
|
dbfcb91c055db22d22967eab36a5b48f6ff4c5fc
|
/day11/puzzle22.py
|
6b52b80dcd24dca2e3052d090d15d80fd8b39ad0
|
[] |
no_license
|
rhoogduin/AoC
|
e8daf057553e6afb6a4b9c1d76474a1d27dfeb5b
|
abb7019023da40829e01f0f49c72fcbcf85b1b1f
|
refs/heads/master
| 2023-02-01T15:26:57.589394
| 2020-12-18T20:06:54
| 2020-12-18T20:06:54
| 317,666,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,204
|
py
|
#enum
FLOOR = -1
EMPTY = 0
FILLED = 1
#read input
with open("input.txt", "r") as file:
    lines = file.read().splitlines()
# lists that will represent the waiting area
WA_old = []
WA_new = []
# read input and populate waiting area
for line in lines :
row = []
for char in line :
if char == '.' :
row.append(FLOOR)
elif char == 'L' :
row.append(EMPTY)
elif char == '#' :
row.append(FILLED)
WA_old.append(row)
# dimensions of the waiting area
DIM_X = len(WA_old[0])
DIM_Y = len(WA_old)
# initialise with zeroes
for i in range(DIM_Y) :
row = []
for j in range(DIM_X) :
row.append(0)
WA_new.append(row)
# look in a direction for a filled seat
def look (y, x, yd, xd) :
if yd == 0 and xd == 0 :
return 0
y += yd
x += xd
while y >= 0 and y < DIM_Y and x >= 0 and x < DIM_X :
if WA_old[y][x] == FILLED :
return 1
elif WA_old[y][x] == EMPTY :
return 0
y += yd
x += xd
return 0
# calculate value of seat for next generation
def newValue (y, x) :
if WA_old[y][x] == FLOOR :
return FLOOR
count = 0
for i in range(-1, 2) :
for j in range(-1, 2) :
count += look(y, x, i, j)
if WA_old[y][x] == EMPTY and count == 0 :
return FILLED
if WA_old[y][x] == FILLED and count >= 5 :
return EMPTY
return WA_old[y][x]
# calculate one generation
def generation () :
# new board
for i in range(DIM_Y) :
for j in range(DIM_X) :
WA_new[i][j] = newValue(i,j)
# check if boards are equal
def equal () :
for i in range(DIM_Y) :
for j in range(DIM_X) :
if WA_new[i][j] != WA_old[i][j] :
return False
return True
# run simulation until stable situation is found
while not equal() :
generation()
ph = WA_new
WA_new = WA_old
WA_old = ph
count = 0
for i in range(DIM_Y) :
for j in range(DIM_X) :
if WA_new[i][j] == FILLED :
count += 1
print (f"Amount of occupied seats: {count}")
|
[
"s1247123@vuw.leidenuniv.nl"
] |
s1247123@vuw.leidenuniv.nl
|
e7a36347f249728f620ac8c635fb106e2807fdf2
|
d9aeaf6d77114a970b8caf3accf2570430bbb592
|
/ex12.py
|
0bbdf25de1f590d87eba0069d7a9b1da5c899921
|
[] |
no_license
|
agungTuanany/Learn-Python-The-Hardway
|
033c6f12a7acfb4f33af772624aefb75a29189d3
|
4130eda9607987f31611306849399f6c7738e5d3
|
refs/heads/master
| 2020-03-25T09:01:09.214703
| 2018-08-06T10:49:33
| 2018-08-06T10:50:44
| 143,643,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,038
|
py
|
# Exercise 12: Prompting People.
# When you typed **raw_input()**, you were typing the ( and ) characters, which
# are parenthesis characters. This is similar to when you used them to do a
# format with extra variables, as in **"%s %s" % (x, y).** For **raw_input**,
# you put the prompt you want inside the () so that it looks like this:
#
# y = raw_input("Name? ")
#
# This prompts the user with "Name?" and puts the result into the variable **y**.
# This is how you ask someone a question and get the answer.
#
# This means we can completely rewrite our previous exercise using just **raw_input**
# to do all the prompting.
age = raw_input("how old are you? ")
height = raw_input("How tall are you? ")
weight = raw_input("How much do you weigh? ")
#print "So, you're %r old, %r tall and %r heavy." % (
# age, height, weight
# )
# Type **pydoc raw_input**. To see raw_input documentation.
# using **%s** instead **%r**
print "So, you're %s old, %s tall and %s heavy." % (
age, height, weight
)
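# Hedged aside (not part of the original exercise): in Python 3, raw_input()
# was renamed to input() and print became a function, so the equivalent
# prompt-and-echo program would look roughly like this:
#
# age = input("How old are you? ")
# height = input("How tall are you? ")
# weight = input("How much do you weigh? ")
# print("So, you're {} old, {} tall and {} heavy.".format(age, height, weight))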
|
[
"agung.tuanany@gmail.com"
] |
agung.tuanany@gmail.com
|
13d9a233ce729c06b482d163cf30e75e2fadf3a8
|
d0f28dccc15804d4426bdb79c964fbc69206ec21
|
/streaming/wordCount/mapper1.py
|
e7ffc83ea5f20a9d4466108a9fef210e1fce7580
|
[] |
no_license
|
INKWWW/python_project
|
f4170b435a2d08fa931c1ebff4b45bfa172181a6
|
0e4647d09ec4a7a5ea4d14863c2628402b3830d4
|
refs/heads/master
| 2020-04-02T08:29:54.970819
| 2019-04-13T07:52:54
| 2019-04-13T07:52:54
| 154,247,147
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
def main():
for line in sys.stdin:
words = line.split()
for word in words:
# print('{}\t{}'.format(word, 1))
print('%s%s%d' % (word, '\t', 1))
if __name__ == '__main__':
main()
|
[
"whmink@foxmail.com"
] |
whmink@foxmail.com
|
a9ea495946dead0b502d7d3481ea0618271980d4
|
2fedc543c38b3a4f659e938d5b208db4e0501caf
|
/xml_parser.py
|
7d362b6bc1e6b4dceaf4777215d9918242a57b1d
|
[] |
no_license
|
publicznyprofil/tribalwars_build_village
|
595b34cbcdd1307496203d97eaafdc638eaee879
|
df8a8206bcd9ba14ce6f6828de4a1903fe29f7ae
|
refs/heads/master
| 2021-01-16T23:10:52.596503
| 2017-09-24T19:50:10
| 2017-09-24T19:50:10
| 95,709,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,405
|
py
|
import xml.etree.ElementTree as ElementTree
class XmlListConfig(list):
def __init__(self, list_):
for element in list_:
if element:
if len(element) == 1 or element[0].tag != element[1].tag:
self.append(XmlDictConfig(element))
elif element[0].tag == element[1].tag:
self.append(XmlListConfig(element))
elif element.text:
text = element.text.strip()
if text:
self.append(text)
class XmlDictConfig(dict):
def __init__(self, parent_element):
if parent_element.items():
self.update(dict(parent_element.items()))
for element in parent_element:
if element:
if len(element) == 1 or element[0].tag != element[1].tag:
dict_ = XmlDictConfig(element)
else:
dict_ = {element[0].tag: XmlListConfig(element)}
if element.items():
dict_.update(dict(element.items()))
self.update({element.tag: dict_})
elif element.items():
self.update({element.tag: dict(element.items())})
else:
self.update({element.tag: element.text})
def get_dict_from_xml_string(xml_string):
root = ElementTree.XML(xml_string)
return XmlDictConfig(root)
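# --- Hedged usage sketch (not part of the original module): a tiny, made-up
# XML string run through get_dict_from_xml_string. Tag and attribute names
# below are illustrative only.
if __name__ == "__main__":
    sample_xml = '<village name="A"><building kind="farm" level="3" /></village>'
    parsed = get_dict_from_xml_string(sample_xml)
    # Roughly: {'name': 'A', 'building': {'kind': 'farm', 'level': '3'}}
    print(parsed)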
|
[
"nitrobee@o2.pl"
] |
nitrobee@o2.pl
|
30936e83f328e3dc710f6715e1b87cf9d19f139e
|
8637769c641d987cab3878a29cc84c3f983bb349
|
/app/lights.py
|
376f2ba07295b72dfe62372a4a31c34ee7ef3022
|
[
"MIT"
] |
permissive
|
fhopeman/mgmt-center
|
eca3de99c5c978bd21518ed405633682a8f59d7b
|
b0210f668e43be15a260463abb05f10248feeadd
|
refs/heads/master
| 2020-04-20T23:42:46.921422
| 2015-04-03T13:33:03
| 2015-04-03T13:33:13
| 28,199,740
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
from flask import render_template
from app import app
from util_communication import slaveCall, slaveRead
# define lights (id is gpio pin)
lights = {
0: {"name": "living room TV", "slave": 0x04, "id": 0, "state": 0},
1: {"name": "living room LED", "slave": 0x04, "id": 0, "state": 0},
2: {"name": "balcony", "slave": 0x04, "id": 0, "state": 0}
}
SLAVE_CMD_LIGHT_START = 20
SLAVE_CMD_LIGHT_DIGITS = 1
@app.route("/light")
def light():
data = {
"lights": lights
}
return render_template("light.html", **data)
@app.route("/light/<int:light>/<int:state>", methods=["POST"])
def change_light_state(light, state):
light = lights[light]
# change the state if it has changed
if state != light["state"]:
# change light state
slaveCall(light["slave"], SLAVE_CMD_LIGHT_START + light["id"])
# change global state dict
light["state"] = int(slaveRead(light["slave"], SLAVE_CMD_LIGHT_DIGITS));
else:
print("no light changes")
return "success"
|
[
"fabian.hoffmann@otto.de"
] |
fabian.hoffmann@otto.de
|
0b4642b0ad7c64c8f005e679b74dc0449fc5ef1c
|
72f3d2561bf74ad91b00096ff8bc2e17642ec6ce
|
/albamonproj/albamonproj/wsgi.py
|
8fdea78f495b0f5abc37740e91db9457d22cd125
|
[] |
no_license
|
kimsongha/Likelion_albamon
|
5592f8b071ec3bbe834ef8051c1725a66e63d909
|
c81c1b214863fed818aadf143f4e1bc6fc695111
|
refs/heads/main
| 2023-05-07T22:01:38.730128
| 2021-05-21T00:35:47
| 2021-05-21T00:35:47
| 368,143,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for albamonproj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'albamonproj.settings')
application = get_wsgi_application()
|
[
"songha1024@naver.com"
] |
songha1024@naver.com
|
4d9a94e46e4043fec8be22f9d05cffcd420ad450
|
6dadceef9ffafbd2bdcc0cf64b8d541e93e2fdaa
|
/HINGCN-GS/helpers.py
|
402a21c9d4bff820b427fed52c884c9aecc830d2
|
[
"MIT"
] |
permissive
|
dingdanhao110/HINGCN
|
451683b3a1924f1cef831e5f1ab7c16ad87c937f
|
281b73c03bd3b00e35bce4c5e1c27076233555e4
|
refs/heads/master
| 2022-11-29T21:25:55.112989
| 2020-07-30T15:07:09
| 2020-07-30T15:07:09
| 167,971,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,744
|
py
|
#!/usr/bin/env python
"""
helpers.py
"""
from __future__ import print_function
import numpy as np
import torch
from torch.autograd import Variable
import scipy.sparse as sp
from sklearn.feature_extraction.text import TfidfTransformer
from preprocess.word_emb import count2feat
def set_seeds(seed=0):
np.random.seed(seed)
_ = torch.manual_seed(seed)
if torch.cuda.is_available():
_ = torch.cuda.manual_seed(seed)
def to_numpy(x):
if isinstance(x, Variable):
return x.cpu().data.numpy() if x.is_cuda else x.data.numpy()
    return x.cpu().numpy() if x.is_cuda else x.numpy()
def read_embed(path="./data/dblp/",
emb_file="APC"):
with open("{}{}.emb".format(path, emb_file)) as f:
n_nodes, n_feature = map(int, f.readline().strip().split())
print("number of nodes:{}, embedding size:{}".format(n_nodes, n_feature))
embedding = np.loadtxt("{}{}.emb".format(path, emb_file),
dtype=np.float32, skiprows=1)
emb_index = {}
for i in range(n_nodes):
emb_index[embedding[i, 0]] = i
features = np.asarray([embedding[emb_index[i], 1:] for i in range(n_nodes)])
assert features.shape[1] == n_feature
assert features.shape[0] == n_nodes
return features, n_nodes, n_feature
def normalize(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
def load_2hop_index(path="./data/dblp/", file="APA"):
index = {}
with open("{}{}.ind".format(path, file), mode='r') as f:
for line in f:
array = [int(x) for x in line.split()]
a1 = array[0]
a2 = array[1]
if a1 not in index:
index[a1] = {}
if a2 not in index[a1]:
index[a1][a2] = set()
for p in array[2:]:
index[a1][a2].add(p)
return index
def read_mpindex_dblp(path="./data/dblp2/",train_per=0.1):
print(train_per)
label_file = "author_label"
PA_file = "PA"
PC_file = "PC"
PT_file = "PT"
feat_emb_file = 'term_emb.npy'
feat_emb = np.load("{}{}".format(path, feat_emb_file))
# print("{}{}.txt".format(path, PA_file))
PA = np.genfromtxt("{}{}.txt".format(path, PA_file),
dtype=np.int32)
PC = np.genfromtxt("{}{}.txt".format(path, PC_file),
dtype=np.int32)
PT = np.genfromtxt("{}{}.txt".format(path, PT_file),
dtype=np.int32)
PA[:, 0] -= 1
PA[:, 1] -= 1
PC[:, 0] -= 1
PC[:, 1] -= 1
PT[:, 0] -= 1
PT[:, 1] -= 1
paper_max = max(PA[:, 0]) + 1
author_max = max(PA[:, 1]) + 1
term_max = max(PT[:, 1]) + 1
PA_s = sp.coo_matrix((np.ones(PA.shape[0]), (PA[:, 0], PA[:, 1])),
shape=(paper_max, author_max),
dtype=np.float32)
PT_s = sp.coo_matrix((np.ones(PT.shape[0]), (PT[:, 0], PT[:, 1])),
shape=(paper_max, term_max),
dtype=np.float32)
# transformer = TfidfTransformer()
features = PA_s.transpose() * PT_s # AT
# features = transformer.fit_transform(features)
# features = np.array(features.todense())
features = count2feat(features, feat_emb)
labels_raw = np.genfromtxt("{}{}.txt".format(path, label_file),
dtype=np.int32)
labels_raw[:, 0] -= 1
labels_raw[:, 1] -= 1
labels = np.zeros(author_max)
labels[labels_raw[:, 0]] = labels_raw[:, 1]
reordered = np.random.permutation(labels_raw[:, 0])
total_labeled = labels_raw.shape[0]
idx_train = reordered[range(int(total_labeled * train_per))]
idx_val = reordered[range(int(total_labeled * train_per), int(total_labeled * 0.8))]
idx_test = reordered[range(int(total_labeled * 0.8), total_labeled)]
folds = {'train':idx_train,'val':idx_val,'test':idx_test}
return features, labels, folds
def read_mpindex_cora(path="./data/cora/",train_per=0.1):
label_file = "paper_label"
feat_emb_file = 'term_emb.npy'
PT_file = "PT"
feat_emb = np.load("{}{}".format(path,feat_emb_file))
PT = np.genfromtxt("{}{}.txt".format(path, PT_file),
dtype=np.int32)
PT[:, 0] -= 1
PT[:, 1] -= 1
paper_max = max(PT[:, 0]) + 1
term_max = max(PT[:, 1]) + 1
PT_s = sp.coo_matrix((np.ones(PT.shape[0]), (PT[:, 0], PT[:, 1])),
shape=(paper_max, term_max),
dtype=np.float32)
# transformer = TfidfTransformer()
features = PT_s # AT
# features = transformer.fit_transform(features)
# features = np.array(features.todense())
# features = np.zeros(paper_max).reshape(-1, 1)
features = count2feat(features,feat_emb)
labels_raw = np.genfromtxt("{}{}.txt".format(path, label_file),
dtype=np.int32)
labels_raw[:, 0] -= 1
no_label_mask = labels_raw[:,1] != 0
labels_raw = labels_raw[no_label_mask]
print('labels shape: ', labels_raw.shape)
#remap label
label_dict={}
for i in labels_raw[:,1]:
if i not in label_dict:
label_dict[i]=len(label_dict)
print('number of label classes: ', len(label_dict))
for i in range(labels_raw.shape[0]):
labels_raw[i,1] = label_dict[labels_raw[i,1]]
labels = np.zeros(paper_max)
labels[labels_raw[:, 0]] = labels_raw[:, 1]
reordered = np.random.permutation(labels_raw[:, 0])
total_labeled = labels_raw.shape[0]
idx_train = reordered[range(int(total_labeled * train_per))]
idx_val = reordered[range(int(total_labeled * train_per), int(total_labeled * 0.8))]
idx_test = reordered[range(int(total_labeled * 0.8), total_labeled)]
folds = {'train':idx_train,'val':idx_val,'test':idx_test}
return features, labels, folds
def load_edge_emb(path, schemes, n_dim=17, n_author=20000):
data = np.load("{}edge{}.npz".format(path, n_dim))
index = {}
emb = {}
for scheme in schemes:
# print('number of authors: {}'.format(n_author))
ind = sp.coo_matrix((np.arange(1,data[scheme].shape[0]+1),
(data[scheme][:, 0], data[scheme][:, 1])),
shape=(n_author, n_author),
dtype=np.long)
ind = ind + ind.T.multiply(ind.T>ind)
ind = sparse_mx_to_torch_sparse_tensor(ind)#.to_dense()
embedding = np.zeros(n_dim, dtype=np.float32)
embedding = np.vstack((embedding, data[scheme][:, 2:]))
emb[scheme] = torch.from_numpy(embedding).float()
index[scheme] = ind.long()
print('loading edge embedding for {} complete, num of embeddings: {}'.format(scheme,embedding.shape[0]))
return index, emb
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
def read_mpindex_yelp(path="../../data/yelp/",train_per=0.1):
label_file = "true_cluster"
feat_file = "attributes"
# print("{}{}.txt".format(path, PA_file))
feat = np.genfromtxt("{}{}.txt".format(path, feat_file),
dtype=np.float)
features = feat[:,:2]
#features = np.zeros((feat.shape[0],1))
#features = np.eye(feat.shape[0])
labels = np.genfromtxt("{}{}.txt".format(path, label_file),
dtype=np.int32)
reordered = np.random.permutation(np.arange(labels.shape[0]))
total_labeled = labels.shape[0]
idx_train = reordered[range(int(total_labeled * train_per))]
idx_val = reordered[range(int(total_labeled * train_per), int(total_labeled * 0.8))]
idx_test = reordered[range(int(total_labeled * 0.8), total_labeled)]
folds = {'train':idx_train,'val':idx_val,'test':idx_test}
return features, labels, folds
def read_mpindex_yago(path="../../data/yago/", label_file = "labels",train_per=0.1):
movies = []
with open('{}{}.txt'.format(path, "movies"), mode='r', encoding='UTF-8') as f:
for line in f:
movies.append(line.split()[0])
n_movie = len(movies)
movie_dict = {a: i for (i, a) in enumerate(movies)}
features = np.zeros(n_movie).reshape(-1,1)
labels_raw = []
with open('{}{}.txt'.format(path, label_file), 'r', encoding='UTF-8') as f:
for line in f:
arr = line.split()
labels_raw.append([int(movie_dict[arr[0]]), int(arr[1])])
labels_raw = np.asarray(labels_raw)
labels = np.zeros(n_movie)
labels[labels_raw[:, 0]] = labels_raw[:, 1]
reordered = np.random.permutation(labels_raw[:, 0])
total_labeled = labels_raw.shape[0]
idx_train = reordered[range(int(total_labeled * train_per))]
idx_val = reordered[range(int(total_labeled * train_per), int(total_labeled * 0.8))]
idx_test = reordered[range(int(total_labeled * 0.8), total_labeled)]
folds = {'train': idx_train, 'val': idx_val, 'test': idx_test}
return features, labels, folds
def read_homograph(path="../../data/yago/", problem='yago',):
dataset = "homograph"
emb_file = {'yago':'MADW_16','dblp':'APC_16','yelp':'RBUK_16'}
with open("{}{}.emb".format(path, emb_file[problem])) as f:
n_nodes, n_feature = map(int, f.readline().strip().split())
embedding = np.loadtxt("{}{}.emb".format(path, emb_file[problem]),
dtype=np.float32, skiprows=1, encoding='latin-1')
emb_index = {}
for i in range(n_nodes):
# if type(embedding[i, 0]) is not int:
# continue
emb_index[embedding[i, 0]] = i
features = np.asarray([embedding[emb_index[i], 1:] for i in range(embedding.shape[0])])
features = torch.FloatTensor(features)
edges = np.genfromtxt("{}{}.txt".format(path, dataset),
dtype=np.int32)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(n_nodes, n_nodes),
dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
# features = normalize(features)
adj = normalize(adj + sp.eye(adj.shape[0]))
adj = sparse_mx_to_torch_sparse_tensor(adj)
return adj, features
# features, labels, folds = read_mpindex_yago()
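# --- Hedged usage sketch (not part of the original module): a tiny demo of
# normalize() and sparse_mx_to_torch_sparse_tensor() on a made-up 3x3
# adjacency matrix, guarded so it only runs when the file is executed directly.
if __name__ == "__main__":
    demo_adj = sp.coo_matrix(np.array([[0., 1., 1.],
                                       [1., 0., 0.],
                                       [0., 0., 0.]], dtype=np.float32))
    demo_norm = normalize(demo_adj + sp.eye(3))  # every row now sums to 1
    print(sparse_mx_to_torch_sparse_tensor(demo_norm).to_dense())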
|
[
"dingdanhao110@163.com"
] |
dingdanhao110@163.com
|
583f8d847bb0db995cf1b918a2034975c71c015e
|
99c53abf533e1d2f47608c9f4166b050c7cb2b6d
|
/gym_aa/__init__.py
|
3e61248ec2d39ae25a3999b572cb1bb379de2464
|
[] |
no_license
|
cair/Axis_and_Allies
|
647189492e207d59ad6ed9e2852fd85eb9b692a1
|
ec87ac0f02829b753a37481e902dfb13c9ef20e1
|
refs/heads/master
| 2020-03-23T08:29:22.078769
| 2018-09-30T22:49:32
| 2018-09-30T22:49:32
| 141,329,606
| 4
| 2
| null | 2018-10-02T20:09:49
| 2018-07-17T18:41:36
|
Python
|
UTF-8
|
Python
| false
| false
| 189
|
py
|
from gym.envs.registration import register
register(
id='axis-and-allies-4x4-random-agent-v0',
entry_point='gym_aa.envs:AxisAndAllies4x4RandomAgent'
#timestep_limit=2000,
)
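# Hedged usage sketch (not part of the original file): once this package is
# imported, the registered id can be passed to gym.make(). Whether the
# AxisAndAllies4x4RandomAgent entry point resolves depends on the rest of the
# package, so treat this purely as an illustration:
#
#   import gym
#   import gym_aa
#   env = gym.make('axis-and-allies-4x4-random-agent-v0')
#   observation = env.reset()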
|
[
"perara12@gmail.com"
] |
perara12@gmail.com
|
29942c923bc7a82bdbb8e16b87663ab7a3b9faf1
|
33836016ea99776d31f7ad8f2140c39f7b43b5fe
|
/mks_ti_alpha_ord1/oldfiles/functions_ti_alpha_ord1_alt.py
|
82174620ead2ad9de3dc717aa7eebaa1323f3448
|
[] |
no_license
|
earthexploration/MKS-Experimentation
|
92a2aea83e041bfe741048d662d28ff593077551
|
9b9ff3b468767b235e7c4884b0ed56c127328a5f
|
refs/heads/master
| 2023-03-17T23:11:11.313693
| 2017-04-24T19:24:35
| 2017-04-24T19:24:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,220
|
py
|
# -*- coding: utf-8 -*-
"""
Functions connected to 3D, isotropic MKS analyses
In general these functions are not for parallel processing or chunking of data
Noah Paulson, 5/9/2014
"""
import numpy as np
import time
import itertools as it
import scipy.io as sio
def calib(k,M,resp_fft,p,H,el,ns):
"""
Summary: This function calibrates the influence coefficients from the
frequency space calibration microstructures and FEM responses
Inputs:
M: (el,el,el,ns,H) The microstructure function in frequency space.
Includes all local states (from any order terms)
resp_fft: (el,el,el,ns) The response of the calibration FEM analyses
after fftn
H: (scalar) The number of local states in the microstructure function
el: (scalar) The number of elements per side of the 'cube'
ns: (scalar) The number of samples (assumed to be ns-1 calibration and 1
validation)
filename (string): The filename to write messages to
Outputs:
specinfc_k:(H) influence coefficients in frequency space for the k'th
frequency
"""
[u,v,w] = np.unravel_index(k,[el,el,el])
MM = np.zeros((H,H),dtype = 'complex128')
PM = np.zeros((H,1),dtype = 'complex128')
for n in xrange(ns-1):
mSQ = np.array(M[u,v,w,n,:])
mSQc = np.conj(mSQ[None,:])
mSQt = mSQ[:,None]
MM = MM + np.dot(mSQt, mSQc)
PM[:,0] = PM[:,0] + np.dot(resp_fft[u,v,w,n],mSQc)
if k < 2:
p = independent_columns(MM, .001)
calred = MM[p,:][:,p]
resred = PM[p,0].conj().T
specinfc_k = np.zeros(H,dtype = 'complex64')
specinfc_k[p] = np.linalg.solve(calred, resred)
if k == 1:
return specinfc_k, p
else:
return specinfc_k
def eval_meas(mks_R_indv,resp_indv,el):
"""
Summary:
Inputs:
mks_R: (el,el,el) The response predicted by the MKS for the validation
microstructure
resp: (el,el,el,ns) the FEM responses of all microstructures
el: (scalar) The number of elements per side of the 'cube'
Outputs:
avgE: (scalar)
MASE: (scalar)
"""
avgE = np.average(mks_R_indv)
MASE = 0
for k in xrange(el**3):
[u,v,w] = np.unravel_index(k,[el,el,el])
MASE = MASE + ((np.abs(resp_indv[u,v,w] - mks_R_indv[u,v,w]))/(avgE * el**3))
return avgE, MASE
def gen_micr(filename1,filename2,set_id,read_dat,ns,el,H):
"""
Summary:
This function reads the microstructures for all samples (calibration
and validation) from a matlab data file, rearranges them into ns # of
el x el x el cubes, and saves them in the .npy file format
Inputs:
file_name (string): the filename for the matlab '.mat' file
read_dat (int): if read_dat = 1 the '.mat' file is read and rearranged,
if read_dat = 0 micr is simply loaded from the '.npy' file
ns (int): the total number of microstructures (for calibration and
validation)
el (int): the number of elements per side of the microstructure cube
Output:
micr ([el,el,el,ns],int8): The binary microstructures for calibration
and validation
"""
if read_dat == 1:
start = time.time()
## convert the matlab files arrays in python
micr_flag_BASE = sio.loadmat(filename2)
## micr_flag contains 9261 flags for each sample microsturcture,
## each representing an orientation. The number in these flags
## corresponds with an orientation in ex_ori_fr
micr_flag = micr_flag_BASE['ct']
ex_ori_BASE = sio.loadmat(filename1)
## ex_ori_fr contains 522 sets of 15 GSH coefficients, where each
## set corresponds with an orientation on the surface of the
## hexagonal-triclinic fundamental zone.
ex_ori_fr = ex_ori_BASE['extremeorienth_fr']
pre_micr = np.zeros((el**3,ns,H),dtype = 'complex64')
for k in range(el**3):
for n in range(ns):
pre_micr[k,n,:] = ex_ori_fr[micr_flag[k,n]-1,:]
## here we perform flips and reshapes to enact the proper arrangement
## of spatial locations in the 3D cube
micr = np.zeros((el,el,el,ns,H),dtype = 'complex64')
for n in range(ns):
for h in range(H):
micr[:,:,:,n,h] = np.swapaxes(np.reshape(
np.flipud(pre_micr[:,n,h]), [el,el,el]),1,2)
## save the microstructure array
np.save('micr_%s%s' %(ns,set_id),micr)
end = time.time()
timeE = np.round((end - start),3)
else:
start = time.time()
micr = np.load('micr_%s%s.npy' %(ns,set_id))
end = time.time()
timeE = np.round((end - start),3)
return [micr, timeE]
def independent_columns(A, tol = 1e-05):
"""
Summary:
This function returns a vector of the independent columns of a matrix
Note: the answer may not be unique; this function returns one of many
possible answers.
Source: http://stackoverflow.com/q/1331249
Inputs:
A (generic array {numerical})
tol (float): This number specifies how numerically close two columns
must be to be dependent.
Outputs:
independent (vector of int): vector containing the indices of the
independent columns of A
"""
Q, R = np.linalg.qr(A)
independent = np.where(np.abs(R.diagonal()) > tol)[0]
return independent
def load_fe(filename1,set_id,read_dat,ns,el):
"""
Summary:
This function loads the finite element (FE) responses from '.dat'
files. This version is used for files with orientation information
Inputs:
filename (string): The '.mat' file containing orientation information
for the set of microstructures
read_dat (int): if read_dat = 1 the '.dat' files are read and compiled,
if read_dat = 0 resp is simply loaded from an existing '.npy' file
ns (int): the total number of microstructures (calibration or validation)
el (int): the number of elements per side of the microstructure cube
Outputs:
resp ([el,el,el,ns],float): The FEM responses of all calibration or
validation microstructures
msg (string): A message detailing how resp was loaded
"""
if read_dat == 1:
start = time.time()
micr_flag_BASE = sio.loadmat(filename1)
## ori_mats contains a 3x3 orientation matrix for each spatial location
## in each sample microstructure
ori_mat = micr_flag_BASE['orientation']
resp = np.zeros((el**3,6,ns),dtype = 'float64')
for sn in xrange(ns):
filename = "hcp_21el_200s_%s.dat" %(sn+1)
resp[:,:,sn] = res_red(filename,ori_mat,el,sn)
sio.savemat('FE_cal200_vect.mat', {'FE_cal200_vect':resp})
end = time.time()
timeE = np.round((end - start),1)
msg = "Import FE results: %s seconds" %timeE
## if read_dat == 0 the script will simply reload the results from a
## previously saved FE_results_#.npy
else:
resp = np.load('FE_500_cal_vect.npy')
msg = "FE results loaded"
return [resp,msg]
def mf(micr_sub,el,order, H):
## microstructure functions
sub_len = len(micr_sub[0,0,0,:])
pm = np.zeros([el,el,el,sub_len,2])
pm[:,:,:,:,0] = (micr_sub == 0)
pm[:,:,:,:,1] = (micr_sub == 1)
pm = pm.astype(int)
if order == 1:
m_sub = pm
if order == 2:
hs = np.array([[1,1],[0,0],[1,0],[0,1]])
vec = np.array([[1,0],[1,1],[1,2]])
k = 0
m_sub = np.zeros([el,el,el,sub_len,H])
for hh in xrange(len(hs[:,0])):
for t in xrange(len(vec[:,0])):
a1 = pm[:,:,:,:,hs[hh,0]]
a2 = np.roll(pm[:,:,:,:,hs[hh,1]],vec[t,0],vec[t,1])
m_sub[:,:,:,:,k] = a1 * a2
k = k + 1
if order == 7:
hs = np.array(list(it.product([0,1],repeat=7)))
vec = np.array([[1,0],[1,1],[1,2],[-1,0],[-1,1],[-1,2]])
vlen = len(vec[:,0])
m_sub = np.zeros([el,el,el,sub_len,H])
for hh in xrange(H):
a1 = pm[:,:,:,:,hs[hh,0]]
pre_m = a1
for t in xrange(vlen):
a_n = np.roll(pm[:,:,:,:,hs[hh,t+1]],vec[t,0],vec[t,1])
pre_m = pre_m * a_n
m_sub[:,:,:,:,hh] = pre_m
m_sub = m_sub.astype(int)
return m_sub
def mf_sn(micr_sn, el, order, H):
"""
Summary:
This function takes in a single microstructure, and generates all of
the local states based on the desired order of terms. It does this by
shifting the microstructure in pre-defined directions, and then
performing an element-wise multiplication. This reveals the
conformation for a higher order local state.
Inputs:
micr_sn ([el,el,el],int8): the arrangement of black and white cells in
the cube for a single microstructure (from the micr variable)
el (int): the number of elements per side of the microstructure cube
H (int): the total number of local states in the microstructure
function (including higher order conformations)
order (int): the order of the terms used to build the microstructure function
Output:
m_sn ([el,el,el,H],int): The microstructure function for a single
microstructure, including higher order local state conformations
"""
## 1st order microstructure function generation
pm = np.zeros([el,el,el,2])
pm[:,:,:,0] = (micr_sn == 0)
pm[:,:,:,1] = (micr_sn == 1)
pm = pm.astype(int)
if order == 1:
## pm already represents the microstructure function for first order
## terms
m_sn = pm
if order == 2:
## in hs, the first element of each row represents the desired local
## state of the original cell (black or white), and the second is
## the desired local state after the microstructure is shifted.
hs = np.array([[1,1],[0,0],[1,0],[0,1]])
## in vec, the first element of each row represents the number of
## elements to shift the microstructure, and the second is the
## dimension along which the microstructure should be shifted
vec = np.array([[1,0],[1,1],[1,2]])
## in the generation of the microstructure for second order
## localization terms the microstructure is rolled in a single
## direction for each term.
k = 0
m_sn = np.zeros([el,el,el,H])
for hh in xrange(len(hs[:,0])):
for t in xrange(len(vec[:,0])):
a1 = pm[:,:,:,hs[hh,0]]
a2 = np.roll(pm[:,:,:,hs[hh,1]],vec[t,0],vec[t,1])
m_sn[:,:,:,k] = a1 * a2
k = k + 1
if order == 7:
## Here hs is automatically generated
hs = np.array(list(it.product([0,1],repeat=7)))
vec = np.array([[1,0],[1,1],[1,2],[-1,0],[-1,1],[-1,2]])
vlen = len(vec[:,0])
m_sn = np.zeros([el,el,el,H])
## in the generation of the microstructure for seventh order terms
## the microstructure is rolled in all 6 directions. These 7
## microstructures are all multiplied together.
for hh in xrange(H):
a1 = pm[:,:,:,hs[hh,0]]
pre_m = a1
for t in xrange(vlen):
a_n = np.roll(pm[:,:,:,hs[hh,t+1]],vec[t,0],vec[t,1])
pre_m = pre_m * a_n
m_sn[:,:,:,hh] = pre_m
m_sn = m_sn.astype(int)
return m_sn
def res_red(filename,ori_mat,el,sn):
"""
Summary:
This function reads the strain values from a '.dat' FEM results file,
averages the eight integration points of every element, and rotates each
strain tensor from the crystal frame into the sample frame using the
orientation matrices supplied in ori_mat.
Inputs:
filename (string): the name of the '.dat' file containing the
FEM response
ori_mat ([3,3,el**3,ns],float): the per-element orientation matrices
el (int): the number of elements per side of the microstructure cube
sn (int): the index of the sample being read
Outputs:
Etot ([el**3,6],float): the six strain components of each element in the
sample frame
"""
f = open(filename, "r")
linelist = f.readlines()
# finds a location several lines above the start of the data
# linelist[n] reads the entire line at location n
for ln in range(1000):
if 'THE FOLLOWING TABLE' in linelist[ln]:
break
# line0 is the index of first line of the data
line0 = ln + 5;
E = np.zeros((21**3,8,6))
c = -1
# this series of loops generates a 9261x8 dataset of E11s (element x integration point)
for k in range(21**3):
for jj in range(8):
c += 1
E[k,jj,:] = linelist[line0 + c].split()[3:]
f.close()
# here we average all 8 integration points in each element cell
E = np.mean(E, axis=1)
Etot = np.zeros([el**3,6])
# here we convert the strain tensor at each location from crystal to
# sample frame
for k in range(21**3):
# E_ten_cry is the strain tensor at the spatial location of interest
# in the crystal frame
E_ten_cry = np.array([[ E[k,0], 0.5*E[k,3], 0.5*E[k,4]],
[0.5*E[k,3], E[k,1], 0.5*E[k,5]],
[0.5*E[k,4], 0.5*E[k,5], E[k,2]]])
# Here we convert from crystal to sample frame
E_ten_samp = np.dot(ori_mat[:,:,k,sn].T ,np.dot(E_ten_cry,ori_mat[:,:,k,sn]))
# component order: 11, 22, 33, 12, 13, 23, matching the construction of E_ten_cry above
Etot[k,:] = [E_ten_samp[0,0],E_ten_samp[1,1],E_ten_samp[2,2],
E_ten_samp[0,1],E_ten_samp[0,2],E_ten_samp[1,2]]
return Etot
def WP(msg,filename):
"""
Summary:
This function takes an input message and a filename, and appends that
message to the file. This function also prints the message
Inputs:
msg (string): the message to write and print.
filename (string): the full name of the file to append to.
"""
fil = open(filename, 'a')
print msg
fil.write(msg)
fil.write('\n')
fil.close()
def validate(M_val,specinfc,H,el=21):
## vectorize the frequency-space microstructure function for the validation
## dataset
lin_M = np.zeros((el**3,H),dtype = 'complex64')
for h in xrange(H):
lin_M[:,h] = np.reshape(M_val[:,:,:,h],el**3)
## find the frequency-space response of the validation microstructure
## and convert to the real space
lin_sum = np.sum(np.conjugate(specinfc) * lin_M, 1)
mks_F = np.reshape(lin_sum,[21,21,21])
mks_R = np.fft.ifftn(mks_F).real
#np.save('MKS_2stOrd_resp',mks_R)
return mks_R
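# --- Hedged usage sketch (not part of the original file): a quick check of
# independent_columns() on a made-up matrix whose second column is twice the
# first, so only columns 0 and 2 should be reported as independent.
if __name__ == "__main__":
    A_demo = np.array([[1., 2., 0.],
                       [2., 4., 1.],
                       [3., 6., 0.]])
    print(independent_columns(A_demo))  # expected: [0 2]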
|
[
"noahhpaulson@gmail.com"
] |
noahhpaulson@gmail.com
|