| blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 2–616 chars) | content_id (string, 40 chars) | detected_licenses (list, 0–69 items) | license_type (string, 2 classes) | repo_name (string, 5–118 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 4–63 chars) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (string, 246 classes) | content (string, 2–10.3M chars) | authors (list, 1 item) | author_id (string, 0–212 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
87eff0b3c9ddab212912be87826ddc770f8a2187
|
ac176f429c420a8d6290c57df14346d207cff3de
|
/leetcodepython/app/leetcode/380.py
|
9ce7dfb0338e06c907aba080ed7763a4ec93c16d
|
[] |
no_license
|
xu20160924/leetcode
|
72c6fc02428e568148af4503c63c6f2553982a1c
|
43e12a3db5b4087913ec338e652ae7fd59859c23
|
refs/heads/master
| 2023-08-04T14:41:06.964273
| 2022-11-20T13:39:28
| 2022-11-20T13:39:28
| 252,931,114
| 0
| 0
| null | 2023-07-23T10:52:04
| 2020-04-04T06:57:49
|
Java
|
UTF-8
|
Python
| false
| false
| 680
|
py
|
from random import choice
class RandomizedSet():
def __init__(self):
self.dict = {}
self.list = []
def insert(self, val: int) -> bool:
if val in self.dict:
return False
self.dict[val] = len(self.list)
self.list.append(val)
return True
def remove(self, val: int) -> bool:
if val in self.dict:
last_element, idx = self.list[-1], self.dict[val]
self.list[idx], self.dict[last_element] = last_element, idx
self.list.pop()
del self.dict[val]
return True
return False
def getRandom(self) -> int:
return choice(self.list)
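# Usage sketch (illustrative; not part of the original solution). The dict gives
# O(1) lookup/removal and the list gives O(1) random access; remove() swaps the
# victim with the last element so the list pop stays O(1).
if __name__ == "__main__":
    rs = RandomizedSet()
    assert rs.insert(1) and rs.insert(2)
    assert not rs.insert(1)      # duplicates are rejected
    assert rs.remove(1)
    assert rs.getRandom() == 2   # only one element remains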
|
[
"xu20151211@gmail.com"
] |
xu20151211@gmail.com
|
ec8be8b241f2596c74299a4b06fd66ad143d458a
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2020_03_01_preview/aio/operations/_component_linked_storage_accounts_operations.py
|
5c5b89b09688f8fc50620bd2e8f592eb267b4831
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852
| 2023-02-15T13:30:47
| 2023-02-15T13:30:47
| 157,927,277
| 0
| 0
|
MIT
| 2022-07-19T08:05:23
| 2018-11-16T22:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 25,551
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._component_linked_storage_accounts_operations import (
build_create_and_update_request,
build_delete_request,
build_get_request,
build_update_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ComponentLinkedStorageAccountsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.applicationinsights.v2020_03_01_preview.aio.ApplicationInsightsManagementClient`'s
:attr:`component_linked_storage_accounts` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get(
self, resource_group_name: str, resource_name: str, storage_type: Union[str, _models.StorageType], **kwargs: Any
) -> _models.ComponentLinkedStorageAccounts:
"""Returns the current linked storage settings for an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource. Required.
:type resource_name: str
:param storage_type: The type of the Application Insights component data source for the linked
storage account. "ServiceProfiler" Required.
:type storage_type: str or
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.StorageType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentLinkedStorageAccounts or the result of cls(response)
:rtype:
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.ComponentLinkedStorageAccounts
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-03-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-03-01-preview")
)
cls: ClsType[_models.ComponentLinkedStorageAccounts] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
storage_type=storage_type,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseLinkedStorage, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ComponentLinkedStorageAccounts", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/components/{resourceName}/linkedStorageAccounts/{storageType}"
}
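    # Hedged usage sketch (not part of the generated file; the client construction
    # and resource names below are assumptions based on the standard azure-mgmt pattern):
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.applicationinsights.aio import ApplicationInsightsManagementClient
    #
    #   client = ApplicationInsightsManagementClient(DefaultAzureCredential(), subscription_id)
    #   accounts = await client.component_linked_storage_accounts.get(
    #       resource_group_name="my-rg",
    #       resource_name="my-component",
    #       storage_type="ServiceProfiler",
    #   )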
@overload
async def create_and_update(
self,
resource_group_name: str,
resource_name: str,
storage_type: Union[str, _models.StorageType],
linked_storage_accounts_properties: _models.ComponentLinkedStorageAccounts,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ComponentLinkedStorageAccounts:
"""Replace current linked storage account for an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource. Required.
:type resource_name: str
:param storage_type: The type of the Application Insights component data source for the linked
storage account. "ServiceProfiler" Required.
:type storage_type: str or
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.StorageType
:param linked_storage_accounts_properties: Properties that need to be specified to update
linked storage accounts for an Application Insights component. Required.
:type linked_storage_accounts_properties:
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.ComponentLinkedStorageAccounts
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentLinkedStorageAccounts or the result of cls(response)
:rtype:
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.ComponentLinkedStorageAccounts
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_and_update(
self,
resource_group_name: str,
resource_name: str,
storage_type: Union[str, _models.StorageType],
linked_storage_accounts_properties: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ComponentLinkedStorageAccounts:
"""Replace current linked storage account for an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource. Required.
:type resource_name: str
:param storage_type: The type of the Application Insights component data source for the linked
storage account. "ServiceProfiler" Required.
:type storage_type: str or
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.StorageType
:param linked_storage_accounts_properties: Properties that need to be specified to update
linked storage accounts for an Application Insights component. Required.
:type linked_storage_accounts_properties: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentLinkedStorageAccounts or the result of cls(response)
:rtype:
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.ComponentLinkedStorageAccounts
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_and_update(
self,
resource_group_name: str,
resource_name: str,
storage_type: Union[str, _models.StorageType],
linked_storage_accounts_properties: Union[_models.ComponentLinkedStorageAccounts, IO],
**kwargs: Any
) -> _models.ComponentLinkedStorageAccounts:
"""Replace current linked storage account for an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource. Required.
:type resource_name: str
:param storage_type: The type of the Application Insights component data source for the linked
storage account. "ServiceProfiler" Required.
:type storage_type: str or
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.StorageType
:param linked_storage_accounts_properties: Properties that need to be specified to update
linked storage accounts for an Application Insights component. Is either a
        ComponentLinkedStorageAccounts type or an IO type. Required.
:type linked_storage_accounts_properties:
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.ComponentLinkedStorageAccounts or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentLinkedStorageAccounts or the result of cls(response)
:rtype:
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.ComponentLinkedStorageAccounts
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-03-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-03-01-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ComponentLinkedStorageAccounts] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(linked_storage_accounts_properties, (IO, bytes)):
_content = linked_storage_accounts_properties
else:
_json = self._serialize.body(linked_storage_accounts_properties, "ComponentLinkedStorageAccounts")
request = build_create_and_update_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
storage_type=storage_type,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_and_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseLinkedStorage, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ComponentLinkedStorageAccounts", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_and_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/components/{resourceName}/linkedStorageAccounts/{storageType}"
}
@overload
async def update(
self,
resource_group_name: str,
resource_name: str,
storage_type: Union[str, _models.StorageType],
linked_storage_accounts_properties: _models.ComponentLinkedStorageAccountsPatch,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ComponentLinkedStorageAccounts:
"""Update linked storage accounts for an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource. Required.
:type resource_name: str
:param storage_type: The type of the Application Insights component data source for the linked
storage account. "ServiceProfiler" Required.
:type storage_type: str or
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.StorageType
:param linked_storage_accounts_properties: Properties that need to be specified to update a
linked storage accounts for an Application Insights component. Required.
:type linked_storage_accounts_properties:
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.ComponentLinkedStorageAccountsPatch
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentLinkedStorageAccounts or the result of cls(response)
:rtype:
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.ComponentLinkedStorageAccounts
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self,
resource_group_name: str,
resource_name: str,
storage_type: Union[str, _models.StorageType],
linked_storage_accounts_properties: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ComponentLinkedStorageAccounts:
"""Update linked storage accounts for an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource. Required.
:type resource_name: str
:param storage_type: The type of the Application Insights component data source for the linked
storage account. "ServiceProfiler" Required.
:type storage_type: str or
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.StorageType
:param linked_storage_accounts_properties: Properties that need to be specified to update a
linked storage accounts for an Application Insights component. Required.
:type linked_storage_accounts_properties: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentLinkedStorageAccounts or the result of cls(response)
:rtype:
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.ComponentLinkedStorageAccounts
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self,
resource_group_name: str,
resource_name: str,
storage_type: Union[str, _models.StorageType],
linked_storage_accounts_properties: Union[_models.ComponentLinkedStorageAccountsPatch, IO],
**kwargs: Any
) -> _models.ComponentLinkedStorageAccounts:
"""Update linked storage accounts for an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource. Required.
:type resource_name: str
:param storage_type: The type of the Application Insights component data source for the linked
storage account. "ServiceProfiler" Required.
:type storage_type: str or
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.StorageType
:param linked_storage_accounts_properties: Properties that need to be specified to update a
linked storage accounts for an Application Insights component. Is either a
        ComponentLinkedStorageAccountsPatch type or an IO type. Required.
:type linked_storage_accounts_properties:
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.ComponentLinkedStorageAccountsPatch
or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentLinkedStorageAccounts or the result of cls(response)
:rtype:
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.ComponentLinkedStorageAccounts
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-03-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-03-01-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ComponentLinkedStorageAccounts] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(linked_storage_accounts_properties, (IO, bytes)):
_content = linked_storage_accounts_properties
else:
_json = self._serialize.body(linked_storage_accounts_properties, "ComponentLinkedStorageAccountsPatch")
request = build_update_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
storage_type=storage_type,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseLinkedStorage, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ComponentLinkedStorageAccounts", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/components/{resourceName}/linkedStorageAccounts/{storageType}"
}
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, resource_name: str, storage_type: Union[str, _models.StorageType], **kwargs: Any
) -> None:
"""Delete linked storage accounts for an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource. Required.
:type resource_name: str
:param storage_type: The type of the Application Insights component data source for the linked
storage account. "ServiceProfiler" Required.
:type storage_type: str or
~azure.mgmt.applicationinsights.v2020_03_01_preview.models.StorageType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-03-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-03-01-preview")
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
storage_type=storage_type,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseLinkedStorage, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/components/{resourceName}/linkedStorageAccounts/{storageType}"
}
|
[
"noreply@github.com"
] |
kurtzeborn.noreply@github.com
|
9182eaa7f5f7285efe41b14655d30b60846c8320
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_zephyrs.py
|
38f03736b025c8e20af7c2b4b3508ac068fbd3ab
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from xai.brain.wordbase.nouns._zephyr import _ZEPHYR
#class header
class _ZEPHYRS(_ZEPHYR):
def __init__(self,):
_ZEPHYR.__init__(self)
self.name = "ZEPHYRS"
self.specie = 'nouns'
self.basic = "zephyr"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
f33788314ab1fcd7ad7f64d8e44b77efbc9e97d2
|
198f73858c2e6064d86a0f05fbd3299e795b880c
|
/home/migrations/0006_userdetail_image.py
|
df7273956d579d51d9a921d58eafb733c6b73e6e
|
[
"Apache-2.0"
] |
permissive
|
sandipsandal/Just-A-Thought
|
756e3a6c01158759bd5d01924b4f3449d64b3506
|
97f97404b303deb2ea7bcc86d89b2b21b3715fba
|
refs/heads/master
| 2022-12-23T11:05:32.701479
| 2020-09-26T12:14:03
| 2020-09-26T12:14:03
| 298,791,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
# Generated by Django 3.0.9 on 2020-08-18 18:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0005_userdetail_mobile'),
]
operations = [
migrations.AddField(
model_name='userdetail',
name='image',
field=models.ImageField(default='default.jpg', null=True, upload_to='profile_pics'),
),
]
|
[
"sandipsandal@gmail.com"
] |
sandipsandal@gmail.com
|
b4af09295fdcf9510b2c4ae49e5f248e19407252
|
987390ca6481ec5aa2b9e0e0e849203b6c22ce62
|
/zkeco-core/python-support/base/variable_for_trans.py
|
a73ef2f2f8cde16dc67c065fb9324ec3030e7872
|
[] |
no_license
|
alungboy/johan-doc
|
81b2363e7f2ad189d0623007eea66233a2e18f1c
|
7ced14577405caf6127df03007619fe9cfda3847
|
refs/heads/master
| 2020-04-03T18:01:08.531971
| 2013-08-13T04:26:42
| 2013-08-13T04:26:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,369
|
py
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
#Translations of variables are collected in this list so that makemessage_ext can detect them
#list_variable_for_trans=[
#_("get_emp_name"),
#_("name"),
#_("groups"),
#_("accfirstopen_set"),
#_(u"首卡常开"),
#_("get_name_by_PIN"),
#_(u"姓名"),
#_("accmorecardset_set"),
#_(u"多卡开门"),
#_("interlock_details"),
#_(u"互锁设置信息"),
#_("antiback_details"),
#_(u"反潜设置信息"),
#_("emp_group"),
#_(u"人员"),
#_("morecard_emp_count"),
#_(u"人员数"),
#_("foemp_count"),
#_(u"可开门人数"),
#_("emp_count"),
#_(u"人员数"),
#_("level_count"),
#_(u"从属权限组数"),
#_("show_status"),
#_(u"状态"),
#_("show_enabled"),
#_(u"启用"),
#_(u"get_template"),
#_(u"指纹数"),
#_(u"get_ExceptionID"),
#_(u"例外情况"),
#_(u"get_process_status"),
#_(u"设备执行命令进度"),
#_(u"get_attarea"),
#_(u"所属区域"),
#_(u"deptadmin_set"),
#_(u"授权部门"),
#_(u"areaadmin_set"),
#_(u"授权区域"),
#_(u"morecardgroup"),
#_(u"多卡开门组合"),
#_(u"get_user_template"),
#_(u"指纹数"),
#
#_(u"personnel"),
#_(u"人事"),
#_(u"iclock"),
#_(u"设备"),
#_(u"iaccess"),
#_(u"门禁"),
#_(u"att"),
#_(u"考勤"),
#_(u"base"),
#_(u"系统设置"),
#_(u"Can add 角色"),
#_(u"增加角色"),
#_(u"Can change 角色"),
#_(u"修改角色"),
#_(u"Can add 用户"),
#_(u"增加用户"),
#_(u"Can change 用户"),
#_(u"修改用户"),
#_(u"Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters")
#_(u"浏览调休"),
#_(u"浏览公告类别"),
#_(u"浏览联动设置"),
#_(u"浏览临时排班"),
#_(u"浏览假类"),
#_(u"浏览员工排班"),
#_(u"浏览考勤时段"),
#_(u"浏览班次"),
#_(u"浏览班次明细"),
#_(u"浏览统计结果详情"),
#_(u"浏览统计项目表"),
#_(u"浏览考勤参数"),
#_(u"浏览考勤明细表"),
#_(u"浏览数据库管理"),
#_(u"浏览通信命令详情"),
#_(u"浏览考勤计算与报表"),
#_(u"浏览考勤设备管理"),
#_(u"浏览数据库管理"),
#_(u"浏览区域用户管理"),
#_(u"浏览人员"),
#_(u"浏览设备监控"),
#_(u"浏览监控全部"),
#_(u"浏览报警事件"),
#_(u"浏览全部门禁事件"),
#_(u"浏览门禁异常事件"),
#_(u"浏览人员门禁权限"),
#_(u"浏览考勤节假日"),
#_(u"浏览补签卡"),
#_(u"浏览请假"),
#_(u"浏览轮班表"),
#_(u"浏览角色"),
#_(u"浏览权限"),
#_(u"浏览用户"),
#_(u"浏览日志记录"),
#_(u"浏览个性设置"),
#_(u"浏览系统参数"),
#_(u"浏览基础代码表"),
#_(u"浏览互锁设置"),
#_(u"浏览反潜设置"),
#_(u"浏览多卡开门人员组"),
#_(u"浏览多卡开门组"),
#_(u"浏览多卡开门设置"),
#_(u"浏览实时监控记录"),
#_(u"浏览门禁时间段"),
#_(u"浏览门禁节假日"),
#_(u"浏览门"),
#_(u"浏览门禁权限组"),
#_(u"浏览门禁设备扩展参数"),
#_(u"浏览韦根卡格式"),
#_(u"浏览首卡常开设置"),
#_(u"浏览人员指纹"),
#_(u"浏览原始记录表"),
#_(u"浏览设备"),
#_(u"浏览服务器下发命令"),
#_(u"浏览操作设备日志"),
#_(u"浏览设备通讯日志"),
#_(u"浏览人事报表"),
#_(u"浏览人员发卡"),
#_(u"浏览人员离职"),
#_(u"浏览人员调动"),
#_(u"浏览区域设置"),
#_(u"浏览卡类型"),
#_(u"浏览卡类型"),
#_(u"浏览考勤报表"),
#_(u"浏览部门"),
#_(u"浏览公告发布"),
#_(u"浏览系统提醒设置"),
#_(u"浏览用户消息确认"),
#_(u"查看设备操作日志"),
#_(u"清除临时排班记录"),
#_(u"新增临时排班"),
#_(u"清除排班记录"),
#_(u"新增请假"),
#_(u"员工卡片清单"),
#_(u"数据初始化"),
#_(u"批量发卡"),
#_(u"挂失卡"),
#_(u"解挂卡"),
#_(u"关闭门禁"),
#_(u"关闭考勤"),
#_(u"离职恢复"),
#_(u"为区域添加用户"),
#_(u"编辑多卡开门"),
#_(u"清除数据"),
#_(u"重新上传日志"),
#_(u"设置自动关机"),
#_(u"重新启动设备"),
#_(u"更新设备信息"),
#_(u"清除图片"),
#_(u"恢复使用"),
#_(u"病假"),
#_(u"事假"),
#_(u"产假"),
#_(u"探亲假"),
#_(u"年假"),
#_(u"应到/实到"),
#_(u"迟到"),
#_(u"早退"),
#_(u"请假"),
#_(u"旷工"),
#_(u"加班"),
#_(u"未签到"),
#_(u"未签退"),
#_(u"浏览"),
#]
|
[
"xiongjianhong@a5b1b082-b159-ab12-9a9e-d79e7dfc8adf"
] |
xiongjianhong@a5b1b082-b159-ab12-9a9e-d79e7dfc8adf
|
81b17f81b23475bf220d8fcfc6892e06a49c3461
|
0d87906ca32b68965c3aa5b4cb829383276b13c8
|
/veriloggen/dataflow/dataflow.py
|
e2fd12adb393621889cb4725977bbbee6158d103
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
tanbour/veriloggen
|
301beea3d9419c2d63d1d1159a2ec52ed316ef20
|
858fbc872be78964cfc7e5a23e1491b2c3d5cf52
|
refs/heads/master
| 2020-03-18T20:38:24.653119
| 2018-05-19T04:49:01
| 2018-05-19T04:49:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,524
|
py
|
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import copy
import collections
import functools
import veriloggen.core.vtypes as vtypes
from veriloggen.core.module import Module
from veriloggen.seq.seq import Seq
from . import visitor
from . import dtypes
from . import mul
from . import scheduler
from . import allocator
from . import graph
# ID counter for 'Dataflow'
_dataflow_counter = 0
def reset():
global _dataflow_counter
_dataflow_counter = 0
dtypes._object_counter = 0
mul.reset()
def DataflowManager(module, clock, reset,
aswire=True, no_hook=False):
return Dataflow(module=module, clock=clock, reset=reset,
aswire=aswire, no_hook=no_hook)
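# Hedged usage sketch (not from this file; the dtypes-based names below are
# assumptions modeled on veriloggen's dataflow examples):
#
#   import veriloggen.dataflow as dataflow
#   x = dataflow.Variable('xd', valid='xv', width=32)
#   y = dataflow.Variable('yd', valid='yv', width=32)
#   df = dataflow.Dataflow(x + y)
#   m = df.to_module('adder')   # emits a veriloggen Module
#   print(m.to_verilog())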
class Dataflow(object):
def __init__(self, *nodes, **opts):
# ID for manager reuse and merge
global _dataflow_counter
self.object_id = _dataflow_counter
_dataflow_counter += 1
self.nodes = set(nodes)
self.max_stage = 0
self.last_input = None
self.last_output = None
self.module = opts['module'] if 'module' in opts else None
self.clock = opts['clock'] if 'clock' in opts else None
self.reset = opts['reset'] if 'reset' in opts else None
self.aswire = opts['aswire'] if 'aswire' in opts else True
self.seq = None
if (self.module is not None and
self.clock is not None and self.reset is not None):
no_hook = opts['no_hook'] if 'no_hook' in opts else False
if not no_hook:
self.module.add_hook(self.implement)
seq_name = (opts['seq_name'] if 'seq_name' in opts else
'_dataflow_seq_%d' % self.object_id)
self.seq = Seq(self.module, seq_name, self.clock, self.reset)
#-------------------------------------------------------------------------
def add(self, *nodes):
self.nodes.update(set(nodes))
#-------------------------------------------------------------------------
def to_module(self, name, clock='CLK', reset='RST', aswire=False, seq_name=None):
""" generate a Module definion """
m = Module(name)
clk = m.Input(clock)
rst = m.Input(reset)
m = self.implement(m, clk, rst, aswire=aswire, seq_name=seq_name)
return m
#-------------------------------------------------------------------------
def implement(self, m=None, clock=None, reset=None, aswire=None, seq_name=None):
""" implemente actual registers and operations in Verilog """
if m is None:
m = self.module
if clock is None:
clock = self.clock
if reset is None:
reset = self.reset
if self.seq is None or self.seq.done:
if seq_name is None:
seq_name = '_dataflow_seq_%d' % self.object_id
seq = Seq(m, seq_name, clock, reset)
else:
seq = self.seq
if aswire is None:
aswire = self.aswire
# for mult and div
m._clock = clock
m._reset = reset
dataflow_nodes = self.nodes
input_visitor = visitor.InputVisitor()
input_vars = set()
for node in sorted(dataflow_nodes, key=lambda x: x.object_id):
input_vars.update(input_visitor.visit(node))
output_visitor = visitor.OutputVisitor()
output_vars = set()
for node in sorted(dataflow_nodes, key=lambda x: x.object_id):
output_vars.update(output_visitor.visit(node))
# add input ports
for input_var in sorted(input_vars, key=lambda x: x.object_id):
input_var._implement_input(m, seq, aswire)
# schedule
sched = scheduler.ASAPScheduler()
sched.schedule(output_vars)
# balance output stage depth
max_stage = 0
for output_var in sorted(output_vars, key=lambda x: x.object_id):
max_stage = dtypes._max(max_stage, output_var.end_stage)
self.max_stage = max_stage
output_vars = sched.balance_output(output_vars, max_stage)
# get all vars
all_visitor = visitor.AllVisitor()
all_vars = set()
for output_var in sorted(output_vars, key=lambda x: x.object_id):
all_vars.update(all_visitor.visit(output_var))
# allocate (implement signals)
alloc = allocator.Allocator()
alloc.allocate(m, seq, all_vars)
# set default module information
for var in sorted(all_vars, key=lambda x: x.object_id):
var._set_module(m)
var._set_df(self)
if var.seq is not None:
seq.update(var.seq)
var._set_seq(seq)
# add output ports
for output_var in sorted(output_vars, key=lambda x: x.object_id):
output_var._implement_output(m, seq, aswire)
# save schedule result
self.last_input = input_vars
self.last_output = output_vars
return m
#-------------------------------------------------------------------------
def draw_graph(self, filename='out.png', prog='dot', rankdir='LR', approx=False):
if self.last_output is None:
self.to_module()
graph.draw_graph(self.last_output, filename=filename, prog=prog,
rankdir=rankdir, approx=approx)
def enable_draw_graph(self, filename='out.png', prog='dot', rankdir='LR', approx=False):
self.module.add_hook(self.draw_graph,
kwargs={'filename': filename, 'prog': prog,
'rankdir': rankdir, 'approx': approx})
#-------------------------------------------------------------------------
def get_input(self):
if self.last_input is None:
return collections.OrderedDict()
ret = collections.OrderedDict()
for input_var in sorted(self.last_input, key=lambda x: x.object_id):
key = str(input_var.input_data)
value = input_var
ret[key] = value
return ret
def get_output(self):
if self.last_output is None:
return collections.OrderedDict()
ret = collections.OrderedDict()
for output_var in sorted(self.last_output, key=lambda x: x.object_id):
key = str(output_var.output_data)
value = output_var
ret[key] = value
return ret
#-------------------------------------------------------------------------
def pipeline_depth(self):
return self.max_stage
#-------------------------------------------------------------------------
def __getattr__(self, attr):
try:
return object.__getattr__(self, attr)
except AttributeError as e:
if attr.startswith('__') or attr not in dir(dtypes):
raise e
func = getattr(dtypes, attr)
@functools.wraps(func)
def wrapper(*args, **kwargs):
v = func(*args, **kwargs)
if isinstance(v, (tuple, list)):
for item in v:
self._set_info(item)
else:
self._set_info(v)
return v
return wrapper
def _set_info(self, v):
if isinstance(v, dtypes._Numeric):
v._set_module(self.module)
v._set_df(self)
v._set_seq(self.seq)
|
[
"shta.ky1018@gmail.com"
] |
shta.ky1018@gmail.com
|
38217eb5a0ddd6cc4e151e059afd154bbdce7ed8
|
2946cc54680c68eb32d4c1c514f5771979a1b25b
|
/zadachapop.py
|
f05ca14a666d95f64f6754761e823951d3f0c11b
|
[
"MIT"
] |
permissive
|
Aijana-github/Aijana_new_github
|
f00bcb18fb959b017cc6e8c96c4427fdf2e3dc2d
|
d7eb06b7c5374d2a59484fe9e78f60eb2645ab7e
|
refs/heads/main
| 2023-01-12T06:16:31.175892
| 2020-11-09T03:31:06
| 2020-11-09T03:31:06
| 306,527,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
list1 = [1, 2, 3, 4, 5, 6]
def change_list(mode, number):
    if mode == 'add':
        list1.append(number)
    elif mode == 'pop' and number < len(list1):
        list1.pop(number)  # pop() takes an index, not a value
    elif mode == 'remove' and number in list1:
        list1.remove(number)
    elif mode == 'pop':
        print('You entered too large a number')
    else:
        print('You entered an invalid mode!')
change_list('add', 7)
change_list('pop', 5)
change_list('remove', 7)
print(list1)
|
[
"ayzhana.abdrahmanova@mail.ru"
] |
ayzhana.abdrahmanova@mail.ru
|
5eb8452bc0e7c1854864fd2798cebc3c12178e0d
|
959617c05cd561b14b647bf6a55f5ab2bfda3d9a
|
/pid/test-pid.py
|
019cb0db00470e570f33f8aea031a1647022b252
|
[] |
no_license
|
HeBeiLiangJianFei/selenium-geetest--
|
c5fd7555b10da98e7e67a41e76ff419cb81a224b
|
76b9904d1133c98f5ee5caefca1233b4e55d8753
|
refs/heads/master
| 2020-04-27T04:09:22.723949
| 2020-03-14T03:40:29
| 2020-03-14T03:40:29
| 174,045,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,494
|
py
|
import time
class PID:
def __init__(self, P=0.2, I=0.0, D=0.0):
self.Kp = P
self.Ki = I
self.Kd = D
self.sample_time = 0.00
self.current_time = time.time()
self.last_time = self.current_time
self.clear()
def clear(self):
self.SetPoint = 0.0
self.PTerm = 0.0
self.ITerm = 0.0
self.DTerm = 0.0
self.last_error = 0.0
self.int_error = 0.0
self.windup_guard = 20.0
self.output = 0.0
def update(self, feedback_value):
error = self.SetPoint - feedback_value
self.current_time = time.time()
delta_time = self.current_time - self.last_time
delta_error = error - self.last_error
if (delta_time >= self.sample_time):
            self.PTerm = self.Kp * error  # proportional term
            self.ITerm += error * delta_time  # integral term
if (self.ITerm < -self.windup_guard):
self.ITerm = -self.windup_guard
elif (self.ITerm > self.windup_guard):
self.ITerm = self.windup_guard
self.DTerm = 0.0
if delta_time > 0:
self.DTerm = delta_error / delta_time
self.last_time = self.current_time
self.last_error = error
self.output = self.PTerm + (self.Ki * self.ITerm) + (self.Kd * self.DTerm)
def setKp(self, proportional_gain):
self.Kp = proportional_gain
def setKi(self, integral_gain):
self.Ki = integral_gain
def setKd(self, derivative_gain):
self.Kd = derivative_gain
def setWindup(self, windup):
self.windup_guard = windup
def setSampleTime(self, sample_time):
self.sample_time = sample_time
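# Minimal usage sketch (illustrative; not from the original file):
if __name__ == "__main__":
    demo = PID(P=1.2, I=1.0, D=0.001)
    demo.SetPoint = 1.0
    demo.setSampleTime(0.01)
    demo.update(0.5)   # one control step against feedback 0.5
    print(demo.output)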
import time
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import make_interp_spline  # the old 'spline' helper was removed from SciPy
#In essence, this program holds the setpoint at zero for the first nine steps, then drives the output toward 1 through a system with a given transfer function
def test_pid(P = 0.2, I = 0.0, D= 0.0, L=100):
"""Self-test PID class
.. note::
...
for i in range(1, END):
pid.update(feedback)
output = pid.output
if pid.SetPoint > 0:
feedback += (output - (1/i))
if i>9:
pid.SetPoint = 1
time.sleep(0.02)
---
"""
pid = PID(P, I, D)
pid.SetPoint=0.0
pid.setSampleTime(0.01)
END = L
feedback = 0
feedback_list = []
time_list = []
setpoint_list = []
for i in range(1, END):
pid.update(feedback)
output = pid.output
if pid.SetPoint > 0:
            feedback += output  # (output - (1/i)) would model the controlled system's transfer function
if i>9:
pid.SetPoint = 1
time.sleep(0.01)
feedback_list.append(feedback)
setpoint_list.append(pid.SetPoint)
time_list.append(i)
time_sm = np.array(time_list)
time_smooth = np.linspace(time_sm.min(), time_sm.max(), 300)
    feedback_smooth = make_interp_spline(time_sm, feedback_list)(time_smooth)
plt.figure(0)
plt.plot(time_smooth, feedback_smooth)
plt.plot(time_list, setpoint_list)
plt.xlim((0, L))
plt.ylim((min(feedback_list)-0.5, max(feedback_list)+0.5))
plt.xlabel('time (s)')
plt.ylabel('PID (PV)')
plt.title('TEST PID')
plt.ylim((1-0.5, 1+0.5))
plt.grid(True)
plt.show()
# if __name__ == "__main__":
# test_pid(1.2, 1, 0.001, L=80)
# test_pid(0.8, L=50)
|
[
"m13833170342@163.com"
] |
m13833170342@163.com
|
40468fcbfed1e9ee8f9ffa01d3ec8979ac0b5493
|
575ab9f0027d82a26041f37a443cda16cf010379
|
/DeepLearning/BILSTM-CRF/Model.py
|
c784d28dd143dd8d704b221371c5da68a36290c4
|
[] |
no_license
|
huning2009/MachineLearning
|
ca665d63c6924d6229bcdea09d0e9fe715d2d1c8
|
016b98eae7e31aab4e2ca5a14e86150f31e97bba
|
refs/heads/master
| 2022-04-08T06:01:05.370260
| 2020-03-12T04:24:16
| 2020-03-12T04:24:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,612
|
py
|
# -*- coding: utf-8 -*-
"""
@version:
@author: CharlesXu
@license: Q_S_Y_Q
@file: Model.py
@time: 2018/4/3 17:50
@desc: BiLSTM + CRF model layer
"""
import numpy as np
import os, time, sys
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell
from tensorflow.contrib.crf import crf_log_likelihood
from tensorflow.contrib.crf import viterbi_decode
from DataPro import pad_sequences, batch_yield
from Utils import get_logger
from Eval import conlleval
class BiLSTM_CRF(object):
def __init__(self, args, embeddings, tag2label, vocab, paths, config):
self.batch_size = args.batch_size
self.epoch_num = args.epoch
self.hidden_dim = args.hidden_dim
self.embeddings = embeddings
self.CRF = args.CRF
self.update_embedding = args.update_embedding
self.dropout_keep_prob = args.dropout
self.optimizer = args.optimizer
self.lr = args.lr
self.clip_grad = args.clip
self.tag2label = tag2label
self.num_tags = len(tag2label)
self.vocab = vocab
self.shuffle = args.shuffle
self.model_path = paths['model_path']
self.summary_path = paths['summary_path']
self.logger = get_logger(paths['log_path'])
self.result_path = paths['result_path']
self.config = config
def build_graph(self):
self.add_placeholders()
self.lookup_layer_op()
self.biLSTM_layer_op()
self.softmax_pred_op()
self.loss_op()
self.trainstep_op()
self.init_op()
    # Implementation of each network layer
def add_placeholders(self):
self.word_ids = tf.placeholder(tf.int32, shape=[None, None], name="word_ids")
self.labels = tf.placeholder(tf.int32, shape=[None, None], name="labels")
self.sequence_lengths = tf.placeholder(tf.int32, shape=[None], name="sequence_lengths")
        # dropout: randomly drop some units during training
        self.dropout_pl = tf.placeholder(dtype=tf.float32, shape=[], name="dropout")
        # learning-rate placeholder
        self.lr_pl = tf.placeholder(dtype=tf.float32, shape=[], name="lr")
    # embedding lookup (worth studying carefully)
def lookup_layer_op(self):
with tf.variable_scope("words"):
            # the trainable embedding matrix
_word_embeddings = tf.Variable(self.embeddings, dtype=tf.float32, trainable=self.update_embedding,
name="_word_embeddings")
            # embedding_lookup picks the embedding rows for the given ids
word_embeddings = tf.nn.embedding_lookup(params=_word_embeddings, ids=self.word_ids,
name="word_embeddings")
self.word_embeddings = tf.nn.dropout(word_embeddings, self.dropout_pl)
def biLSTM_layer_op(self):
with tf.variable_scope("bi-lstm"):
cell_fw = LSTMCell(self.hidden_dim)
cell_bw = LSTMCell(self.hidden_dim)
(output_fw_seq, output_bw_seq), _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw = cell_fw,
cell_bw = cell_bw,
inputs = self.word_embeddings,
sequence_length = self.sequence_lengths,
dtype = tf.float32
)
output = tf.concat([output_fw_seq, output_bw_seq], axis=-1)
output = tf.nn.dropout(output, self.dropout_pl)
with tf.variable_scope("proj"):
W = tf.get_variable(name='W',
shape=[2 * self.hidden_dim, self.num_tags],
initializer=tf.contrib.layers.xavier_initializer(),
dtype=tf.float32)
b = tf.get_variable(name='b',
shape=[self.num_tags],
                                initializer=tf.zeros_initializer(),  # compare with tf.ones_initializer()
dtype=tf.float32)
s = tf.shape(output)
output = tf.reshape(output, [-1, 2*self.hidden_dim])
pred = tf.matmul(output, W) + b
self.logits = tf.reshape(pred, [-1, s[1], self.num_tags])
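        # logits shape: [batch_size, max_seq_len, num_tags]; consumed by the CRF
        # (or softmax) layer below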
def loss_op(self):
        '''
        crf_log_likelihood: use this as the loss function when the prediction target is a sequence
        :return:
        '''
if self.CRF:
            log_likelihood, self.transition_params = crf_log_likelihood(
                inputs=self.logits,  # unary potentials: per-tag scores, typically produced by a CNN/RNN/etc.
                tag_indices=self.labels,  # the ground-truth tag sequence
                sequence_lengths=self.sequence_lengths,  # actual sequence lengths
                # transition_params (transition scores) may be passed in; if omitted, the function estimates them
            )
self.loss = -tf.reduce_mean(log_likelihood)
else:
            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.labels)  # cross-entropy loss
            # tf.sequence_mask returns a boolean mask covering the first N positions of each row:
            # tf.sequence_mask([1, 3, 2], 5)  # [[True, False, False, False, False],
            #                                 #  [True, True, True, False, False],
            #                                 #  [True, True, False, False, False]]
            # tf.sequence_mask([[1, 3], [2, 0]])  # [[[True, False, False],
            #                                     #   [True, True, True]],
            #                                     #  [[True, True, False],
            #                                     #   [False, False, False]]]
            mask = tf.sequence_mask(self.sequence_lengths)
            # tf.boolean_mask(a, b) keeps the elements of a (m-dimensional) whose positions in b
            # are True, flattening the result to m-1 dimensions
losses = tf.boolean_mask(losses, mask)
self.loss = tf.reduce_mean(losses)
        # record the scalar loss for TensorBoard summaries
tf.summary.scalar("loss", self.loss)
def softmax_pred_op(self):
if not self.CRF:
self.labels_softmax_ = tf.argmax(self.logits, axis=-1)
self.labels_softmax_ = tf.cast(self.labels_softmax_, tf.int32)
def trainstep_op(self):
with tf.variable_scope("train_step"):
self.global_step = tf.Variable(0, name="global_step", trainable=False)
if self.optimizer == 'Adam':
optim = tf.train.AdamOptimizer(learning_rate=self.lr_pl)
elif self.optimizer == 'Adadelta':
optim = tf.train.AdadeltaOptimizer(learning_rate=self.lr_pl)
elif self.optimizer == 'Adagrad':
optim = tf.train.AdagradOptimizer(learning_rate=self.lr_pl)
elif self.optimizer == 'RMSProp':
optim = tf.train.RMSPropOptimizer(learning_rate=self.lr_pl)
elif self.optimizer == 'Momentum':
optim = tf.train.MomentumOptimizer(learning_rate=self.lr_pl, momentum=0.9)
elif self.optimizer == 'SGD':
optim = tf.train.GradientDescentOptimizer(learning_rate=self.lr_pl)
else:
optim = tf.train.GradientDescentOptimizer(learning_rate=self.lr_pl)
grands_and_vars = optim.compute_gradients(self.loss)
grands_and_vars_clip = [[tf.clip_by_value(g, -self.clip_grad, self.clip_grad), v] for g, v in grands_and_vars]
self.train_op = optim.apply_gradients(grands_and_vars_clip, global_step=self.global_step)
def init_op(self):
self.init_op = tf.global_variables_initializer()
def add_summary(self, sess):
'''
:param sess:
:return:
'''
self.merged = tf.summary.merge_all()
self.file_writer = tf.summary.FileWriter(self.summary_path, sess.graph)
def train(self, train, dev):
'''
:param train:
:param dev:
:return:
'''
saver = tf.train.Saver(tf.global_variables())
with tf.Session(config=self.config) as sess:
            sess.run(self.init_op)  # init_op was replaced by the initializer op inside build_graph
self.add_summary(sess)
for epoch in range(self.epoch_num):
self.run_one_epoch(sess, train, dev, self.tag2label, epoch, saver)
def test(self, test):
saver = tf.train.Saver()
with tf.Session(config=self.config) as sess:
self.logger.info('=========== testing ===========')
saver.restore(sess, self.model_path)
label_list, seq_len_list = self.dev_one_epoch(sess, test)
self.evaluate(label_list, seq_len_list, test)
def demo_one(self, sess, sent):
"""
:param sess:
:param sent:
:return:
"""
label_list = []
for seqs, labels in batch_yield(sent, self.batch_size, self.vocab, self.tag2label, shuffle=False):
label_list_, _ = self.predict_one_batch(sess, seqs)
label_list.extend(label_list_)
label2tag = {}
for tag, label in self.tag2label.items():
label2tag[label] = tag if label != 0 else label
tag = [label2tag[label] for label in label_list[0]]
return tag
def run_one_epoch(self, sess, train, dev, tag2label, epoch, saver):
'''
:param sess:
:param train:
:param dev:
:param tag2label:
:param epoch:
:param saver:
:return:
'''
num_batches = (len(train) + self.batch_size - 1) // self.batch_size
start_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
batches = batch_yield(train, self.batch_size, self.vocab, self.tag2label, shuffle=self.shuffle)
for step, (seqs, labels) in enumerate(batches):
sys.stdout.write(' processing: {} batch / {} batches.'.format(step + 1, num_batches) + '\r')
step_num = epoch * num_batches + step + 1
feed_dict, _ = self.get_feed_dict(seqs, labels, self.lr, self.dropout_keep_prob)
_, loss_train, summary, step_num = sess.run([self.train_op, self.loss, self.merged, self.global_step], feed_dict=feed_dict)
if step + 1 == 1 or (step + 1) % 300 == 0 or step + 1 == num_batches:
self.logger.info(
'{} epoch {}, step {}, loss: {:.4}, global_step: {}'.format(start_time, epoch + 1, step + 1,
loss_train, step_num))
self.file_writer.add_summary(summary, step_num)
if step + 1 == num_batches:
saver.save(sess, self.model_path, global_step=step_num)
self.logger.info('===========validation / test===========')
label_list_dev, seq_len_list_dev = self.dev_one_epoch(sess, dev)
self.evaluate(label_list_dev, seq_len_list_dev, dev, epoch)
def dev_one_epoch(self, sess, dev):
'''
:param sess:
:param dev:
:return:
'''
label_list, seq_len_list = [], []
for seqs, labels in batch_yield(dev, self.batch_size, self.vocab, self.tag2label, shuffle=False):
label_list_, seq_len_list_ = self.predict_one_batch(sess, seqs)
label_list.extend(label_list_)
seq_len_list.extend(seq_len_list_)
return label_list, seq_len_list
def predict_one_batch(self, sess, seqs):
'''
:param sess:
:param seqs:
:return:
'''
feed_dict, seq_len_list = self.get_feed_dict(seqs, dropout=1.0)
if self.CRF:
logits, transition_params = sess.run([self.logits, self.transition_params], feed_dict=feed_dict)
label_list = []
for logit, seq_len in zip(logits, seq_len_list):
viterbi_seq, _ = viterbi_decode(logit[:seq_len], transition_params)
label_list.append(viterbi_seq)
            return label_list, seq_len_list
        else:
            label_list = sess.run(self.labels_softmax_, feed_dict=feed_dict)
            return label_list, seq_len_list
def get_feed_dict(self, seqs, labels=None, lr=None, dropout=None):
'''
:param seqs:
:param labels:
:param lr:
:param dropout_keep_prob:
:return:
'''
word_ids, seq_len_list = pad_sequences(seqs, pad_mark=0)
feed_dict = {self.word_ids:word_ids, self.sequence_lengths:seq_len_list}
if labels is not None:
labels_, _ = pad_sequences(labels, pad_mark=0)
feed_dict[self.labels] = labels_
if lr is not None:
feed_dict[self.lr_pl] = lr
if dropout is not None:
feed_dict[self.dropout_pl] = dropout
return feed_dict, seq_len_list
def evaluate(self, label_list, seq_len_list, data, epoch=None):
'''
        Model evaluation
:param label_list:
:param seq_len_list:
:param data:
:param epoch:
:return:
'''
label2tag = {}
for tag, label in self.tag2label.items():
label2tag[label] = tag if label != 0 else label
model_predict = []
for label_, (sent, tag) in zip(label_list, data):
tag_ = [label2tag[label__] for label__ in label_]
sent_res = []
if len(label_) != len(sent):
print(sent)
print(len(label_))
print(tag)
for i in range(len(sent)):
sent_res.append([sent[i], tag[i], tag_[i]])
model_predict.append(sent_res)
        epoch_num = str(epoch + 1) if epoch is not None else 'test'
label_path = os.path.join(self.result_path, 'label_' + epoch_num)
metric_path = os.path.join(self.result_path, 'result_metric_' + epoch_num)
for _ in conlleval(model_predict, label_path, metric_path):
self.logger.info(_)
|
[
"charlesxu86@163.com"
] |
charlesxu86@163.com
|
b10c2f615a276a88bad43d9a2a4de0fd2cff1552
|
2386f273ee2be3a1b0fe7957f372dfc5eb98f53c
|
/w13/class_to_serialize.py
|
e86048952cb5ee1e2b6425ef37977bb5f0274ac2
|
[] |
no_license
|
MekanYaldyrov/python_course
|
7f84fd92d51903bd6bd4725b764247ad0f2be631
|
6150da69316a4229eae6c112ea1cbc3f592b275b
|
refs/heads/master
| 2023-02-10T12:19:49.690417
| 2021-01-09T12:32:09
| 2021-01-09T12:32:09
| 324,836,564
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
import json
class MyClass:
def __init__(self, name, surname, is_hired):
self.name = name
self.surname = surname
self.is_hired = is_hired
def obj_to_json(my_class_instance):
    return json.dumps(my_class_instance.__dict__)
def json_to_obj(json_string):
    obj_dict = json.loads(json_string)
    obj = MyClass(**obj_dict)
    return obj
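# Round-trip sketch (illustrative; not part of the original file):
if __name__ == "__main__":
    emp = MyClass("Ada", "Lovelace", True)
    payload = obj_to_json(emp)    # '{"name": "Ada", "surname": "Lovelace", "is_hired": true}'
    clone = json_to_obj(payload)
    assert clone.__dict__ == emp.__dict__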
|
[
"Mekan.yaldyrov@phystech.edu"
] |
Mekan.yaldyrov@phystech.edu
|
005fddaf792fc04b309616c6034b81aacf2e8156
|
1bf997c06373f7a25a961fb0eccd27347f17da04
|
/Test/Copy_playlist2folder.py
|
8e8b3a1ced5d987dd41fd30a846b018dadb0774f
|
[] |
no_license
|
sbuvanesh/P_MusicManagement
|
3f7a4d175fd9646a91da701f28d58312fe2c7a32
|
919f88f8845609a798bf2bea1e76d1951b26a97a
|
refs/heads/master
| 2021-01-10T04:02:58.371378
| 2015-12-21T20:05:22
| 2015-12-21T20:05:22
| 48,390,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 900
|
py
|
import shutil, os, sys
if __name__ == '__main__':
    if len(sys.argv) == 3:
        playlist_path = sys.argv[1]
        dst_path = sys.argv[2]
        os.chdir(dst_path)
        playlist_dst_dir = playlist_path.split('/')[-1].replace('.m3u', '')
        if not os.path.exists(playlist_dst_dir):
            os.mkdir(playlist_dst_dir)
        try:
            # use a context manager so the playlist file is always closed
            with open(playlist_path, 'r') as playlist:
                for line in playlist.readlines():
                    if line[0:4] != '#EXT':
                        song = line.replace('\n', '')
                        try:
                            shutil.copy(song, playlist_dst_dir)
                        except IOError:
                            print('Cannot copy file: ' + song)
                            continue
            print('Completed')
        except Exception as e:
            print(e)
    else:
        print('Script usage: <script> <playlist_path> <destination_path>')
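# Example invocation (hypothetical paths):
#   python Copy_playlist2folder.py /music/playlists/road.m3u /media/usb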
|
[
"sbuvanesh@gmail.com"
] |
sbuvanesh@gmail.com
|
b4ccb518eb8bf230a4b567858d689fcbd4e4e99e
|
3491f64abc9260775cb6f7118135af823a98d0cd
|
/server/apps/user/models.py
|
e3ebfcd7ab6690cf7b21775091dc2344365d3f25
|
[] |
no_license
|
Moonsteel22/shop_backend
|
f658eeba7a77cf677c1dec0b0974cac892e28de3
|
05cfec89bbe8e4c424f85f4b0545d5557fa41f5a
|
refs/heads/master
| 2023-07-21T23:05:03.549149
| 2021-08-27T11:20:21
| 2021-08-27T11:20:21
| 397,538,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
from django.db import models
from django.contrib.auth.models import User as IUser
class User(IUser):
phone = models.CharField(max_length=11, blank=True)
age = models.PositiveIntegerField(default=0)
img_url = models.CharField(max_length=100, blank=True)
role = models.CharField(max_length=2, default='U', blank=True)
class Meta:
managed = True
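# Note (not in the original file): subclassing django.contrib.auth.models.User
# uses Django's multi-table inheritance, so each row here carries an implicit
# OneToOneField to the auth user table.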
|
[
"aibulat.ryic@gmail.com"
] |
aibulat.ryic@gmail.com
|
6ddc57efa21919842854084e39f17c690a578d3b
|
1822440a663916fc4b38ba09edb91f7b0d313c6f
|
/tests/test_css.py
|
f66ca0bfaf0a349695078930bf1125cb6ca1d9d2
|
[
"BSD-2-Clause"
] |
permissive
|
herbstluftwm/herbstluftwm
|
09fb4318ed1a00bf96335a93cb7d3ebba58b3f2d
|
d6be49251ccaf0fed6edc30ee80017d40b5eeccf
|
refs/heads/master
| 2023-08-11T00:40:40.568941
| 2023-06-06T09:51:17
| 2023-06-06T09:51:17
| 24,884,178
| 1,167
| 131
|
NOASSERTION
| 2023-08-30T13:00:19
| 2014-10-07T09:27:20
|
C++
|
UTF-8
|
Python
| false
| false
| 14,792
|
py
|
import textwrap
import re
def test_basic_css_normalize(hlwm):
input2normalize = {
"// comment\n foo .p, bar.c,* +c { border-width: 4px 2px; }": """\
foo .p ,
bar.c ,
* + c {
border-width: 4px 2px;
}
""",
"x { /*com\nment*/ border-width: 1px 1px 1px 1px}": """\
x {
border-width: 1px 1px 1px 1px;
}
""",
"a.b { border-width: 2px } c.d e{border-width: 5px}": """\
a.b {
border-width: 2px;
}
c.d e {
border-width: 5px;
}
""",
"* { }": """\
* {
}
""",
".cl { border-width: 1px; border-width: 1px; border-width: 1px}":
"""\
.cl {
border-width: 1px;
border-width: 1px;
border-width: 1px;
}
""",
".cl { border-color: #9fbc00; background-color: #1234ab; }":
"""\
.cl {
border-color: #9fbc00;
background-color: #1234ab;
}
""",
"foo.class1 .class2 {border-left-width:4px; border-right-width : 2px ; } //": """\
foo.class1 .class2 {
border-left-width: 4px;
border-right-width: 2px;
}
""",
"// foo": "",
"// foo\n": "",
"/* end */": "",
}
for source, normalized in input2normalize.items():
output = hlwm.call(['debug_css', '--print-css', '--stylesheet=' + source]).stdout
assert output == textwrap.dedent(normalized)
# check that pretty printing is idempotent:
assert hlwm.call(['debug_css', '--print-css', '--stylesheet=' + output]).stdout == output
def test_basic_css_parse_error(hlwm):
input2error = {
'{ border-width: 2px; }': "need at least one selector",
', { border-width: 2px; }': "selector must not be empty",
'p { border-width: 2px;': "Expected }",
'p } border-width: 2px;': "Expected { but got \"}",
'/* unmatched': r'Expected \*/ but got EOF',
'/* unmatched\n': r'Expected \*/ but got EOF',
'/*\n': r'Expected \*/ but got EOF',
'* // { }': "but got EOF",
'* { // }': "Expected } but got EOF",
}
for source, error in input2error.items():
assert hlwm.call_xfail(['debug_css', '--print-css', '--stylesheet=' + source]) \
.expect_stderr(error)
def test_basic_dummy_tree(hlwm):
"""testing the interface used for testing...."""
input2normalize = {
'()': '()',
'(win (c focused (e f)))': '(win\n (c focused\n (e f)))',
'(win (c (e f)focused))': '(win\n (c focused\n (e f)))',
'((c d (e f)))': '(\n (c d\n (e f)))',
'((() (e f)))': '(\n (\n ()\n (e f)))',
}
for source, normalized in input2normalize.items():
output = hlwm.call(['debug_css', '--print-tree', '--tree=' + source]).stdout
assert output == normalized + '\n'
# check that pretty printing is idempotent:
assert hlwm.call(['debug_css', '--print-tree', '--tree=' + output]).stdout == output
input2error = {
'(': r'Expected \) but got EOF',
'( (a ())))': r'Expected EOF but got.*\)',
'() ()': r'Expected EOF but got.*\(',
}
for source, error in input2error.items():
hlwm.call_xfail(['debug_css', '--print-tree', '--tree=' + source]) \
.expect_stderr(error)
def test_css_property_parsing(hlwm):
input2error = {
'* { something-wrong: 2px; }': 'No such property "something-wrong"',
'* { border-width: 1sdfpx; }': "unparsable suffix",
'* { border-width: 1; }': "must be of the format",
'* { border-width: 1px 2px 3px 4px 5px; }': '"border-width" does not accept 5',
'* { border-style: invalidstyle; }': 'Expected \"solid\"',
}
for source, error in input2error.items():
assert hlwm.call_xfail(['debug_css', '--print-css', '--stylesheet=' + source]) \
.expect_stderr(error)
def test_css_basic_selectors(hlwm):
tree = '(client-decoration (focus (tab urgent)) (normal))'
selector2match = {
'.client-decoration': [''],
'client-decoration': [], # the . is missing
'#client-decoration': [], # wrong access
'.client-decoration>.focus': ['0'],
'.client-decoration > .focus': ['0'],
        '.client-decoration> .focus': ['0'],
'.client-decoration >.focus': ['0'],
'.client-decoration * > .focus': [],
'.client-decoration > .focus + *': ['1'],
':first-child': ['0', '0 0'],
':last-child': ['1', '0 0'],
'.focus :last-child': ['0 0'],
'.focus:last-child': [],
'.focus:first-child': ['0'],
'* + .normal': ['1'],
'*': ['', '0', '0 0', '1'],
'* *': ['0', '0 0', '1'],
}
for selector, expected in selector2match.items():
cmd = [
'debug_css', '--tree=' + tree,
'--query-tree-indices=' + selector
]
output = hlwm.call(cmd).stdout.splitlines()
assert sorted(output) == ['match: ' + x for x in sorted(expected)]
def test_css_custom_name(hlwm):
tree = """
(client-decoration
(something with index0)
(another with custom))
"""
selector2match = {
'.something': ['0'],
'.with': ['0', '1'],
'.custom': ['1'],
}
for selector, expected in selector2match.items():
cmd = [
'debug_css', '--tree=' + tree,
'--query-tree-indices=' + selector
]
output = hlwm.call(cmd).stdout.splitlines()
assert sorted(output) == ['match: ' + x for x in sorted(expected)]
def test_css_sibling_combinators(hlwm):
tree = """
(client-decoration
(something with index0)
(tabbar tab index1
(tab)
(tab focus)
(tab urgent))
(tab anything))
"""
selector2match = {
'.tab + .tab': ['1 1', '1 2', '2'],
'* + .tab + .tab': ['1 2', '2'],
'.tab + .tab + .tab': ['1 2'],
'.tab .tab + .tab': ['1 1', '1 2'],
'* + .tab': ['1 1', '1 2', '2', '1'],
'.client-decoration * + .tab': ['1 1', '1 2', '2', '1'],
'.client-decoration > * + .tab': ['1', '2'],
}
for selector, expected in selector2match.items():
cmd = [
'debug_css', '--tree=' + tree,
'--query-tree-indices=' + selector,
]
output = hlwm.call(cmd).stdout.splitlines()
assert sorted(output) == ['match: ' + x for x in sorted(expected)]
def test_css_property_applier(hlwm):
decl2computed = {
'border: 2px solid #9fbc00':
'\n'.join([
f'border-{side}-{prop}: {value};'
for side in ['top', 'left', 'right', 'bottom']
# 'solid' isn't shown because it's the default style
for prop, value in [('width', '2px'), ('color', '#9fbc00')]]),
'border-top: 5px solid #ffddee':
"""\
border-top-width: 5px;
border-top-color: #ffddee;
""",
'border-left: 5px solid #ffddee':
"""\
border-left-width: 5px;
border-left-color: #ffddee;
""",
'border-right: 5px solid #ffddee':
"""\
border-right-width: 5px;
border-right-color: #ffddee;
""",
'border-bottom: 5px solid #ffddee':
"""\
border-bottom-width: 5px;
border-bottom-color: #ffddee;
""",
'border-width: 4px 2px':
"""\
border-top-width: 4px;
border-bottom-width: 4px;
border-left-width: 2px;
border-right-width: 2px;
""",
'border-width: 2px 3px 4px 5px':
"""\
border-top-width: 2px;
border-right-width: 3px;
border-bottom-width: 4px;
border-left-width: 5px;
""",
'display: flex': '', # flex is the default
'font: initial': '',
'font: sans; font: initial': '', # initial is the default
'font: fixed': """\
font: fixed;
""",
}
simple_props = [
'min-height: 5px',
'min-width: 6px',
'display: none',
'color: #125323',
'text-align: center',
'background-color: #fb4ace',
]
for prop in simple_props:
decl2computed[prop] = prop
for css_decl, computed_style in decl2computed.items():
css_decl = css_decl.rstrip(' \n;') + ';'
css = f"""
.testclass {{
{css_decl}
}}
"""
tree = '((a) (testclass) (b))'
cmd = [
'debug_css', '--tree=' + tree,
'--compute-style=1',
'--stylesheet=' + css
]
def normalize(buf):
return sorted([line.strip().rstrip(';') for line in buf.splitlines()])
expected = normalize(textwrap.dedent(computed_style))
output = normalize(hlwm.call(cmd).stdout)
assert expected == output
def test_css_computed_style(hlwm):
tree = """
(client-decoration
(some-buttons in the future maybe)
(tabbar tab index1
(tab focus)
(tab)
(tab urgent)
(tab))
(some more buttons at the end))
"""
css = """
.tab + .tab {
border-left-width: 1px;
}
.tab.focus {
border-color: #9fbc00;
}
/* this definition is later, but less specific */
.tab {
border-width: 2px 4px 6px 8px;
}
/* and so this is overwritten entirely */
* {
border-width: 77px;
}
.some-buttons.future {
margin-left: 5px;
}
.some-buttons {
margin-left: 3px;
margin-right: 2px;
}
.the.future {
border-width: 0px;
}
"""
index2style = {
'1 0': # the active tab
"""\
border-top-color: #9fbc00;
border-right-color: #9fbc00;
border-bottom-color: #9fbc00;
border-left-color: #9fbc00;
border-top-width: 2px;
border-right-width: 4px;
border-bottom-width: 6px;
border-left-width: 8px;
""",
'1 1': # the tab next to it has a thinner left border
"""\
border-top-width: 2px;
border-right-width: 4px;
border-bottom-width: 6px;
border-left-width: 1px;
""",
'0': # the some-buttons...
"""\
margin-left: 5px;
margin-right: 2px;
"""
}
for tree_index, computed_style in index2style.items():
cmd = [
'debug_css', '--tree=' + tree,
'--compute-style=' + tree_index,
'--stylesheet=' + css
]
expected = sorted(textwrap.dedent(computed_style).strip().splitlines())
output = sorted(hlwm.call(cmd).stdout.splitlines())
assert expected == output
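# A small illustration (not part of herbstluftwm itself) of why the later
# `* { border-width: 77px; }` rule above loses: conflicting declarations are
# resolved by selector specificity before source order, and `.tab` (one class)
# outranks `*` (nothing). The counting below is a simplified sketch.
def _css_specificity(selector):
    ids = len(re.findall(r'#[\w-]+', selector))
    classes = len(re.findall(r'\.[\w-]+|:[\w-]+', selector))
    types = len(re.findall(r'(?:^|[\s>+~])[a-zA-Z][\w-]*', selector))
    return (ids, classes, types)
assert _css_specificity('.tab') > _css_specificity('*')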
def test_debug_css_errors(hlwm):
"""test that the debug_css command itself does correct
error handling"""
hlwm.call_xfail('debug_css --compute-style=lkj') \
.expect_stderr("stoi")
hlwm.call_xfail('debug_css --compute-style=8') \
.expect_stderr("--compute-style requires a tree")
hlwm.call_xfail('debug_css --tree="()" --compute-style=8') \
.expect_stderr("invalid tree index")
hlwm.call_xfail('debug_css --tree="()" --compute-style="0 8"') \
.expect_stderr("invalid tree index")
def test_css_names_tree_check(hlwm):
names = [f'class{idx}' for idx in range(0, 900)]
tree = '((foo) (' + ' '.join(names) + ') (class4 class8))'
matches_all_classes = ''.join(['.' + name for name in names])
matches_some_class = ', '.join(['.' + name for name in names])
css = f"""
// for elements that match all the classes
{matches_all_classes} {{
border-top-width: 4px;
}}
// for elements that match one of the classes
{matches_some_class} {{
border-left-width: 7px;
}}
"""
def computed_style_of_tree_index(tree_index):
cmd = ['debug_css', f'--tree={tree}', f'--compute-style={tree_index}', f'--stylesheet={css}']
return sorted(hlwm.call(cmd).stdout.splitlines())
assert computed_style_of_tree_index('0') == []
assert computed_style_of_tree_index('1') == [
'border-left-width: 7px;',
'border-top-width: 4px;']
assert computed_style_of_tree_index('2') == [
'border-left-width: 7px;']
def test_theme_name_invalid_path(hlwm):
hlwm.call_xfail('attr theme.name this_does_not_exist') \
.expect_stderr('this_does_not_exist.*No such file')
def test_toplevel_css_classes_toplevel_client(hlwm):
hlwm.open_persistent_pipe()
winid, proc = hlwm.create_client()
class State:
def __init__(self):
self.tab_count = 0
self.client_count = 1
self.other_client = None
self.focused = True
self.floating = False
self.fullscreen = False
state = State()
def step0():
hlwm.call(['set_layout', 'vertical'])
def step1():
hlwm.call(['set_layout', 'max'])
state.tab_count = 1
def step2():
winid, _ = hlwm.create_client()
state.other_client = winid
state.client_count += 1
state.tab_count = 2
def step3():
hlwm.create_client()
state.client_count += 1
state.tab_count = 2
def step4():
hlwm.call(['jumpto', state.other_client])
state.focused = False
def step5():
hlwm.attr.tags.focus.floating = True
state.tab_count = 0 # no tabs in floating mode
state.floating = True
for current_step in [step0, step1, step2, step3, step4, step5]:
current_step()
sep = '($|\\.|\\n)'
regexes = {
r'\.regular' + sep: not state.fullscreen,
r'\.fullscreen' + sep: False,
r'\.no-tabs' + sep: state.tab_count == 0,
r'\.one-tab' + sep: state.tab_count == 1,
r'\.multiple-tabs' + sep: state.tab_count >= 2,
r'\.floating' + sep: state.floating,
r'\.tiling' + sep: not state.floating,
r'\.focus' + sep: state.focused,
}
output = hlwm.call(['debug_css', f'--pretty-client-tree={winid}']).stdout.splitlines()[0]
for r, expected in regexes.items():
assert bool(re.search(r, output)) is expected, \
f'Checking that regex "{r}" is {expected}'
def test_debug_css_basic_flags_dont_crash(hlwm):
hlwm.call_xfail(['debug_css', '--print-tree']) \
.expect_stderr('requires a tree')
assert hlwm.call(['debug_css', '--print-css']).stdout == ''
|
[
"noreply@github.com"
] |
herbstluftwm.noreply@github.com
|
cd8e107457bcea7b89eaafa34671542afd9c7553
|
926b3c52070f6e309567c8598248fd5c57095be9
|
/src/mmgeneration/tests/test_models/test_pix2pix.py
|
f2cb3ee96a56efa0c95c1e827fc3730127774bb0
|
[
"Apache-2.0"
] |
permissive
|
fengbingchun/PyTorch_Test
|
410f7cd2303707b0141d433fb9d144a961e1f4c8
|
df5c2169f0b699bcd6e74adb4cb0e57f7dcd9348
|
refs/heads/master
| 2023-05-23T16:42:29.711338
| 2023-03-25T11:31:43
| 2023-03-25T11:31:43
| 167,339,907
| 15
| 4
| null | 2023-03-25T11:31:45
| 2019-01-24T09:24:59
|
C++
|
UTF-8
|
Python
| false
| false
| 10,226
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch
from mmcv.runner import obj_from_dict
from mmgen.models import GANLoss, L1Loss, build_model
from mmgen.models.architectures.pix2pix import (PatchDiscriminator,
UnetGenerator)
def test_pix2pix():
# model settings
model_cfg = dict(
type='Pix2Pix',
generator=dict(
type='UnetGenerator',
in_channels=3,
out_channels=3,
num_down=8,
base_channels=64,
norm_cfg=dict(type='BN'),
use_dropout=True,
init_cfg=dict(type='normal', gain=0.02)),
discriminator=dict(
type='PatchDiscriminator',
in_channels=6,
base_channels=64,
num_conv=3,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='normal', gain=0.02)),
gan_loss=dict(
type='GANLoss',
gan_type='vanilla',
real_label_val=1.0,
fake_label_val=0.0,
loss_weight=1.0),
default_domain='photo',
reachable_domains=['photo'],
related_domains=['photo', 'mask'],
gen_auxiliary_loss=dict(
type='L1Loss',
loss_weight=100.0,
data_info=dict(pred='fake_photo', target='real_photo'),
reduction='mean'))
train_cfg = None
test_cfg = None
# build synthesizer
synthesizer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert synthesizer.__class__.__name__ == 'Pix2Pix'
assert isinstance(synthesizer.generators['photo'], UnetGenerator)
assert isinstance(synthesizer.discriminators['photo'], PatchDiscriminator)
assert isinstance(synthesizer.gan_loss, GANLoss)
assert isinstance(synthesizer.gen_auxiliary_losses[0], L1Loss)
assert synthesizer.test_cfg is None
# prepare data
img_mask = torch.rand(1, 3, 256, 256)
img_photo = torch.rand(1, 3, 256, 256)
data_batch = {'img_mask': img_mask, 'img_photo': img_photo}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.5, 0.999))
optimizer = {
'generators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generators').parameters())),
'discriminators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'discriminators').parameters()))
}
# test forward_test
domain = 'photo'
with torch.no_grad():
outputs = synthesizer(img_mask, target_domain=domain, test_mode=True)
assert torch.equal(outputs['source'], data_batch['img_mask'])
assert torch.is_tensor(outputs['target'])
assert outputs['target'].size() == (1, 3, 256, 256)
# test forward_train
outputs = synthesizer(img_mask, target_domain=domain, test_mode=False)
assert torch.equal(outputs['source'], data_batch['img_mask'])
assert torch.is_tensor(outputs['target'])
assert outputs['target'].size() == (1, 3, 256, 256)
# test train_step
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
for v in ['loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g', 'loss_l1']:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_mask'], data_batch['img_mask'])
assert torch.equal(outputs['results']['real_photo'],
data_batch['img_photo'])
assert torch.is_tensor(outputs['results']['fake_photo'])
assert outputs['results']['fake_photo'].size() == (1, 3, 256, 256)
# test cuda
if torch.cuda.is_available():
synthesizer = synthesizer.cuda()
optimizer = {
'generators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generators').parameters())),
'discriminators':
obj_from_dict(
optim_cfg, torch.optim,
dict(
params=getattr(synthesizer,
'discriminators').parameters()))
}
data_batch_cuda = copy.deepcopy(data_batch)
data_batch_cuda['img_mask'] = img_mask.cuda()
data_batch_cuda['img_photo'] = img_photo.cuda()
# forward_test
with torch.no_grad():
outputs = synthesizer(
data_batch_cuda['img_mask'],
target_domain=domain,
test_mode=True)
assert torch.equal(outputs['source'],
data_batch_cuda['img_mask'].cpu())
assert torch.is_tensor(outputs['target'])
assert outputs['target'].size() == (1, 3, 256, 256)
# test forward_train
outputs = synthesizer(
data_batch_cuda['img_mask'], target_domain=domain, test_mode=False)
assert torch.equal(outputs['source'], data_batch_cuda['img_mask'])
assert torch.is_tensor(outputs['target'])
assert outputs['target'].size() == (1, 3, 256, 256)
# train_step
outputs = synthesizer.train_step(data_batch_cuda, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
for v in [
'loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g', 'loss_l1'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_mask'],
data_batch_cuda['img_mask'].cpu())
assert torch.equal(outputs['results']['real_photo'],
data_batch_cuda['img_photo'].cpu())
assert torch.is_tensor(outputs['results']['fake_photo'])
assert outputs['results']['fake_photo'].size() == (1, 3, 256, 256)
# test disc_steps and disc_init_steps
data_batch['img_mask'] = img_mask.cpu()
data_batch['img_photo'] = img_photo.cpu()
train_cfg = dict(disc_steps=2, disc_init_steps=2)
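    # disc_steps=2 with disc_init_steps=2 means: the discriminator updates on
    # every iteration, while the generator is skipped for the first two
    # iterations and afterwards updates only on every second one (checked below)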
synthesizer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
optimizer = {
'generators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generators').parameters())),
'discriminators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'discriminators').parameters()))
}
# iter 0, 1
for i in range(2):
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
assert outputs['log_vars'].get('loss_gan_g') is None
assert outputs['log_vars'].get('loss_l1') is None
for v in ['loss_gan_d_fake', 'loss_gan_d_real']:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_mask'],
data_batch['img_mask'])
assert torch.equal(outputs['results']['real_photo'],
data_batch['img_photo'])
assert torch.is_tensor(outputs['results']['fake_photo'])
assert outputs['results']['fake_photo'].size() == (1, 3, 256, 256)
assert synthesizer.iteration == i + 1
# iter 2, 3, 4, 5
for i in range(2, 6):
assert synthesizer.iteration == i
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
log_check_list = [
'loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g', 'loss_l1'
]
if i % 2 == 1:
assert outputs['log_vars'].get('loss_gan_g') is None
assert outputs['log_vars'].get('loss_pixel') is None
log_check_list.remove('loss_gan_g')
log_check_list.remove('loss_l1')
for v in log_check_list:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_mask'],
data_batch['img_mask'])
assert torch.equal(outputs['results']['real_photo'],
data_batch['img_photo'])
assert torch.is_tensor(outputs['results']['fake_photo'])
assert outputs['results']['fake_photo'].size() == (1, 3, 256, 256)
assert synthesizer.iteration == i + 1
# test without pixel loss
model_cfg_ = copy.deepcopy(model_cfg)
model_cfg_.pop('gen_auxiliary_loss')
synthesizer = build_model(model_cfg_, train_cfg=None, test_cfg=None)
optimizer = {
'generators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generators').parameters())),
'discriminators':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'discriminators').parameters()))
}
data_batch['img_mask'] = img_mask.cpu()
data_batch['img_photo'] = img_photo.cpu()
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
assert outputs['log_vars'].get('loss_pixel') is None
for v in ['loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g']:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_mask'], data_batch['img_mask'])
assert torch.equal(outputs['results']['real_photo'],
data_batch['img_photo'])
assert torch.is_tensor(outputs['results']['fake_photo'])
assert outputs['results']['fake_photo'].size() == (1, 3, 256, 256)
|
[
"fengbingchun@163.com"
] |
fengbingchun@163.com
|
83d393d04b40ab618e4180b6f90abaaa3f0c30ea
|
feb46219641392c35670881bb5a26f2bffc2e837
|
/prac8.py
|
0d1a87ad9b17743663ada26e72ebd2861334c872
|
[] |
no_license
|
17mca1050/Advanced-Embedded-Programming-Lab-CAT-857-
|
d2e1d90de3438b3fbb6d8eafe4f23f42b9455610
|
c485d7ac95abf35c7f9ff2d5277bd4a62c4b4bf5
|
refs/heads/master
| 2021-05-18T20:01:46.762358
| 2020-03-30T20:11:09
| 2020-03-30T20:11:09
| 251,393,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
import urllib
import cv2
import numpy as np
# Python 2 script: repeatedly grab JPEG snapshots from an IP webcam and show them.
url = 'http://192.168.2.52:8080/shot.jpg'
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 600, 600)
while True:
    imgResp = urllib.urlopen(url)
    imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
    img = cv2.imdecode(imgNp, -1)
    cv2.imshow('image', img)
    cv2.imwrite('test.jpg', img)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
cv2.destroyAllWindows()
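# Python 3 variant (a sketch, kept commented out so the Python 2 script above
# still runs unchanged): urllib.urlopen moved to urllib.request.urlopen, and
# np.frombuffer avoids the extra bytearray copy.
# import urllib.request
# resp = urllib.request.urlopen(url)
# img = cv2.imdecode(np.frombuffer(resp.read(), np.uint8), cv2.IMREAD_UNCHANGED)
# cv2.imwrite('test.jpg', img)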
|
[
"noreply@github.com"
] |
17mca1050.noreply@github.com
|
1196f945f853b96059f7283b9cbc5f49a719f37c
|
fdd8a9ffefea5f6a1e5e4548803ab3fbf83318a4
|
/OCRopyServices/RecognitionService/RecognitionService/urls.py
|
d71c94fb26bc33c9def3eefb94ef8529c22db92e
|
[
"Apache-2.0"
] |
permissive
|
acislab/HuMaIN_Microservices
|
cecc63d517ee107da7b27cb4596cb4b2d8d655c8
|
1bd4ac8885799f54959daa6304a0148c5f1d4a9f
|
refs/heads/master
| 2021-07-10T07:20:40.276888
| 2020-06-07T11:15:52
| 2020-06-07T11:15:52
| 97,147,994
| 1
| 2
| null | 2017-07-20T00:52:04
| 2017-07-13T17:17:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
"""RecognitionService URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from api import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^recognitionapi(/)?', include('api.urls')),
url(r'^admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"jingchaoluan@gmail.com"
] |
jingchaoluan@gmail.com
|
dde0cf9be9d0e57920c808810c2a7c4010f1fea1
|
d2a9e616dc94e3d9959fd7fa9e0abbbcbc23221d
|
/preprocessing/papework2.py
|
469e5a912a5b19079b41f59bea08a504701b2967
|
[] |
no_license
|
pelekhs/Visitor_distribution_prediction
|
17189e401eb83e7260047ef2a7b1527929d2ad0e
|
ec06bfcd975fa4c92cd7c496ca63280f0a6be098
|
refs/heads/master
| 2020-04-07T17:32:26.930556
| 2018-12-09T20:36:49
| 2018-12-09T20:36:49
| 158,573,449
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,628
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 27 22:40:23 2018
@author: spele
"""
import numpy as np
import pandas as pd
#==============================================================================
# ########### choose parameters that affect the primary dataset #######
#==============================================================================
# number of clusters, time-period interval, and filtering hours
clusters = 6
t11, t12, t21, t22 = '00:00:00','00:45:00','23:15:00','00:00:00'
# choose between 'all', '2017', '2018'
years = '2017'
from datetime import datetime
t1 = datetime.strptime(t11, '%H:%M:%S')
t2 = datetime.strptime(t12, '%H:%M:%S')
minutes = t2.minute-t1.minute
#create datasets
from defs_after import clustersPeriods
cluster = clustersPeriods(clusters=clusters, timestep=minutes)
data, pois_to_clusters = cluster.assign_users_in_regions(df2 = "artists_list.csv",
export_csv = False,
plot = False, years = years,
random_state = 42, clustering = 'kmeans') #new dataset in dataframe_teliko.csv
#==============================================================================
# ############# Parameters below affect creation of final1 #############
#==============================================================================
# False if you only want the artist's presence
artist_metrics = True
# 'No':metric during live, 'start':metric only at start, 'end':metric only before the bands live
only_start='No'
from final1_tune import final1_create_and_tune
final1, cltopred, clwithlive = final1_create_and_tune(data=data,
pois_to_clusters=pois_to_clusters,
t11=t11, t12=t12, t21=t21, t22=t22,
artist_metrics=artist_metrics,
cluster=cluster,minutes=minutes,
only_start=only_start,years=years)
#if (cltopred not in clwithlive):
# cltopred = str(input('NO live in most significant cluster. Enter cluster to predict:\n')).upper()
#print("I predict: ", cltopred)
# =============================================================================
# CREATE WEATHER COLUMNS
# =============================================================================
from weather import weather
#weather column dataframe
df = weather(final1)
#==============================================================================
# ###################### Dataset to be processed #######################
#==============================================================================
# final2 is the version of final1 to be processed
clusters= ['A','B','C','D','E','F']
final2 = pd.DataFrame()
final2[clusters]=final1[clusters]
for cl in clwithlive:
final2[cl+"pop"]=final1[cl+"pop"]
final2['Total users'] = final1['Total users']
final2['Temperature'] = df['Temperature']
final2['Conditions'] = df['Conditions']
if years=='all':
final2['Year'] = final1['Year']
final2['Time index'] = final1['Time index']
###optional if you want time series dates
final2['Time series']=final1['Time series dates']
#==============================================================================
# #FROM timestep MINUTES TO multiplier*timestep MINUTES
#==============================================================================
# final_multi is the dataset made from final2, averaged down to multiplier*timestep-minute intervals
from theregulator import theregulator
multiplier=6
final_multi = theregulator(final2, minutes, multiplier,clwithlive)
#order columns
columns=np.concatenate([clusters,[clwithlive[0]+'pop',clwithlive[1]+'pop','Total users',
'Temperature', 'Conditions', 'Time index', 'Time series']])
final_multi = final_multi[columns]
final_multi=final_multi.append(final_multi.iloc[-1][:]).reset_index(drop=True)
#choose line below based on year
#final_multi.iloc[-1,-1] = '2018-07-23 00:00:00'
final_multi.iloc[-1,-1] = '2017-07-24 00:00:00'
#final2=final2[columns[:-1]]
#final_multi.to_csv('final_multi_5to30mins.csv')
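# What theregulator is assumed to do (illustration; its actual implementation
# lives in theregulator.py, which is not shown here): collapse every
# `multiplier` consecutive rows of the `minutes`-minute grid into one row by
# averaging, roughly: final2.groupby(final2.index // multiplier).mean()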
## =============================================================================
## X, y WRITE TO CSV (input output for ML algorithms)
## =============================================================================
#x = final_multi.iloc[:-1,:].reset_index(drop=True)
#y = final_multi[clusters].iloc[1:,:].reset_index(drop=True)
#
#x.to_csv('x_'+years+'.csv', index=False)
#y.to_csv('y_'+years+'.csv', index=False)
#==============================================================================
# TYPICALLY SCRIPT ENDS HERE DON'T GO FURTHER
#==========================================================================
#==============================================================================
# FROM VALUES IN CLUSTERS TO DISTRIBUTIONS
#==============================================================================
##distribution normalization
#for column in clusters:
# final2[column] = final2[column]/final2['Total users']
#final2 = final2.fillna(0)
#==============================================================================
# ######################## Feature Scaling ##############################
#==============================================================================
#from sklearn.preprocessing import MinMaxScaler
#mini=0
#maxi=1
##total users feature scaling
#sc_tot = MinMaxScaler(feature_range=(mini, maxi))
#final2["Total users"] = sc_tot.fit_transform(final2["Total users"].values.reshape(-1,1))
#
##time index as a sine shifted so that it contains only positive values
#final2['Periodic time index'] = final2["Time index"].apply(lambda x: (np.sin((2*np.pi*x/(max(final2["Time index"]))))))
#
##time index feature scaling
#sc_index = MinMaxScaler(feature_range=(mini, maxi))
#final2["Time index"] = sc_index.fit_transform(final2["Time index"].values.reshape(-1,1))
##year encoding
#if years=='all':
# from sklearn.preprocessing import LabelEncoder
# le = LabelEncoder()
# final2['Year']=le.fit_transform(final2['Year'])
#final2 = (final2.drop(labels='Year',inplace=True,axis=1)).reset_index(drop=True)
##popularity feature scaling
#from artists_period import create_artist
#artists = create_artist(timestep=minutes, df=pois_to_clusters, only_start=only_start)
#sc_pop = MinMaxScaler(feature_range=(mini, maxi))
#sc_pop.fit(pd.concat([artists.iloc[:,0], artists.iloc[:,1]]).values.reshape(-1, 1))
#for cltopred in clwithlive:
# final2[cltopred+"pop"] = sc_pop.transform(final2[cltopred+"pop"].values.reshape(-1,1))
#
##temperature feature scaling
#sc_temp = MinMaxScaler(feature_range=(mini, maxi))
#final2["Temperature"] = sc_tot.fit_transform(final2["Temperature"].values.reshape(-1,1))
#
##time index feature scaling
#sc_index = MinMaxScaler(feature_range=(mini, maxi))
#final2["Conditions"] = sc_index.fit_transform(final2["Conditions"].values.reshape(-1,1))
#==============================================================================
# time series plot
#==============================================================================
"""#live and popularities plot in time
import matplotlib.pyplot as plt
import matplotlib.dates as md
index=[x for x in final_multi['Time series']]
index2 = [x for x in final_multi['Time series']]
tuTS = pd.Series(final_multi['Total users'].values,index=index)
condTS = pd.Series(final_multi['Conditions'].values, index=index2)
tempTS = pd.Series(final_multi['Temperature'].values, index=index2)
liveTS = pd.Series(final_multi['B'].values,index=index)
popTS = pd.Series(final_multi['Bpop'].values,index=index)
exitTS = pd.Series(final_multi['A'].values,index=index)
shopTS = pd.Series(final_multi['E'].values,index=index)
#normalize
maxpeople=max(liveTS)
tuTS = tuTS/(maxpeople)
liveTS = liveTS/(maxpeople)
exitTS = exitTS/(maxpeople)
shopTS = shopTS/(maxpeople)
pt = [(datetime.strptime(tuTS.index[x], '%Y-%m-%d %H:%M:%S')) for x in range(len(tuTS))]
pt2 = [(datetime.strptime(tuTS.index[x], '%Y-%m-%d %H:%M:%S')) for x in range(len(condTS))]
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
#condsc = StandardScaler()
#condTS = condsc.fit_transform(condTS.values.reshape(-1,1))
#tempsc = StandardScaler()
#tempTS = tempsc.fit_transform(tempTS.values.reshape(-1,1))
#weatherTS = tempTS+condTS
#weathersc = MinMaxScaler()
#weatherTS = tempsc.fit_transform(weatherTS.values.reshape(-1,1))
#weatherTS = pd.Series(np.repeat(weatherTS,multiplier), index=index)
condsc = MinMaxScaler()
condTS = condsc.fit_transform(condTS.values.reshape(-1,1))
fig, ax1 = plt.subplots()
ax1.set_xlabel('Time',fontsize=22)
ax1.set_ylabel('Normalized Values',fontsize=22)
#ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax2 = ax1
#ax1.plot(pt, tuTS, color = 'black', label ='Total Users', linewidth='2.5', dashes=[15,5,5,15,5,5])
ax1.plot(pt, liveTS, label = 'Main stage',linewidth='2.5',color = 'blue', dashes=[6,3,6,6,3,6])
ax1.plot(pt, exitTS, label = 'Entrance/Exit',linewidth='2.5',color = 'red', linestyle='-')
#ax1.plot(pt, shopTS, label = 'Shopping',linewidth='2.5',color = 'green', dashes=[4,2,4,4,2,4])
ax1.set_ylim(bottom=0, top=max(liveTS)+0.05*max(liveTS))
#ax2.set_ylim(bottom=0, top=max(condTS)+0.1*max(condTS))
#ax2.set_ylabel('', color='black',fontsize=22)
#ax2.plot(pt, condTS, color='cyan',label = 'Conditions', linestyle = '--', linewidth='1.5')
ax2.plot(pt, popTS, color='black',label = 'Popularity', linestyle = '-.', linewidth='2.3')
#ax2.set_ylabel('Normalized Weather Conditions',fontsize=22)
## Set time format and the interval of ticks (every 15 minutes)
xformatter = md.DateFormatter('%m-%d %H:%M')
xlocator = md.MinuteLocator(interval = 180)
minorxlocator = md.MinuteLocator(interval = int(minutes*multiplier))
## Set xtick labels to appear every 15 minutes
ax1.xaxis.set_major_locator(xlocator)
ax1.xaxis.set_minor_locator(minorxlocator)
for x in ax1.xaxis.get_major_ticks():
x.label.set_fontsize(20)
for y in ax1.yaxis.get_major_ticks():
y.label.set_fontsize(20)
## Format xtick labels as HH:MM
plt.gcf().axes[0].xaxis.set_major_formatter(xformatter)
# Customize the major grid
ax1.grid(which='major', linestyle=':', linewidth='0.6',color='gray')
# Customize the minor grid
ax1.grid(which='minor', linestyle='-', linewidth='0.15', color='gray')
plt.setp(ax2.get_yticklabels(),fontsize=20)
fig.autofmt_xdate()
ax1.legend(loc=0,fontsize=22, prop={'size': 22})
plt.show()"""
##%%#
#total_users_friday = sum(final1['Total users'][final1['Day index']==0])
#print(total_users_friday)
#total_users_saturday = sum(final1['Total users'][final1['Day index']==1])
#print(total_users_saturday)
#total_users_sunday = sum(final1['Total users'][final1['Day index']==2])
#print(total_users_sunday)
## =============================================================================
## x, y WRITE TO CSV
## =============================================================================
#x = final_multi.iloc[:-1,:].reset_index(drop=True)
#y = final_multi[clusters].iloc[1:,:].reset_index(drop=True)
#
#x.to_csv('x_'+years+'.csv', index=False)
#y.to_csv('y_'+years+'.csv', index=False)
#
#
##==============================================================================
## IMPORT WRITTEN CSV
##==============================================================================
#x_2018 = pd.read_csv('x_2018.csv')
#y_2018 = pd.read_csv('y_2018.csv')
#
#x_2017 = pd.read_csv('x_2017.csv')
#y_2017 = pd.read_csv('y_2017.csv')
#
##==============================================================================
## PREDICTIONS
##==============================================================================
#for cluster in clusters:
# X_train = x_2017
# y_train = y_2017[cluster]
#
##==============================================================================
## ############# Model Creation, Evaluation and Plots #################
##==============================================================================
##regression model parameters to define
##SVR
#svr_kernel_vector= ['rbf','linear']#poly rbf linear sigmoid precomputed
##random forest numbers of estimators to use for regression
#estimators = [10]
##polynomial regression maximum degree
#poly_deg = [1]
#
#from models_evaluation_and_plots import model_evaluation_and_plots
#modeler = model_evaluation_and_plots(X_train_sc_flat,X_test_sc_flat,y_train_sc, ytrue, y_train, y_test,
# sc_cl)
##SVR
#svr=[]
#for svr_kernel in svr_kernel_vector:
# svr.append(modeler.SVR(svr_kernel))
##Random Forest
#rf=[]
#for estimator in estimators:
# rf.append(modeler.RF(estimator,max_depth=2, min_samples_split=4))
##polynomial
#po=[]
#for deg in poly_deg:
# po.append(modeler.poly(deg))
##MLP
#mlp=[modeler.MLP()]
##svr, rf, po, mlp are formated as below:
##[yhat, ytrain_hat, ytest_hat, Sete, Setr,Se, R2te, R2tr, R2, mapete, mapetr, mape]
#
#
##anastasis
##anasdataset
#Xa, Xb, y, ya, yb, ya_sc, yb_sc =\
#sets2(1,final1['Time index'],output_cols,output_cols_sc,dataset,3)
#X_sc = np.concatenate([Xa, Xb])
#y_sc = np.concatenate([ya_sc, yb_sc])
#
##shift (not a model just a shift)
#shift=[modeler.shift(1,output_cols,final1['Time index'],testing_day,years=years)]
##shift is formatted as below
##yhat, ytrain_hat, ytest_hat, Sete, Setr,Se, R2te, R2tr, R2, mapete, mapetr, mape
#yhatshift=shift[0][0]
#ytrueshift=shift[0][1]
|
[
"pelekhs@gmail.com"
] |
pelekhs@gmail.com
|
5bed0bed476abe47cfc78c6540b1e1b8e780f8b9
|
968b6ec227e6f46db3b571db64603c08eb9b4050
|
/Attention-Classification/NRAS-germline-lr5em2_lm1em4_batch100_q1_e50.py
|
15916f3307ab4026d7a42d990e8736ae136e6469
|
[
"Apache-2.0"
] |
permissive
|
doerlbh/SequenceAttentionClassifier
|
0d053073652dc67ff3d7d2012fb4487598521368
|
ffdc620f796a5808cf9dbf7920364391de40b94e
|
refs/heads/master
| 2020-03-17T21:55:53.491907
| 2018-07-10T10:08:28
| 2018-07-10T10:08:28
| 133,981,326
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,788
|
py
|
# coding: utf-8
# ### Notebook to configure model
# In[1]:
import time
import math
import copy
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context(context="talk")
import torch
import torch.nn as nn
import tensorflow as tf
import torch.nn.functional as F
from torchvision import datasets
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.utils import data
# In[2]:
isRetrain = False
# Hyperparameter
QUERY_SIZE = 1
EMBEDDING_SIZE = 50
# HIDDEN_SIZE = 512
# ATTENTION_SIZE = 64
parameter_name = 'lr5em2_lm1em4_batch100_q1_e50'
LEARNING_RATE = 5*1e-2
BATCH_SIZE = 100
LAMBDA = 1e-4
num_epochs = 100
VOCAB_SIZE = 5
NUM_CLASSES = 2
# Data-specific
READ_LENGTH = 100
# # KRAS
# GENOME_START = 25204789
# GENOME_END = 25250936
# # NOTCH1
# GENOME_START = 136494433
# GENOME_END = 136545786
# NRAS
file_name = 'NRAS-germline'
GENOME_START = 114704464
GENOME_END = 114716894
GENOME_LENGTH = GENOME_END - GENOME_START + 1
CONTEXT_SIZE = GENOME_LENGTH
ref_names = ["class", "ref", "sequence"]
VOCAB = ['N','A','T','C','G']
# In[3]:
def load_ref_data(file_name, sample_ratio= 1, n_class=2, names=ref_names):
csv_file = pd.read_csv(file_name, names=ref_names)
shuffle_csv = csv_file.sample(frac=sample_ratio).reset_index()
# x = pd.Series(shuffle_csv["sequence"])
x = list(shuffle_csv["sequence"])
# ref = pd.Series(shuffle_csv["ref"])
ref = list(shuffle_csv["ref"])
y = pd.Series(shuffle_csv["class"])
y = to_one_hot(y, n_class)
print(y.shape)
# print(type(x))
# print(type(y))
# print(type(ref))
return x, ref, y
# In[4]:
def create_synthetic_data(file_name, cancer_genes, benign_genes, num_patients=10, num_reads_per_patients=3, read_length=READ_LENGTH, genome_length=GENOME_LENGTH, vocab=VOCAB, isSomatic=True, print_seq=False):
seq_list = np.random.choice(vocab, [num_patients, genome_length], replace=True)
backup_seq_list = seq_list
for loc, mutation in cancer_genes.items():
seq_list[np.random.choice(num_patients, int(num_patients*mutation[1]), replace=False), loc] = mutation[0]
genomes = []
for r in range(seq_list.shape[0]):
seq = ''.join(seq_list[r,:])
if print_seq:
print(seq)
genomes.append(seq)
locs = np.random.choice(genome_length-read_length, num_patients*num_reads_per_patients)
file = open('./tumor-genome-'+file_name+'.txt','w')
count = 0
reads = []
for genome in genomes:
for t in range(num_reads_per_patients):
index = count*num_reads_per_patients+t
reads.append(genome[locs[index]:locs[index]+read_length])
file.write("%s\n" % genome)
count = count + 1
file.close()
file = open('./tumor-syn-'+file_name+'.csv','w')
for r in range(num_patients*num_reads_per_patients):
file.write("1, %d, %s\n" % (locs[r], reads[r]))
file.close()
tumor_locs = locs
tumor_reads = reads
if isSomatic:
seq_list = backup_seq_list
else:
seq_list = np.random.choice(vocab, [num_patients, genome_length], replace=True)
for loc, mutation in benign_genes.items():
seq_list[np.random.choice(num_patients, int(num_patients*mutation[1]), replace=False), loc] = mutation[0]
genomes = []
for r in range(seq_list.shape[0]):
seq = ''.join(seq_list[r,:])
if print_seq:
print(seq)
genomes.append(seq)
locs = np.random.choice(genome_length-read_length, num_patients*num_reads_per_patients)
file = open('./normal-genome-'+file_name+'.txt','w')
count = 0
reads = []
for genome in genomes:
for t in range(num_reads_per_patients):
index = count*num_reads_per_patients+t
reads.append(genome[locs[index]:locs[index]+read_length])
file.write("%s\n" % genome)
count = count + 1
file.close()
file = open('./normal-syn-'+file_name+'.csv','w')
for r in range(num_patients*num_reads_per_patients):
file.write("0, %d, %s\n" % (locs[r], reads[r]))
file.close()
normal_locs = locs
normal_reads = reads
file = open('./syn-'+file_name+'.csv','w')
for r in range(num_patients*num_reads_per_patients):
file.write("1,%d,%s\n" % (tumor_locs[r], tumor_reads[r]))
file.write("0,%d,%s\n" % (normal_locs[r], normal_reads[r]))
file.close()
return './syn-'+file_name+'.csv'
# In[5]:
def to_one_hot(y, n_class):
return np.eye(n_class)[y.astype(int)]
# In[6]:
def split_ref_dataset(x_test, y_test, ref_test, dev_ratio):
test_size = len(x_test)
print(test_size)
dev_size = (int)(test_size * dev_ratio)
print(dev_size)
x_dev = x_test[:dev_size]
x_test = x_test[dev_size:]
y_dev = y_test[:dev_size]
y_test = y_test[dev_size:]
ref_dev = ref_test[:dev_size]
ref_test = ref_test[dev_size:]
return x_test, x_dev, y_test, y_dev, ref_test, ref_dev, dev_size, test_size - dev_size
# In[7]:
class TensorizedReadDataset(torch.utils.data.Dataset):
'Characterizes a Tensorized dataset for genome reads in PyTorch'
def __init__(self, reads, ref_locs, labels, read_length=100, genome_start=0, genome_end=0):
# super(TensorizedReadDataset, self).__init__()
# self.read_length = read_length
self.labels = labels
self.reads = reads
self.ref_locs = ref_locs
self.genome_start = genome_start
self.genome_end = genome_end
def __len__(self):
return len(self.reads)
def __getitem__(self, index):
vals = list(self.reads[index])
read_length = len(vals)
locs = list(np.arange(self.ref_locs[index]-self.genome_start,self.ref_locs[index]+read_length-self.genome_start))
# print(len(vals))
# print(len(locs))
vals2idx = {'N': 0, 'A': 1, 'C': 2, 'T': 3, 'G': 4}
# read = torch.LongTensor(np.array([vals2idx[val]+loc*len(vals2idx) for val, loc in zip(vals, locs)], dtype=int), requires_grad=False)
read = torch.autograd.Variable(torch.LongTensor(np.array([vals2idx[val]+loc*len(vals2idx) for val, loc in zip(vals, locs)], dtype=int)), requires_grad=False)
X = read
Y = self.labels[index,:]
# torch.LongTensor(self.labels[index,:])
return X, Y
# In[8]:
class SequenceAttentionClassifier(nn.Module):
def __init__(self, genome_length, vocab_size=5, query_size=10, embedding_size=128, num_classes=2):
super(SequenceAttentionClassifier, self).__init__()
self.genome_length = genome_length
self.vocab_size = vocab_size
self.query_size = query_size
self.embedding_size = embedding_size
self.num_classes = num_classes
self.K = nn.Embedding(vocab_size*genome_length, embedding_size)
self.V = nn.Embedding(vocab_size*genome_length, query_size)
self.W = nn.Linear(query_size, num_classes)
self.Q = nn.Linear(embedding_size, query_size)
    def forward(self, read):
        # 'read' is a flattened index tensor of shape (batch, read_length):
        # base j at genome position i maps to index i*vocab_size + j
        K_lookup = self.K(read)  # keys, (batch, read_length, embedding_size)
        V_lookup = self.V(read)  # values, (batch, read_length, query_size)
        # Attention weights over the read positions: softmax along dim=1, the
        # sequence axis (dim=0 is the batch axis and would make the weights
        # uniform for batch size 1)
        logits = self.Q(K_lookup) / math.sqrt(self.embedding_size)
        probs = F.softmax(logits, dim=1)
        # Attention-weighted sum of the values: the covariates for the
        # logistic regression, shape (batch, query_size)
        X = (probs * V_lookup).sum(dim=1)
        # A single linear transform for now; nonlinearities could come later.
        # Return class logits plus the lookups (used for L2 regularization).
        return self.W(X), K_lookup, V_lookup
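# Tiny shape check (illustration with hypothetical sizes, kept commented out so
# it does not run as part of training):
# m = SequenceAttentionClassifier(genome_length=20, vocab_size=5, query_size=1,
#                                 embedding_size=8, num_classes=2)
# logits, K, V = m(torch.zeros(1, 20, dtype=torch.long))  # logits: shape (1, 2)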
# In[9]:
def GetTensorBatch(reads, ref_locs, labels, batch_size=100, genome_start=0, genome_end=0):
batches = {}
set_size = len(ref_locs)
for batch in range(set_size // batch_size):
x_batch = []
y_batch = []
        for i in range(batch_size):
            index = batch * batch_size + i  # offset into the full set, so batches do not all repeat the first reads
vals = list(reads[index])
read_length = len(vals)
locs = list(np.arange(ref_locs[index]-genome_start,ref_locs[index]+read_length-genome_start))
vals2idx = {'N': 0, 'A': 1, 'C': 2, 'T': 3, 'G': 4}
read = torch.autograd.Variable(torch.LongTensor(np.array([vals2idx[val]+loc*len(vals2idx) for val, loc in zip(vals, locs)], dtype=int)), requires_grad=False)
X = read
Y = labels[index,:]
x_batch.append(X)
y_batch.append(Y)
batches[batch] = [x_batch, y_batch]
return batches
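# Note on the flattening used above: base `val` at absolute genome offset `loc`
# becomes the single embedding row loc * len(vals2idx) + vals2idx[val], e.g.
# 'C' (index 2) at offset 10 with the 5-letter vocabulary maps to row 52.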
# In[14]:
# load data
x_train, refs_train, y_train = load_ref_data("../data/ref-germline-NRAS-train39000.csv", sample_ratio=1)
x_hardtest, refs_hardtest, y_hardtest = load_ref_data("../data/ref-germline-NRAS-test1000.csv", sample_ratio=1)
# split dataset to train and validation
x_train, x_test, y_train, y_test, refs_train, refs_test, test_size, train_size = split_ref_dataset(x_train, y_train, refs_train, 0.2)
# split dataset to test and dev
x_softtest, x_val, y_softtest, y_val, refs_softtest, refs_val, val_size, softtest_size = split_ref_dataset(x_test, y_test, refs_test, 0.5)
print("Training size: ", train_size)
print("Soft Test size: ", softtest_size)
print("Hard Test size: ", len(y_hardtest))
print("Validation size: ", val_size)
# In[15]:
# Generators
# train_dataset = TensorizedReadDataset(reads=x_train,
# ref_locs=refs_train,
# labels=y_train,
# read_length=READ_LENGTH,
# genome_start=GENOME_START,
# genome_end=GENOME_END)
# hardval_dataset = TensorizedReadDataset(reads=x_test,
# ref_locs=refs_test,
# labels=y_test,
# read_length=READ_LENGTH,
# genome_start=GENOME_START,
# genome_end=GENOME_END)
# softval_dataset = TensorizedReadDataset(reads=x_softval,
# ref_locs=refs_softval,
# labels=y_softval,
# read_length=READ_LENGTH,
# genome_start=GENOME_START,
# genome_end=GENOME_END)
# # Input pipeline
# train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
# batch_size=BATCH_SIZE,
# shuffle=True)
# hardtest_loader = torch.utils.data.DataLoader(dataset=hardval_dataset,
# batch_size=BATCH_SIZE,
# shuffle=True)
# softval_loader = torch.utils.data.DataLoader(dataset=softval_dataset,
# batch_size=BATCH_SIZE,
# shuffle=True)
train_loader = GetTensorBatch(reads=x_train,
ref_locs=refs_train,
labels=y_train,
batch_size=BATCH_SIZE,
genome_start=GENOME_START,
genome_end=GENOME_END)
val_loader = GetTensorBatch(reads=x_val,
ref_locs=refs_val,
labels=y_val,
batch_size=BATCH_SIZE,
genome_start=GENOME_START,
genome_end=GENOME_END)
hardtest_loader = GetTensorBatch(reads=x_hardtest,
ref_locs=refs_hardtest,
labels=y_hardtest,
batch_size=BATCH_SIZE,
genome_start=GENOME_START,
genome_end=GENOME_END)
softtest_loader = GetTensorBatch(reads=x_softtest,
ref_locs=refs_softtest,
labels=y_softtest,
batch_size=BATCH_SIZE,
genome_start=GENOME_START,
genome_end=GENOME_END)
# In[16]:
# isRetrain = True
# In[17]:
model = SequenceAttentionClassifier(genome_length=GENOME_LENGTH,
vocab_size=VOCAB_SIZE,
query_size=QUERY_SIZE,
embedding_size=EMBEDDING_SIZE,
num_classes=NUM_CLASSES)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
if isRetrain:
model.load_state_dict(torch.load('./'+file_name+'_model.pth'))
# In[18]:
f1 = open('cv_acc_each_epoch_'+parameter_name+'.txt', 'a+')
f2 = open('cv_loss_each_batch_'+parameter_name+'.txt', 'a+')
f3 = open('cv_loss_val_each_batch_'+parameter_name+'.txt', 'a+')
f4 = open('cv_acc_each_batch_'+parameter_name+'.txt', 'a+')
lam = LAMBDA
# Training process
for epoch in range(num_epochs):
# b = 0 # count batch
for b, [x_batch, y_batch] in train_loader.items():
full_loss = 0
full_data_loss = 0
optimizer.zero_grad()
for c in range(BATCH_SIZE):
x_input = x_batch[c].view(1, x_batch[c].shape[0])
y_input = torch.Tensor(y_batch[c]).type(torch.float64).view(1, y_batch[c].shape[0])
outputs, K, V = model(x_input)
data_loss = criterion(outputs, torch.max(y_input, 1)[1])
reg_loss = (torch.norm(K,2,1).mean() + torch.norm(V,2,1).mean())*lam
loss = data_loss + reg_loss
full_loss = full_loss + loss
full_data_loss = full_data_loss + data_loss
loss.backward()
optimizer.step()
f2.write(repr(data_loss)+"\n")
# full_loss = full_loss / 2
# full_loss.backward()
# optimizer.step()
# if (b + 1) % 1 == 0:
print("Epoch {}, Batch {}, loss :{}".format(epoch + 1, b + 1, full_loss))
# b = b + 1
# loss_each_batch.append(full_loss)
# f2.write(repr(full_data_loss)+"\n")
full_loss = 0
full_data_loss = 0
correct = 0
total = 0
for b, [x_batch, y_batch] in val_loader.items():
full_vloss = 0
for c in range(BATCH_SIZE):
x_input = x_batch[c].view(1, x_batch[c].shape[0])
y_input = torch.Tensor(y_batch[c]).type(torch.float64).view(1, y_batch[c].shape[0])
outputs, K, V = model(x_input)
_, predicted = torch.max(outputs.data, 1)
data_vloss = criterion(outputs, torch.max(y_input, 1)[1])
full_vloss = full_vloss + data_vloss
total += len(y_input)
correct += (predicted == torch.max(y_input.type(torch.LongTensor), 1)[1]).sum()
f3.write(repr(data_vloss)+"\n")
# f3.write(repr(full_vloss)+"\n")
acc = int(correct) / total
print('Validation Accuracy: {}%'.format(100 * acc))
# acc_each_epoch.append(acc)
f4.write(repr(acc)+"\n")
correct = 0
total = 0
for b, [x_batch, y_batch] in val_loader.items():
for c in range(BATCH_SIZE):
x_input = x_batch[c].view(1, x_batch[c].shape[0])
y_input = torch.Tensor(y_batch[c]).type(torch.float64).view(1, y_batch[c].shape[0])
outputs, K, V = model(x_input)
_, predicted = torch.max(outputs.data, 1)
total += len(y_input)
correct += (predicted == torch.max(y_input.type(torch.LongTensor), 1)[1]).sum()
acc = int(correct) / total
print('Validation Accuracy: {}%'.format(100 * acc))
# acc_each_epoch.append(acc)
f1.write(repr(acc)+"\n")
f1.close()
f2.close()
f3.close()
f4.close()
# In[202]:
torch.save(model, './full_' + file_name + '_' + parameter_name + '_model.pt')
torch.save(model.state_dict(), './'+file_name + '_' + parameter_name + '_model.pth')
# In[ ]:
# plt.rcParams['figure.figsize'] = [30, 5]
# plt.plot(np.arange(len(loss_each_batch))+1, loss_each_batch)
# plt.xlim(1, len(loss_each_batch))
# plt.title('loss vs. batchs')
# plt.show()
# plt.plot(np.arange(len(acc_each_batch))+1, acc_each_batch)
# plt.xlim(1, len(acc_each_batch))
# plt.title('accuracy vs. batchs')
# plt.show()
# plt.plot(np.arange(len(acc_each_epoch))+1, acc_each_epoch)
# plt.xlim(1, len(acc_each_epoch))
# plt.title('accuracy vs. epochs')
# plt.show()
# In[10]:
# model = torch.load('./full_'+file_name+'_model.pt')
# model.load_state_dict(torch.load('./'+file_name+'_model.pth'))
# In[16]:
correct = 0
total = 0
for b, [x_batch, y_batch] in softtest_loader.items():
for c in range(BATCH_SIZE):
x_input = x_batch[c].view(1, x_batch[c].shape[0])
y_input = torch.Tensor(y_batch[c]).type(torch.float64).view(1, y_batch[c].shape[0])
outputs, K, V = model(x_input)
_, predicted = torch.max(outputs.data, 1)
total += len(y_input)
correct += (predicted == torch.max(y_input.type(torch.LongTensor), 1)[1]).sum()
acc = int(correct) / total
print('Soft Test Accuracy: {}%'.format(100 * acc))
correct = 0
total = 0
for b, [x_batch, y_batch] in hardtest_loader.items():
for c in range(BATCH_SIZE):
x_input = x_batch[c].view(1, x_batch[c].shape[0])
y_input = torch.Tensor(y_batch[c]).type(torch.float64).view(1, y_batch[c].shape[0])
outputs, K, V = model(x_input)
_, predicted = torch.max(outputs.data, 1)
total += len(y_input)
correct += (predicted == torch.max(y_input.type(torch.LongTensor), 1)[1]).sum()
acc = int(correct) / total
print('Hard Test Accuracy: {}%'.format(100 * acc))
|
[
"doerlbh@gmail.com"
] |
doerlbh@gmail.com
|
aac72eb922f89bbf7d21006066e3bab236cf6a54
|
2f458dc9cce0310e2ca1734d30e778e0d8613be7
|
/exercises/tests/test_calculator.py
|
36404b6270cca786080646248821e3b3a11290c4
|
[
"MIT"
] |
permissive
|
yehonadav/python_course
|
d632e206bfa5e36c34d434c2f423482fda6df59b
|
ff1754e84d0b4ca20078ceae2468cc59f99588cb
|
refs/heads/master
| 2023-02-12T11:59:51.003790
| 2022-05-16T12:43:42
| 2022-05-16T12:43:42
| 151,849,907
| 6
| 12
|
MIT
| 2023-02-10T23:31:59
| 2018-10-06T14:50:31
|
Python
|
UTF-8
|
Python
| false
| false
| 850
|
py
|
import random
from exercises.level_1.oop.calculator import Calculator
def test_calculator():
# create test data
calculators = []
for i in range(20):
calculators.append(
Calculator(
random.randint(-1000, 1000),
random.randint(-1000, 1000)
)
)
answer = None
for calculator in calculators:
# assert answer sharing
assert answer == calculator.answer
calculator.add()
answer = calculator.answer
# assert addition
assert answer == calculator.a + calculator.b
for calculator in calculators:
# assert answer sharing
assert answer == calculator.answer
calculator.sub()
answer = calculator.answer
        # assert subtraction
assert answer == calculator.a - calculator.b
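# A minimal sketch (an assumption for illustration; the real class lives in
# exercises.level_1.oop.calculator and is not shown here) of the interface the
# test above relies on: `answer` is a class attribute, so the result of the
# most recent add()/sub() is visible through every instance.
class _CalculatorSketch:
    answer = None
    def __init__(self, a, b):
        self.a = a
        self.b = b
    def add(self):
        _CalculatorSketch.answer = self.a + self.b
    def sub(self):
        _CalculatorSketch.answer = self.a - self.b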
|
[
"qaviton@gmail.com"
] |
qaviton@gmail.com
|
cbe119761c108234698ea1c9e8bebb873ede3f1d
|
5cebb43511d7fec27b155dd07426cb07d5a50f0f
|
/setup.py
|
7d85b5b01016a41921f2834b4e6211eccd9fb2ce
|
[
"MIT"
] |
permissive
|
dokalanyi/envelopes
|
ea5b05d3fdd0da78d555786f9a219d5be7a40af6
|
d28e8486ce632cde1300aaaadd44f52e2d2e8d7a
|
refs/heads/master
| 2020-12-24T13:36:20.027283
| 2013-08-07T05:42:11
| 2013-08-07T05:42:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,357
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Tomasz Wójcik <tomek@bthlabs.pl>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import codecs
from setuptools import setup
version = '0.1.1'
desc_file = codecs.open('README.rst', 'r', 'utf-8')
long_description = desc_file.read()
desc_file.close()
setup(
name="Envelopes",
version=version,
packages=['envelopes'],
test_suite='nose.collector',
zip_safe=False,
platforms='any',
tests_require=[
'nose',
],
author=u'Tomasz Wójcik'.encode('utf-8'),
author_email='tomek@bthlabs.pl',
maintainer=u'Tomasz Wójcik'.encode('utf-8'),
maintainer_email='tomek@bthlabs.pl',
url='http://tomekwojcik.github.io/envelopes/',
download_url='http://github.com/tomekwojcik/envelopes/tarball/v%s' % version,
description='Mailing for human beings',
long_description=long_description,
license='https://github.com/tomekwojcik/envelopes/blob/master/LICENSE',
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
|
[
"labs@tomekwojcik.pl"
] |
labs@tomekwojcik.pl
|
4999ee3668a549429c68491850e36b3ff94cda99
|
caf3596fb540144c31b407b01ddf8b5da6484025
|
/myenv/bin/easy_install-3.7
|
6cf43ffe26200ebfad0b2e9c6bd15b9957b095e0
|
[] |
no_license
|
VitaDorosheva/my-first-blog
|
b2f2050542a705fa925e9da950c7d5ad145c579e
|
8ae40572795dae434ddc5bbd831c81eec34d4fc7
|
refs/heads/master
| 2020-04-28T01:26:39.024715
| 2019-03-17T18:17:40
| 2019-03-17T18:17:40
| 174,857,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
7
|
#!/Users/golosgermesa/Documents/djangogirls/myenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"vitadorosheva@gmail.com"
] |
vitadorosheva@gmail.com
|
45d9684da8ad09960bcf264b7513b9bdaaac7ac9
|
adb045761913ebe494faf5538bae42da3d6c07d7
|
/coding_test/pass_42583_다리를지나는트럭/solution_LTH.py
|
de71e1753f728b48b5293fbbfb1c17462152b084
|
[] |
no_license
|
inergoul/boostcamp_peer_session
|
c83b49558444b53d6d6cba12ff32d9e44aaa4050
|
5431b37ef46ad11e0dd6adc4fb7010c44bf0c27e
|
refs/heads/main
| 2023-07-27T21:36:24.871227
| 2023-07-19T06:38:24
| 2023-07-19T06:38:24
| 331,229,124
| 1
| 3
| null | 2021-03-15T15:12:04
| 2021-01-20T07:38:49
|
Python
|
UTF-8
|
Python
| false
| false
| 967
|
py
|
from collections import deque
def solution(bridge_length, weight, truck_weights):
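    # Each bridge entry is a [truck_weight, remaining_seconds] pair: a truck
    # entering the span starts with remaining_seconds = bridge_length, and
    # bridge_weight tracks the total load currently on the bridge.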
ans = 1
truck_weights = deque(truck_weights)
bridge = deque([[truck_weights.popleft(), bridge_length]])
bridge_weight = bridge[-1][0]
while truck_weights:
truck = 0
if bridge and truck_weights[0] + bridge_weight > weight:
truck = bridge.popleft()
bridge_weight -= truck[0]
ans += truck[1]
for arr in bridge:
arr[1] -= truck[1]
if truck_weights and truck_weights[0] + bridge_weight <= weight:
if not truck:
for arr in bridge:
arr[1] -= 1
if bridge and bridge[0][1] == 0:
w = bridge.popleft()[0]
bridge_weight -= w
ans += 1
bridge.append([truck_weights.popleft(), bridge_length])
bridge_weight += bridge[-1][0]
return ans + bridge[-1][1]
|
[
"blush0722@gmail.com"
] |
blush0722@gmail.com
|
c43ec3680a769c7273d6d89abff3ba63d47e3568
|
b3ec20cfa849916facbdfdb3ee4a484d2e66965c
|
/scrummers/basicApp/migrations/0001_initial.py
|
ade81ae8b52a632ee22619c1be87d6d484517dc9
|
[] |
no_license
|
Wtoloza/task_manager_Django
|
8c37f3908fc4a81789f2eb4082afc1b871cbfaeb
|
edb18559cb504c48a929777e6749cdedeb17f3df
|
refs/heads/master
| 2023-07-08T22:08:17.335621
| 2021-08-13T14:22:14
| 2021-08-13T14:22:14
| 395,679,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,171
|
py
|
# Generated by Django 3.2.6 on 2021-08-12 02:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id_project', models.SmallIntegerField(primary_key=True, serialize=False)),
('name_project', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id_task', models.PositiveSmallIntegerField(primary_key=True, serialize=False)),
('tile_task', models.CharField(max_length=50)),
('description_task', models.CharField(max_length=200)),
('imporant_task', models.CharField(choices=[('0', 'Low'), ('1', 'Medium'), ('2', 'Urgent')], default='0', max_length=1)),
('status_task', models.BooleanField(default=False)),
('project_task', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicApp.project')),
],
),
]
|
[
"wtorrestoloza@hotmail.com"
] |
wtorrestoloza@hotmail.com
|
103f17c3a8bfd4d1a3467b125e2d66d05d3630f9
|
e9b89062fa0f96f8bf0182cdb1a3096d53951cd6
|
/code/Problem4-test.py
|
a59e276c15abc2d41780f71ec443f0b957d46a2d
|
[] |
no_license
|
Yaoyaoi/DigitClassification
|
4784d545f654e61f2eac0e2b0454bc7c712ba35d
|
492df6d3f24f29d04dbcc97322c2a164f22c7279
|
refs/heads/master
| 2020-12-01T07:38:11.384077
| 2020-03-16T16:22:32
| 2020-03-16T16:22:32
| 230,583,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,506
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import DataPreprocess as dp
from keras.models import Sequential, load_model
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from keras.optimizers import Adadelta
from sklearn.model_selection import StratifiedKFold
from keras.utils import to_categorical
from keras.utils import plot_model
# filename of data
fileName = "zip.test.txt"
# creat the model
def initNet(numCategory,learningRate):
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(16,16,1)))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(numCategory, activation='softmax'))
# plot the picture of the model
plot_model(model, to_file = 'P4model.png', show_shapes=True)
model.compile(loss='categorical_crossentropy', optimizer= Adadelta(learning_rate=learningRate), metrics=['accuracy'])
return model
if __name__ == "__main__":
# data preprocessing
data = dp.LoadData(fileName)
X = np.array(data)[:,1:]
Xtest = X.reshape((np.size(X,0),16,16,1))
Y = np.array(data)[:,0]
Ytest = to_categorical(Y,10)
# create model
model = initNet(10,1)
# load model
model.set_weights(load_model("P4.h5").get_weights())
# evaluate
loss, acc = model.evaluate(Xtest,Ytest)
print(acc)
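    # Illustrative extension (not in the original script): matplotlib.pyplot is
    # imported above but unused; one way to inspect a test digit would be:
    # plt.imshow(Xtest[0].reshape(16, 16), cmap='gray')
    # plt.show()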
|
[
"haruka0314@outlook.com"
] |
haruka0314@outlook.com
|
0a346d61b350f9ef52edcccccd4c1ef81f8ef2ba
|
61a6d4800be8af49b1fccbbcaddd3def6c4e2364
|
/venv/Scripts/easy_install-script.py
|
b99679e9a48c00c6597a3a2dcf635dd78a49e9d6
|
[] |
no_license
|
BIGFISHK/blog
|
ca99e61de0b539d186e3ee6570af8adcd610d99a
|
36e7498f3e1b3eb9147badaf66766e5066c7f2b2
|
refs/heads/master
| 2020-07-12T14:25:06.671170
| 2019-08-30T09:39:09
| 2019-08-30T09:39:09
| 204,830,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
#!D:\blog\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install')()
)
|
[
"417965527@qq.com"
] |
417965527@qq.com
|
69f4b590af41bf3821c185b0605fad6d82f36b90
|
55c157879bcdbd5cabaabc6f336f0e742f39a914
|
/caesar.py
|
00df9144609d2f8fbd9f451e238c955ae8c6431b
|
[] |
no_license
|
akr635/web-caesar
|
2eaf41b8193e1ecffab6ee5136a371361535bb4f
|
a205116a61a2bd10fdd7efad0a65f76baf8efc2a
|
refs/heads/master
| 2021-01-11T16:27:19.898637
| 2017-01-26T04:45:34
| 2017-01-26T04:45:34
| 80,086,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
def alphabet_position(letter):
alphabet_lower = 'abcdefghijklmnopqrstuvwxyz'
alphabet_upper = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if letter in alphabet_lower:
alphabet_position = alphabet_lower.index(letter)
elif letter in alphabet_upper:
alphabet_position = alphabet_upper.index(letter)
return alphabet_position
def rotate_character(char, rot):
alphabet_lower = 'abcdefghijklmnopqrstuvwxyz'
alphabet_upper = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if char in alphabet_lower:
rotate_position = (alphabet_position(char) + rot) % 26
rotate_character = alphabet_lower[rotate_position]
elif char in alphabet_upper:
rotate_position = (alphabet_position(char) + rot) % 26
rotate_character = alphabet_upper[rotate_position]
else:
rotate_character = char
return rotate_character
def encrypt(text, rot):
encrypted = ''
for char in text:
encrypted += rotate_character(char, rot)
return encrypted
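# A decryption counterpart one could add (a sketch, not part of the original
# file): rotating by the complement of rot undoes encrypt.
def decrypt(text, rot):
    return encrypt(text, 26 - (rot % 26))

if __name__ == '__main__':
    # round-trip check: encrypting then decrypting returns the input unchanged
    print(decrypt(encrypt('Hello, World!', 13), 13))  # Hello, World!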
|
[
"akr635@gmail.com"
] |
akr635@gmail.com
|
7bf743e5d0a9e7f925762099222812d8ced5e43d
|
6db2ca19052182259ae8d81b6f0f719843f86c40
|
/dqn_v03_simple.py
|
d8e0b5eaf5cf41e426eacf91a331fbdbc0eee146
|
[] |
no_license
|
walkingholic/EVNS_RL
|
54a8c0a4ff433c265cce376f073f640cb446aa97
|
1583bcd47d8e75ecc3ad16f2e17cac6fcc487da6
|
refs/heads/master
| 2022-12-07T05:43:10.479190
| 2020-09-06T08:12:56
| 2020-09-06T08:12:56
| 274,387,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,997
|
py
|
import sys
import gym
import pylab
import random
import numpy as np
from collections import deque
from keras.layers import Dense
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Conv2D, MaxPooling2D
from keras import optimizers
import matplotlib.pyplot as plt
import routing as rt
import copy
import test_algorithm as ta
import datetime
import os
from Graph import Graph_simple
from Graph import Graph_simple_100
from Graph import Graph_jeju
EPISODES = 5000
def one_hot(x):
return np.identity(100)[x:x + 1]
def createFolder(directory):
try:
if not os.path.isdir(directory):
os.makedirs(directory)
except OSError:
print('Error: failed to create directory %s' % directory)
class CS:
def __init__(self, node_id, long, lat, alpha):
self.id = node_id
self.price = list()
self.waittime = list()
self.chargingpower = 60 # kw
self.alpha = alpha
self.x = long
self.y = lat
for i in range(288):
p = np.random.normal(alpha, 0.15 * alpha)
while p < 0:
p = np.random.normal(alpha, 0.15 * alpha)
self.price.append(p)
for i in range(288):
waittime = np.random.normal(-1200 * (self.price[i] - 0.07), 20)
if waittime < 0:
    waittime = 0
self.waittime.append(waittime/60)
class EV:
def __init__(self, id, t_start, soc, source, destination):
self.id = id
self.t_start = t_start
self.charging_effi = 0.9
self.SOC = soc
self.init_SOC = soc
self.req_SOC = 0.8
self.before_charging_SOC=soc
self.source = source
self.destination = destination
self.maxBCAPA= 60 # kw
self.curr_location = source
self.next_location = source
self.ECRate = 0.2 # kwh/km
self.traveltime = 0 # hour
self.charged = 0
self.cs = None
self.csid = None
self.energyconsumption = 0.0
self.chargingtime = 0.0
self.chargingcost = 0.0
self.waitingtime = 0.0
self.csstayingtime = 0.0
self.drivingdistance = 0.0
self.drivingtime = 0.0
self.charingenergy = 0.0
self.fdist=0
self.rdist=0
self.path=[]
self.predic_totaltraveltime = 0.0
self.totalcost=0.0
self.to_cs_dist = 0
self.to_cs_driving_time = 0
self.to_cs_charging_time = 0
self.to_cs_waiting_time = 0
self.to_cs_soc = 0
# DQN agent from the CartPole example
class DQNAgent:
def __init__(self, state_size, action_size):
self.render = False
self.load_model = False
# define the sizes of the state and action spaces
self.state_size = state_size
self.action_size = action_size
# DQN hyperparameters
self.discount_factor = 0.99
self.learning_rate = 0.01
self.epsilon = 1.0
self.epsilon_decay = 0.9995
self.epsilon_min = 0.01
self.batch_size = 64
self.train_start = 1000
# replay memory (a deque capped at maxlen=4000)
self.memory = deque(maxlen=4000)
# create the model and the target model
self.model = self.build_model()
self.target_model = self.build_model()
# initialize the target model
self.update_target_model(0)
if self.load_model:
self.model.load_weights("dqn_model.h5")
# build a neural network that takes the state as input and outputs Q-values
def build_model(self):
model = Sequential()
model.add(Dense(128, input_dim=self.state_size, activation='relu',
kernel_initializer='he_uniform'))
model.add(Dense(64, activation='relu',
kernel_initializer='he_uniform'))
model.add(Dense(32, activation='relu',
kernel_initializer='he_uniform'))
model.add(Dense(self.action_size, activation='softmax',
kernel_initializer='he_uniform'))
model.summary()
model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
return model
# update the target model with the model's weights
def update_target_model(self, e):
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
self.target_model.set_weights(self.model.get_weights())
# choose an action with the epsilon-greedy policy
def get_action(self, state):
if np.random.rand() <= self.epsilon:
action = random.choice(range(self.action_size))
else:
state = np.reshape(state, [1, self.state_size])
q_value = self.model.predict(state)
action = np.argmax(q_value[0])
return action
# store the sample <s, a, r, s'> in the replay memory
def append_sample(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
# train the model on a batch drawn at random from the replay memory
def train_model(self):
# sample batch_size transitions at random from memory
mini_batch = random.sample(self.memory, self.batch_size)
states = np.zeros((self.batch_size, self.state_size))
next_states = np.zeros((self.batch_size, self.state_size))
actions, rewards, dones = [], [], []
for i in range(self.batch_size):
states[i] = mini_batch[i][0]
actions.append(mini_batch[i][1])
rewards.append(mini_batch[i][2])
next_states[i] = mini_batch[i][3]
dones.append(mini_batch[i][4])
# the model's Q-values for the current states
# the target model's Q-values for the next states
target = self.model.predict(states)
target_val = self.target_model.predict(next_states)
# update targets from the Bellman optimality equation
for i in range(self.batch_size):
if dones[i]:
target[i][actions[i]] = rewards[i]
else:
target[i][actions[i]] = rewards[i] + self.discount_factor * (
np.amax(target_val[i]))
self.model.fit(states, target, batch_size=self.batch_size,
epochs=1, verbose=0)
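# Worked illustration of the update above (an explanatory note, not part of
# the original code): for a non-terminal sample with reward -1,
# discount_factor 0.99 and max_a' Q_target(s', a') = 10, the regression
# target for the taken action becomes -1 + 0.99 * 10 = 8.9; the other action
# outputs keep the model's own predictions, so only the chosen action's
# Q-value is pulled toward the Bellman estimate.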
class Env:
def __init__(self, state_size, action_size):
# np.random.seed(10)
# self.graph = Graph_simple()
self.graph = Graph_simple_100()
# self.graph = Graph_jeju('data/20191001_5Min_modified.csv')
self.source = 0
self.destination = 99
self.graph.source_node_set = list(self.graph.source_node_set)
self.graph.destination_node_set = list(self.graph.destination_node_set)
self.path = []
self.path_info = []
self.sim_time=0
self.CS_list = []
self.pev = None
self.target = -1
self.state_size = state_size
self.action_size = action_size
for l in self.graph.cs_info:
alpha = np.random.uniform(0.03, 0.07)
cs = CS(l, self.graph.cs_info[l]['long'], self.graph.cs_info[l]['lat'], alpha)
self.CS_list.append(cs)
def reset(self):
t_start = np.random.uniform(0, 1200)
soc = np.random.uniform(0.3, 0.5)
while soc <= 0.0 or soc > 1.0:
soc = np.random.uniform(0.3, 0.5)
self.graph.source_node_set = list(self.graph.source_node_set)
self.graph.destination_node_set = list(self.graph.destination_node_set)
source = self.graph.source_node_set[np.random.random_integers(0, len(self.graph.source_node_set) - 1)]
while source in self.graph.cs_info.keys():
source = self.graph.source_node_set[np.random.random_integers(0, len(self.graph.source_node_set) - 1)]
destination = self.graph.source_node_set[np.random.random_integers(0, len(self.graph.source_node_set) - 1)]
while destination in self.graph.cs_info.keys():
destination = self.graph.source_node_set[np.random.random_integers(0, len(self.graph.source_node_set) - 1)]
self.path_info = []
self.pev = EV(e, t_start, soc, source, destination)
self.sim_time = self.pev.t_start
self.path_info = rt.sim_main_first_time_check(self.pev, self.CS_list, self.graph, self.action_size)
# print('\npev soc', self.pev.SOC)
state = [self.pev.source, self.pev.SOC]
for path in self.path_info:
cs, pev_SOC, front_path, total_d_time, waiting_time, charging_time = path
state += [total_d_time, waiting_time, charging_time]
state = np.reshape(state, [1, self.state_size])
return state, source, destination
def test_reset(self, pev, CS_list):
self.path_info = []
self.CS_list = CS_list
self.pev = pev
self.sim_time = self.pev.t_start
self.path_info = rt.sim_main_first_time_check(self.pev, self.CS_list, self.graph, self.action_size)
state = [self.pev.source, self.pev.curr_SOC]
for path in self.path_info:
cs, pev_SOC, front_path, total_d_time, waiting_time, charging_time = path
state += [total_d_time, waiting_time, charging_time]
state = np.reshape(state, [1, self.state_size])
return state, self.pev.source, self.pev.destination
def step(self, action, done):
cs, pev_SOC, front_path, total_d_time, waiting_time, charging_time = self.path_info[action]
# print(self.sim_time, self.pev.curr_location, cs.id, pev_SOC)
self.target = cs
self.pev.path.append(self.pev.curr_location)
if pev_SOC <= 0.0:
print('** error soc', pev_SOC)
done = 1
reward = -5
return np.zeros((1,self.state_size)), -1, reward, done
if len(front_path)>1:
next_node = front_path[1]
self.sim_time, time = rt.update_ev(self.pev, self.graph, self.pev.curr_location, next_node, self.sim_time)
if self.sim_time == 0 and time == 0:
print('** time idx error')
done = 1
reward = -5
return np.zeros((1, self.state_size)), -1, reward, done
done = 0
reward = -time
self.pev.curr_location = next_node
self.path_info = rt.sim_main_first_time_check(self.pev, self.CS_list, self.graph, self.action_size)
next_state = [self.pev.curr_location, self.pev.curr_SOC]
for path in self.path_info:
cs, pev_SOC, front_path, total_d_time, waiting_time, charging_time = path
next_state += [total_d_time, waiting_time, charging_time]
next_state = np.reshape(next_state, [1, self.state_size])
return next_state, next_node, reward, done
elif self.pev.curr_location == cs.id:
self.pev.before_charging_SOC = self.pev.curr_SOC
self.pev.cscharingenergy = self.pev.maxBCAPA * self.pev.req_SOC - self.pev.curr_SOC * self.pev.maxBCAPA
self.pev.cschargingcost = self.pev.cscharingenergy * cs.price[int(self.sim_time / 5)]
self.pev.curr_SOC = self.pev.req_SOC
self.pev.cschargingtime = charging_time
self.pev.cschargingwaitingtime = waiting_time
self.pev.charged = 1
self.pev.cs = cs
self.pev.csid = cs.id
self.sim_time += charging_time * 60
self.sim_time += waiting_time * 60
# print(waiting_time, waiting_time * 60)
self.pev.csdrivingtime = self.pev.totaldrivingtime
self.pev.csdistance = self.pev.totaldrivingdistance
self.pev.cschargingwaitingtime = self.pev.cschargingwaitingtime
self.pev.cschargingtime = self.pev.cschargingtime
self.pev.cssoc = self.pev.curr_SOC
done = 1
reward = -1 * (waiting_time + charging_time)
print(done, 'reached the charging station')
return np.zeros((1,self.state_size)), -1, reward, done
else:
print("???")
input()
def gen_test_envir_simple(num_evs):
np.random.seed(10)
graph = Graph_simple_100()
EV_list = []
for e in range(num_evs):
t_start = np.random.uniform(0, 1200)
soc = np.random.uniform(0.3, 0.5)
while soc <= 0.0 or soc > 1.0 :
soc = np.random.uniform(0.3, 0.5)
graph.source_node_set = list(graph.source_node_set)
graph.destination_node_set = list(graph.destination_node_set)
source = graph.source_node_set[np.random.random_integers(0, len(graph.source_node_set) - 1)]
while source in graph.cs_info.keys():
source = graph.source_node_set[np.random.random_integers(0, len(graph.source_node_set) - 1)]
destination = graph.source_node_set[np.random.random_integers(0, len(graph.source_node_set) - 1)]
while destination in graph.cs_info.keys():
destination = graph.source_node_set[np.random.random_integers(0, len(graph.source_node_set) - 1)]
# source = 4080021700
# destination = 4070008103
ev = EV(e, t_start, soc, source, destination)
EV_list.append(ev)
CS_list = []
for l in graph.cs_info:
# print('gen cs')
# alpha = np.random.uniform(0.03, 0.07)
alpha = np.random.uniform(0.03, 0.07)
cs = CS(l, graph.cs_info[l]['long'], graph.cs_info[l]['lat'], alpha)
CS_list.append(cs)
return EV_list, CS_list, graph
def gen_test_envir_jeju(traffic_data_path, num_evs):
graph = Graph_jeju(traffic_data_path)
EV_list = []
for e in range(num_evs):
t_start = np.random.uniform(0, 1200)
soc = np.random.uniform(0.3, 0.5)
while soc <= 0.0 or soc > 1.0 :
soc = np.random.uniform(0.3, 0.5)
graph.source_node_set = list(graph.source_node_set)
graph.destination_node_set = list(graph.destination_node_set)
source = graph.source_node_set[np.random.random_integers(0, len(graph.source_node_set) - 1)]
destination = graph.source_node_set[np.random.random_integers(0, len(graph.source_node_set) - 1)]
while destination in graph.cs_info.keys():
destination = graph.source_node_set[np.random.random_integers(0, len(graph.source_node_set) - 1)]
# source = 4080021700
# destination = 4070008103
ev = EV(e, t_start, soc, source, destination)
EV_list.append(ev)
CS_list = []
for l in graph.cs_info:
# print('gen cs')
# alpha = np.random.uniform(0.03, 0.07)
alpha = np.random.uniform(0.03, 0.07)
cs = CS(l, graph.cs_info[l]['long'], graph.cs_info[l]['lat'], alpha)
CS_list.append(cs)
return EV_list, CS_list, graph
if __name__ == "__main__":
now = datetime.datetime.now()
resultdir = '{0:02}-{1:02} {2:02}-{3:02} {4:02}'.format(now.month, now.day, now.hour, now.minute, now.second)
basepath = os.getcwd()
dirpath = os.path.join(basepath, resultdir)
createFolder(dirpath)
action_size = 4
state_size = action_size*3+2
env = Env(state_size, action_size)
agent = DQNAgent(state_size, action_size)
scores, episodes, steps= [], [], []
for e in range(EPISODES):
done = False
score = 0
path=[]
taget_cs = []
state, source, destination = env.reset()
current_node = source
step = 0
print("\nEpi:", e, agent.epsilon)
print(source,'->', destination)
print('sim time:', env.sim_time)
path.append(source)
while not done:
action = agent.get_action(state)
next_state, next_node, reward, done = env.step(action, done)
step += 1
# if step>30:
# done = 1
agent.append_sample(state, action, reward, next_state, done)
if len(agent.memory) >= agent.train_start:
agent.train_model()
score += reward
state = next_state
path.append(next_node)
taget_cs.append(env.target.id)
if done:
# update the target model with the model's weights at the end of each episode
# if e % 1 == 0:
agent.update_target_model(e)
print('update model')
# print(path)
print('Score:', score)
print('Step:', step)
print('sim time:', env.sim_time)
print('Distance:', env.pev.totaldrivingdistance)
print('Driving time:', env.pev.totaldrivingtime)
print(env.pev.charged, env.pev.curr_location, env.pev.init_SOC, env.pev.curr_SOC)
print(path)
print(taget_cs)
scores.append(score)
episodes.append(e)
steps.append(step)
# for i, s in enumerate(scores):
# plt.plot(i, s, '.')
# plt.show()
# for i, s in enumerate(steps):
# plt.plot(i, s, '.')
# plt.show()
agent.model.save_weights("{}/dqn_model.h5".format(resultdir))
plt.plot(episodes, scores, 'b')
plt.show()
plt.plot(episodes, steps, 'r')
plt.show()
plt.title('Training Scores')
plt.xlabel('Epoch')
plt.ylabel('score')
plt.plot(episodes, scores, 'b')
fig = plt.gcf()
fig.savefig('{}/train score.png'.format(resultdir), facecolor='#eeeeee', dpi=300)
plt.clf()
plt.title('Training Steps')
plt.xlabel('Epoch')
plt.ylabel('step')
plt.plot(episodes, steps, 'r')
fig = plt.gcf()
fig.savefig('{}/train step.png'.format(resultdir), facecolor='#eeeeee', dpi=300)
plt.clf()
############################### performance evaluation #############
scores, episodes, steps = [], [], []
npev=100
EV_list, CS_list, graph = gen_test_envir_simple(npev)
agent.epsilon = 0
EV_list_DQN = copy.deepcopy(EV_list)
CS_list_DQN = copy.deepcopy(CS_list)
for e, pev in enumerate(EV_list_DQN):
done = False
score = 0
path=[]
taget_cs = []
state, source, destination = env.test_reset(pev, CS_list_DQN)
current_node = source
step = 0
print("\nEpi:", e, agent.epsilon)
print(source,'->', destination)
print('sim time:', env.sim_time)
path.append(source)
while not done:
action = agent.get_action(state)
next_state, next_node, reward, done = env.step(action, done)
score += reward
state = next_state
path.append(next_node)
taget_cs.append(env.target.id)
step += 1
if done:
print('Score:', score)
print('Step:', step)
print('sim time:', env.sim_time)
print('Distance:', env.pev.totaldrivingdistance)
print('Driving time:', env.pev.totaldrivingtime)
print(env.pev.charged, env.pev.curr_location, env.pev.init_SOC, env.pev.curr_SOC)
print(path)
print(taget_cs)
scores.append(score)
episodes.append(e)
steps.append(step)
EV_list_TA = copy.deepcopy(EV_list)
CS_list_TA = copy.deepcopy(CS_list)
ta.one_time_check_timeweight(EV_list_TA, CS_list_TA, graph, 313)
EV_list_TEA = copy.deepcopy(EV_list)
CS_list_TEA = copy.deepcopy(CS_list)
ta.every_time_check_timeweight(EV_list_TEA, CS_list_TEA, graph, 313)
ta.sim_result_text(resultdir, EV_list_DQN=EV_list_DQN, EV_list_TA=EV_list_TA, EV_list_TEA=EV_list_TEA)
ta.sim_result_general_presentation(graph, resultdir, npev, EV_list_DQN=EV_list_DQN, EV_list_TA=EV_list_TA, EV_list_TEA=EV_list_TEA)
# plt.title('to cs Dist')
# plt.xlabel('EV ID')
# plt.ylabel('km')
# cnt = 0
# r1_list = []
# for ev in EV_list_DQN:
# r1_list.append(ev.to_cs_dist)
# plt.plot(r1_list, label='DQN')
# r1_list = []
# for ev in EV_list_TA:
# r1_list.append(ev.to_cs_dist)
# plt.plot(r1_list, label='Astar')
# plt.legend()
# plt.show()
#
# plt.title('Driving time')
# plt.xlabel('EV ID')
# plt.ylabel('time')
# cnt = 0
# r1_list = []
# for ev in EV_list_DQN:
# r1_list.append(ev.to_cs_driving_time)
# plt.plot(r1_list, label='DQN')
# r1_list = []
# for ev in EV_list_TA:
# r1_list.append(ev.to_cs_driving_time)
# plt.plot(r1_list, label='Astar')
# plt.legend()
# plt.show()
#
# plt.title('Charging time')
# plt.xlabel('EV ID')
# plt.ylabel('time')
# cnt = 0
# r1_list = []
# for ev in EV_list_DQN:
# r1_list.append(ev.to_cs_charging_time)
# plt.plot(r1_list, label='DQN')
# r1_list = []
# for ev in EV_list_TA:
# r1_list.append(ev.to_cs_charging_time)
# plt.plot(r1_list, label='Astar')
# plt.legend()
# plt.show()
#
# plt.title('Wating time')
# plt.xlabel('EV ID')
# plt.ylabel('time')
# cnt = 0
# r1_list = []
# for ev in EV_list_DQN:
# r1_list.append(ev.to_cs_waiting_time)
# plt.plot(r1_list, label='DQN')
# r1_list = []
# for ev in EV_list_TA:
# r1_list.append(ev.to_cs_waiting_time)
# plt.plot(r1_list, label='Astar')
# plt.legend()
# plt.show()
#
# plt.title('Total time')
# plt.xlabel('EV ID')
# plt.ylabel('time')
# cnt = 0
# r1_list = []
# for ev in EV_list_DQN:
# r1_list.append(ev.to_cs_waiting_time + ev.to_cs_charging_time + ev.to_cs_driving_time)
# plt.plot(r1_list, label='DQN')
# r1_list = []
# for ev in EV_list_TA:
# r1_list.append(ev.to_cs_waiting_time + ev.to_cs_charging_time + ev.to_cs_driving_time)
# plt.plot(r1_list, label='Astar')
# plt.legend()
# plt.show()
|
[
"40564892+walkingholic@users.noreply.github.com"
] |
40564892+walkingholic@users.noreply.github.com
|
71494e7f27e6411ad51089b1e1615b9a55f49730
|
7b2fdceaafa6dd13f3f02a24b3eeda5f7574ac16
|
/untitled0.py
|
820d9843e7d010c29d56243cfad79dbad6d3e75b
|
[] |
no_license
|
zkmastereng/fandangoscrape
|
372fd3328c71a594f2b8fb6f4c09de53a914ddef
|
4bdd9f50830e1de9f99da84fc539e82da80c9547
|
refs/heads/main
| 2023-05-02T09:51:18.635033
| 2021-05-26T15:31:14
| 2021-05-26T15:31:14
| 371,083,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,642
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 23:20:03 2021
@author: ozkan
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
fandango = pd.read_csv("fandango_scrape.csv")
x = fandango['VOTES']
y = fandango['RATING']
#plt.scatter(y,x)
#plt.ylabel('VOTES')
#plt.xlabel('RATING')
corre = fandango.corr()
fandango['YEAR'] = fandango['FILM'].apply(lambda title: title.split('(')[-1])
fandango['YEAR'] = fandango['YEAR'].apply(lambda x: x.replace(')',''))
count_per_year = fandango['YEAR'].value_counts()
#plt.hist(fandango['YEAR'])
#plt.xlabel('YEAR')
#plt.ylabel('count')
#plt.show()
highest_10_ratings = fandango.sort_values('VOTES',ascending=False)[:10]
zero_votes_sum = (fandango['VOTES'] == 0).sum()
fan_reviewed = fandango.drop(fandango[fandango.VOTES==0].index)
#plt.figure(figsize=(10,4),dpi=150)
#sns.kdeplot(data=fan_reviewed,x='RATING',clip=[0,5],fill=True,label='True Rating')
#sns.kdeplot(data=fan_reviewed,x='STARS',clip=[0,5],fill=True,label='Stars Displayed')
#plt.legend(loc=(1.05,0.5))
fan_reviewed['STARS_DIFF'] = (fan_reviewed['STARS']-fan_reviewed['RATING']).round(2)
#sns.countplot(data=fan_reviewed,x='STARS_DIFF',palette='magma')
one_star_different = fan_reviewed[fan_reviewed['STARS_DIFF']==1]
all_sites = pd.read_csv('all_sites_scores.csv')
all_sites.info()
all_sites.describe()
#sns.scatterplot(data=all_sites,x='RottenTomatoes',y='RottenTomatoes_User')
#plt.xlim(0,100)
#plt.ylim(0,100)
all_sites['DIFF'] = all_sites['RottenTomatoes']-all_sites['RottenTomatoes_User']
mean_diff = all_sites['DIFF'].mad()
|
[
"noreply@github.com"
] |
zkmastereng.noreply@github.com
|
1db38bdb0515f41cd87409073f4c12651308a9fd
|
776f103a404c904af0a3b53285404815acd20497
|
/script/ab.py
|
4f895619c92f59fb62ab77de48645bc1a14c8e20
|
[] |
no_license
|
johndpope/spy611scripts
|
cb8b94ba6b0ddbcd73f4e51261007814618c9d45
|
3cfb0a61f17713b2540ec3f7c6762d18a943bc7a
|
refs/heads/master
| 2020-12-24T08:11:34.780700
| 2014-10-10T16:10:07
| 2014-10-10T16:10:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,952
|
py
|
#!/usr/bin/env python
# ~/spy611/script/ab.py
# This script is called by:
# ~/spy611/script/pred_gbm.bash
# I use this script to create variables from CSV files.
# Then I feed the variables to:
# sklearn.ensemble.AdaBoostClassifier
import sklearn.ensemble
import numpy
x_is = numpy.loadtxt('/tmp/x_is.csv', delimiter=',')
x_oos = numpy.loadtxt('/tmp/x_oos.csv', delimiter=',')
y_is_n1dg = numpy.loadtxt('/tmp/y_is_n1dg.csv', delimiter=',')
y_is_n2dg = numpy.loadtxt('/tmp/y_is_n2dg.csv', delimiter=',')
y_is_n1wg = numpy.loadtxt('/tmp/y_is_n1wg.csv', delimiter=',')
y_oos_n1dg = numpy.loadtxt('/tmp/y_oos_n1dg.csv', delimiter=',')
y_oos_n2dg = numpy.loadtxt('/tmp/y_oos_n2dg.csv', delimiter=',')
y_oos_n1wg = numpy.loadtxt('/tmp/y_oos_n1wg.csv', delimiter=',')
clf_n1dg = sklearn.ensemble.AdaBoostClassifier(n_estimators=100).fit(x_is,y_is_n1dg)
clf_n2dg = sklearn.ensemble.AdaBoostClassifier(n_estimators=100).fit(x_is,y_is_n2dg)
clf_n1wg = sklearn.ensemble.AdaBoostClassifier(n_estimators=100).fit(x_is,y_is_n1wg)
print('AdaBoost gives me access to a measurement called score.')
print('Here are scores for out of sample data.')
print('Higher scores are better. A score of 1 is perfect:')
print('clf_n1dg.score(x_oos,y_oos_n1dg)) is:')
print(clf_n1dg.score(x_oos,y_oos_n1dg))
print('clf_n2dg.score(x_oos,y_oos_n2dg)) is:')
print(clf_n2dg.score(x_oos,y_oos_n2dg))
print('clf_n1wg.score(x_oos,y_oos_n1wg)) is:')
print(clf_n1wg.score(x_oos,y_oos_n1wg))
# Copy the predictions into tables:
# ab_predictions_n1dg
# ab_predictions_n2dg
# ab_predictions_n1wg
# SQL creation syntax
# CREATE TABLE ab_predictions_n1dg (pnum INTEGER, prob_willbetrue INTEGER);
import psycopg2
import sys
import pprint
conn_string = "host='localhost' dbname='madlib' user='madlib' password='madlib'"
# print the connection string we will use to connect
print("Connecting to database\n ->%s" % (conn_string,))
# get a connection, if a connect cannot be made an exception will be raised here
conn = psycopg2.connect(conn_string)
# conn.cursor will return a cursor object, you can use this cursor to perform queries
cursor = conn.cursor()
print("Connected!\n")
cursor.execute("TRUNCATE TABLE ab_predictions_n1dg")
cursor.execute("TRUNCATE TABLE ab_predictions_n2dg")
cursor.execute("TRUNCATE TABLE ab_predictions_n1wg")
rnum = 0
for prediction in clf_n1dg.predict(x_oos) :
rnum = rnum + 1
cursor.execute("INSERT INTO ab_predictions_n1dg (pnum,prob_willbetrue) values(%s,%s)",(rnum,prediction))
conn.commit()
rnum = 0
for prediction in clf_n2dg.predict(x_oos) :
rnum = rnum + 1
cursor.execute("INSERT INTO ab_predictions_n2dg (pnum,prob_willbetrue) values(%s,%s)",(rnum,prediction))
conn.commit()
rnum = 0
for prediction in clf_n1wg.predict(x_oos) :
rnum = rnum + 1
cursor.execute("INSERT INTO ab_predictions_n1wg (pnum,prob_willbetrue) values(%s,%s)",(rnum,prediction))
conn.commit()
# select * from ab_predictions_n1dg;
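# A possible consolidation of the three insert loops above (a sketch using the
# same table names, not part of the original script); kept commented out so
# the rows are not inserted twice:
# for table, clf in (('ab_predictions_n1dg', clf_n1dg),
#                    ('ab_predictions_n2dg', clf_n2dg),
#                    ('ab_predictions_n1wg', clf_n1wg)):
#     rows = list(enumerate(clf.predict(x_oos), start=1))
#     cursor.executemany(
#         "INSERT INTO " + table + " (pnum,prob_willbetrue) values(%s,%s)", rows)
#     conn.commit()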
|
[
"dan.bikle@gmail.com"
] |
dan.bikle@gmail.com
|
318d00346ef383eef61967f31cc6f769f3b9f81d
|
8adead984d1e2fd4f36ae4088a0363597fbca8a3
|
/Strings/ReverseStringII.py
|
4a3617e788cc4374cde3f6720fd50e5c511985f6
|
[] |
no_license
|
ravisjoshi/python_snippets
|
2590650c673763d46c16c9f9b8908997530070d6
|
f37ed822b5863a5a11b09550dd32a73d68e7070b
|
refs/heads/master
| 2022-11-05T03:48:10.842858
| 2020-06-14T09:19:46
| 2020-06-14T09:19:46
| 256,961,137
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
"""
Given a string and an integer k, you need to reverse the first k characters for every 2k characters counting from the start of the string. If there are fewer than k characters left, reverse all of them. If there are fewer than 2k but greater than or equal to k characters, then reverse the first k characters and leave the others as original.
Input: s = "abcdefg", k = 2 / Output: "bacdfeg"
Restrictions:
The string consists of lowercase English letters only.
The length of the given string and k will be in the range [1, 10000].
"""
class Solution:
def reverseStr(self, _String, k):
inputString = list(_String)
for index in range(0, len(inputString), 2 * k):
inputString[index:index + k] = inputString[index:index + k][::-1]
return ''.join(inputString)
if __name__ == '__main__':
s = Solution()
inputString = "abcdefg"
k = 2
print(s.reverseStr(inputString, k))
inputString = "abcdef"
k = 3
print(s.reverseStr(inputString, k))
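    # Extra check (illustrative, not from the original file): fewer than k
    # characters remain, so the whole string is reversed, giving "ba".
    inputString = "ab"
    k = 3
    print(s.reverseStr(inputString, k))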
|
[
"ravishankar.joshi@centurylink.com"
] |
ravishankar.joshi@centurylink.com
|
d77b93a55d82d3c5f1748d40a09163e24c848444
|
6ef78d888bf0940645e47237cd9e5214d254aa80
|
/socket_learning/socketserver_server.py
|
63f3111452092805403896aeb89127350a3f3444
|
[] |
no_license
|
EnochMeng/exercise_library
|
ae477d246eee2c252dcd516c006fc68b588c750b
|
e041619787c9045947366166deb7c4d001b0020c
|
refs/heads/master
| 2021-09-07T17:04:39.630195
| 2018-02-26T14:59:22
| 2018-02-26T14:59:22
| 115,167,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 730
|
py
|
# Implements concurrency, i.e. multiple clients can chat with the server at the same time
import socketserver
class MyServer(socketserver.BaseRequestHandler):
def handle(self):
print('server started')
while True:
conn = self.request
print(self.client_address)
while True:
client_data = conn.recv(1024)
print(str(client_data, 'utf8'))
if str(client_data, 'utf8') == 'exit':
break
conn.sendall(client_data)  # echo the client's data back, mainly to observe concurrency
conn.close()
break
if __name__ == '__main__':
server = socketserver.ThreadingTCPServer(('127.0.0.1', 8080), MyServer)
server.serve_forever()
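# A minimal matching client (an illustrative sketch, not part of this file):
# import socket
# sk = socket.socket()
# sk.connect(('127.0.0.1', 8080))
# sk.sendall(bytes('hello', 'utf8'))
# print(str(sk.recv(1024), 'utf8'))   # the server echoes the data back
# sk.sendall(bytes('exit', 'utf8'))
# sk.close()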
|
[
"lingchuanmeng@foxmail"
] |
lingchuanmeng@foxmail
|
b71e35d7636b285e2fa0f57e27afaa6f59a0d693
|
9fb7a7fc8509b414f9e6b1dd0e1549911042134f
|
/tests/apps/standard/__init__.py
|
02e3e6e6649451a91bdefbcce4e3bf04407aff3f
|
[
"MIT"
] |
permissive
|
jperla/weby
|
5134657463876d009de1310efdce7d9c6b40509f
|
eb019cfdc00f14f22c0b65b757045c621f1cac28
|
refs/heads/master
| 2020-05-17T10:00:59.078844
| 2010-11-15T16:17:25
| 2010-11-15T16:17:25
| 388,681
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
#!/usr/bin/env python
import time
import weby
app = weby.defaults.App()
# Controllers
@app.subapp('/')
@weby.urlable_page()
def index(req, page):
page(u'Hello, world!')
static_path = u'tests/apps/standard/static/'
static = app.subapp('static')(weby.apps.standard.static(static_path))
# Middleware
from weby.middleware import EvalException
wrapped_app = weby.wsgify(app, EvalException)
# Server
from weby.http import server
if __name__ == '__main__':
server.serve(wrapped_app, host='127.0.0.1', port=8080)
|
[
"donotrealemailme @ OR @@@ jperla.com"
] |
donotrealemailme @ OR @@@ jperla.com
|
7ca4ccedd72570a8260c517fcd924e85739c71fc
|
c3d2a483b6b1e05cf66b916055d7438df4d5dccf
|
/Book_case_study/lanuage.py
|
ecea9154ead51419b228b12c7c0fd406098fc14e
|
[] |
no_license
|
vuminhdiep/EdX-Python-for-Research
|
5b627d83b507771c5460b56f98c7ae3a0c12d897
|
ad56bf2e2c9f757e0a9f2e90f9ab4ad4e4e910d9
|
refs/heads/master
| 2023-01-05T06:34:46.584271
| 2020-10-21T08:40:27
| 2020-10-21T08:40:27
| 292,648,892
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,768
|
py
|
text = "This is my test text. We will use this text to test the function."
def count_word(text):
text = text.lower()
skips = [",", ".", ":", ";", "'", '"']
for ch in skips:
text = text.replace(ch, "")
word_counts = {}
for word in text.split(" "):
if word in word_counts:
word_counts[word] += 1
else:
word_counts[word] = 1
return word_counts
count_word(text)
from collections import Counter
text = "This is my test text. We will use this text to test the function."
def count_word_fast(text):
text = text.lower()
skips = [",", ".", ":", ";", "'", '"']
for ch in skips:
text = text.replace(ch, "")
word_counts = Counter(text.split(" "))
return word_counts
count_word_fast(text)
def read_book(title_path):
with open(title_path, "r", encoding="utf8") as current_file:
text = current_file.read()
text = text.replace("\n", "").replace("\r", "")
return text
def word_stats(word_counts):
num_unique = len(word_counts)
counts = word_counts.values()
return (num_unique, counts)
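# Example of combining the helpers above (illustrative, not in the original file):
# (num_unique, counts) = word_stats(count_word_fast(text))
# print(num_unique, sum(counts))   # distinct words, total words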
# Read multiple files from different directories:
# import os
# book_dir = "./Book_case_study"
# for language in os.listdir(book_dir):
# for author in os.listdir(book_dir + "/" + language):
# for title in os.listdir(book_dir + "/" + language + "/" + author):
# inputfile = book_dir + "/" + language + "/" + author + "/" + title
# print(inputfile)
# text = read_book(inputfile)
# (num_unique, counts) = word_stats(count_word(text))
import pandas as pd
table = pd.DataFrame(columns = ("name", "age"))
table.loc[1] = "James", 22
table.loc[2] = "Jess", 32
print(table)
|
[
"51457124+vuminhdiep@users.noreply.github.com"
] |
51457124+vuminhdiep@users.noreply.github.com
|
336c2ef3c5d4ccb00dd38840fad47831e67bb075
|
b1f1a3bbfefb29a6b5f730814aafb253400019d7
|
/chi_analysis/MG_Pythia8_cfg.py
|
15b1820cf9548e011a4ecd2758bbcd1b7963f712
|
[] |
no_license
|
ahinzmann/cmsusercode
|
9ef4a98110ddbebd09f2f98f1db509f555863364
|
8085bc036696981d28287e9e6524b963c1fee7b9
|
refs/heads/master
| 2023-07-07T06:11:34.001241
| 2023-06-27T07:12:56
| 2023-06-27T07:12:56
| 10,263,991
| 0
| 1
| null | 2018-03-28T07:47:13
| 2013-05-24T10:09:48
|
Python
|
UTF-8
|
Python
| false
| false
| 7,815
|
py
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: Configuration/GenProduction/python/B2G-RunIIFall17wmLHEGS-00474-fragment.py --fileout file:B2G-RunIIFall17wmLHEGS-00474.root --mc --eventcontent RAWSIM,LHE --datatier GEN-SIM,LHE --conditions 93X_mc2017_realistic_v3 --beamspot Realistic25ns13TeVEarly2017Collision --step LHE,GEN --geometry DB:Extended --era Run2_2017 --python_filename B2G-RunIIFall17wmLHEGS-00474_1_cfg.py --no_exec --customise Configuration/DataProcessing/Utils.addMonitoring --customise_commands process.RandomNumberGeneratorService.externalLHEProducer.initialSeed=int(1561379105%100) -n 10000
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
OutName='OUTPUTNAME'
gridpack_path='GRIDPACKPATH'
n_Events=NEVENTS
random_seed=RANDOMSEED
process = cms.Process('GEN',eras.Run2_2016)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic25ns13TeV2016Collision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(n_Events)
)
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('Configuration/GenProduction/python/B2G-RunIIFall17wmLHEGS-00474-fragment.py nevts:%i'%n_Events),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.out = cms.OutputModule("PoolOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
eventAutoFlushCompressedSize = cms.untracked.int32(20971520),
fileName = cms.untracked.string('file:%s_GEN.root'%OutName),
outputCommands = process.AODSIMEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '94X_mcRun2_asymptotic_v3', '')
process.generator = cms.EDFilter("Pythia8HadronizerFilter",
PythiaParameters = cms.PSet(
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP5Settings'),
pythia8CP5Settings = cms.vstring('Tune:pp 14',
'Tune:ee 7',
'MultipartonInteractions:ecmPow=0.03344',
'PDF:pSet=20',
'MultipartonInteractions:bProfile=2',
'MultipartonInteractions:pT0Ref=1.41',
'MultipartonInteractions:coreRadius=0.7634',
'MultipartonInteractions:coreFraction=0.63',
'ColourReconnection:range=5.176',
'SigmaTotal:zeroAXB=off',
'SpaceShower:alphaSorder=2',
'SpaceShower:alphaSvalue=0.118',
'SigmaProcess:alphaSvalue=0.118',
'SigmaProcess:alphaSorder=2',
'MultipartonInteractions:alphaSvalue=0.118',
'MultipartonInteractions:alphaSorder=2',
'TimeShower:alphaSorder=2',
'TimeShower:alphaSvalue=0.118'),
processParameters = cms.vstring(
'JetMatching:outTree_flag = 1'
),
pythia8CommonSettings = cms.vstring('Tune:preferLHAPDF = 2',
'Main:timesAllowErrors = %i'%n_Events,
'Check:epTolErr = 0.01',
'Beams:setProductionScalesFromLHEF = off',
'SLHA:keepSM = on',
'SLHA:minMassSM = 1000.',
'ParticleDecays:limitTau0 = on',
'ParticleDecays:tau0Max = 10',
'ParticleDecays:allowPhotonRadiation = on')
),
comEnergy = cms.double(13000.0),
filterEfficiency = cms.untracked.double(1.0),
maxEventsToPrint = cms.untracked.int32(1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1)
)
process.out.outputCommands.extend(
[
'keep GenEventInfoProduct_*_*_*',
'keep *_ak4GenJets_*_*',
'keep *_ak4CaloJets_*_*',
'keep *_ak4JetID_*_*',
'keep *_ak4JetExtender_*_*',
#------- Trigger collections ------
'keep edmTriggerResults_TriggerResults_*_*',
'keep *_hltTriggerSummaryAOD_*_*',
'keep L1GlobalTriggerObjectMapRecord_*_*_*',
'keep L1GlobalTriggerReadoutRecord_*_*_*',
#------- Various collections ------
'keep *_EventAuxilary_*_*',
'keep *_offlinePrimaryVertices_*_*',
'keep *_offlinePrimaryVerticesWithBS_*_*',
])
process.externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring(gridpack_path),
nEvents = cms.untracked.uint32(n_Events),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
# Path and EndPath definitions
process.load("RecoJets.Configuration.GenJetParticles_cff")
process.load("RecoJets.Configuration.RecoGenJets_cff")
process.ak4GenJets.jetPtMin=200
#process.genParticles.src="generator"
process.ProductionFilterSequence = cms.Sequence(process.externalLHEProducer*process.generator)
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RAWSIMoutput_step = cms.EndPath(process.out)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,process.genfiltersummary_step,process.endjob_step,process.RAWSIMoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# filter all path with the production filter sequence
for path in process.paths:
getattr(process,path).insert(0, process.ProductionFilterSequence)
#process.p = cms.Path(process.externalLHEProducer*process.generator*process.genParticles*process.genJetParticles*process.ak4GenJets)
#process.endpath = cms.EndPath(process.out)
#process.schedule = cms.Schedule(process.p,process.endpath)
process.out.outputCommands=cms.untracked.vstring('keep *','drop edmHepMCProduct_generator_*_*','drop *_genParticles*_*_*','drop *_genParticlesForJets*_*_*')
# customisation of the process.
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# End of customisation functions
# Customisation from command line
process.RandomNumberGeneratorService.externalLHEProducer.initialSeed=int(random_seed)
process.RandomNumberGeneratorService.generator.initialSeed=int(random_seed)
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
|
[
"hinzmann@cern.ch"
] |
hinzmann@cern.ch
|
bafa2524937ca9751f5a72349f2c3e4e32c92369
|
c0c4a82243978cbf52fc6a4c01edc060f4611153
|
/SampleProjects/POMProjectDemo/Locators/locators.py
|
744230beacf69c6d090677fca97925f78e6f9aba
|
[] |
no_license
|
Lalesh-code/Lalesh_Python
|
8536c9f8ac35f10d7d2f87d98b925ff651c22abb
|
aad5ab0bec5144708234c3f73460777bd1274a9a
|
refs/heads/master
| 2022-12-09T14:04:54.605522
| 2020-08-15T14:07:59
| 2020-08-15T14:07:59
| 287,758,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
class Locators():
# Login page objects
username_textbox_id = "txtUsername"
password_textbox_id = "txtPassword"
login_button_id = "btnLogin"
# Home page objects
welcome_link_id = "welcome"
logout_link_linkText = "Logout"
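# Illustrative use with Selenium (an assumption; this file only defines the
# locator strings consumed by the page objects):
# driver.find_element("id", Locators.username_textbox_id).send_keys("Admin")
# driver.find_element("id", Locators.login_button_id).click()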
|
[
"lalesh.garud@gmail.com"
] |
lalesh.garud@gmail.com
|
49b3855070fbfa231c6b4ef2412572433fe8d649
|
735a315ea82893f2acd5ac141f1a9b8be89f5cb9
|
/pylib/v6.1.84/mdsplus_wsgi.py
|
f50ed1d37ecf60acd8495f2d17c36a4e1ff2fe9c
|
[] |
no_license
|
drsmith48/pppl-mdsplus-python
|
5ce6f7ccef4a23ea4b8296aa06f51f3a646dd36f
|
0fb5100e6718c8c10f04c3aac120558f521f9a59
|
refs/heads/master
| 2021-07-08T02:29:59.069616
| 2017-10-04T20:17:32
| 2017-10-04T20:17:32
| 105,808,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,942
|
py
|
"""Use as a mod_wsgi handler in apache.
This module provides access to MDSplus events and data and is designed for use with AJAX web-based
applications using the XMLHttpRequest object.
A sample apache configuration to enable the use of this module would look something like:
WSGIScriptAlias /mdsplusWsgi /usr/local/cgi/mdsplus.wsgi
Where mdsplus.wsgi contains something like:
from MDSplus.mdsplus_wsgi import application
The application class in this module will add the directory where the mdsplus.wsgi file resides to the python
path and then try to import a module named mdsplus_wsgi_config. If you provide a file called
mdsplus_wsgi_config.py in the same directory as the mdsplus.wsgi then you can perform some environment
initialization such as defining tree paths, UDP_EVENT settings etc.
As sample mdsplus_wsgi_config.py file might contain:
import os
os.environ['MDS_PATH']='/usr/local/mdsplus/tdi'
os.environ['mytree_path']='/mytree-directory-path'
os.environ['mytree2_path']='/mytree2-directory-path'
os.environ['UDP_EVENTS']='YES'
Once the mdsplus.wsgi is configured the web server will serve requests with the following format:
http://mywebserver-host/mdsplusWsgi/request-type/[param0[/param1[.../paramn]]?query-options
Currently the following request-types are supported:
event - Wait for an MDSplus event and produce either a xml document or hand the event info over to a python
handler to format a custom response.
URL: http://mywebserver-host/mdsplusWsgi/event/event-name[?timeout=seconds[&handler=handler-module]]
If no handler is specified then the default handler will be used.
The default handler does the following:
If the event occurs before the timeout (default timeout is 60 seconds):
returns a page of xml which looks like:
<event><name>event-name</name><time>time-of-event</time></event> or
<event><name>event-name</name><time>time-of-event</time><data>
<data_bytes>[byte1,byte2,byte3,....,byte4]</data_bytes>
<data_text>text_bytes</data_text></data>
</event>
If the event times out then returns an empty page with status of NO_RESPONSE(204)
If there is an error during processing the event it returns a page of xml which
looks like:
<exception>text-of-exception</exception>
If a handler is specified, a module of the name of handler specified will be
imported and the function called "handler" in that module will be called with
two arguments, the req and the MDSplus event instance in that order. The handler can use the req
object to write the response and the event object to get the event data. The
event-instance.exception will = None if no problem occurred during the event processing or
the exception string if an error occurred. The handler should return a valid
HTTP status if it handles the event otherwise it can return None to resume the
default processing. The following is a handler used on the C-Mod experiment
for monitoring the shot cycle state machine. Information about the state and
shot number is encoded in an event called CMOD_COUNTDOWN_EVENT.
import time
import numpy
import sys,os
states=["starting", "standby", "cooldown", "test", "recool", "init", "check", "pulse", "abort"]
lastData=None
lastTime=time.time()
def handler(e):
if e.exception is None:
response_headers=list()
response_headers.append(('Content-type','text/xml'))
status='200 OK'
data=e.getRaw()
shot=int(data[range(4,8)].view(numpy.uint32).item())
toState=states[int(data[1])]
fromState=states[int(data[0])]
output='<?xml version="1.0" encoding="ISO-8859-1" ?>\n'
output += '<cmodState>\n <shot>%d</shot>\n <toState>%s</toState>\n <fromState>%s</fromState>\n</cmodState>' % (shot, toState, fromState)
return (response_headers, status, output)
1darray - Evaluate an expression and return a 1-D array of numeric values
URL: http://mywebserver-host/mdsplusWsgi/1darray/[tree-name/shot-number]?expr=expression
This request type will result in binary data being returned. The request headers returned include one or more of
DTYPE (e.g. "Float32Array"), LENGTH, TREE, SHOT, ERROR.
In javascript you would read the data using code similar to:
var a = eval('new '+req.getResponseHeader('DTYPE')+'(req.response)');
NOTE: Typed arrays in javascript may not be supported in all browsers yet.
If there is an error then there will be a request header returned called "ERROR" and its value is the error message.
1dsignal - Evaluate an expression and return a 1-D array of x values followed by a 1-D array of y values.
URL: http://mywebserver-host/mdsplusWsgi/1dsignal/[tree-name/shot-number]?expr=expression
Similar to the array mode but request headers returned include XDTYPE,XLENGTH,YDTYPE,YLENGTH. The contents returned
consist of the x axis followed by the y axis. In javascript you would read the data using something like:
var xlength=req.getResponseHeader('XLENGTH')
var x = eval('new '+req.getResponseHeader('XDTYPE')+'(req.response,xlength)');
var y = eval('new '+req.getResponseHeader('YDTYPE')+'(req.response,xlength*x.BYTES_PER_ELEMENT)');
NOTE: Typed arrays in javascript may not be supported in all browsers yet.
treepath - Return tree_path environment variable
URL: http://mywebserver-host/mdsplusWsgi/treename
getnid - Return node identifier
URL: http://mywebserver-host/mdsplusWsgi/treename/shot-number?node=node-name-spec
example: http://mywebserver-host/mdsplusWsgi/cmod/-1?node=\ip
"""
from MDSplus import *
import time
import os
import sys
from cgi import parse_qs
class application:
def __init__(self, environ, start_response):
sys.path.insert(0,os.path.dirname(environ['SCRIPT_FILENAME']))
try:
import mdsplus_wsgi_config
except:
pass
self.environ = environ
self.start = start_response
self.tree=None
self.shot=None
self.args=parse_qs(self.environ['QUERY_STRING'],keep_blank_values=1)
self.path_parts=self.environ['PATH_INFO'].split('/')[1:]
doername='do'+self.path_parts[0].capitalize()
try:
exec ('from wsgi import '+doername,globals())
self.doer=eval(doername)
except Exception:
self.doer=None
def __iter__(self):
try:
if self.doer is None:
status = '500 BAD_REQUEST'
response_headers = [('Content-type', 'text/text')]
output="Unsupported request type: "+self.path_parts[0]
else:
status, response_headers,output = self.doer(self)
self.start(status,response_headers)
yield output
except Exception:
import traceback
self.start('500 BAD REQUEST',[('Content-Type','text/xml')])
yield '<?xml version="1.0" encoding="ISO-8859-1" ?>'+"\n<exception>%s</exception>" % (str(traceback.format_exc()),)
def openTree(self,tree,shot):
Tree.usePrivateCtx()
try:
shot=int(shot)
except Exception:
raise Exception("Invalid shot specified, must be an integer value: %s<br /><br />Error: %s" % (shot,sys.exc_info()))
try:
t=Tree(tree,shot)
self.tree=t.tree
self.shot=str(t.shot)
self.treeObj=t
except Exception:
raise Exception("Error opening tree named %s for shot %d<br /><br />Error: %s" % (tree,shot,sys.exc_info()))
|
[
"drsmith8@wisc.edu"
] |
drsmith8@wisc.edu
|
373f7bc18a434af3069c03740c24b3c36f2ce09e
|
25051ecfc40a3431ff76444a45d8ca6d52258bae
|
/TutoScrapy/scrap/scrap.py
|
531832c6b40ffade44effdacb210d8185994fed7
|
[] |
no_license
|
Gbane26/TutoScrapping
|
9b5ecf9ee2c94596ada770fadfba890dc4dc5a09
|
3233c0831187573967aa4499307e6181cd9b8f98
|
refs/heads/master
| 2020-09-06T02:55:58.339955
| 2019-11-07T17:51:31
| 2019-11-07T17:51:31
| 220,296,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
import requests
from bs4 import BeautifulSoup
from requests import get
url = 'http://www.abidjanguide.com/'
response = get(url)
# print(response.status_code)
if response.status_code == 200:
# --- Retrieve the HTML --- #
htmlsoup = BeautifulSoup(response.text, 'html.parser')
# --- Retrieve the TITLE --- #
div_title = htmlsoup.find('div', attrs={'class': 'navbar-header'})
h1title = div_title.find('h1')
#print(h1title.text)
# print(htmlsoup)
# --- Retrieve the PRESENTATION blocks --- #
div_presentation = htmlsoup.find_all('div', attrs={'class': 'col-md-2'})
div_row = htmlsoup.find_all('div', attrs={'class': 'row'})
print(div_row)
compt = 1
# --- Loop over div_presentation --- #
# for item in div_presentation:
# if compt < 4:
# # --- Retrieval --- #
# ba = item.find('a')
# img = ba.find('img')
# h3 = ba.find('h3')
# # --- Assignment --- #
# image = img['src']
# url = ba['href']
# titre = h3.text
# --- Display --- #
#print(item.text)
# print(compt, "\n" + "titre: " + titre + "\n"+ "url: " + url + "\n"+ "image: " + image)
# print(titre)
# print(url)
# print(image)
compt += 1
# print(div_presentation)
else:
print("Error", response.status_code)
|
[
"medhijrgbanes@gmail.com"
] |
medhijrgbanes@gmail.com
|
1e9dd632c45917dbc866f1bc0751c7c6277a5886
|
b13d315ed99664f187639dda76dbc64184461ce9
|
/cookbook/数据结构与算法.py
|
8bf43f49f00f646f2b9c6eaf9b2ee972420c04d6
|
[] |
no_license
|
a233894432/learn_python
|
ed92251b4baa23425121b1882634f088c94fcec8
|
92b1ae3e81d935741e54768f0e3b370b3a04a23d
|
refs/heads/master
| 2021-01-18T21:21:38.565846
| 2016-06-10T17:51:34
| 2016-06-10T17:51:34
| 55,217,553
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
#!/usr/bin/env python
# coding=utf-8
records = [
('foo', 1, 2),
('bar', 'hello'),
('foo', 3, 4),
]
def do_foo(x, y):
print('foo', x, y)
def do_bar(s):
print('bar', s)
for tag, *args in records:
if tag == 'foo':
do_foo(*args)
elif tag == 'bar':
do_bar(*args)
# splitting a string with star-unpacking
line = 'nobody:*:-2:-2:Unprivileged User:/var/empty:/usr/bin/false'
uname, *fields, homedir, sh = line.split(':')
print(uname)
print(homedir)
print(sh)
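# Another star-unpacking example (illustrative, not from the original file):
# *fields, last = [1, 2, 3, 4] leaves fields == [1, 2, 3] and last == 4,
# mirroring the 'uname, *fields, homedir, sh' pattern above.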
|
[
"a233894432@gmail.com"
] |
a233894432@gmail.com
|
962ce135e71ace306eedd5a4b9db4ffdbce4b519
|
9c17c2809e06cf934cb9fb378a4f505c63a3e599
|
/distribution/transform.py
|
e3d3465262a9f36a247cf8dd38185fe72b34bb40
|
[] |
no_license
|
AmjadiM/metadata_manager
|
6fd91161e364072880fdf9119fc4dd001dc5e472
|
3eccf3c47c7a0bea08f2cb2d3dfb000de18c4721
|
refs/heads/master
| 2021-03-18T07:10:40.866608
| 2020-03-13T12:45:41
| 2020-03-13T12:45:41
| 246,821,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,723
|
py
|
import pandas as pd
import os
"""
Can create several files based on the master metadata for distribution
Pi Data Descriptors -- Pivoted version of master metadata for SQL Server
Subscriptions -- All tags with metric and server for PI data collector subscription file
Statuses -- All the tags and start date for PI data collector status file
"""
# TODO :: Create data cleaner
# TODO :: Create single insert to mssql for each new row in metadata
# TODO :: Create unit tests for the run methods
class metadata_manager:
def __init__(self, input_file, output_file):
self.input_file = input_file
self.output_file = output_file
print(os.getcwd())
def extract_data(self):
# Load metadata excel
xl_file = pd.read_excel(self.input_file, sheet_name="PI_METADATA")
return xl_file
def write_data(self, df, filetype):
# Option to write as CSV or excel
if filetype.lower() == "excel":
df.to_excel(self.output_file + ".xlsx", engine="openpyxl", index=False,
sheet_name=os.path.basename(self.output_file))
elif filetype.lower() == "csv":
df.to_csv(self.output_file + ".csv", index=False, header=False)
else:
raise ValueError
def create_pi_data_descriptors(self):
xl_file = self.extract_data()
# pivot_df = self.pivot_data(xl_file)
pivot_df = pd.melt(xl_file, ['tag']) \
.rename(columns={"variable": "descriptor_name", "value": "descriptor_value"}) \
.sort_values(['tag'])
self.write_data(pivot_df, "excel")
def create_pi_data_groupings(self):
pass
def create_subscriptions(self):
# Create subscription tags for PI Data Collector
xl_file = self.extract_data()
xl_file['server'] = "PISERVER_MP"
sub_df = xl_file[['tag', 'metric', 'server']]
sub_df = sub_df[['tag', 'server', 'metric']]
self.write_data(sub_df, "csv")
return sub_df
def create_statuses(self):
# Create status file for PI Data Collector
xl_file = self.extract_data()
xl_file['last_updated'] = "19700101000000000"
status_df = xl_file[['tag', 'last_updated']]
self.write_data(status_df, "csv")
return status_df
def load_to_mssql(self):
# Load the metadata to SQL server either using truncate and load or append
pass
def check_data(self):
# Check for duplicates
pass
def clean_data(self):
# Strip whitespace from ends
pass
if __name__ == "__main__":
metadata_manager(input_file="data/sql_metadata.xlsx", output_file="data/subscriptions").create_subscriptions()
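    # The other distribution file could be produced the same way
    # (illustrative, not in the original __main__):
    # metadata_manager(input_file="data/sql_metadata.xlsx",
    #                  output_file="data/statuses").create_statuses()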
|
[
"mikeamjadi1@gmail.com"
] |
mikeamjadi1@gmail.com
|
92d2472e3c46d30136c9a4f3f7e444fa32d539e7
|
edf2ea979cc2a2ad9ebea18b6bb57d743b5d8a44
|
/app/wordlists/sv.py
|
89caadabea355713f9aa7add729cb52faca9ff9f
|
[
"MIT"
] |
permissive
|
cfarvidson/gender-decoder
|
ce04398a217e1c5f089a11c8fb6d4f66111730a5
|
a8f5477c55a802fa20a549401b1bb234f0f4f22f
|
refs/heads/master
| 2020-05-03T03:55:12.007859
| 2019-04-10T10:39:16
| 2019-04-10T10:39:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,248
|
py
|
source = ""
language_name = "Swedish"
language_code = "sv"
# These words are written as the stem to make it easier to match all variants.
# In other words, the suffix is intentionally left out.
# The following is from
# https://github.com/Muamaidbengt/gender-decoder/blob/edd2eae6402df58c7981b3af6c04fbfd38aebe86/app/wordlists.py
feminine_coded_words = [
# from http://uu.diva-portal.org/smash/get/diva2:896718/FULLTEXT01.pdf with some minor modifications
"anknyt", # a, ning
"artig",
"anspråkslös",
"ansvar",
"barn",
"beroende",
"empati",
"engage", # mang, rad
"femini",
"förstå",
"glad",
"gnäll", # a, ig
"känsl", # osam, ig, ofylld
"medlidande",
"omtänksam",
"lojal",
"mild",
"omhänderta",
"snäll",
"social",
"släktskap",
"smickra",
"stöd",
"stött", # ande, epelare
"sympati",
"samarbet",
"tjat",
"trevlig",
"tillgiven" "tyst",
"tillsammans",
"tillit",
"lita",
"undergiven",
"varm",
"ärlig",
"öm"
# Additions
"entusias",
"inkluder",
"inklusi",
"inkänna",
"interperson",
"kvinn",
"stödj",
"stött",
"subjektiv",
"vänlig",
"ödmjuk",
]
masculine_coded_words = [
# from http://uu.diva-portal.org/smash/get/diva2:896718/FULLTEXT01.pdf with some minor modifications
"aktiv",
"aggress",
"ambiti", # on, ös
"analy", # tisk, s
"atlet",
"autonom",
"beslut",
"bestäm", # ma, d
"domin", # era, ant
"envis",
"fientlig",
"frispråkig",
"girig",
"hierarki",
"hänsynslös",
"individ",
"intellekt",
"intelligen", # s, t
"impulsiv",
"kraft", # ig, full
"kompetent",
"ledare",
"logik",
"logisk",
"maskulin",
"modig",
"objektiv",
"självsäker",
"självständig",
"självförtroende",
"åsikt",
# Additions
"drive", # n, r
"driva",
"förkämpe",
"försvar",
"grym",
"hungrig",
"intellekt",
"intelligen",
"kämp", # e, a
"leda",
"manlig",
"självgå",
"tjurig",
"tjurskall", # e, ig
"utman", # ar, ande, ing
"vass",
"äventyr",
]
|
[
"carl-fredrik@arvidson.io"
] |
carl-fredrik@arvidson.io
|
0d3bdfd5cc84358b4ce03552466c4e30a6c2e0f5
|
efd28e8dd82afe274ed5acb71cb6902077f03f8c
|
/hst.py
|
956430e1094e911d55e675a1eb6c1d628cf41c65
|
[] |
no_license
|
Borlaff/astrobox
|
18e123b9a57fc4cff51df9bb3df35568d8057826
|
de134f15086a5131391ca7fbe34680c22fb11153
|
refs/heads/master
| 2022-08-24T07:11:19.676557
| 2022-07-21T23:34:43
| 2022-07-21T23:34:43
| 248,322,856
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,408
|
py
|
import sys
import os
import glob
sys.path.append("/home/borlaff/GRID/")
import utils
from tqdm import tqdm
import tars
from astropy.io import fits
from astropy.wcs import WCS
import drizzlepac
import bottleneck as bn
#import ellipse
import numpy as np
import ds9tomask as dm
from bootmedian import bootmedian as bm
from drizzlepac import ablot
from stsci.tools import teal
from skimage import data
from skimage.restoration import inpaint
from astropy.modeling import models, fitting
import subprocess
import signal  # used by mask_image when astnoisechisel is missing
from scipy.ndimage import gaussian_filter
xfin = 3000
yfin = 3000
ra_fin = 150.1126499
dec_fin = 2.3561308
def reset_mdrizsky(fits_list):
for i in fits_list:
print(i)
reset_MDRIZSKY(i)
def create_deltaflat_files():
F606W_dflat = fits.open("/home/borlaff/DF4/DELTAFLAT/final_acs-wfc_f606w_avg.fits")
F814W_dflat = fits.open("/home/borlaff/DF4/DELTAFLAT/final_acs-wfc_f814w_avg.fits")
F606W_dflat_sci1 = F606W_dflat[0].data[0:2048,:]
F606W_dflat_sci2 = F606W_dflat[0].data[2048:,:]
F814W_dflat_sci1 = F814W_dflat[0].data[0:2048,:]
F814W_dflat_sci2 = F814W_dflat[0].data[2048:,:]
F606W_dflat_sci1[np.where(F606W_dflat_sci1 < 0.3)] = 1
F606W_dflat_sci1[np.where(F606W_dflat_sci1 > 1.7)] = 1
F606W_dflat[0].data = gaussian_filter(F606W_dflat_sci1, sigma=3)
F606W_dflat.verify("silentfix")
os.system("rm /home/borlaff/DF4/DELTAFLAT/dflat_f606w_sci1.fits")
F606W_dflat.writeto("/home/borlaff/DF4/DELTAFLAT/dflat_f606w_sci1.fits")
    F606W_dflat_sci2[np.where(F606W_dflat_sci2 < 0.3)] = 1
    F606W_dflat_sci2[np.where(F606W_dflat_sci2 > 1.7)] = 1  # upper clip; "< 1.7" would blank nearly the whole flat
F606W_dflat[0].data = gaussian_filter(F606W_dflat_sci2, sigma=3)
F606W_dflat.verify("silentfix")
os.system("rm /home/borlaff/DF4/DELTAFLAT/dflat_f606w_sci2.fits")
F606W_dflat.writeto("/home/borlaff/DF4/DELTAFLAT/dflat_f606w_sci2.fits")
    F814W_dflat_sci1[np.where(F814W_dflat_sci1 < 0.3)] = 1
    F814W_dflat_sci1[np.where(F814W_dflat_sci1 > 1.7)] = 1  # upper clip, as in the F606W sci1 block
F814W_dflat[0].data = gaussian_filter(F814W_dflat_sci1, sigma=3)
F814W_dflat.verify("silentfix")
os.system("rm /home/borlaff/DF4/DELTAFLAT/dflat_f814w_sci1.fits")
F814W_dflat.writeto("/home/borlaff/DF4/DELTAFLAT/dflat_f814w_sci1.fits")
    F814W_dflat_sci2[np.where(F814W_dflat_sci2 < 0.3)] = 1
    F814W_dflat_sci2[np.where(F814W_dflat_sci2 > 1.7)] = 1  # upper clip, as in the F606W sci1 block
F814W_dflat[0].data = gaussian_filter(F814W_dflat_sci2, sigma=3)
F814W_dflat.verify("silentfix")
os.system("rm /home/borlaff/DF4/DELTAFLAT/dflat_f814w_sci2.fits")
F814W_dflat.writeto("/home/borlaff/DF4/DELTAFLAT/dflat_f814w_sci2.fits")
print("Delta flats created!")
def launch_multidrizzle(subfix):
HUDF_list = glob.glob(subfix)
drizzlepac.astrodrizzle.AstroDrizzle(input=subfix, skysub=0,
num_cores=1, clean=1,
in_memory=0, preserve=0,
combine_type='imedian', final_scale=0.06,
final_kernel="lanczos3", driz_cr=1,
updatewcs=0, final_pixfrac=1,
final_outnx=xfin,final_outny=yfin,
final_ra=ra_fin,final_rot=0,
final_dec=dec_fin)
def run_astnoisechisel(fits_name, ext, tilesize=80):
os.system("astnoisechisel -h" + str(ext) + " --tilesize="+str(tilesize)+","+str(tilesize)+" --interpnumngb=4 --keepinputdir --smoothwidth=15 " + fits_name)
return(fits_name.replace(".fits", "_labeled.fits"))
def reset_bad_pixels(fits_name, ext):
limit_down = -5000
fits_file = fits.open(fits_name, mode = "update")
fits_file[ext].data[(fits_file[ext].data < limit_down)] = 0
fits_file.flush()
fits_file.close()
def reset_MDRIZSKY(fits_name):
fits_file = fits.open(fits_name, mode = "update")
fits_file[1].header["MDRIZSKY"] = 0
fits_file[4].header["MDRIZSKY"] = 0
fits_file.flush()
fits_file.close()
def remove_sky(fits_name, tilesize):
# Make copy of flc
os.system("cp " + fits_name + " " + fits_name.replace("_flc.fits", "_fld.fits"))
output_name = fits_name.replace("_flc.fits","_sky.fits")
fits_name = fits_name.replace("_flc.fits", "_fld.fits")
reset_bad_pixels(fits_name,1)
reset_bad_pixels(fits_name,4)
# First extension
labeled_name_1 = run_astnoisechisel(fits_name, 1, tilesize)
labeled_fits = fits.open(labeled_name_1)
fits_file = fits.open(fits_name)
sky_gradient = labeled_fits[4].data
print(labeled_fits[4].header)
fits_file[1].data = fits_file[1].data - sky_gradient
# Second extension
    labeled_name_2 = run_astnoisechisel(fits_name, 4, tilesize)  # same tilesize as the first extension
labeled_fits = fits.open(labeled_name_2)
sky_gradient = labeled_fits[4].data
print(labeled_fits[4].header)
fits_file[4].data = fits_file[4].data - sky_gradient
os.system("rm " + output_name)
fits_file.verify("silentfix")
fits_file.writeto(output_name)
def remove_gradient(input_flc, ext, detected_name):
input_fits = fits.open(input_flc)
detected_fits = fits.open(detected_name)
input_fits[ext].data = detected_fits[1].data
os.system("rm " + input_flc)
input_fits.verify("silentfix")
input_fits.writeto(input_flc)
input_fits.close()
detected_fits.close()
def mask_image(fits_list, ext=1, flat=True, dq_mask=True):
# Check if astnoisechisel is installed:
try:
subprocess.call(["astnoisechisel"])
    except OSError:
        # astnoisechisel is not installed / not on the PATH: stop the process.
print("ERROR: Astnoisechisel not found!")
os.kill(os.getpid(), signal.SIGUSR1)
out_list = []
if isinstance(fits_list, str):
fits_list = [fits_list]
if isinstance(fits_list, list):
for i in range(len(fits_list)):
fits_name = fits_list[i]
fits_image_flatted = fits.open(fits_name)
fits_image_flatted_name = fits_name.replace(".fits", "_flatted.fits")
if flat:
# flat_name = fits_image_flatted[0].header['PFLTFILE']
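                # NOTE: choose_master_flat is not defined in this file; it is
                # assumed to be provided by one of the local imports (e.g. utils).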
flat_name = choose_master_flat(fits_name)
flat_image = fits.open(flat_name)
border = 5
if (flat_image[ext].data.shape == (1024, 1024)) & (fits_image_flatted[ext].data.shape == (1014, 1014)):
flat_image_array = flat_image[ext].data[0+border:1024-border,
0 + border:1024-border]
else:
flat_image_array = flat_image[ext].data
fits_image_flatted[ext].data = np.divide(fits_image_flatted[ext].data, flat_image_array)
hdul = fits.PrimaryHDU(fits_image_flatted[ext].data)
hdul.verify("silentfix")
os.system("rm "+fits_image_flatted_name)
hdul.writeto(fits_image_flatted_name)
print(fits_name)
if fits_name[-6:] == "i.fits":
out_name = fits_name.replace("i.fits", "m_ext" + str(ext) + ".fits")
else:
out_name = fits_name.replace(".fits", "_masked_ext" + str(ext) + ".fits")
# Astnoisechisel
cmd = "astnoisechisel -h0 --keepinputdir --tilesize=50,50 --interpnumngb=1 --smoothwidth=3 " + fits_image_flatted_name
print(cmd)
os.system(cmd)
labeled_name = fits_image_flatted_name.replace(".fits", "_detected.fits")
try:
labeled_image = fits.open(labeled_name)
fits_image = fits.open(fits_name)
masked = fits_image[ext].data
masked[(labeled_image[2].data !=0)] = np.nan
if dq_mask:
masked[fits_image_flatted[3].data > 0] = np.nan
hdu1 = fits.PrimaryHDU(masked)
hdu1.verify("silentfix")
os.system("rm "+out_name)
hdu1.writeto(out_name)
new_hdul = fits.open(out_name, "update")
new_hdul[0].header = fits_image[ext].header
new_hdul.verify("silentfix")
new_hdul[0].header['SKYLVL'] = np.nanmedian(labeled_image[3].data)
new_hdul[0].header['SKYSTD'] = np.nanmedian(labeled_image[4].data)
new_hdul.flush()
out_list.append(out_name)
except IOError:
print("Cannot run astnoisechisel! Probably too noisy image")
return(["Error"])
return(out_list)
def mask_and_crude_skycor_sci(input_sci, master_mask):
mask_1 = mask_image(fits_list=[input_sci],ext=0, flat=False, dq_mask=False)
print(mask_1)
mask_1_fits = fits.open(mask_1[0], mode="update")
mask_1_DF = fits.open(master_mask)
mask_1_fits[0].data[mask_1_DF[0].data == 1] = np.nan
mask_1_fits.flush()
crude_skycor(input_sci, 0, mask_1[0], nsimul=1, noisechisel_grad=True, bootmedian=False)
def mask_and_crude_skycor(input_flc, mask_1_name, mask_2_name, nsimul=100):
mask_1 = mask_image(fits_list=[input_flc],ext=1, flat=False)
print(mask_1)
mask_1_fits = fits.open(mask_1[0], mode="update")
mask_1_DF = fits.open(mask_1_name)
mask_1_fits[0].data[mask_1_DF[0].data == 1] = np.nan
mask_1_fits.flush()
crude_skycor(input_flc, 1, mask_1[0], nsimul=nsimul, noisechisel_grad=False, bootmedian=False)
mask_4 = mask_image(fits_list=[input_flc],ext=4, flat=False)
mask_4_fits = fits.open(mask_4[0], mode="update")
mask_4_DF = fits.open(mask_2_name)
mask_4_fits[0].data[mask_4_DF[0].data == 1] = np.nan
mask_4_fits.flush()
crude_skycor(input_flc, 4, mask_4[0], nsimul=nsimul, noisechisel_grad=False, bootmedian=False)
def copy_dq(input_flc, input_crcor):
flc_fits = fits.open(input_flc, mode="update")
crcor_fits = fits.open(input_crcor, mode="update")
flc_fits[1].data = crcor_fits[1].data
flc_fits[4].data = crcor_fits[4].data
flc_fits[1].header = crcor_fits[1].header
flc_fits[4].header = crcor_fits[4].header
flc_fits.flush()
def crude_skycor(fitslist, ext, mask=None, nsimul=100, noisechisel_grad=False, bootmedian=True):
if isinstance(fitslist, str):
fitslist = [fitslist]
if isinstance(fitslist, list):
for fits_name in fitslist:
print(fits_name)
fits_image = fits.open(fits_name)
if mask is not None:
print("Input mask accepted: " + mask)
mask_fits = fits.open(mask)
shape_mask = mask_fits[0].data.shape
shape_fits = fits_image[ext].data.shape
mask_array = mask_fits[0].data
if (shape_mask == (1014, 1014)) & (shape_fits == (1024, 1024)):
mask_array = np.zeros(shape_fits)
border = 5
mask_array[0+border:1024-border,
0+border:1024-border] = mask_fits[0].data
if bootmedian:
skylvl = bm.bootmedian(sample_input=fits_image[ext].data[~np.isnan(mask_array)],
nsimul=nsimul, errors=1)
if not bootmedian:
median_sky = bn.nanmedian(fits_image[ext].data[~np.isnan(mask_array)])
#sigma_sky = bn.nanstd(fits_image[ext].data[~np.isnan(mask_array)])
sigma_sky = 0
skylvl = {"median": median_sky,
"s1_up": median_sky+sigma_sky,
"s1_down": median_sky-sigma_sky,
"std1_down": sigma_sky,
"std1_up": sigma_sky}
else:
if bootmedian:
skylvl = bm.bootmedian(sample_input=fits_image[ext].data, nsimul=nsimul, errors=1)
if not bootmedian:
median_sky = bn.nanmedian(fits_image[ext].data)
#sigma_sky = bn.nanstd(fits_image[ext].data)
sigma_sky = 0
skylvl = {"median": median_sky,
"s1_up": median_sky+sigma_sky,
"s1_down": median_sky-sigma_sky,
"std1_down": sigma_sky,
"std1_up": sigma_sky}
print(skylvl)
print(np.abs(skylvl["median"] - skylvl["s1_up"])/2.)
print("Skylvl: " + str(skylvl["median"]) + " +/- " + str(np.abs(skylvl["s1_up"] - skylvl["s1_down"])/2.))
fits_image[ext].data = fits_image[ext].data - skylvl["median"]
fits_image[0].header['SKYSTD'] = skylvl["std1_down"]
fits_image[0].header['SKYLVL'] = skylvl["median"]
fits_image[ext].header['SKYSTD'] = skylvl["std1_down"]
fits_image[ext].header['SKYLVL'] = skylvl["median"]
os.system("rm " + fits_name)
fits_image.verify("silentfix")
fits_image.writeto(fits_name)
fits_image.close()
def final_skycor(input_flc, ext, nsimul=25):
mask_1 = mask_image(fits_list=[input_flc],ext=ext, flat=False, dq_mask = False)
detected_name = input_flc.replace("_flc.fits", "_flc_flatted_detected.fits")
remove_gradient(input_flc, ext, detected_name)
crude_skycor(input_flc, ext, mask_1[0], nsimul, False)
def delta_flat_cor(input_flc, input_flat1, input_flat2):
fits_file = fits.open(input_flc)
dflat_file_sci1 = fits.open(input_flat1)
dflat_file_sci2 = fits.open(input_flat2)
dflat_file_sci1[0].data[np.isnan(dflat_file_sci1[0].data)]=1
dflat_file_sci2[0].data[np.isnan(dflat_file_sci2[0].data)]=1
dflat_file_sci1[0].data[np.where(dflat_file_sci1[0].data == 0)]=1
dflat_file_sci2[0].data[np.where(dflat_file_sci2[0].data == 0)]=1
fits_file[1].header["idcscale"] = 0.05
fits_file[4].header["idcscale"] = 0.05
fits_file[1].data = np.divide(fits_file[1].data,dflat_file_sci1[0].data)
fits_file[4].data = np.divide(fits_file[4].data,dflat_file_sci2[0].data)
fits_file.verify("silentfix")
#outname = input_flc.replace("_flc.fits", "_flc.fits")
outname=input_flc
os.system("rm " + outname)
fits_file.writeto(outname)
def get_parameters_list(fits_list, index, ext=0):
PARAM = []
for j in range(len(index)):
PARAM.append([])
for raw_name in fits_list:
print(raw_name)
raw_fits = fits.open(raw_name)
for j in range(len(index)):
try:
PARAM[j].append(raw_fits[ext].header[index[j]])
except KeyError:
print("KeyError: Header keyword not found")
PARAM[j].append("NONE")
return(list(PARAM))
def separate_visits(fitslist):
PARAMS = get_parameters_list(fitslist, ["ASN_ID"])
unique_visits = np.array(list(set(PARAMS[0])))
flocks = []
fits_array = np.array(fitslist)
for i in unique_visits:
print(i)
flock = fits_array[np.array(PARAMS[0]) == i]
print(flock)
flocks.append(list(flock))
return(flocks)
def ensure_idcscale(input_flc):
fits_file = fits.open(input_flc)
fits_file[1].header["idcscale"] = 0.05
fits_file[4].header["idcscale"] = 0.05
fits_file.verify("silentfix")
#outname = input_flc.replace("_flc.fits", "_flc_dflat.fits")
outname=input_flc
os.system("rm " + outname)
fits_file.writeto(outname)
def astrometry_list(fitslist, refcat, rmax):
#OLD SETUP, USING ASTRODRIZZLE's METHOD TO REMOVE CR
for fitsimages in fitslist:
rootname = os.path.basename(fitsimages).split("_")[0]
os.system("rm " + rootname + "_crclean.fits")
print(rootname)
#chunks_final = separate_for_astrometry(fitslist, rmax)
chunks_final = separate_visits(fitslist)
for subset in chunks_final:
if len(subset) > 20:
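            # NOTE: split_list is not defined in this file; it is assumed to
            # be provided by one of the local imports (e.g. utils).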
step1, step2 = split_list(subset)
drizzlepac.astrodrizzle.AstroDrizzle(input=step1, skysub=0,
static=0,
in_memory=1, preserve=0,
clean=0,
driz_sep_pixfrac=1.,
num_cores=1,
driz_sep_wcs=1,
driz_cr_corr=1,
resetbits=128,
driz_cr_snr='4.0 3.0',
driz_cr_scale='1.5 0.7',
driz_combine=0, updatewcs=0,
combine_type="imedian")
drizzlepac.astrodrizzle.AstroDrizzle(input=step2, skysub=0,
static=0,
in_memory=1, preserve=0,
clean=0,
driz_sep_pixfrac=1.,
num_cores=1,
driz_sep_wcs=1,
driz_cr_corr=1,
resetbits=128,
driz_cr_snr='4.0 3.0',
driz_cr_scale='1.5 0.7',
driz_combine=0, updatewcs=0,
combine_type="imedian")
else:
drizzlepac.astrodrizzle.AstroDrizzle(input=subset, skysub=0,
static=0,
in_memory=1, preserve=0,
clean=0,
driz_sep_pixfrac=1.,
num_cores=1,
driz_sep_wcs=1,
driz_cr_corr=1,
resetbits=128,
driz_cr_snr='4.0 3.0',
driz_cr_scale='1.5 0.7',
driz_combine=0, updatewcs=0,
combine_type="imedian")
cr_corrected_subset = []
for i in subset:
rootname = os.path.basename(i).split("_")[0]
extname = os.path.basename(i).split("_")[1].split(".fits")[0]
if extname == "flc":
cr_name = rootname + "_crclean.fits"
else:
cr_name = rootname + "_" + extname + "_crclean.fits"
print(cr_name)
cr_corrected_subset.append(cr_name)
ensure_dtype(cr_name)
# Generamos los catalogos manualmente con SEXTRACTOR
catalog_1_name = rootname + "_" + extname + "_crclean_sci1.cat"
catalog_2_name = rootname + "_" + extname + "_crclean_sci2.cat"
catfile_name = rootname + "_catfile.txt"
os.system("sex " + cr_name + "[1] -c " + "/media/borlaff/CARGO/PHD/SEX_files/confi_tweakreg.sex " +
"-CHECKIMAGE_NAME image_segmentation.fits " +
"-CATALOG_NAME " + catalog_1_name)
os.system("sex " + cr_name + "[4] -c " + "/media/borlaff/CARGO/PHD/SEX_files/confi_tweakreg.sex " +
"-CHECKIMAGE_NAME image_segmentation.fits " +
"-CATALOG_NAME " + catalog_2_name)
os.system("echo " + cr_name + " " + catalog_1_name + " " + catalog_2_name + " > " + catfile_name)
drizzlepac.tweakreg.TweakReg(files=cr_name,
updatewcs=False, updatehdr=True,
verbose=True, searchrad=5,
searchunits="arcseconds",
refcat=refcat, refxcol=4, refycol=5,
catfile=catfile_name, xcol=2, ycol=3,
interactive=0, use2dhist=True, see2dplot=True, fitgeometry="general")
def generate_manual_masks():
for i in glob.glob("/home/borlaff/DF4/F*W/*sci1.reg"):
dm.ds9tomask(fname=i, nx=4096, ny=2048, outname=i.replace(".reg","_mask.fits"))
for i in glob.glob("/home/borlaff/DF4/F*W/*sci2.reg"):
dm.ds9tomask(fname=i, nx=4096, ny=2048, outname=i.replace(".reg","_mask.fits"))
def generate_full_masks(fits_list):
for input_flc in fits_list:
mask_1 = mask_image(fits_list=[input_flc],ext=1, flat=False)
print(mask_1)
mask_1_fits = fits.open(mask_1[0])
mask_1_DF = fits.open(input_flc.replace("_flc.fits","_") + "sci1_mask.fits")
mask_1_fits[0].data[mask_1_DF[0].data == 1] = np.nan
mask_1_fits.verify("silentfix")
os.system("rm " + mask_1[0])
mask_1_fits.writeto(mask_1[0])
mask_1_fits.close()
#crude_skycor(input_flc, 1, mask_1[0], nsimul=1, noisechisel_grad=False, bootmedian=False)
mask_4 = mask_image(fits_list=[input_flc],ext=4, flat=False)
mask_4_fits = fits.open(mask_4[0])
mask_4_DF = fits.open(input_flc.replace("_flc.fits","_") + "sci2_mask.fits")
mask_4_fits[0].data[mask_4_DF[0].data == 1] = np.nan
mask_4_fits.verify("silentfix")
os.system("rm " + mask_4[0])
mask_4_fits.writeto(mask_4[0])
mask_4_fits.close()
def remove_iref_list(filename_list, calibration_path):
for filename in filename_list:
remove_iref(filename, calibration_path)
def remove_iref(filename, calibration_path):
    # Rewrite the calibration-file keywords so they point at a local
    # calibration_path instead of the iref$/jref$ environment shortcuts.
    raw_fits = fits.open(filename, ignore_missing_end=True)
    if raw_fits[0].header['INSTRUME'] == 'WFC3':
        keywords = ['BPIXTAB', 'CCDTAB', 'OSCNTAB', 'CRREJTAB', 'DARKFILE',
                    'NLINFILE', 'PFLTFILE', 'IMPHTTAB', 'IDCTAB', 'MDRIZTAB']
        # "iref$..." style keywords split on "$"; plain paths split on "/".
        separator = "$" if raw_fits[0].header['BPIXTAB'][0:4] == "iref" else "/"
        for key in keywords:
            raw_fits[0].header[key] = calibration_path + str.split(raw_fits[0].header[key], separator)[-1]
    if raw_fits[0].header['INSTRUME'] == 'ACS':
        for i in range(len(raw_fits)):
            for j in range(len(raw_fits[i].header)):
                if isinstance(raw_fits[i].header[j], str):
                    raw_fits[i].header[j] = raw_fits[i].header[j].replace("jref$", calibration_path)
    os.remove(filename)
    raw_fits.verify('silentfix')
    raw_fits.writeto(filename)
    raw_fits.close()
def mask_and_crude_skycor_DF4(fits_list):
# F606W reset_MDRIZSKY to 0
for i in glob.glob("/home/borlaff/DF4/F606W/*flc.fits"):
print(i)
mask_and_crude_skycor(i, i.replace("_flc.fits", "_sci1_mask.fits"), i.replace("_flc.fits", "_sci2_mask.fits"))
def ensure_dtype(fits_name):
    # Cast each extension to the big-endian dtype the pipeline expects:
    # float32 for extensions 0-2 and 5, int16 for extensions 3 and 4.
    # Extensions that are missing or have no data are reported and skipped.
    original_image = fits.open(fits_name)
    ext_dtypes = {0: '>f4', 1: '>f4', 2: '>f4', 3: '>i2', 4: '>i2', 5: '>f4'}
    for ext, dtype in ext_dtypes.items():
        try:
            original_image[ext].data = original_image[ext].data.astype(dtype)
        except (IndexError, AttributeError):
            print("No ext=" + str(ext))
    original_image.verify('silentfix')
    os.system("rm " + fits_name)
    original_image.writeto(fits_name)
    original_image.close()
def generate_stripe_model(masked_array):
image_shape = masked_array.shape
med_axis_0 = bn.nanmean(masked_array.astype("float32"), axis=0)
med_axis_1 = bn.nanmean(masked_array.astype("float32"), axis=1)
print(len(med_axis_0))
stripe_model_axis_0 = np.zeros(image_shape)
stripe_model_axis_1 = np.zeros(image_shape)
for j in tqdm(range(image_shape[1])):
stripe_model_axis_0[:,j] = med_axis_0[j]
for i in tqdm(range(image_shape[0])):
stripe_model_axis_1[i,:] = med_axis_1[i]
stripe_model = (stripe_model_axis_0 + stripe_model_axis_1)/2.
return(stripe_model)
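# The two fill loops above can be expressed with NumPy broadcasting; an
# equivalent sketch (same result, no progress bars):
#   stripe_model_axis_0 = np.broadcast_to(med_axis_0, image_shape).copy()
#   stripe_model_axis_1 = np.broadcast_to(med_axis_1[:, None], image_shape).copy()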
def correct_amplifiers(masked_array):
masked_array[:,0:2048] = masked_array[:,0:2048] - bn.nanmedian(masked_array[:,0:2048])
masked_array[:,2048:] = masked_array[:,2048:] - bn.nanmedian(masked_array[:,2048:])
return(masked_array)
def destripe_acs(flc_name, mask1_name, mask2_name):
flc_fits = fits.open(flc_name)
mask1_fits = fits.open(mask1_name)
mask2_fits = fits.open(mask2_name)
image_shape = flc_fits[1].data.shape
flc_fits[1].data = correct_amplifiers(flc_fits[1].data.astype("float32"))
flc_fits[4].data = correct_amplifiers(flc_fits[4].data.astype("float32"))
masked_1_data = np.copy(flc_fits[1].data)
masked_2_data = np.copy(flc_fits[4].data)
masked_1_data[np.isnan(mask1_fits[0].data)] = np.nan
masked_2_data[np.isnan(mask2_fits[0].data)] = np.nan
stripe_model_1 = generate_stripe_model(masked_1_data)
stripe_model_2 = generate_stripe_model(masked_2_data)
stripe_model_1[np.isnan(stripe_model_1)] = 0.0
stripe_model_2[np.isnan(stripe_model_2)] = 0.0
flc_fits[1].data = flc_fits[1].data.astype("float32") - stripe_model_1
flc_fits[4].data = flc_fits[4].data.astype("float32") - stripe_model_2
os.system("rm " + flc_name)
flc_fits.verify("silentfix")
flc_fits.writeto(flc_name)
mask1_fits[0].data = stripe_model_1
mask2_fits[0].data = stripe_model_2
os.system("rm stripe_model_1.fits stripe_model_2.fits")
mask1_fits.verify("silentfix")
mask2_fits.verify("silentfix")
mask1_fits.writeto("stripe_model_1.fits")
mask2_fits.writeto("stripe_model_2.fits")
return(flc_name)
|
[
"asborlaff@gmail.com"
] |
asborlaff@gmail.com
|
73057923b1a85ddf0796baa5ca565b85f2369ebe
|
c271b95f99fd1632502803346b066ad762ef7c79
|
/api/views.py
|
8797ff396c6c17ed4365d9184b18ac6124706a12
|
[] |
no_license
|
mr8bit/untitled
|
48d35cc18e226a5e06fec65566c7a12f4a8b9c71
|
9a51e33a7216e327b75c8d8cb85066e43c9a55ea
|
refs/heads/master
| 2020-06-15T02:42:48.471326
| 2016-12-13T00:05:06
| 2016-12-13T00:05:06
| 75,336,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,067
|
py
|
from django.shortcuts import HttpResponse
import urllib.request as ur
import xmltodict
import json
# Mapping from the feed's English names to the internal ids used by the app.
LIFT_IDS = {
    'OLIMPIA': 1, 'ZAPOVEDNYY LES': 2, 'KAVKAZSKIY EXPRESS above 1600': 3,
    'KAVKAZSKIY EXPRESS below 1600': 4, 'VOLCHYA SKALA': 5, 'BESEDA': 6,
    'KVARTET': 7, 'SKAZKA': 8, 'KABAN': 9, 'EXTREME': 10, 'STRELA': 11,
    'CHALET': 12, 'KROKUS': 13, 'EDELWEISS': 14, 'JUVENTA': 15, 'DRIADA': 16,
    'TUNDRA': 17, 'Rope tow Tubing': 19, 'Rope tow Snowpark': 20,
    'Rope tow Rosa 1600': 21, 'Magic carpet Junior Training Slope': 22,
}

SLOPE_IDS = {
    'Mini-Park': 23, 'THE STASH PARK': 24, 'Snow-park': 25, 'Arbor': 26,
    'Chalet': 27, 'Juventa': 28, 'Borey': 29, 'Ehkho': 30, 'AIBGA': 31,
    'B 52 above 1600': 32, 'B 52 below 1600': 33, 'VANCOUVER 10': 34,
    'VERONIKA': 35, 'VIRAZH above 1800': 36, 'VIRAZH below 1800': 37,
    'Vstrecha': 38, 'GORIZONT above 1600': 39, 'GORIZONT below 1600': 40,
    'ZHENSKIY OLYMPIC above 1600': 41, 'ZHENSKIY OLYMPIC B below 1600': 42,
    'ZMEIKA above 1665': 43, 'ZMEIKA below 1665': 44, 'KASKAD above 1600': 45,
    'KASKAD below 1600': 46, 'KVARTET': 47, 'CRAZY KHUTOR above 1400': 48,
    'CRAZY KHUTOR below 1400': 49, 'LABIRINT above 1400': 50,
    'LABIRINT below 1400': 51, 'MUZHSKOY OLYMPIC above 1600': 52,
    'MUZHSKOY OLYMPIC below 1600': 53, 'NAGANO 98': 54,
    'OBER KHUTOR above 1600': 55, 'OBER KHUTOR below 1600': 56,
    'OZERNAYA': 57, 'OREL': 58, 'Perehod': 59, 'PLATO': 60, 'PRIMULA': 61,
    'ROSA STAR': 62, 'ROSA BLADE': 63, 'TRITON above 2000': 64,
    'TRITON middle 1600': 65, 'TRITON below 1600': 66, 'FIALKA': 67,
    'CHAMONIX 24': 68, 'YUREV KHUTOR': 69, 'OREADA': 70, 'STOROZHKA': 71,
    'YAVOR (2320 – 2221)': 72, 'YAVOR (2221 – 2118)': 73,
    'YAVOR (2118 – 1472)': 74, 'NAYADA': 75, 'PRIMAVERA': 76,
    'BARVINOK': 77, 'GORITSVET': 78,
}


def normalize_state(state):
    # The feed reports both full words ("open", "closed") and single-letter
    # flags; multi-character states are lowercased, single ones uppercased.
    state = str(state)
    return state.lower() if len(state) > 1 else state.upper()


def homepage(request):
    response = ur.urlopen('http://www.skiplan.com/php/getXmlInter.php?country=russia&region=alpes&resort=ROSA%20KHUTOR')
    data = response.read()
    response.close()
    data = xmltodict.parse(data)
    lifts = []
    # Look up each reported lift and slope by its feed name; unknown names
    # are skipped, exactly as the per-name branches did.
    for area in data["RESORT"]["STATES"]["AREA"]:
        if area['@nom_en'] == 'LIFTS':
            for lift in area['LIFT']:
                lift_id = LIFT_IDS.get(lift['@name_en'])
                if lift_id is not None:
                    lifts.append([lift_id, normalize_state(lift['@state'])])
        if area['@nom_en'] == 'SLOPES':
            for trail in area['TRAIL']:
                trail_id = SLOPE_IDS.get(trail['@name_en'])
                if trail_id is not None:
                    lifts.append([trail_id, normalize_state(trail['@state'])])
    data = {"data": lifts}
return HttpResponse(json.dumps(data), content_type='application/json')
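# Example response shape (illustrative values only):
#   {"data": [[1, "open"], [7, "closed"], [23, "O"]]}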
|
[
"info@mrbyte.ru"
] |
info@mrbyte.ru
|
49d3d3532f3536cfb86059dd33e6dbd876b50189
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/revoke_request_body_users.py
|
8d1885f130e0e18a1abc502f27eae2fc38912a04
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,926
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RevokeRequestBodyUsers:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'name': 'str'
}
attribute_map = {
'name': 'name'
}
def __init__(self, name=None):
"""RevokeRequestBodyUsers - a model defined in huaweicloud sdk"""
self._name = None
self.discriminator = None
self.name = name
@property
def name(self):
"""Gets the name of this RevokeRequestBodyUsers.
数据库用户名称。
:return: The name of this RevokeRequestBodyUsers.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this RevokeRequestBodyUsers.
数据库用户名称。
:param name: The name of this RevokeRequestBodyUsers.
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RevokeRequestBodyUsers):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
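# A minimal usage sketch (hypothetical values):
#   user = RevokeRequestBodyUsers(name="app_reader")
#   user.to_dict()  # -> {'name': 'app_reader'}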
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
b0c92f6fc55e605dea9564d9b8101e44e6a55755
|
fbb0d695adadd9b5a8dfe08d613be9b2d52f9bb8
|
/third/views.py
|
8035d11b071c9c86f972ad7a30050cd2353bcd7f
|
[] |
no_license
|
junteken/DjangoExam
|
23df19d65bcca3a378bbd8653b27ab18783d91c6
|
b8df34d4b72b20c022763ff6273de4011f700935
|
refs/heads/master
| 2023-05-01T04:25:22.434470
| 2019-06-17T11:30:54
| 2019-06-17T11:30:54
| 192,332,603
| 0
| 0
| null | 2023-04-21T20:32:44
| 2019-06-17T11:21:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,341
|
py
|
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponseRedirect
from third.models import Restaurant, Review
from django.core.paginator import Paginator
from third.forms import RestaurantForm, ReviewForm, UpdateRestaurantForm
from django.db.models import Count, Avg
# Create your views here.
def list(request):
restaurants= Restaurant.objects.all().annotate(reviews_count=Count('review'))\
.annotate(average_point=Avg('review__point'))
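    # Each Restaurant in the queryset now carries .reviews_count and
    # .average_point, computed in the database by the annotations above.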
paginator= Paginator(restaurants, 5)
page= request.GET.get('page')
items= paginator.get_page(page)
context={
'restaurants':items,
}
return render(request, 'third/list.html', context)
def create(request):
    if request.method == 'POST':
        form = RestaurantForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/third/list/')
        # fall through with the bound form so validation errors are displayed
    else:
        form = RestaurantForm()
    return render(request, 'third/create.html', {'form': form})
def update(request):
if request.method == 'POST' and 'id' in request.POST:
# item= Restaurant.objects.get(pk=request.POST.get('id'))
item= get_object_or_404(Restaurant, pk=request.POST.get('id'))
password= request.POST.get('password', '')
form= UpdateRestaurantForm(request.POST, instance=item)
if form.is_valid() and password==item.password:
item= form.save()
elif request.method== 'GET':
item= Restaurant.objects.get(pk=request.GET.get('id'))
form= RestaurantForm(instance=item)
return render(request, 'third/update.html', {'form':form})
return HttpResponseRedirect('/third/list/')
def detail(request, id):
if id is not None:
item= get_object_or_404(Restaurant, pk= id)
reviews= Review.objects.filter(restaurant=item).all()
return render(request, 'third/detail.html', {'item':item, 'reviews':reviews})
return HttpResponseRedirect('/third/list/')
def delete(request, id):
item= get_object_or_404(Restaurant, pk=id)
if request.method =='POST' and 'password' in request.POST:
if item.password == request.POST.get('password') or item.password is None:
item.delete()
return redirect('list')
return redirect('restaurant-detail', id=id)
return render(request, 'third/delete.html', {'item':item})
def review_create(request, restaurant_id):
    item = get_object_or_404(Restaurant, pk=restaurant_id)
    if request.method == 'POST':
        form = ReviewForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('restaurant-detail', id=restaurant_id)
        # fall through with the bound form so validation errors are displayed
    else:
        form = ReviewForm(initial={'restaurant': item})
    return render(request, 'third/review_create.html', {'form': form, 'item': item})
def review_delete(request, restaurant_id, review_id):
item= get_object_or_404(Review, pk=review_id)
item.delete()
return redirect('restaurant-detail', id=restaurant_id)
def review_list(request):
reviews= Review.objects.all().select_related().order_by('-created_at')
paginator= Paginator(reviews, 10)
page= request.GET.get('page')
items= paginator.get_page(page)
context={
'reviews': items
}
return render(request, 'third/review_list.html', context)
|
[
"junteken@gmail.com"
] |
junteken@gmail.com
|
30029fc338d9dc0eb9631f1210a22673d58db581
|
6351afb0fbfea3a1fde468e17e2fee7d3e2e6631
|
/msurrogate/meta_daemon.py
|
a1a4c17e7647126745fb1318a45a956232d7d9ae
|
[
"Apache-2.0"
] |
permissive
|
mccullerlp/msurrogate
|
b74889ffacd3a28ce5cf15b9662a528bf3eedd3c
|
26e0269ed2a2c370f45810522ac9cb47da033519
|
refs/heads/master
| 2021-05-11T14:22:27.838732
| 2018-02-15T17:10:08
| 2018-02-15T17:10:08
| 117,701,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,868
|
py
|
"""
"""
from __future__ import division, print_function, unicode_literals
import Pyro4
import threading
import contextlib
from . import meta_proxy
class MetaDaemon(object):
def __init__(
self,
daemon = None,
workers_N = 1,
):
if daemon is None:
daemon = Pyro4.Daemon()
self.daemon = daemon
self._worker_sem_value = None
self.workers_N_set(workers_N)
self.MTsafe = threading.Lock()
#for caching GC
self._metas_done = dict()
#name -> object
#to be wrapped and started in daemon
self._objects = {}
self._name_uri_reg = dict()
#URI -> metaproxy(object) registry for json
self._uri_registry = {}
self._name_registry = {}
return
def workers_N_set(self, workers_N):
#capture the remaining uses of the semaphore
#if self._worker_sem_value is not None:
# for idx in range(self._worker_sem_value - 1):
# self.worker_sem.acquire()
        if workers_N is not None:
            self.worker_sem = threading.Semaphore(workers_N)
        else:
            # No worker limit requested: use a no-op stand-in that supports
            # both the "with" interface (__enter__/__exit__) and explicit
            # acquire()/release() calls, matching threading.Semaphore.
            class FakeLock(object):
                def __enter__(self):
                    return self

                def __exit__(self, *exc):
                    return False

                def acquire(self):
                    return None

                def release(self):
                    return None

            self.worker_sem = FakeLock()
def register(
self,
obj = None,
name = None,
remove = False,
):
if not remove:
assert(obj is not None)
if name is None:
name = obj.__name__
meta_object = meta_proxy.MetaProxy(self, obj, register = name)
uri = str(meta_object.uri)
self._objects[name] = obj
self._uri_registry[uri] = meta_object
self._name_registry[name] = meta_object
self._name_uri_reg[name] = uri
else:
if name is None:
assert(obj is not None)
name = obj.__name__
meta_obj = self._name_registry[name]
obj_reg = self._objects[name]
if obj is not None:
assert(obj is obj_reg)
del self._objects[name]
del self._name_registry[name]
del self._uri_registry[meta_obj.uri]
del self._name_uri_reg[name]
self.daemon.unregister(name)
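    # uri2obj (below) unwraps standard Pyro4 URIs of the form
    # "PYRO:<objectid>@<host>:<port>", e.g. "PYRO:obj_1234@localhost:9999"
    # (illustrative object id).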
def uri2obj(self, uri):
uri = str(uri)
name, loc = uri.split('@')
if not name.startswith('PYRO:'):
raise RuntimeError("Proxy unwrapping can't recognize proxy")
name = name[5:]
obj = self.daemon.objectsById[name]
return obj
def workspace_dict(self):
d = dict()
for k, v in self._name_uri_reg.items():
d[k] = str(v)
return d
def workspace_reset(self):
#TODO
temp = dict(self.daemon.objectsById)
for k, v in temp.items():
if k not in self._objects:
self.daemon.unregister(k)
return
def workspace_gc(self):
for k, v in self._metas_done.items():
self.daemon.unregister(k)
self._metas_done = dict()
self.workspace_counts()
for k, v in self._metas_done.items():
self.daemon.unregister(k)
self._metas_done = dict()
return len(self.daemon.objectsById)
def workspace_counts(self):
#temp is for threadsafety
temp = dict(self.daemon.objectsById)
for k, v in temp.items():
if isinstance(v, meta_proxy.MetaProxy):
if not v.protect and v.done:
self._metas_done[k] = v
        return len(self._metas_done), len(temp)  # `temp` is the local snapshot taken above
def workspace_close(self):
self.daemon.shutdown()
|
[
"Lee.McCuller@gmail.com"
] |
Lee.McCuller@gmail.com
|
6c9c677ebbd1cb3ccbee6d6edb1da9963f31841d
|
c59e65267ca6b2cea83cc00a136cd4e1a18da0a1
|
/Blender/Blender GIS/Blender Code Snippets.py
|
aca69b39da706d188ba8c8bba1e2a892ef3b6ede
|
[
"MIT"
] |
permissive
|
victorcalixto/FOSS-BIM-Experiments
|
c46bb4cd6a0f1e2d240f98f86296735bcd6748cb
|
9a4a126b7ba4bff43dec21fa1560b4d22ae34558
|
refs/heads/main
| 2023-08-14T20:20:01.833767
| 2021-10-01T09:02:13
| 2021-10-01T09:02:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
bpy.data.scenes["Scene"]
bpy.context.object["MyOwnProperty"] = 42
if "SomeProp" in bpy.context.object:
print("Property found")
# Use the get function like a Python dictionary
# which can have a fallback value.
value = bpy.data.scenes["Scene"].get("test_prop", "fallback value")
# dictionaries can be assigned as long as they only use basic types.
collection = bpy.data.collections.new("MyTestCollection")
collection["MySettings"] = {"foo": 10, "bar": "spam", "baz": {}}
del collection["MySettings"]
|
[
"30430941+DutchSailor@users.noreply.github.com"
] |
30430941+DutchSailor@users.noreply.github.com
|
3a09b09497bf9bff50a058a3f5afde88be96033f
|
ea588ceaeec885fb31606a6fe909191624f0b00f
|
/job/urls.py
|
28cbc303a2e0fc22df29a493f7b9c13b8d3094c9
|
[] |
no_license
|
Ahmedsalah000/jobboard
|
f2073843653f654a2aa0da67fb204977bf1a0f37
|
14a85565f2338a5b3ea6d11bf3f8198113ad3643
|
refs/heads/main
| 2023-06-21T02:33:43.336066
| 2021-07-19T01:42:35
| 2021-07-19T01:42:35
| 387,303,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
from django.urls import include, path
from . import views
urlpatterns = [
    path('', views.job_list, name='job_list'),
    path('<int:id>', views.job_detail, name='job_detail'),
]
|
[
"abushendy345@gmail.com"
] |
abushendy345@gmail.com
|
68d36d5c10149f55b9bccfef5ef95ed46f71d621
|
9cb902a7653645d77679a737a5061e340c579bc5
|
/hw/hw04/test_break_me.py
|
029841d05c6f6361d6c7862bd3ca07fbe469227f
|
[] |
no_license
|
isaacchen/sea-c42-python
|
7ff03bc621a5ba5bcb23a25d6352b26f097960bb
|
b7104df3805cb7d1abe95394534c949e986fb025
|
refs/heads/master
| 2021-01-15T10:25:27.635435
| 2015-09-26T05:50:31
| 2015-09-26T05:50:31
| 37,506,343
| 0
| 0
| null | 2015-09-26T05:44:04
| 2015-06-16T03:42:49
|
Python
|
UTF-8
|
Python
| false
| false
| 567
|
py
|
#!/usr/bin/env python
"""
code that tests the break_me.py functions
"""
# import pytest # used for the exception testing
import break_me
def test_name_error():
try:
break_me.exhibit_name_error()
assert(False)
except NameError:
assert(True)
def test_attribute_error():
try:
break_me.exhibit_attribute_error()
assert(False)
except AttributeError:
assert(True)
def test_type_error():
try:
break_me.exhibit_type_error()
assert(False)
except TypeError:
assert(True)
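# An equivalent, more idiomatic sketch using the pytest helper referenced in
# the commented-out import above (left as a comment so pytest stays optional):
#
# def test_type_error_with_raises():
#     with pytest.raises(TypeError):
#         break_me.exhibit_type_error()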
|
[
"paul@codefellows.com"
] |
paul@codefellows.com
|
c32db1ff05ef63b6466aaa6ae10cbbf03bdd8bf9
|
9d0195aa83cc594a8c61f334b90375961e62d4fe
|
/JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano2507.py
|
85792e6675dc10640391dadac3be6185e9eba923
|
[] |
no_license
|
rsk146/CMS
|
4e49592fc64f6438051544c5de18598db36ed985
|
5f8dab8c59ae556598b9747b52b88205fffc4dbe
|
refs/heads/master
| 2022-12-01T03:57:12.126113
| 2020-08-04T03:29:27
| 2020-08-04T03:29:27
| 284,863,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,293
|
py
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/110000/816E7CF9-B54A-EE4A-ADD4-23543EF466D5.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest2507.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
|
[
"rsk146@scarletmail.rutgers.edu"
] |
rsk146@scarletmail.rutgers.edu
|
c69c539cfceaec8d7c04a4d416c97db74126a136
|
9360288aff5e271c9dd2a560839bfee8148eef90
|
/python-code/problem-1.py
|
a6cbb74ecb57ad20f759beb18b36712e6bdcb26e
|
[] |
no_license
|
LOOIENTING07/computing-folder
|
b147c094cc529e464c08adcd28bd9dcdb9d13c42
|
959b1fbd8260b44a5aa2966c805c3bd27b6e19eb
|
refs/heads/master
| 2022-12-16T11:53:47.114203
| 2020-09-21T11:12:29
| 2020-09-21T11:12:29
| 289,183,562
| 0
| 1
| null | 2020-09-21T11:12:30
| 2020-08-21T05:15:56
|
Python
|
UTF-8
|
Python
| false
| false
| 364
|
py
|
#this microbit code should display a happy face if you press button a but a sad face if you press b
#But there is an error! Can you solve it?
from microbit import*
while True:
if button_a.is_pressed():
display.show(Image.HAPPY)
sleep (3000)
display.clear()
if button-b.is_pressed():
display.show(Image.SAD)
sleep (3000)
display.clear()
|
[
"noreply@github.com"
] |
LOOIENTING07.noreply@github.com
|
7b75b5605b47cd15dd639548cf76b7b54efd9277
|
d850b6400ea29e2419a5e15b0fafd3a505bc9486
|
/app/core/migrations/0003_ingredient.py
|
24d1062b985f73c1e84c2534547aeb8758011844
|
[
"MIT"
] |
permissive
|
Aditya0721/recipe-app-api
|
ec62c07f0d662cde63243cbc23b091480cd3b131
|
0d5cd850779019f16d9ad8dffa357ae1427160d0
|
refs/heads/master
| 2021-05-21T17:28:54.834496
| 2020-06-08T17:53:47
| 2020-06-08T17:53:47
| 252,735,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
# Generated by Django 3.0.5 on 2020-04-24 08:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"adityaprasad246278@gmail.com"
] |
adityaprasad246278@gmail.com
|
0291c71dfc2c07b3ec3777a4e5668bd1d7c5bde8
|
cd6163a79f85e59f68fc8f26030273c0d4df67df
|
/tests/test_connection.py
|
c5d6440d6530f35f17c360216155345dbcedac19
|
[] |
no_license
|
aktechthoughts/ci_test
|
cd9602bf1ba567fd851eea48028678a1d840ba79
|
7082768904a3cbc9576697cbb8cc947d726a81e3
|
refs/heads/master
| 2022-06-29T16:08:40.765144
| 2020-05-15T12:25:24
| 2020-05-15T12:25:24
| 258,782,583
| 0
| 0
| null | 2020-05-14T18:14:55
| 2020-04-25T13:28:56
|
Python
|
UTF-8
|
Python
| false
| false
| 698
|
py
|
# -*- coding: utf-8 -*-
"""Test connecting to database"""
import pytest
@pytest.mark.skip(reason="Disabling Docker as no real tests yet")
def test__create_table__dummy__no_error(exaplus):
assert exaplus("tests/sql_samples/sandbox_ddl.sql") == 0
@pytest.mark.skip(reason="Disabling Docker as no real tests yet")
def test__insert__to_dummy__no_error(exaplus):
assert exaplus("tests/sql_samples/sandbox_etl.sql") == 0
@pytest.mark.skip(reason="Disabling Docker as no real tests yet")
def test__select__from_dummy__no_error(query):
result = query("SELECT * FROM SANDBOX.DUMMY_TABLE WHERE TEST_DECIMAL = 31337")
assert result["TEST_STRING"][0] == "It works"
|
[
"abhsihek_ku@yahoo.com"
] |
abhsihek_ku@yahoo.com
|
f8f0c90c737f09566f8ed311dba55538a78a9e90
|
ad4edce627609127687c12fa12a0fa4a6a416abf
|
/Inreeding code/Run/MainData Code/MSHomo 5 Loci.py
|
5fb25f6fdf5aac8114f81e0f5d0c83b0f8613d20
|
[] |
no_license
|
GabeDO/Detecting-non-random-mating-or-selection-in-natural-populations-using-multi-locus-gene-families
|
ca9332d10f7496e470ed53da246eec57c1678610
|
6525cfd6b187c25775a87a7cc8c4b9ed09a2a945
|
refs/heads/main
| 2023-04-10T04:25:04.844806
| 2021-04-22T05:05:51
| 2021-04-22T05:05:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65,612
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 20 11:19:26 2019
@author: z5035086
"""
from FisSimFunctions import *
from simuPOP import *
import simuPOP as sim
from simuPOP.utils import export
import subprocess
import csv
import numpy as np
import lib
import matplotlib.pyplot as plt
import pandas
from simuPOP.sampling import drawRandomSample
def Eveneness(resultt):
    """Shannon evenness per individual and for the pooled sample.

    `resultt` is a list of rows of allele counts, one row per individual.
    """
    import math
    IndiEveness = []
    IndiShan = []
    IndiMxEnt = []
    for r in resultt:
        Prop = []
        total = sum(r)
        for l in r:
            p = l / total
            # alleles with zero count contribute nothing to Shannon entropy
            if p > 0:
                Prop.append(-p * math.log(p))
        Shan = sum(Prop)
        MxEnt = math.log(len(Prop))  # log of the number of observed alleles
        if MxEnt == 0:
            IndiEveness.append(0)
        else:
            IndiEveness.append(Shan / MxEnt)
        IndiShan.append(Shan)
        IndiMxEnt.append(MxEnt)
    AvgIndiEveness = sum(IndiEveness) / len(IndiEveness)
    AvgIndiShan = sum(IndiShan) / len(IndiShan)
    AvgIndiMxEnt = sum(IndiMxEnt) / len(IndiMxEnt)
    # pooled allele counts across all individuals
    TotalAlleles = [sum(i) for i in zip(*resultt)]
    Total = sum(TotalAlleles)
    TotalShanProps = []
    for h in TotalAlleles:
        p = h / Total
        if p > 0:
            TotalShanProps.append(-p * math.log(p))
    TShan = sum(TotalShanProps)
    TMxEnt = math.log(len(TotalShanProps))
    if TMxEnt == 0:
        TotalEveness = 0
    else:
        TotalEveness = TShan / TMxEnt
    return [AvgIndiEveness, TotalEveness, TShan, TMxEnt, AvgIndiShan, AvgIndiMxEnt]
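# Usage sketch (hypothetical counts -- two individuals, four alleles):
#   Eveneness([[2, 1, 1, 0], [1, 1, 1, 1]])
# returns [avg individual evenness, total evenness, total Shannon H,
#          total max entropy, avg individual H, avg individual max entropy].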
def MaxEnt(resultt):
    """Maximum entropy (log of allele richness) per locus and over all loci.

    Relies on the module-level globals `file` (a genotype CSV whose first
    column is an ID) and `NumLoci`, which must be defined elsewhere in the
    script before this is called; `resultt` is currently unused.
    """
    import math
    import numpy

    def func(a):  # note: defined but never called
        if a[0] == a[-1]:
            return 2
        else:
            return 1

    # EXPECTED HET PER LOCI
    a = numpy.genfromtxt(file, delimiter=',', skip_header=True)[:, 1:]
    IndiMaxEnt = []
    for i in range(NumLoci):
        res = numpy.unique(a[i, 1:], return_counts=True)
        IndiMaxEnt.append(math.log(len(res[1])))
        # earlier draft sliced allele pairs per locus instead:
        # a[:, 1 + (i * 2):3 + (i * 2)]
    avgIndiMaxEnt = sum(IndiMaxEnt) / len(IndiMaxEnt)
    TotalVars = numpy.unique(a[:, 1:], return_counts=True)
    TotalMaxEnt = math.log(len(TotalVars[1]))
    return [avgIndiMaxEnt, TotalMaxEnt]
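# Editorial helper (a sketch; the original spelled these tables out literally):
# builds a MapSelector fitness dictionary over `n_alleles` alleles in which
# every homozygote (i, i) gets fitness `homo` and every heterozygote (i, j),
# j < i, gets fitness `het`.
def make_fitness(n_alleles, homo, het):
    return {(i, j): (homo if i == j else het)
            for i in range(n_alleles) for j in range(i + 1)}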
# write header
#with open("ShanOutputTrial.csv", "a", newline='') as fp:
# wr = csv.writer(fp, dialect='excel')
# wr.writerow(['Mating','Allele Distro','Population Size','NoOfAlleles','RealNoLoci','esitmateNoLoci','Fis','AverageShanPerIndi-1Hi','TotalShanOfPop-1Hs','Average1HiOver1Hs','MaxAlleleValue'])
#lstOfAlleleDis = [[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1],[0.3,0.16,0.09,0.09,0.06,0.06,0.06,0.06,0.06,0.06],[0.46,0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.06]]
lstOfAlleleDis = [[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1], [0.5,0.1,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0.05]]
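# Two starting regimes: ten equally common alleles (even) versus one allele at
# frequency 0.5 dominating nine rarer ones (uneven).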
for PopSize in [40, 400]:
    for A in lstOfAlleleDis:
        for g in [10, 30, 50]:
            for i in range(100):
                # distributions tried in earlier runs:
                #   [0.1] * 10
                #   [0.9, 0.02] + [0.01] * 8
                #   [0.46] + [0.06] * 9
                #   [0.01] * 100
                #   [[0.05] * 20]
                allele_freq = A
                NoOfAlleles = len(allele_freq)
                NoOfLoci = 5
                pop = Population(size=PopSize, loci=[NoOfLoci], infoFields=['fitness'])
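                # 'fitness' must be listed in infoFields: the MapSelectors in
                # the mating scheme below write each individual's selection
                # value into that information field.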
                pop.evolve(
                    # initial parameters: even sex ratio and the same starting
                    # allele frequencies at every locus
                    initOps=[InitSex()] + [
                        InitGenotype(freq=allele_freq, loci=l)
                        for l in range(NoOfLoci)
                    ],
                    # preOps variants (disabled): homozygote-lethal selection
                    # applied before mating. Every commented-out MapSelector used
                    # a 20-allele fitness table with fitness 0 for each homozygote
                    # (i, i) and 1 for each heterozygote, i.e. make_fitness(20, 0, 1),
                    # one selector per locus for loci 0-10 -- either wrapped in
                    # sim.MlSelector(..., mode=sim.MULTIPLICATIVE, reps=0) or as
                    # stand-alone sim.MapSelector(..., reps=2) operators:
                    #preOps=[
                    #    sim.MlSelector(
                    #        [sim.MapSelector(loci=l, fitness=make_fitness(20, 0, 1))
                    #         for l in range(11)],
                    #        mode=sim.MULTIPLICATIVE, reps=0),
                    #] + [
                    #    sim.MapSelector(loci=l, fitness=make_fitness(20, 0, 1), reps=2)
                    #    for l in range(11)
                    #],
                    matingScheme=sim.RandomMating(
                        sexMode=(GLOBAL_SEQUENCE_OF_SEX, MALE, FEMALE),
                        ops=[sim.MendelianGenoTransmitter()] + [
                            # homozygotes (fitness 0.6) are favoured over
                            # heterozygotes (0.5) at every locus
                            sim.MapSelector(loci=l,
                                            fitness=make_fitness(NoOfAlleles, 0.6, 0.5))
                            for l in range(NoOfLoci)
                        ]),
                    # ops variant (disabled): the outcrossing counterpart --
                    # the same structure, but each MapSelector used a 10-allele
                    # homozygote-lethal table, make_fitness(10, 0, 1), on loci 0-10:
                    #ops=[
                    #    sim.MendelianGenoTransmitter(),
                    #] + [
                    #    sim.MapSelector(loci=l, fitness=make_fitness(10, 0, 1))
                    #    for l in range(11)
                    #],
gen=g+1
)
# Bill wants 50 pop, 5 loci, 10 alleles
# 1 even, 1 uneven, 1 inbred, 1 random
result = AbundData(pop, PopSize, NoOfAlleles)
stat(pop, alleleFreq=ALL_AVAIL, subPops= ALL_AVAIL)
#result2 = AbundData(pop, PopSize, NoOfAlleles)
#print("one",result)
#result1 = PresAbsData(result2)
#print("two",result)
export(pop, format='CSV', output='genepopfile.csv')
y = measureFIS('genepopfile.csv', NoOfLoci)
#maxent = MaxEnt('genepopfile.csv', NoOfLoci)
FullList = ['MSHomo 5 Loci' ,g, A,PopSize, NoOfAlleles, NoOfLoci,GabesLocEstimateMEAN(result),GabesLocEstimateMEDIAN(result),GabesLocEstimate(result), y, AverageShan(result)[0], AverageShan(result)[1], AverageShan(result)[2], AverageShan(result)[3], AverageShan(result)[4],CountFixedLoci(pop,10,5),AverageRich(result)[0],AverageRich(result)[1],AverageRich(result)[2],AverageHet(result)[0],AverageHet(result)[1],AverageHet(result)[2],BillsMath(PresAbsData(result))]
#x = BillsMath(result1)
#for i in range(len(x)):
# FullList.append(x[i])
#Subpop = drawRandomSample(pop, sizes = 10)
#Subresult = AbundData(Subpop, 10, NoOfAlleles)
#Subresult2 = AbundData(Subpop, 10, NoOfAlleles)
#Subresult1 = PresAbsData(Subresult2)
#export(Subpop, format='CSV', output='genepopfile.csv')
#y = measureFIS('genepopfile.csv', NoOfLoci)
#SubList = [y, AverageShan(Subresult)[0], AverageShan(Subresult)[1], AverageShan(Subresult)[2], AverageShan(Subresult)[3]]
#x = BillsMath(Subresult1)
#for i in range(len(x)):
# SubList.append(x[i])
with open("DATAsequence1.csv", "a", newline='') as fp:
wr = csv.writer(fp, dialect='excel')
#wr.writerow(FullList+SubList)
wr.writerow(FullList)
FullList = []
print(g)
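# Note (added): the CSV file is opened in append mode, so each pass through
# this block adds one row of summary statistics per generation to DATAsequence1.csv.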
|
[
"noreply@github.com"
] |
GabeDO.noreply@github.com
|
15b1d79b4a5413c75a757829dfff8bdaa3899f82
|
3602b9578aff41164321195552d1558e17b372f7
|
/Tema3/Tema3Py/Tem3.py
|
2fd233799805ad8c4cccf706f1c059aa0548394a
|
[] |
no_license
|
alexluca97/NumericalCalculation
|
fa860da351bc8c7f7b0c81eb299c2c8f7d13cd7d
|
0000dd48c73db8b66e1808210e8cc5e34f5885c2
|
refs/heads/master
| 2020-05-29T17:49:18.650024
| 2019-05-29T16:27:38
| 2019-05-29T16:27:38
| 189,284,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,942
|
py
|
#! /usr/bin/env python
import logging
EPS = 10 ** -10
LOG_LEVEL = logging.DEBUG
SIMPLE = False
S = ""
if SIMPLE:
S = "s"
FILE_A = "{}a.txt".format(S)
FILE_B = "{}b.txt".format(S)
FILE_APB = "{}aplusb.txt".format(S)
FILE_AOB = "{}aorib.txt".format(S)
def is_zero(nr):
if abs(nr) < EPS:
return True
return False
def reader(path):
with open(path) as stream:
for line in stream:
idx = line.find("#")
if idx != -1:
line = line[:idx]
line = line.strip()
if line:
yield line
class Matrix(object):
def __init__(self, dim):
self.dim = dim
self.diagonal = []
self.values = []
self.zindex = {}
self._raw = {}
self.length = None
def put(self, val, lin, col):
if is_zero(val):
return
value = self._raw.setdefault(lin, {}).setdefault(col, 0) + val
self._raw[lin][col] = value
def finish(self, check_lengths=False):
zeros = 0
for lin in sorted(self._raw):
addzero = True
cols = self._raw[lin]
cols_len = len(cols)
if check_lengths and cols_len > 10:
logging.warning("%d elements on this row", cols_len)
for col, val in cols.items():
if lin == col:
# Fill diagonal.
miss = lin - len(self.diagonal)
self.diagonal.extend([0] * miss)
self.diagonal.append(val)
else:
# Fill the values vector.
miss = max(lin - zeros, 0)
zeros = lin + 1
self.values.extend([
(-idx, 0) for idx in
range(lin - miss, lin)
])
if addzero:
addzero = False
self.values.append((-lin, 0))
self.zindex[lin] = len(self.values)
self.values.append((col, val))
self.diagonal.extend([0] * (self.dim - len(self.diagonal)))
self.length = len(self.values)
self._raw.clear()
def has_line(self, lin):
return lin in self.zindex
def get_line(self, lin, sparse=False):
values = []
val = self.diagonal[lin]
if val:
values.append((lin, val))
idx = self.zindex.get(lin)
if idx is not None:
while idx < self.length:
col, val = self.values[idx]
idx += 1
if not val:
break # end of the current row
values.append((col, val))
if sparse:
return dict(values)
line = [0] * self.dim
for col, val in values:
line[col] = val
return line
def __repr__(self):
return "<Matrix {}>".format(self.dim)
def __str__(self):
return "<Matrix {} {}>".format(len(self.diagonal), len(self.values))
def __getitem__(self, pos):
lin, col = pos
if lin == col:
return self.diagonal[lin]
idx = self.zindex.get(lin)
if idx is None:
return 0 # no elements on this row
while idx < self.length:
_col, val = self.values[idx]
idx += 1
if not val:
break # end of the current row
if col == _col:
return val
return 0 # requested column not found
def vec_sum(a, b):
alen, blen = len(a), len(b)
diff = abs(alen - blen)
if diff:
ext = [0] * diff
if alen < blen:
a.extend(ext)
else:
b.extend(ext)
r = []
for idx, elem in enumerate(a):
r.append(elem + b[idx])
return r
def mat_sum(a, b):
assert a.dim == b.dim
r = Matrix(a.dim)
for lin in range(r.dim):
aline = a.get_line(lin)
bline = b.get_line(lin)
rline = map(sum, zip(aline, bline))
for col, val in enumerate(rline):
r.put(val, lin, col)
r.finish()
return r
def mat_mul(a, b):
"""Multiply A with B^t."""
assert a.dim == b.dim
r = Matrix(a.dim)
# Cache the transposed rows (i.e. the columns) of matrix B.
blines = {}
for blin in range(r.dim):
bline = b.get_line(blin, sparse=True)
blines[blin] = bline
# Start the multiplication using only the sparse rows.
for alin in range(r.dim):
aline = a.get_line(alin, sparse=True)
for blin in range(r.dim):
bline = blines[blin]
rline = []
for col, val in aline.items():
_val = bline.get(col)
if _val:
rline.append(val * _val)
value = sum(rline)
r.put(value, alin, blin)
r.finish()
return r
def mat_vec_mul(a, b):
assert a.dim == len(b)
r = [0] * a.dim
# Start the multiplication using only the sparse rows.
for alin in range(a.dim):
aline = a.get_line(alin, sparse=True)
bline = b
rline = []
for col, val in aline.items():
_val = bline[col]
if _val:
rline.append(val * _val)
value = sum(rline)
r[alin] = value
return r
def mat_equal(a, b):
assert a.dim == b.dim
for lin in range(a.dim):
aline = a.get_line(lin)
bline = b.get_line(lin)
if aline != bline:
return False
return True
def read_vecmat(path, name="unknown", trans=False, check_lengths=False):
# Read the vector.
info = reader(path)
dim = int(next(info))
vec = []
mat = Matrix(dim)
for _ in range(dim):
vec.append(float(next(info)))
logging.debug("%s vector length: %d", name, len(vec))
# Read the matrix and post-process it.
count = 0
for row in info:
count += 1
chunks = [c.strip() for c in row.split(",")]
val, (lin, col) = float(chunks[0]), map(int, chunks[1:])
if trans: # store transposed
lin, col = col, lin
mat.put(val, lin, col)
logging.debug("%s matrix entries: %d", name, count)
mat.finish(check_lengths=check_lengths)
logging.debug("%s matrix: %r %s", name, mat, mat)
return vec, mat
def main():
logging.basicConfig(level=LOG_LEVEL)
# Read the input data.
avec, amat = read_vecmat(FILE_A, name="A", check_lengths=True)
bvec, bmat = read_vecmat(FILE_B, name="B", check_lengths=True)
_, tbmat = read_vecmat(FILE_B, name="TB", trans=True) # B transposed
# And the expected outputs.
pvec, pmat = read_vecmat(FILE_APB, name="P")
ovec, omat = read_vecmat(FILE_AOB, name="O")
# Debugging checks.
assert amat[0, 0] == 141.5
assert amat[26, 26] == 134.5
assert amat[1625, 750] == 48
assert amat[1625, 749] == 0
assert bmat[0, 0] == 302.5
assert bmat[26, 26] == 119.5
assert bmat[1625, 750] == 0
assert bmat[1625, 749] == 0
# # Actually add the vectors.
# rvec = vec_sum(avec, bvec)
# logging.info("R vector length: %d", len(rvec))
# logging.info("R vector[0]: %f", rvec[0])
# logging.info("R vector[-1]: %f", rvec[-1])
# # And run the check.
# assert pvec == rvec
# Add the matrices.
rmat = mat_sum(amat, bmat)
logging.info("R plus matrix: %r %s", rmat, rmat)
# And verify their contents.
assert mat_equal(pmat, rmat)
# Multiply the matrices.
rmat = mat_mul(amat, tbmat)
logging.info("R ori matrix: %r %s", rmat, rmat)
# And verify their contents.
assert mat_equal(omat, rmat)
# Matrix-vector multiplication.
xvec = list(map(float, range(amat.dim, 0, -1)))
rvec = mat_vec_mul(amat, xvec)
logging.info("R ori vector[0]: %f", rvec[0])
assert avec == rvec
if __name__ == "__main__":
main()
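# Minimal usage sketch for the sparse Matrix class above (values are
# illustrative, not from the assignment data):
# m = Matrix(3)
# m.put(2.5, 0, 0)
# m.put(1.0, 0, 2)
# m.finish()
# assert m[0, 0] == 2.5 and m[0, 2] == 1.0 and m[1, 1] == 0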
|
[
"alexluca97@yahoo.com"
] |
alexluca97@yahoo.com
|
552e6864ed906978a3323265f588d54aaf4335a7
|
7958988b3f968a185bd35a068a45b0dfbe4a6784
|
/lessons/ranges.py
|
3356e75019a1f5c998279dd58d8f2e9c973529a8
|
[] |
no_license
|
dami-akinbi/python3-netninja
|
1a4385fb64d11b98c41fdaaa1ecbe1b460e44e1a
|
3cd50f1ee2a6a9e596304ed1cbb86a2d160f0be2
|
refs/heads/master
| 2023-05-29T01:37:50.438058
| 2021-06-11T16:19:33
| 2021-06-11T16:19:33
| 371,915,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
# for n in range(5):
# print(n)
# for t in range(3, 10):
# print(t)
# for m in range(0, 20, 2):
# if m == 0:
# m += 1
# continue
# print(m)
# if m == 16:
# break
# burgers = ['beef', 'chicken', 'veg', 'supreme', 'double']
# for n in range(len(burgers)):
# print(f"{n} - {burgers[n]}")
burgers = ['beef', 'chicken', 'veg', 'supreme', 'double']
for n in range(len(burgers) - 1, -1, -1):
print(f"{n} - {burgers[n]}")
|
[
"akinbidamilola074@gmail.com"
] |
akinbidamilola074@gmail.com
|
a47c90bf0042b881314f2aa28967269df9b8346e
|
9623652ee727ef9f9d8ebef86d40c8239d0c7fbe
|
/tf_neural_pixel.py
|
3efb81be57d8527700297868adfcb6ec43b6c3db
|
[] |
no_license
|
R-Freitas/Master-Thesis
|
a48798b16fdc88311def443430fb2269124d48da
|
7744b22a1469c6984b82c9e90429c958621a1d2b
|
refs/heads/master
| 2020-03-24T00:25:02.527458
| 2019-01-06T18:10:29
| 2019-01-06T18:10:29
| 141,737,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,902
|
py
|
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
from keras.utils import to_categorical
from keras.utils import np_utils
#Optimizer libraries
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
import scipy.io as sio
import progressbar
import copy
import time
from sklearn import preprocessing
from sklearn import model_selection
#--------------------------GLOBAL VARIABLES------------------------------
FRAME_SIZE = 140
#-------------------------------------------------------------------------
def set_class(x):
if x == 'G1':
return 0
elif ('S' in x and 'G1' in x):
return 1
elif x == 'S':
return 3
elif ('S' in x and 'G2' in x):
return 2
elif x == 'G2':
return 4
else:
return 5
def generate_save_data():
count_files=0
count_cells = 0
dir = os.getcwd()
dirs=[]
dirs.append(dir)
for dir in dirs:
print("DIR: ",dir)
cells=[]
i=0 #Used to transverse the cells list when printing the images
for roots, dirs, files in os.walk(dir):
for file in files:
if file.endswith('.mat'):
path = os.path.realpath(os.path.join(roots,file))
print("PATH: ",path)
data = (sio.loadmat(path,struct_as_record=True))['storage']
for case in data:
count_cells += 1
if (set_class(case['CellCycle'][0][0]) < 3):
cells.append(Cell_Info(np.matrix(case['Mask'][0]),case['CellCycle'][0][0]))
count_files += 1
"""
#Routine used to print all cells from a mat file as an image
fig=plt.figure(frameon=False)
final_mask=np.zeros_like(cells[0].Matrix)
for index in range(i,len(cells)):
final_mask += cells[index].Matrix
i += 1
plt.imshow(final_mask, cmap='Blues',interpolation='nearest')
plt.show()
"""
print(count_files, "file(s) found")
print(count_cells, "cell(s) found,", len(cells), "cell(s) used")
"""
#Routine used to determine the maximum cell size and thus choose an
#appropriate input size (in this case 140x140)
pix_size=[]
for cell in cells:
pix_size.append([(cell.y_max-cell.y_min),(cell.x_max-cell.x_min)])
pix_size=np.array(pix_size)
print(np.amax(pix_size,axis=0))
"""
"""
#Routine used to check if all information is correct
print('=================================================')
for i in range(10):
print("Y:",cells[i].y_min,cells[i].y_max)#Y min and max
print("X:",cells[i].x_min,cells[i].x_max)#X min and max
print(cells[i].Intensity)
print(cells[i].Area)
print(cells[i].CellCycle)
print(cells[i].Class)
print('=================================================')
"""
#With all the cells in a list and an input size chosen, it is
#time to create the input for the neural network itself
treated_cells=[]
for cell in cells:
S_mask=np.zeros((FRAME_SIZE,FRAME_SIZE))
y_diff = cell.y_max - cell.y_min
x_diff = cell.x_max - cell.x_min
if (y_diff > FRAME_SIZE or x_diff > FRAME_SIZE):
print("Impossible to fit cell, please increase frame size")
else:
y_offset = int((FRAME_SIZE-y_diff)/2)
x_offset = int((FRAME_SIZE-x_diff)/2)
S_mask[y_offset:y_diff+y_offset+1,x_offset:x_diff+x_offset+1] = cell.Matrix[cell.y_min : cell.y_max+1, cell.x_min:cell.x_max+1]
treated_cells.append(Cell_Info(S_mask.astype(float),cell.CellCycle))
del cells
#data = np.array([(cell.Matrix) for cell in treated_cells])
#labels = to_categorical(np.array([(int(cell.Class)) for cell in treated_cells]),num_classes=3)
data_G1=np.array([(cell.Matrix) for cell in treated_cells if cell.Class==0])
data_S=np.array([(cell.Matrix) for cell in treated_cells if cell.Class==1])
data_G2=np.array([(cell.Matrix) for cell in treated_cells if cell.Class==2])
labels_G1=np.empty(len(data_G1))
labels_G1.fill(0)
labels_G1 = to_categorical(labels_G1,num_classes=3)
labels_S=np.empty(len(data_S))
labels_S.fill(1)
labels_S = to_categorical(labels_S,num_classes=3)
labels_G2=np.empty(len(data_G2))
labels_G2.fill(2)
labels_G2 = to_categorical(labels_G2,num_classes=3)
labels=np.vstack((labels_G1,labels_S,labels_G2))
data = np.vstack((data_G1,data_S,data_G2))
data=data/255.0
del treated_cells
print("Data points used: ", len(data))
train_data, test_data, train_labels, test_labels = model_selection.train_test_split(data, labels, shuffle=True, test_size=0.10)
test_data, validate_data, test_labels, validate_labels = model_selection.train_test_split(test_data, test_labels, shuffle=True, test_size=0.50)
with open("/Users/Rafa/Google Drive/Faculdade/Tese/Projecto/Treated_Data/pickled_cells_pixel.pkl", "bw") as fh:
data = (train_data,
test_data,
train_labels,
test_labels,
validate_data,
validate_labels)
pickle.dump(data, fh)
def load_data():
with open("/Users/Rafa/Google Drive/Faculdade/Tese/Projecto/Treated_Data/pickled_cells_pixel.pkl", "br") as fh:
data = pickle.load(fh)
x_train = data[0]
x_test = data[1]
y_train = data[2]
y_test = data[3]
x_validation = data[4]
y_validation = data[5]
return x_train, y_train, x_test, y_test
def load_validation():
with open("/Users/Rafa/Google Drive/Faculdade/Tese/Projecto/Treated_Data/pickled_cells_pixel.pkl", "br") as fh:
data = pickle.load(fh)
x_train = data[0]
x_test = data[1]
y_train = data[2]
y_test = data[3]
x_validation = data[4]
y_validation = data[5]
return x_validation, y_validation
def optimize_model(x_train, y_train, x_test, y_test):
"""
Model providing function:
Create Keras model with double curly brackets dropped-in as needed.
Return value has to be a valid python dictionary with two customary keys:
- loss: Specify a numeric evaluation metric to be minimized
- status: Just use STATUS_OK and see hyperopt documentation if not feasible
The last one is optional, though recommended, namely:
- model: specify the model just created so that we can later use it again.
"""
callbacks = [keras.callbacks.EarlyStopping(monitor='val_acc', patience=0)]
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=(140, 140)))
for i in (range(20)):
model.add(keras.layers.Dense(1000, activation=tf.nn.relu,use_bias=True))
model.add(keras.layers.Dropout({{uniform(0,0.5)}}))
model.add(keras.layers.Dense(3,activation=tf.nn.softmax))
model.compile(optimizer=keras.optimizers.Adam(lr=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train,
epochs=100,
batch_size=50,
shuffle=True,
validation_data=(x_test, y_test),
verbose=1)
x_validation, y_validation=load_validation()
score, acc = model.evaluate(x_validation, y_validation)
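# Note (added): the validation score above is immediately overwritten by the test-set evaluation below.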
score, acc = model.evaluate(x_test, y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def create_model(x_train, y_train, x_test, y_test):
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=(140, 140)))
for i in (range(20)):
model.add(keras.layers.Dense(1000, activation=tf.nn.relu,use_bias=True))
model.add(keras.layers.Dropout(0.1))
model.add(keras.layers.Dense(3,activation=tf.nn.softmax))
callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss', patience=2,mode='auto')]
model.compile(optimizer=keras.optimizers.Adam(lr=0.00001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history=model.fit(x_train, y_train,
epochs=100,
shuffle=True,
batch_size=50,
validation_data=(x_test, y_test))
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, acc, 'bo', label='Training accuracy')
# b is for "solid blue line"
plt.plot(epochs, loss, 'b', label='Training loss')
# "ro" is for "blue dot"
plt.plot(epochs, val_acc, 'ro', label='Validation accuracy')
# r is for "solid blue line"
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
x_validation, y_validation=load_validation()
score, acc = model.evaluate(x_validation, y_validation)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
class Cell_Info:
def __init__(self,Matrix,CellCycle):
self.Matrix = Matrix
self.Index = np.asarray(np.where(Matrix>0)).T
self.y_min = min(self.Index[:,0])
self.y_max = max(self.Index[:,0])
self.x_min = min(self.Index[:,1])
self.x_max = max(self.Index[:,1])
self.Area = np.count_nonzero(self.Matrix)
self.Intensity = self.Matrix.sum()
self.CellCycle = str(CellCycle)
self.Class = set_class(str(CellCycle))
new_data=0
if (new_data==1) :
generate_save_data()
else:
optimizing=0
if optimizing:
best_run, best_model = optim.minimize(model=optimize_model,
data=load_data,
algo=tpe.suggest,
max_evals=3,
trials=Trials())
X_train, Y_train, X_test, Y_test = load_data()
print("Evalutati on of best performing model:")
print(best_model.evaluate(X_test,Y_test))
print("Best performing model chosen hyper-parameters:")
print(best_run)
else:
X_train, Y_train, X_test, Y_test = load_data()
result, status, best_model = create_model(X_train, Y_train, X_test, Y_test)
|
[
"rafaeljcfreitas@gmail.com"
] |
rafaeljcfreitas@gmail.com
|
c5f68b68f733b910d6129fe7a7f9324e3c9c89e4
|
3d40c0e62a0e7ada9e3e396f7aa581f39184165a
|
/go/main_board.py
|
eed5f5a9cfe0010aa238ddb2e19f6983f220d3b8
|
[] |
no_license
|
kyorohiro/my_dlgo_memo
|
87590162631ff46c6a95eec728746a4a946de0bf
|
2ae3bc3f0e653a04d12f89bdb0404f65a5c12b6c
|
refs/heads/master
| 2020-05-30T15:58:04.931505
| 2019-06-24T16:50:00
| 2019-06-24T16:50:00
| 189,834,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
import sys
from go import Board,BoardState
def main():
index:int = 0
board:Board = Board(9,9)
while True:
sys.stdout.flush()
sys.stdout.write(str(board))
sys.stdout.write("Input A1 - I9 or ! \r\n")
input:str = sys.stdin.readline()
if(0 <= input.find("!")):
break
else:
extracted = Board.extractRowCol(input)
row:int = extracted[0]
col:int = extracted[1]
if (0<= row and row <=9) and (0<= col and col <=9):
board.setState(row, col, BoardState.BLACK if index%2==0 else BoardState.WHITE)
index+=1
pass
if __name__ == "__main__":
main()
|
[
"kyorohiro@gmail.com"
] |
kyorohiro@gmail.com
|
626563e7f5d26e2470a314f5d15ba914c78964fe
|
0b501e422ffbe2a525ae275ecf0387134f9bbc83
|
/ScopeResolution/newscanner/cases/oldAllDiv.py
|
5143eb6df97cc94982f52986f1049c9929d813e5
|
[] |
no_license
|
saroj31/Python-Interpreter-CPSC8270
|
2c5136991d86f376c7d00caca8de64c202b3d0ff
|
a4e00e28edb77abc4ad27b38561480c85a915019
|
refs/heads/master
| 2021-01-20T04:12:00.077907
| 2017-04-28T02:25:09
| 2017-04-28T02:25:09
| 89,657,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
print 4/5
print 5/3
print 5/1
print 4/2/1
print 8270000/10/(5*2)/((5*2**2)/2)
print 5//2
print 5//-2
print -5//2
print 5/0
|
[
"Saroj Dash"
] |
Saroj Dash
|
64dc80169d6224e49fc44b7b7d3d15d14a21807a
|
faabe34af6297530617395bcc6811350765da847
|
/platforms/hacking-coding-interview/PermutationWithDups.py
|
1ea371457e280492c1595742d4664f228873d15c
|
[] |
no_license
|
pqnguyen/CompetitiveProgramming
|
44a542aea299bd553dd022a9e737e087285b8b6d
|
27330e7ff79c4ac883d7e1fcdf2f0d30939c3f78
|
refs/heads/master
| 2021-07-21T12:15:47.366599
| 2021-06-27T14:58:48
| 2021-06-27T14:58:48
| 132,837,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
# Cracking the coding interview - 8.8
# Permutations with Duplicates: Write a method to compute all permutations of a string
# whose characters are not necessarily unique. The list of permutations should not have duplicates.
from collections import defaultdict
class Solution:
def permutation(self, a):
freq = self.count_freq(a)
results = []
self.permutation_util(a, freq, results, [])
print(results)
return results
def permutation_util(self, a, freq, results, current):
if len(current) == len(a):
results.append(current[:])
return
for key in freq.keys():
if freq[key]:
freq[key] -= 1
current.append(key)
self.permutation_util(a, freq, results, current)
current.pop()
freq[key] += 1
def count_freq(self, a):
freq = defaultdict(int)
for e in a: freq[e] += 1
return freq
a = [1, 1, 2, 3]
Solution().permutation(a)
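# Note (added): with duplicates, the number of distinct permutations is
# n! / (product of count(c)! over each distinct value c); for
# a = [1, 1, 2, 3] that is 4! / 2! = 12 results.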
|
[
"pqnguyen1996@gmail.com"
] |
pqnguyen1996@gmail.com
|
62be7ad5f06bad7d48b0c923c2b1e2a141b124ee
|
3753f0f09329263ea785c5394b37b98fba0d233f
|
/aviate/aviate/migrations/0004_auto_20211030_1248.py
|
44697f7e5b53aea12a3e22f633f14f1ea12d2c0b
|
[] |
no_license
|
vinayvivek90/portal
|
2d50c4ce8ce0091fc65198398e5aa8a8acc07f1e
|
72d9821a8eee1b12fd0a4d35bcba7b17ebf5a522
|
refs/heads/master
| 2023-09-02T11:09:04.550913
| 2021-10-31T13:06:12
| 2021-10-31T13:06:12
| 423,147,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
# Generated by Django 3.2.8 on 2021-10-30 12:48
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('aviate', '0003_resume_uploaded'),
]
operations = [
migrations.AlterField(
model_name='resume',
name='resume',
field=models.FileField(upload_to=''),
),
migrations.AlterField(
model_name='resume',
name='uploaded',
field=models.DateTimeField(default=datetime.datetime(2021, 10, 30, 12, 48, 19, 396159)),
),
]
|
[
"vinayvivek90@gmail.com"
] |
vinayvivek90@gmail.com
|
122f76fb0e7c8802bf3c1cfb70e7d2a2535d4cf4
|
1919fc2555dbcb6b865fdef0cc44c56c6c47e2f0
|
/chapter_14/demo_14_8.py
|
19772561a53ce16fcb9aabe0271f8e2aca646494
|
[] |
no_license
|
ender8848/the_fluent_python
|
10f8dd98fcf206b04ea6d34f47ad5e35f896a3ac
|
058d59f6a11da34e23deb228e24a160d907f7643
|
refs/heads/master
| 2022-12-19T07:51:51.908127
| 2019-01-17T09:53:33
| 2019-01-17T09:53:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
'''
Example 14-8 demonstrates a simple generator expression and contrasts it
with a list comprehension: it first uses the gen_AB generator function in a
list comprehension, then in a generator expression.
'''
def gen_AB():
print('start')
yield 'A'
print('continue')
yield 'B'
print('end.')
# The list comprehension eagerly iterates over the items produced by the generator object built by gen_AB(): 'A' and 'B'. Note that the output below is start, continue and end.
res1 = [x*3 for x in gen_AB()]
'''
start
continue
end.
'''
# This for loop iterates over the res1 list produced by the list comprehension
for i in res1:
print('-->', i)
'''
--> AAA
--> BBB
'''
# Assign the generator expression to res2. Calling gen_AB() returns a generator, but the generator is not consumed here
res2 = (x*3 for x in gen_AB())
# res2 is a generator object
print(res2)
'''<generator object <genexpr> at 0x1099c9ba0>'''
# The body of gen_AB only runs when the for loop iterates over res2.
# Each iteration of the for loop implicitly calls next(res2), advancing gen_AB to its next yield statement.
# Note how the output of gen_AB is interleaved with the output of the print call in the for loop
for i in res2:
print('-->', i)
'''
start
--> AAA
continue
--> BBB
end.
'''
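# Extra illustration (not part of the book's example): advancing the
# generator manually with next() shows the same lazy behavior:
# res3 = (x*3 for x in gen_AB())
# next(res3)  # prints 'start', returns 'AAA'
# next(res3)  # prints 'continue', returns 'BBB'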
|
[
"js_huang@foxmail.com"
] |
js_huang@foxmail.com
|
48ac43a0fb9fe230d18ef52b9367dafd12e8337d
|
656a7b63fe6dd6fafb619c63a6a0d9e33f308f90
|
/stress/ana/multicore-results
|
010a60faaf191ff04ee34c3b18a0499e067cbb15
|
[] |
no_license
|
JeffersonLab/clara-tests
|
0f6a553b113f4258cc47e65ef2840a886c2270d7
|
dfce424a6443d7622b40b6facc0a60e408090ed2
|
refs/heads/master
| 2021-05-01T05:01:42.533511
| 2021-04-20T18:44:31
| 2021-04-20T18:44:31
| 52,465,301
| 0
| 0
| null | 2021-04-20T18:44:32
| 2016-02-24T18:37:20
|
Python
|
UTF-8
|
Python
| false
| false
| 4,963
|
#!/usr/bin/env python3
import argparse
import re
import os
import yaml
from string import Template
from collections import OrderedDict
import nbformat as nbf
import pandas as pd
LOG_REGEX = re.compile(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}(?:\.\d{3})?:\s+')
START_REGEX = re.compile(
r'Using (?P<cores>\d+) cores on (?P<host>.+)'
' to process (?P<events>\d+) events of (?P<file>.+) \[\d+/\d+\]'
)
BENCHMARK_REGEX = re.compile(
r'(?P<label>\w+)\s+(?P<events>\d+) events\s+'
'total time =\s+(?P<total_time>\d+\.\d+) s\s+'
'average event time =\s+(?P<avg_time>\d+\.\d+) ms'
)
ORCH_AVG_TIME_REGEX = re.compile(
r'Average processing time =\s+(?P<avg_time>\d+\.\d+) ms'
)
meta_data = {}
results_list = []
results_frame = pd.DataFrame()
def nonblank_lines(f):
for line in f.readlines():
line = LOG_REGEX.sub('', line.strip())
if line:
yield line
def parse_global_block(line):
global meta_data
def parse_meta(key, regex, line):
match = re.match(regex, line)
if match:
meta_data[key] = match.group(1)
if 'version' not in meta_data:
parse_meta('version', r'CLARA version += (\S+)', line)
return False
if 'date' not in meta_data:
parse_meta('date', r'Start time += (\d{4}-\d{2}-\d{2})', line)
return False
if 'host' not in meta_data:
parse_meta('host', r'Host += (.+)', line)
return False
if 'input_file' not in meta_data:
parse_meta('input_file', r'Input file += (.+)', line)
return False
if 'output_file' not in meta_data:
parse_meta('output_file', r'Output file += (.+)', line)
return False
return True
def parse_time(log_file):
with open(log_file) as f:
global_block = True
benchmark_trial = False
benchmark_block = False
for line in nonblank_lines(f):
if global_block:
if parse_global_block(line):
global_block = False
continue
if not benchmark_trial:
match = START_REGEX.match(line)
if match:
data = OrderedDict()
data['Cores'] = int(match.group('cores'))
meta_data.setdefault('events', match.group('events'))
benchmark_trial = True
if benchmark_trial and not benchmark_block:
if line.startswith('Benchmark results:'):
benchmark_block = True
continue
if benchmark_block:
match = BENCHMARK_REGEX.match(line)
if match:
label = match.group('label')
data[label] = float(match.group('avg_time'))
continue
match = ORCH_AVG_TIME_REGEX.match(line)
if match:
data['Orchestrator'] = float(match.group('avg_time'))
results_list.append(data)
benchmark_trial = False
benchmark_block = False
global results_frame
results_frame = pd.DataFrame(results_list)
results_frame = results_frame.groupby('Cores').mean().round(decimals=2)
def write_results(print_csv=True, nb_file=None):
csv_data = results_frame.to_csv(float_format='%.2f')
csv_data = csv_data.strip()
if print_csv:
print(csv_data)
def calc_xlim():
return results_frame.reset_index()['Cores'].max() + 2
def calc_time_lim():
t_max = results_frame['TOTAL'].max()
return int((t_max + 70) / 100 + 1) * 100
if nb_file:
nb = nbf.v4.new_notebook()
data = dict(meta_data)
data.update({
'services': results_frame.columns[1:-3].tolist(),
'csv_data': csv_data,
'xlim': calc_xlim(),
'time_lim': calc_time_lim(),
})
new_cell = {
'code': nbf.v4.new_code_cell,
'markdown': nbf.v4.new_markdown_cell,
}
dir_path = os.path.dirname(os.path.realpath(__file__))
template_path = os.path.join(dir_path, 'multicore-nb.yml')
with open(template_path) as f:
template = yaml.safe_load(f)
for cell in template['notebook']['cells']:
content = Template(cell['content']).substitute(data).strip()
nb_cell = new_cell[cell['type']](content)
nb.setdefault('cells', []).append(nb_cell)
with open(nb_file, 'w') as f:
nbf.write(nb, f)
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--nb-file', dest='nb_file', action='store',
help='create a Jupyter notebook')
argparser.add_argument('log_file', help='the multicore-test output')
args = argparser.parse_args()
parse_time(args.log_file)
write_results(print_csv=True, nb_file=args.nb_file)
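# Illustration (added) of the timestamp stripping done by nonblank_lines;
# the input string is made up:
# LOG_REGEX.sub('', '2021-04-20 18:44:31.123: Benchmark results:')
# -> 'Benchmark results:'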
|
[
"smancill@jlab.org"
] |
smancill@jlab.org
|
|
a13e8c056a95d21c6806c33e2d16bddf24c74469
|
5dec1edff745c6f4dab7cede9919490267407537
|
/ScanProjectStructure.py
|
a3e18c3cfa075cf74b9e85c80c2661263ee98409
|
[] |
no_license
|
lian2377/ProjectScanner
|
bbc051459cfb2358454e1464f6d14a18cb9b4b64
|
d56ae56cb6820cd3bc199e1f1da2e6c6217caade
|
refs/heads/master
| 2020-05-20T22:54:40.725380
| 2019-07-19T03:31:39
| 2019-07-19T03:31:39
| 84,539,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,932
|
py
|
import os, json
def scan(projectPath, fileOutput = False, suffixList = None):
rootDir = str(projectPath).replace("\\", os.sep)
rootDir = rootDir.replace("/", os.sep)
projectName = rootDir.rstrip(os.sep).split(os.sep)[-1]
getSuffix = lambda s:s.split('.')[-1]
fileList = []
for (dirpath, dirnames, filenames) in os.walk(rootDir):
for f in filenames:
if suffixList is not None and getSuffix(f) not in suffixList:
continue
relativePath = os.path.relpath(dirpath, rootDir)
relativeFileName = os.path.join(relativePath, f)
if relativeFileName[0] == '.':
relativeFileName = os.sep.join(relativeFileName.split(os.sep)[1:])
fileList.append(relativeFileName)
treeRoot = {"name": projectName, "children": []}
for fileName in fileList:
target = treeRoot
targetPath = []
newNodeList = fileName.split(os.sep)
firstNode = newNodeList[0]
checkList = []
for node in treeRoot["children"]:
checkList.append(node)
findFirstPos = False
while not findFirstPos:
tmpList = []
for node in checkList:
if node["name"] == firstNode:
findFirstPos = True
target = node
targetPath.append(target["name"])
if "children" in node:
lastExistNode = node
for newNodeName in newNodeList[1:]:
findNextPos = False
nextNode = node
for node2 in lastExistNode["children"]:
if node2["name"] == newNodeName:
findNextPos = True
nextNode = node2
break
if findNextPos:
lastExistNode = nextNode
target = lastExistNode
if "children" not in nextNode:
break
targetPath.append(target["name"])
else:
break
break
elif "children" in node:
for node2 in node["children"]:
tmpList.append(node2)
if not findFirstPos:
if len(tmpList) == 0:
target = treeRoot
findFirstPos = True
checkList = tmpList
if target != treeRoot:
for node in targetPath:
newNodeList.pop(0)
i = 0
for newNodeName in newNodeList:
if i == len(newNodeList)-1:
if "children" not in target:
target["children"] = []
target["children"].append({"name": newNodeName})
else:
target["children"].append({"name": newNodeName, "children": []})
target = target["children"][-1]
i += 1
unsortList = []
treeRoot["children"] = sorted(treeRoot["children"], key=lambda x: x["name"])
unsortList.append(treeRoot["children"])
while True:
if len(unsortList) == 0:
break
for item in unsortList[0]:
if "children" in item:
item["children"] = sorted(item["children"], key=lambda x: x["name"])
unsortList.append(item["children"])
unsortList.pop(0)
if fileOutput:
jsnFileName = projectName
if suffixList is None:
jsnFileName += ".all"
jsnFile = open(jsnFileName + ".tree.json", "w", encoding="utf8")
jsnFile.write(json.dumps(treeRoot, indent=4, separators=(',', ': ')))
jsnFile.close()
return treeRoot
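# Minimal usage sketch (the project path is hypothetical):
# tree = scan("/path/to/project", fileOutput=False, suffixList=["py"])
# print(tree["name"], len(tree["children"]))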
|
[
"lian@connectivityAir.local"
] |
lian@connectivityAir.local
|
24fd069808459e33826bb6f7ceb0790ce3642a9f
|
5c36683928e9fda553a5ec350cc44a29784fd805
|
/handlers/userpage.py
|
fc4d59c31755481dc51d710ede1854669602240d
|
[] |
no_license
|
ashishchopra605/MultiUserBlog
|
b97b557e4e35684801c64503abcedd1a9e94ca04
|
0a4bb967af54fd6d093bb11caa5b2a5334dae919
|
refs/heads/master
| 2021-01-20T09:24:20.295093
| 2017-05-04T10:47:32
| 2017-05-04T10:47:32
| 90,248,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
from google.appengine.ext import db
from handlers.bloghandle import BlogHandler
from models.post import Post
from main import *
class UserPage(BlogHandler):
"""Handler for UserPage"""
def get(self):
if user_logged_in(self):
username = self.user.name
posts = Post.all().filter('user_posted =', self.user.key())
self.render('userpage.html', username=username, posts=posts)
else:
self.redirect('/login')
|
[
"ashishchopra605@gmail.com"
] |
ashishchopra605@gmail.com
|
bbc94c9279545956f2054f3a5de28bc4378b39dc
|
a7b66311c2ce113789933ec3162f1128b2862f13
|
/app/waterQual/SeasonLinear/tsScatter.py
|
683ed3fddec8cef8a717c5b39597641c3449338d
|
[
"MIT"
] |
permissive
|
ChanJeunlam/geolearn
|
214b2c42359ea1164b39117fad2d7470adeb6d35
|
791caa54eb70920823ea7d46714dc8a3e7fa7445
|
refs/heads/master
| 2023-07-16T04:13:15.526364
| 2021-08-16T05:24:18
| 2021-08-16T05:24:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,827
|
py
|
import importlib
import numpy as np
import os
import pandas as pd
import json
from hydroDL.master import basins
from hydroDL import kPath, utils
from hydroDL.app import waterQuality
from hydroDL.data import usgs, gageII
import matplotlib.pyplot as plt
from hydroDL.post import axplot, figplot
dirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')
with open(os.path.join(dirSel, 'dictRB_Y30N5.json')) as f:
dictSite = json.load(f)
siteNoLst = dictSite['comb']
codeLst = sorted(usgs.newC)
# load Linear and Seasonal model
dictL = dict()
dirL = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-DL', 'All', 'output')
dictS = dict()
dirS = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-DS', 'All', 'output')
dictQ = dict()
dirQ = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-DQ', 'All', 'output')
dictP = dict()
dirP = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-DQ', 'All', 'output')
for dirTemp, dictTemp in zip([dirL, dirS, dirQ, dirP], [dictL, dictS, dictQ, dictP]):
for k, siteNo in enumerate(siteNoLst):
print('\t WRTDS site {}/{}'.format(k, len(siteNoLst)), end='\r')
saveFile = os.path.join(dirTemp, siteNo)
df = pd.read_csv(saveFile, index_col=None).set_index('date')
dictTemp[siteNo] = df
dictObs = dict()
for k, siteNo in enumerate(siteNoLst):
print('\t USGS site {}/{}'.format(k, len(siteNoLst)), end='\r')
df = waterQuality.readSiteTS(
siteNo, varLst=['00060']+codeLst, freq='D', rmFlag=True)
dictObs[siteNo] = df
# calculate rsq
rMat = np.full([len(siteNoLst), len(codeLst), 2], np.nan)
for ic, code in enumerate(codeLst):
for siteNo in dictSite[code]:
indS = siteNoLst.index(siteNo)
v1 = dictL[siteNo][code].values
v2 = dictS[siteNo][code].values
v0 = dictObs[siteNo][code].values
(vv0, vv1, vv2), indV = utils.rmNan([v0, v1, v2])
rmse1, corr1 = utils.stat.calErr(vv1, vv0)
rmse2, corr2 = utils.stat.calErr(vv2, vv0)
rMat[indS, ic, 0] = corr1**2
rMat[indS, ic, 1] = corr2**2
qMat = np.full([len(siteNoLst)], np.nan)
for siteNo in siteNoLst:
indS = siteNoLst.index(siteNo)
v1 = dictQ[siteNo]['00060'].values
v0 = dictObs[siteNo]['00060'].values
(vv0, vv1), indV = utils.rmNan([v0, v1])
rmse, corr = utils.stat.calErr(vv1, vv0)
qMat[indS] = corr**2
codeLst2 = ['00915', '00925', '00935', '00930', '00940', '00945',
'00955', '70303']
[nfy, nfx] = [3, 3]
# codeLst2 = ['00010', '00300', '00405', '00600', '00605',
# '00618', '00660', '00665', '00681', '00915',
# '00925', '00930', '00935', '00940', '00945',
# '00950', '00955', '70303', '71846', '80154']
# nfy, nfx = [4, 5]
# load dilution factor
bMat = np.full([len(siteNoLst), len(codeLst)], np.nan)
for k, code in enumerate(codeLst):
dirC = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-DL', 'All', 'params')
fileParC = os.path.join(dirC, code)
parC = pd.read_csv(fileParC, dtype={'siteNo': str}).set_index('siteNo')
bMat[:, k] = parC['pQ']
bMat2 = np.full([len(siteNoLst), len(codeLst)], np.nan)
bMat2[bMat > 0] = np.log(bMat[bMat > 0]+1)
bMat2[bMat <= 0] = -np.log(-bMat[bMat <= 0]+1)
importlib.reload(figplot)
indC = [codeLst.index(code) for code in codeLst2]
labelLst = ['{} {}'.format(code, usgs.codePdf.loc[code]['shortName'])
for code in codeLst2]
xMat = rMat[:, indC, 1]
yMat = rMat[:, indC, 0]
cMat = bMat2[:, indC]
nXY = [nfx, nfy]
figM, axM = figplot.scatter121Batch(
xMat, yMat, cMat, labelLst, nXY, optCb=1,
ticks=[0, 0.5, 1], s=20, cmap='jet')
figM.show()
###############
ic1 = codeLst.index('00915')
ic2 = codeLst.index('00955')
fig, axes = plt.subplots(1, 2)
# axplot.plot121(axes[0], rMat[:, ic1, 0], rMat[:, ic2, 0], vR=[0, 1])
# axplot.plot121(axes[1], rMat[:, ic1, 1], rMat[:, ic2, 1], vR=[0, 1])
axplot.scatter121(axes[0], rMat[:, ic1, 0], rMat[:, ic2, 0], qMat, vR=[0, .5])
axplot.scatter121(axes[1], rMat[:, ic1, 1], rMat[:, ic2, 1], qMat, vR=[0, .5])
fig.show()
temp = ['00915', '00955']
ic1 = codeLst.index(temp[0])
ic2 = codeLst.index(temp[1])
nameLst = [usgs.codePdf.loc[code]['shortName'] for code in temp]
fig, axes = plt.subplots(1, 2)
sc1 = axplot.scatter121(axes[0], rMat[:, ic1, 1],
rMat[:, ic1, 0], qMat, vR=[0, 0.5])
axes[0].set_xlabel('Seasonality of {}'.format(nameLst[0]))
axes[0].set_ylabel('Linearity of {}'.format(nameLst[0]))
sc2 = axplot.scatter121(axes[1], rMat[:, ic2, 1],
rMat[:, ic2, 0], qMat, vR=[0, 0.5])
axes[1].set_xlabel('Seasonality of {}'.format(nameLst[1]))
axes[1].set_ylabel('Linearity of {}'.format(nameLst[1]))
fig.show()
temp = ['00915', '00955']
ic1 = codeLst.index(temp[0])
ic2 = codeLst.index(temp[1])
nameLst = [usgs.codePdf.loc[code]['shortName'] for code in temp]
fig, axes = plt.subplots(1, 2)
axplot.scatter121(axes[0], rMat[:, ic1, 0], rMat[:, ic2, 0], qMat, vR=[0, .6])
axes[0].set_xlabel('Linearity of {}'.format(nameLst[0]))
axes[0].set_ylabel('Linearity of {}'.format(nameLst[1]))
axplot.scatter121(axes[1], rMat[:, ic1, 1], rMat[:, ic2, 1], qMat, vR=[0, .6])
axes[1].set_xlabel('Seasonality of {}'.format(nameLst[0]))
axes[1].set_ylabel('Seasonality of {}'.format(nameLst[1]))
fig.show()
temp = ['00915', '00955']
ic1 = codeLst.index(temp[0])
ic2 = codeLst.index(temp[1])
nameLst = [usgs.codePdf.loc[code]['shortName'] for code in temp]
fig, axes = plt.subplots(1, 2)
axplot.scatter121(axes[0], qMat, rMat[:, ic1, 1], rMat[:, ic1, 0], vR=[0, 1])
axes[0].set_xlabel('Seasonality of Q')
axes[0].set_ylabel('Seasonality of {}'.format(nameLst[0]))
axplot.scatter121(axes[1], qMat, rMat[:, ic2, 1], rMat[:, ic2, 0], vR=[0, 1])
axes[1].set_xlabel('Seasonality of Q')
axes[1].set_ylabel('Seasonality of {}'.format(nameLst[1]))
fig.show()
# rock type - Na Cl
fileGlim = os.path.join(kPath.dirData, 'USGS', 'GLiM', 'tab_1KM')
tabGlim = pd.read_csv(fileGlim, dtype={'siteNo': str}).set_index('siteNo')
matV = np.argmax(tabGlim.values, axis=1)
matV = tabGlim.values[:, 4]
for k in range(15):
matV = tabGlim.values[:, k]
temp = ['00930', '00940']
ic1 = codeLst.index(temp[0])
ic2 = codeLst.index(temp[1])
nameLst = [usgs.codePdf.loc[code]['shortName'] for code in temp]
fig, axes = plt.subplots(1, 2)
cb = axplot.scatter121(axes[0], rMat[:, ic1, 0],
rMat[:, ic2, 0], matV, cmap='jet')
axes[0].set_xlabel('Linearity of {}'.format(nameLst[0]))
axes[0].set_ylabel('Linearity of {}'.format(nameLst[1]))
fig.colorbar(cb, ax=axes[0])
cb = axplot.scatter121(axes[1], rMat[:, ic1, 1],
rMat[:, ic2, 1], matV, cmap='jet')
axes[1].set_xlabel('Seasonality of {}'.format(nameLst[0]))
axes[1].set_ylabel('Seasonality of {}'.format(nameLst[1]))
fig.colorbar(cb, ax=axes[1])
fig.show()
|
[
"geofkwai@gmail.com"
] |
geofkwai@gmail.com
|
6a588576b11b5c51eb1bb8da29744a365b2a9826
|
4a2f6044ffaf19db55680db5fbfb2c9539f3d4f2
|
/testcase.py
|
21189fa2b9fbf42c86ebd245977930f8ed4a93ef
|
[] |
no_license
|
ekkazit/mpos-backend
|
1c3571b26a8952ea9df69b9261175f3f36c2a9be
|
3ee5936a893e83d1c4e1a9a61571b250f60160a5
|
refs/heads/master
| 2021-01-21T11:39:58.936682
| 2016-05-21T16:08:23
| 2016-05-21T16:08:23
| 51,684,875
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
import unittest
from app.tests.echo_test import EchoTest
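# Importing EchoTest into this module is what lets unittest.main() discover and run its tests.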
if __name__ == '__main__':
unittest.main()
|
[
"ekkazitstudio@gmail.com"
] |
ekkazitstudio@gmail.com
|
33cdaf6983bad3e7639a4d4a3487dae598804c4a
|
846556273884898ec3ccd2c13b39ddd9a75fef7f
|
/chapter 5/water_viscosity.py
|
f200ca85aebe47af48ec0e8dce3acdc47d1d3d6b
|
[] |
no_license
|
gitviviane/python-scipro-solutions
|
647b2ac1391cc1a97e90ce2fec1e18925308288f
|
7874cd292d6b58315bd170889dc22aeac14396f8
|
refs/heads/master
| 2021-12-11T02:25:49.109640
| 2016-10-18T16:11:13
| 2016-10-18T16:11:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
"""
Exercise 5.28: Plot the viscosity of water
Author: Weiyun Lu
"""
from scitools.std import plot, linspace
A = 2.414e-5
B = 247.8
C = 140.0
mu = lambda T : A * 10**(B / (T-C)) #viscosity as a function of temperature in Kelvin
mu_celcius = lambda T : mu(T+273) #compute mu as a function of temperature in Celsius
x = linspace(0,100,100)
y = mu_celcius(x)
plot(x, y, '-b', xlabel='Temperature (C)', ylabel='Viscosity (Pa s)', \
title='Viscosity of Water')
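# Sanity check (added; not part of the original exercise): at T = 20 C,
# mu_celcius(20) = A * 10**(B / 153) ~= 1.0e-3 Pa*s, which matches the
# accepted viscosity of water at room temperature.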
|
[
"wlu058@uottawa.ca"
] |
wlu058@uottawa.ca
|
75046425c036c9ca0e33b1a02a47d9d0cdfc6e81
|
2b350eaebe8b0951f07266513c1b1ccfbda1f210
|
/manage.py
|
2e0df0b2140aa89f111445c2baf465fce8dfe010
|
[] |
no_license
|
Toribar/99Design-empty-slots
|
55f20184b9a1525e422886669d7cd037ee876852
|
e82ad1e909136bb0481959f21b955ed9b6d76d00
|
refs/heads/master
| 2021-01-10T15:20:55.822845
| 2016-02-21T18:01:44
| 2016-02-21T18:01:44
| 50,187,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "slot_99design.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"bata.henry@gmail.com"
] |
bata.henry@gmail.com
|
25f68bca0b499c7902005badf6565960943f22dd
|
45a1380cbfe5dee5efd014cd2ac756ab92b8fec2
|
/2020/2020-01-11-01/process.py
|
606535ca8e67951280fed2598471bed233ea0644
|
[] |
no_license
|
airball-aero/airball-data
|
86ae1f20aa1c4d1d7212e3e7895e73831e5f78f4
|
506f230c853dc4fac37bc99218de70e3f1da995f
|
refs/heads/master
| 2022-05-05T18:09:30.797720
| 2022-03-18T04:08:31
| 2022-03-18T04:08:31
| 183,973,504
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 847
|
py
|
#!/usr/bin/python
import matplotlib.pyplot as plt
probe_static = [
[ 79, 79, 91, 93, 93, 92, 91, 92, 90, 88, 86, 87, 87, 88, 87, 96, 94, 95, 94, 92, 94, 94, 93, 95, 95, 60, 87, 60, 62, 68, 71, 54 ],
[ 70, 69, 80, 80, 81, 79, 78, 82, 77, 76, 75, 75, 76, 76, 75, 83, 82, 82, 82, 81, 81, 82, 81, 82, 82, 70, 75, 57, 59, 64, 67, 52 ],
]
alternate_static = [
[ 74, 70, 90, 66, 60, 92, 95 ],
[ 68, 66, 80, 62, 57, 80, 83 ],
]
plt.scatter(
alternate_static[0],
alternate_static[1],
label="Alternate static",
color="blue",
marker="o")
plt.scatter(
probe_static[0],
probe_static[1],
label="Probe static",
color="red",
marker="v")
plt.plot(
[50, 100],
[50, 100],
color="black")
plt.xlabel('N291DR ASI reading (kias)')
plt.ylabel('Airball ASI reading (kias)')
plt.legend()
plt.show()
|
[
"ihab.awad@gmail.com"
] |
ihab.awad@gmail.com
|
0720ffb79ae2eb51c690163c33dc21d6bbfc43f3
|
4110496fb8b6cf7fbdabf9682f07255b6dab53f3
|
/Graph/2097. [Like] Valid Arrangement of Pairs.py
|
02c59bc7c88164c40ac98bb703409f518c62d36e
|
[] |
no_license
|
louisfghbvc/Leetcode
|
4cbfd0c8ad5513f60242759d04a067f198351805
|
1772036510638ae85b2d5a1a9e0a8c25725f03bd
|
refs/heads/master
| 2023-08-22T19:20:30.466651
| 2023-08-13T14:34:58
| 2023-08-13T14:34:58
| 227,853,839
| 4
| 0
| null | 2020-11-20T14:02:18
| 2019-12-13T14:09:25
|
C++
|
UTF-8
|
Python
| false
| false
| 959
|
py
|
# Euler path (Hierholzer's algorithm): record edges in postorder, then reverse.
# O(N)
from collections import defaultdict
from typing import List

class Solution:
def validArrangement(self, pairs: List[List[int]]) -> List[List[int]]:
# convert to the graph problem
# euler path
in_degree, out_degree = defaultdict(int), defaultdict(int)
graph = defaultdict(list)
start = 0
for u, v in pairs:
start = u
in_degree[v] += 1
out_degree[u] += 1
graph[u].append(v)
for u, v in pairs:
if in_degree[u]+1 == out_degree[u]:
start = u
break
if in_degree[v]+1 == out_degree[v]:
start = v
break
self.res = []
def dfs(u):
while graph[u]:
v = graph[u].pop()
dfs(v)
self.res += [u, v],
dfs(start)
return self.res[::-1]
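# Illustrative check (input is, to the best of my knowledge, the LeetCode
# sample for problem 2097):
# pairs = [[5,1],[4,5],[11,9],[9,4]]
# Solution().validArrangement(pairs) -> [[11,9],[9,4],[4,5],[5,1]]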
|
[
"noreply@github.com"
] |
louisfghbvc.noreply@github.com
|
b4f74d52a64594575516c6396835481f28c21646
|
052aa4c87718988c2dd0c41163ba936a980a53ea
|
/Python Web Service/NodoSelect.py
|
202dd0c24da4a60423c4ddf035d40a32bb43e4f9
|
[] |
no_license
|
brayanaroche/Proyecto1S22017_201503918
|
614b7e163bedacfaf60a39257cba027b450c89e4
|
7c7c141df0e4b321611718507964df5c1bcff7f5
|
refs/heads/master
| 2021-07-06T16:03:06.513623
| 2017-10-01T18:05:23
| 2017-10-01T18:05:23
| 105,462,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
class NodoSelect(object):
def __init__(self,dato):
self.dato=dato
self.enlace=None
def getDato(self):
return self.dato
def getEnlace(self):
return self.enlace
def setEnlace(self,nodo):
self.enlace=nodo
def setDato(self,objeto):
self.dato=objeto
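# Minimal usage sketch (illustrative only):
# a = NodoSelect("first")
# b = NodoSelect("second")
# a.setEnlace(b)
# assert a.getEnlace().getDato() == "second"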
|
[
"30421259+brayanaroche@users.noreply.github.com"
] |
30421259+brayanaroche@users.noreply.github.com
|
9536416822bc6da015eefdc65b629452f3eeea88
|
8b1e49ce342e2cfe76cd7f3a23e5aeb2796603b1
|
/tagify_project/music/migrations/0004_auto_20210314_1418.py
|
d889e1f2d3f7cc24a312416f7717059b646cec7a
|
[
"MIT"
] |
permissive
|
Krayonn/tagify
|
bd06f50d681a31ce666d90d6f8bf748da0dbd483
|
5ee7efce3cacfa23a344e4c02070a365f50136a8
|
refs/heads/main
| 2023-03-30T05:19:23.350344
| 2021-04-03T17:00:18
| 2021-04-03T17:00:18
| 338,643,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
# Generated by Django 3.1.6 on 2021-03-14 14:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('music', '0003_auto_20210314_1412'),
]
operations = [
migrations.AlterField(
model_name='track',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
[
"kris-beckers@hotmail.com"
] |
kris-beckers@hotmail.com
|
0c84952b18e2e7d938b7e66b0179fd476e36746d
|
f7d6c5413e3330801efae4fe9d505f41d3ed705b
|
/prom_inter.py
|
b9814afe3296bb8687f56df5b9db2d7d72bffd89
|
[] |
no_license
|
gzp9595/jiuge
|
6c2e480eb530491370b4045a6603e2c28df80e19
|
a69bad45adad328348fbf101278d04b0d4e69768
|
refs/heads/master
| 2021-06-26T06:33:16.775337
| 2019-05-15T07:35:40
| 2019-05-15T07:35:40
| 140,416,018
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,461
|
py
|
# coding: utf-8
import time
import redis
import random
from setting import HOST_PROXY, PORT_PROXY
from untils import generate_logger
class RedisClient():
"""
For now, only random IP proxies from the proxy pool are provided.
"""
def __init__(self, DB_NAME_PROXY, host=HOST_PROXY, port=PORT_PROXY, ):
pool = redis.ConnectionPool(host=host, port=port, db=0)
# [Redis connection objects are thread-safe](http://www.cnblogs.com/clover-siyecao/p/5600078.html)
# [Redis is single-threaded](https://stackoverflow.com/questions/17099222/are-redis-operations-on-data-structures-thread-safe)
self._db = redis.Redis(connection_pool=pool)
self.logger = generate_logger("JJ_RedisClient")
self.DB_NAME_PROXY = DB_NAME_PROXY
def get(self):
proms = []
if(self._db.llen(self.DB_NAME_PROXY) == 0):
time.sleep(random.random()*10)
return proms
try:
prom = self._db.lpop(self.DB_NAME_PROXY) # pop from the head of the queue
# print("Using IP proxy:", proxy) # req.text: "119.75.213.61:80"
proms.append(prom)
except ValueError as ve:
self.logger.error("ValueError:queue_len is too short(<1).\n{0}".format(ve))
except Exception as e:
self.logger.error("Unexpected Error.\n{0}".format(e))
finally:
# self.logger.info("Using proxies:{0}".format(proxies))
return proms
def put(self, prom):
"""
append the proxy to the right end of the list
# zset
self._db.zadd("proxy_zset", proxy, self._INITIAL_SCORE)
"""
self._db.rpush(self.DB_NAME_PROXY, prom) # list
@property
def queue_len(self):
"""
get length from queue.
"""
return self._db.llen(self.DB_NAME_PROXY)
def showall(self):
"""
show all elements in the list.
"""
#print(self._db.lrange(DB_NAME, 0, -1))
self.logger.info(repr(self._db.lrange(self.DB_NAME_PROXY, 0, -1)))
def del_all_proxies(self):
"""
delete all the proxies in DB_NAME
"""
self._db.delete(self.DB_NAME_PROXY)
def flush(self):
"""
flush db
"""
# self._db.flushall() # DO NOT DO THIS.
pass
if __name__ == "__main__":
# """
client = RedisClient()
client.del_all_proxies()
client.get_new()
client.get()
|
[
"gzp9595@gmail.com"
] |
gzp9595@gmail.com
|
2e7eaca7efd7f749a67af43742d14ffb7b36e35b
|
e02e993a394b58b011e8bc7b2924461c36aa4424
|
/urls.py
|
cd10561278ec3e91b4efe97a8cf57b70322fd9de
|
[] |
no_license
|
marcboivin/biereapp
|
f1946fd7fdd99b41d3db182604e3df53244604cc
|
6950b5c16012541714cbfef9a21854ca665436c9
|
refs/heads/master
| 2021-01-22T03:39:21.122488
| 2011-04-11T01:42:32
| 2011-04-11T01:42:32
| 1,285,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,163
|
py
|
from django.conf.urls.defaults import patterns, include, url
from django.contrib.auth.views import login, logout
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from biereapp import settings
from biereapp.views import Dashboard, CreerFacture, AddUserTransation, NewProduit, NewClient, FactureDetails, AddPrixProduit, FactureFermer, FactureInFermer, ProduitInventaire, CommandeFournisseur, Commande, AJAX_AddInventaire, AJAX_DeleteTransaction, super_facture
from biereapp.models import Facture, Produit, Client
admin.autodiscover()
#Values for generic views
liste_client = {
'template_name': 'clients/liste.html',
'queryset' : Client.objects.all()
}
single_client = {
'queryset': Client.objects.all(),
'template_name': 'clients/client.html',
'template_object_name': 'client',
}
liste_factures = {
'template_name': 'facture/liste.html',
'queryset' : Facture.objects.order_by('-Date')
}
single_facture = {
'queryset': Facture.objects.all(),
'template_name': 'facture/facture.html',
'template_object_name': 'facture',
}
liste_produit = {
'template_name': 'produits/liste.html',
'queryset' : Produit.objects.order_by('Brasseur', 'Nom')
}
single_produit = {
'queryset': Produit.objects.all(),
'template_name': 'produits/produit.html',
'template_object_name': 'produit',
}
# Url patterns for generic views
urlpatterns = patterns('django.views.generic',
(r'^clients/liste/', 'list_detail.object_list', liste_client),
(r'^clients/(?P<object_id>\d+)/$', 'list_detail.object_detail', single_client),
(r'^factures/liste/', 'list_detail.object_list', liste_factures),
(r'^factures/(?P<object_id>\d+)/$', 'list_detail.object_detail', single_facture),
(r'^produits/liste/', 'list_detail.object_list', liste_produit),
(r'^produits/(?P<object_id>\d+)/$', 'list_detail.object_detail', single_produit),
)
urlpatterns += patterns('',
# Enable the admin:
url(r'^admin/', include(admin.site.urls)),
(r'^accounts/login/$', login),
(r'^accounts/logout/$', logout),
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT }),
(r'^factures/creer/$', CreerFacture),
(r'^factures/produit/ajout/$', AddUserTransation),
(r'^produits/creer/$', NewProduit),
(r'^clients/creer/$', NewClient),
(r'^factures/(?P<facture_id>\d+)/details/$', FactureDetails),
(r'^factures/(?P<facture_id>\d+)/fermer/$', FactureFermer),
(r'^factures/(?P<facture_id>\d+)/ouvrir/$', FactureInFermer),
(r'^factures/super/$', super_facture),
(r'^produits/(?P<object_id>\d+)/add/$', AddPrixProduit),
(r'^produits/inventaire/$', ProduitInventaire),
(r'^commandes/creer/$', CommandeFournisseur),
(r'^commandes/$', Commande),
(r'^ajax/inventaire/ajust/$', AJAX_AddInventaire),
(r'^ajax/transactions/delete/$', AJAX_DeleteTransaction),
(r'^$', Dashboard),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
)
|
[
"boivin@ma14.com"
] |
boivin@ma14.com
|
4052b944a61800bfb2afc1bf57c093dcebfaf293
|
898cd981dbf81fc4b2f98c5508a25672f2bdea88
|
/examples/redis_client2.py
|
6a85602fb107530bc3330828c440165311af03a8
|
[
"BSD-3-Clause"
] |
permissive
|
GottWall/GottWall
|
1bd219d6e010c304c018038f95e9d6108ef26099
|
3b940fa27245a4160d5fe1af3ea15b8c8ac70a4b
|
refs/heads/master
| 2020-04-06T03:42:14.965092
| 2015-10-10T07:27:43
| 2015-10-10T07:27:43
| 7,048,966
| 9
| 3
| null | 2014-02-09T19:41:41
| 2012-12-07T06:46:08
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,901
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import datetime
import redis
from random import randint, choice
from stati_redis import RedisClient, Client
stats_client = RedisClient(private_key="my_private_key",
public_key="my_public_key",
project="SampleProject",
host="10.8.9.8",
db=2)
import time
from contextlib import contextmanager
def pretty_time(td):
"""Convert timedelta to pretty
:param td: timedelta (t2 - t1)
:return: delta string in seconds or minutes
"""
if td < 300:
return "{0} sec".format(td)
else:
return "{0} min".format((td / 60.0))
@contextmanager
def measure_time(title, logger=None, **debug_params):
t1 = time.time()
print('Started "{0}" at {1}'. format(title, time.ctime(t1)))
yield
t2 = time.time()
print('Finished "{0}" at {1} for the time {2}'.\
format(title, time.ctime(t2), pretty_time(t2-t1)))
with measure_time("Test stats"):
for x in xrange(100000):
stats_client.incr(choice([u"APIv1", "APIv2", "APIv3"]),
timestamp=datetime.datetime(choice([2012, 2013]), randint(1, 12), randint(1, 27)) + datetime.timedelta(days=randint(1, 4)),
value=randint(1, 10),
filters={choice(["status"]): choice(["200", "403", "500", "404", "401", "201"]),
"users": choice(["anonymouse", "registered"])}
)
print(x)
with measure_time("Test stats"):
for x in xrange(1000000):
stats_client.incr(u"Actions",
timestamp=datetime.datetime(choice([2012, 2013]), randint(1, 12), randint(1, 27)) + datetime.timedelta(days=randint(1, 4)),
value=randint(1, 5),
filters={"views": choice(["products", "special page"]),
"voting": choice(["up", "down"])}
)
print(x)
with measure_time("Test stats"):
for x in xrange(1000000):
stats_client.incr(choice([u"Reviews", u"Feedbacks", "Registrations"]),
timestamp=datetime.datetime(choice([2012, 2013]), randint(1, 12), randint(1, 27)) + datetime.timedelta(days=randint(1, 4)),
value=randint(1, 5),
filters={})
print(x)
with measure_time("Test stats"):
for x in xrange(1000000):
stats_client.incr(choice([u"Orders"]),
timestamp=datetime.datetime(choice([2012, 2013]), randint(1, 12), randint(1, 27)) + datetime.timedelta(days=randint(1, 4)),
value=randint(1, 5),
filters={"status": choice(["Completed", "New", "Canceled"])})
print(x)
print("finish")
|
[
"Alexandr.S.rus@gmail.com"
] |
Alexandr.S.rus@gmail.com
|
b56ef68c6e84a51c466f5bc99c378a07abe33752
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2388/60833/269257.py
|
47ff280dcb3d860e67df2482635288684a05b087
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
lines=[]
while True:
try:
lines.append(input())
except:
break
n=int(lines.pop(0))
for i in range(0,n):
count=int(lines.pop(0))
list_number = list(lines.pop(0).split(" "))
list_number = list(map(int, list_number))
list_number1 = list(lines.pop(0).split(" "))
list_number1 = list(map(int, list_number1))
list_number.sort()
list_number1.sort()
if(list_number==list_number1):
print(1)
else:
print(0)
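# Worked example (hypothetical stdin):
#   1
#   3
#   1 2 3
#   3 2 1
# both lists sort to [1, 2, 3], so the script prints 1.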
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
49fe93f3ed4e6615f5e012037f5c1604ab8e0e4e
|
7666f107a24c7749afc26e91bd3abb5bff158fc2
|
/upper/env/bin/pip3.6
|
7a906fe9e7cc903e37b55e10a809b480ca110ba0
|
[] |
no_license
|
zigbeemuema/zb.001
|
15cd351a5f1e57e258c0b47b6a07caaed0591c5f
|
a9fa531be4424acd0f951b3d7e6786c755b6c3cd
|
refs/heads/master
| 2020-04-26T08:33:58.715136
| 2019-03-02T06:52:40
| 2019-03-02T08:39:56
| 173,426,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
6
|
#!/home/onesmas/Desktop/exam/upper/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"muemaonesmas@gmail.com"
] |
muemaonesmas@gmail.com
|
d457426f49ba33515642d88d8e0bba702f8f2b64
|
4ca52144cf9b2741c33f9df71514cefcd78ad3c3
|
/surro/world.py
|
088b82503ecfcff0b94c6ed58a18b2646e813e2b
|
[
"MIT"
] |
permissive
|
MatthewScholefield/surro
|
89f8dc8a8a10d80af7bc39483e3eeef1e4fa2de1
|
db666a8c5c2fd456b1816562014572ceaabd2b5f
|
refs/heads/master
| 2021-09-05T01:03:36.250898
| 2018-01-23T03:41:16
| 2018-01-23T07:36:30
| 118,553,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,380
|
py
|
from random import randint
import pygame
from openal.audio import SoundSource
from openal.loaders import load_wav_file
from pygame.font import Font
from pygame.surface import Surface
from surro.enemy import Enemy
from surro.orb import Orb
from surro.player import Player
from surro.util import get_sfx, new_pt, vec_dist, ContinuousSoundSource
class World:
FPS = 60
SCALE = 0.05
SIZE = 150 * SCALE
def __init__(self, sink, songs=None, has_evil=True):
self.sink = sink
self.has_evil = has_evil
self.points = self.points_t = 0
self.game_time = 0.
self.alive = True
self.player = Player(self)
self.enemy = Enemy(self) if has_evil else None
self.orb = Orb(self, songs)
self.sources = [self.player.damage_source, self.orb.source]
if self.enemy:
self.sources.append(self.enemy.source)
self.sink.play(self.sources)
def destroy(self):
for i in self.sources:
if isinstance(i, ContinuousSoundSource):
i.timer.cancel()
self.sink.stop(self.sources)
def play_sfx(self, name, offset=(0., 0., 0.), volume=1.):
source = SoundSource()
source.queue(load_wav_file(get_sfx(name)))
source.gain = volume
source.position = tuple(new_pt(*self.sink.listener.position) + new_pt(*offset))
self.sink.play(source)
def random_point(self, ratio=1.):
width = int(ratio * self.SIZE)
return new_pt(randint(0, 2 * width) - width,
0, randint(0, 2 * width) - width)
def score_points(self, points):
self.play_sfx('point.wav')
self.orb.new_pos()
self.points_t += points
def game_over(self):
self.alive = False
def update(self):
self.player.update()
if self.enemy:
self.enemy.update()
self.orb.update()
self.sink.update()
self.points += 0.1 * (self.points_t - self.points)
self.game_time += 1. / self.FPS
def calculate_bg_color(self):
if not self.alive:
return new_pt(0.7, 0.0, 0.0)
def light_ratio(dist, damper=0.2):
return 1. / max(1., dist / damper)
enemy = light_ratio(vec_dist(self.player.pos, self.enemy.pos)) if self.enemy else 0.
target = light_ratio(vec_dist(self.player.pos, self.orb.pos))
wall = light_ratio(self.SIZE - max(abs(self.player.pos)))
color = new_pt()
for ratio, item_color in [
(target, (1., 1., 1.)),
(enemy, (1., 0., 0.)),
(wall, (.2, .4, .9))
]:
color += (1. - max(color)) * ratio * new_pt(*item_color)
return color
def render(self, font: Font, screen: Surface):
def render_text(text, pos, color=(255, 255, 255)):
surface = font.render(str(text), True, color)
pos = (new_pt(pos[0] * screen.get_width(), pos[1] * screen.get_height()) -
new_pt(*surface.get_size()) / 2)
screen.blit(surface, tuple(pos))
screen.fill(tuple(255. * self.calculate_bg_color()))
render_text(int(self.points + 0.5), (0.5, 0.5))
render_text(round(self.player.health, 1), (0.1, 0.1), (255, 200, 200))
if not self.alive:
render_text('Game Over', (0.5, 0.2))
pygame.display.flip()
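# Standalone sketch of the saturating blend in calculate_bg_color above
# (values are illustrative): each light source only fills whatever brightness
# headroom is left after the sources before it.
#   import numpy as np
#   color = np.zeros(3)
#   for ratio, item_color in [(0.5, (1., 1., 1.)), (1.0, (1., 0., 0.))]:
#       color += (1. - color.max()) * ratio * np.array(item_color)
#   print(color)  # [1.0, 0.5, 0.5]: white adds 0.5, red fills half the rest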
|
[
"matthew3311999@gmail.com"
] |
matthew3311999@gmail.com
|
8960ea2154f0cd4e5df7a80f8c99e7e466124119
|
c903ffcca23646072fca2e459bbe2f30951e2431
|
/genColors.py
|
1606f20c40989f56509398b3eab576bef8f09f3a
|
[] |
no_license
|
mvwicky/2048
|
1c1154b23c94003ba24bf82ef9c45cd6ac536ff1
|
f30e64028da4210a0805bb4d64692e871642207a
|
refs/heads/master
| 2016-09-06T09:40:45.051507
| 2014-05-20T20:27:59
| 2014-05-20T20:27:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
import sfml as sf
import random
import math
def main():
colors=[]
for i in range(16):
colors.append(sf.Color(random.randint(0,255),random.randint(0,255),random.randint(0,255),255))
values=[]
for i in range(1,13):
values.append(2**i)
colorValues=dict(zip(values,colors))
print colorValues
return 0
if __name__=='__main__':
main()
|
[
"mvanwickle@gmail.com"
] |
mvanwickle@gmail.com
|
53f8e686a7d03be0476327f6ce9ddd4c7dd04ae7
|
9c608f2bde82747a1cd4b46275024f69540025fc
|
/app/db/database.py
|
1ad74ae51d946b52f7e492a29179e4020d88a78e
|
[] |
no_license
|
omarcsejust/learn-fastapi
|
7476f770276f81d5eb8fbcd4db47fdb8b8156010
|
ad03b0dc7b188c94efa88d14d0b083c249c52641
|
refs/heads/master
| 2023-06-06T11:25:40.752058
| 2021-06-29T05:37:56
| 2021-06-29T05:37:56
| 309,629,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
SQLALCHEMY_DATABASE_URL = "sqlite:///./sql_app.db"
# SQLALCHEMY_DATABASE_URL = "postgresql://user:password@postgresserver/db"
engine = create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
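# A common companion to this setup (a sketch, not from this repo) is a
# per-request session dependency; `get_db` is a conventional FastAPI name
# assumed here, not defined elsewhere in this file:
def get_db():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()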
|
[
"omarhasan951@gmail.com"
] |
omarhasan951@gmail.com
|
479f9c663e83bb8b0000402893f68df99ef957b4
|
7c99f5334dbaa45e86b2a814a4d640c40b1dc7dd
|
/COW/resources/python/apiCallFactory.py
|
8ce7c0348cfba82f88bf029ca4ac3672e2f2bd83
|
[] |
no_license
|
aurelien-defossez/cod-open-world
|
9e7e88fbc0358ad8bf552de6205f704b3d6745ad
|
0b6ba9d1f7c4c760767d450d1169402189a6e003
|
refs/heads/master
| 2021-01-19T05:26:15.636726
| 2012-05-02T16:55:23
| 2012-05-02T16:55:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,636
|
py
|
from java.lang import Boolean
from java.lang import Integer
from java.lang import Double
from java.lang import String
from jarray import zeros
from jarray import array
from com import Variant
class ApiCallFactory(object):
connector = None
parameters = None
ctParameters = 0
def __init__(self, connector):
self.connector = connector
def initParameters(self, nb):
self.parameters = zeros(nb, Variant)
self.ctParameters = 0
def addBool(self, x):
self.parameters[self.ctParameters] = Variant(Boolean(x))
self.ctParameters += 1
def addInt(self, x):
self.parameters[self.ctParameters] = Variant(Integer(x))
self.ctParameters += 1
def addDouble(self, x):
self.parameters[self.ctParameters] = Variant(Double(x))
self.ctParameters += 1
def addString(self, x):
self.parameters[self.ctParameters] = Variant(String(x))
self.ctParameters += 1
    # Array adders also advance the cursor, so mixed scalar/array calls
    # fill consecutive parameter slots instead of overwriting each other.
    def addBoolArray(self, values):
        self.parameters[self.ctParameters] = Variant(array(values, Boolean))
        self.ctParameters += 1
    def addIntArray(self, values):
        self.parameters[self.ctParameters] = Variant(array(values, Integer))
        self.ctParameters += 1
    def addDoubleArray(self, values):
        self.parameters[self.ctParameters] = Variant(array(values, Double))
        self.ctParameters += 1
    def addStringArray(self, values):
        self.parameters[self.ctParameters] = Variant(array(values, String))
        self.ctParameters += 1
class GameApiCallFactory(ApiCallFactory):
def call(self, function):
return self.connector.callGameFunction(function, self.parameters)
class ViewApiCallFactory(ApiCallFactory):
def call(self, function):
return self.connector.callViewFunction(function, self.parameters)
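# Usage sketch (the `connector` object and the "getScore" function name are
# illustrative assumptions; the connector is supplied by the host engine):
#   factory = GameApiCallFactory(connector)
#   factory.initParameters(2)
#   factory.addInt(42)
#   factory.addString("player1")
#   result = factory.call("getScore")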
|
[
"didjor@gmail.com"
] |
didjor@gmail.com
|
8c428814e9695b5f59575fcbb73aaef938a02ad8
|
23e709e600655e1497da21d5bf2fbe1ec6534798
|
/csequences.py
|
75f7f74ba62e6e6114ddc8bc20e5e6fc66e007bf
|
[] |
no_license
|
SDAMcIntyre/Aurora-sequence-randomiser
|
509e94d9c181eba9a00ab274fd0b15a09b34ecc4
|
f4770f5ea854727d1d30dc36695e032f816e6514
|
refs/heads/master
| 2022-12-27T09:30:43.997123
| 2019-09-20T15:23:23
| 2019-09-20T15:23:23
| 301,985,690
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
import os, shutil
src = './SequenceFiles/'
src_files = os.listdir(src)
textToReplace = '.dpf\tTimed\t5.000\t' #5 seconds
newText = '.dpf\tTimed\t10.000\t' #10 seconds
for file_name in src_files:
full_file_name = os.path.join(src, file_name)
new_file_name = os.path.join(src, 'C_'+file_name)
if os.path.isfile(full_file_name):
shutil.copy(full_file_name, new_file_name)
with open(new_file_name) as new_file:
file_contents = new_file.read()
file_contents = file_contents.replace(textToReplace, newText)
with open(new_file_name, 'w') as new_file:
new_file.write(file_contents)
|
[
"sarah.mcintyre@liu.se"
] |
sarah.mcintyre@liu.se
|
2f0a67035a1c1f075e3f42c7dc5b9a089449fda1
|
ac7c02f29a837fdd67d2bdc77bba182080e98ed8
|
/codekata/countline.py
|
8de1a79a4ee40942ba94f47f40d9047ca87bfd01
|
[] |
no_license
|
YaminiNarayanan-359/guvi
|
7630c309a86365e4367fda1ddab4e966e7d1ac5b
|
a52b6353100b4e9b83a003e6a327fbfb174daac4
|
refs/heads/master
| 2020-06-03T00:08:00.389609
| 2019-07-16T06:59:53
| 2019-07-16T06:59:53
| 191,355,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 70
|
py
|
st=input().split()
c=1
for i in st:
if(i=='.'):
c+=1
print(c+1)
|
[
"noreply@github.com"
] |
YaminiNarayanan-359.noreply@github.com
|
86923e5515d4034637037ce44e2bb1e57b57d428
|
c212e6cb93fb2a97be21602bb55865d8da4c743c
|
/test.py
|
c8ab17d3d5deac0af34a8384cb47d7e290c178bc
|
[
"MIT"
] |
permissive
|
cycoe/PhotoCropper
|
90194c7589c81c642f1e1e81f3830614bc455045
|
803197461890a419655411c7bba68a50c811236a
|
refs/heads/master
| 2020-06-20T13:43:56.410453
| 2019-07-16T07:15:10
| 2019-07-16T07:15:10
| 197,140,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
import sys
from PyQt5.QtWidgets import QMainWindow, QApplication, QLabel
from PyQt5.QtGui import QPainter, QPen, QColor, QPixmap
from PyQt5.QtCore import QRectF
from modules.PhotoCropWidget import PhotoCropWidget
def main():
application = QApplication(sys.argv)
window = QMainWindow()
cropWidget = PhotoCropWidget()
cropWidget.setAutoFillBackground(True)
cropWidget.setScaledContents(True)
cropWidget.setPixmap(QPixmap('openSUSE.jpg'))
window.setCentralWidget(cropWidget)
window.show()
sys.exit(application.exec_())
if __name__ == "__main__":
main()
|
[
"cycoe@cycoe.cc"
] |
cycoe@cycoe.cc
|
a1e0784fc7f9fc956515af6e1e29a56ca0cc9802
|
828010b5d41e11bf973570ba6e70ed5d50ff11d1
|
/ros/src/tl_detector/light_classification/tl_classifier.py
|
4c4696bb586d716340e334d5d1f0825ae879f29a
|
[
"MIT"
] |
permissive
|
cipher982/CarND-Capstone
|
12840bd18da46bf076fa70e051a71c55573a0ee2
|
57c607be6f38377d4e2de280c55b41cc7bceb475
|
refs/heads/master
| 2022-12-04T20:38:13.467072
| 2021-03-29T21:00:37
| 2021-03-29T21:00:37
| 126,865,571
| 0
| 0
|
MIT
| 2022-11-22T07:37:45
| 2018-03-26T17:34:33
|
CMake
|
UTF-8
|
Python
| false
| false
| 1,724
|
py
|
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import os
import numpy as np
import rospy
import time
from functools import partial
import cv2
from random import randint
from keras.models import load_model
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
class TLClassifier(object):
def __init__(self):
# load the graph
self.model = load_model(DIR_PATH + '/sim-classifier-8.h5')
self.model._make_predict_function()
self.graph = tf.get_default_graph()
# current light default is unknown
self.light_state = TrafficLight.UNKNOWN
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# prediction key
classification_tl_key = {0: TrafficLight.RED,
1: TrafficLight.YELLOW,
2: TrafficLight.GREEN,
4: TrafficLight.UNKNOWN}
resized = cv2.resize(image, (80,60))/255.
test_img = np.array([resized])
#rospy.logwarn("resized cam image, saving. . .")
timeNow = time.time()
#cv2.imwrite('home/student/Pictures/IMG_{0}.png'.format(timeNow),resized)
# run the prediction
with self.graph.as_default():
model_predict = self.model.predict(test_img)
if model_predict[0][np.argmax(model_predict[0])] > 0.5:
self.light_state = classification_tl_key[np.argmax(model_predict[0])]
return self.light_state
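# Rough usage sketch outside of ROS (the file name is illustrative; loading
# the .h5 model and TensorFlow graph happens in __init__):
#   classifier = TLClassifier()
#   state = classifier.get_classification(cv2.imread('frame.png'))
#   # state is one of TrafficLight.RED / YELLOW / GREEN / UNKNOWN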
|
[
"David010@gmail.com"
] |
David010@gmail.com
|
417d6ce3366d6e4a391222a6e79120a32879863e
|
99d8895888ab093b06a3ba03594f2a74cb97758f
|
/Scripts/Python_HSE/WEEK5/homework/solution29.py
|
a9baf1df6c718721b2a1a9084fa949635e42fb16
|
[] |
no_license
|
AlekseyAbakumov/PythonLearn
|
4c35efeb0996f4a3221f228b57e30d595bb62637
|
fdd2cc9bdaa0fac3d582ddd5f2fbf9018218bda5
|
refs/heads/master
| 2023-03-23T20:12:42.603817
| 2021-03-16T18:22:48
| 2021-03-16T18:22:48
| 265,309,409
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
inputList = list(map(int, input().split()))
x = int(input())
position = 1
for i in inputList:
if i >= x:
position += 1
print(position)
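# Worked example (hypothetical stdin): for "1 5 7 9" and x = 6, the two
# elements >= 6 bump position from 1 to 3, so the script prints 3.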
|
[
"a-starostin@mail.ru"
] |
a-starostin@mail.ru
|
7856cc265933b86e74d14b023214f67266ca0dd8
|
1539f86f91ce0ee6150fba7363976d32cd37ece2
|
/codes_auto/1515.find-the-minimum-number-of-fibonacci-numbers-whose-sum-is-k.py
|
eb705e5d422dd5c4f6100ee291d790b102a1d947
|
[] |
no_license
|
zhpbo/LeetCode_By_Python
|
fdee0a8b7ea7ed1f61a99f0041e1c748e50f138c
|
0017b9db891d36789116f7299d32510a373e68da
|
refs/heads/master
| 2023-07-09T15:38:45.003002
| 2020-08-18T07:04:51
| 2020-08-18T07:04:51
| 281,598,190
| 0
| 0
| null | 2021-08-18T04:58:39
| 2020-07-22T06:47:05
| null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
#
# @lc app=leetcode.cn id=1515 lang=python3
#
# [1515] find-the-minimum-number-of-fibonacci-numbers-whose-sum-is-k
#
class Solution:
def findMinFibonacciNumbers(self, k: int) -> int:
fib = [1, 1]
while fib[-1] <= k:
fib.append(fib[-1] + fib[-2])
ans = 0
while k > 0 and fib[-1] > 1:
if k >= fib[-1]:
ans += 1
k -= fib[-1]
else:
fib.pop()
return ans + k
# @lc code=end
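# Sanity check (a sketch, not part of the submission); the greedy choice is
# optimal by Zeckendorf's theorem:
if __name__ == '__main__':
    assert Solution().findMinFibonacciNumbers(19) == 3  # 19 = 13 + 5 + 1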
|
[
"liuyang0001@outlook.com"
] |
liuyang0001@outlook.com
|
85312b3bf85afc36470393b4f7cb7f0d7506f189
|
e29faa10be00e8c839bf909922ace176b05826bb
|
/misc/05_implementing_different_layers.py
|
0307366f059cefe23d3e1f8960ef4b0fd039b903
|
[] |
no_license
|
weiyinfu/learnTensorflow
|
666e46259da435c19c06f3abbaa8a00ae37431ce
|
98cb7a978dd682ec8f651f9da57e2f23d47c21a4
|
refs/heads/master
| 2022-01-01T08:10:40.215554
| 2021-12-26T06:24:40
| 2021-12-26T06:24:40
| 145,659,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,821
|
py
|
# Implementing Different Layers
#---------------------------------------
#
# We will illustrate how to use different types
# of layers in TensorFlow
#
# The layers of interest are:
# (1) Convolutional Layer
# (2) Activation Layer
# (3) Max-Pool Layer
# (4) Fully Connected Layer
#
# We will generate two different data sets for this
# script, a 1-D data set (row of data) and
# a 2-D data set (similar to picture)
import tensorflow as tf
import matplotlib.pyplot as plt
import csv
import os
import random
import numpy as np
from tensorflow.python.framework import ops
ops.reset_default_graph()
#---------------------------------------------------|
#-------------------1D-data-------------------------|
#---------------------------------------------------|
# Create graph session
ops.reset_default_graph()
sess = tf.Session()
# parameters for the run
data_size = 25
conv_size = 5
maxpool_size = 5
stride_size = 1
# ensure reproducibility
seed=13
np.random.seed(seed)
tf.set_random_seed(seed)
# Generate 1D data
data_1d = np.random.normal(size=data_size)
# Placeholder
x_input_1d = tf.placeholder(dtype=tf.float32, shape=[data_size])
#--------Convolution--------
def conv_layer_1d(input_1d, my_filter,stride):
# TensorFlow's 'conv2d()' function only works with 4D arrays:
# [batch#, width, height, channels], we have 1 batch, and
# width = 1, but height = the length of the input, and 1 channel.
# So next we create the 4D array by inserting dimension 1's.
input_2d = tf.expand_dims(input_1d, 0)
input_3d = tf.expand_dims(input_2d, 0)
input_4d = tf.expand_dims(input_3d, 3)
# Perform convolution with stride = 1, if we wanted to increase the stride,
# to say '2', then strides=[1,1,2,1]
convolution_output = tf.nn.conv2d(input_4d, filter=my_filter, strides=[1,1,stride,1], padding="VALID")
# Get rid of extra dimensions
conv_output_1d = tf.squeeze(convolution_output)
return(conv_output_1d)
# Create filter for convolution.
my_filter = tf.Variable(tf.random_normal(shape=[1,conv_size,1,1]))
# Create convolution layer
my_convolution_output = conv_layer_1d(x_input_1d, my_filter,stride=stride_size)
#--------Activation--------
def activation(input_1d):
return(tf.nn.relu(input_1d))
# Create activation layer
my_activation_output = activation(my_convolution_output)
#--------Max Pool--------
def max_pool(input_1d, width,stride):
# Just like 'conv2d()' above, max_pool() works with 4D arrays.
# [batch_size=1, width=1, height=num_input, channels=1]
input_2d = tf.expand_dims(input_1d, 0)
input_3d = tf.expand_dims(input_2d, 0)
input_4d = tf.expand_dims(input_3d, 3)
# Perform the max pooling with strides = [1,1,1,1]
# If we wanted to increase the stride on our data dimension, say by
# a factor of '2', we put strides = [1, 1, 2, 1]
# We will also need to specify the width of the max-window ('width')
pool_output = tf.nn.max_pool(input_4d, ksize=[1, 1, width, 1],
strides=[1, 1, stride, 1],
padding='VALID')
# Get rid of extra dimensions
pool_output_1d = tf.squeeze(pool_output)
return(pool_output_1d)
my_maxpool_output = max_pool(my_activation_output, width=maxpool_size,stride=stride_size)
#--------Fully Connected--------
def fully_connected(input_layer, num_outputs):
# First we find the needed shape of the multiplication weight matrix:
# The dimension will be (length of input) by (num_outputs)
weight_shape = tf.squeeze(tf.stack([tf.shape(input_layer),[num_outputs]]))
# Initialize such weight
weight = tf.random_normal(weight_shape, stddev=0.1)
# Initialize the bias
bias = tf.random_normal(shape=[num_outputs])
# Make the 1D input array into a 2D array for matrix multiplication
input_layer_2d = tf.expand_dims(input_layer, 0)
# Perform the matrix multiplication and add the bias
full_output = tf.add(tf.matmul(input_layer_2d, weight), bias)
# Get rid of extra dimensions
full_output_1d = tf.squeeze(full_output)
return(full_output_1d)
my_full_output = fully_connected(my_maxpool_output, 5)
# Run graph
# Initialize Variables
init = tf.global_variables_initializer()
sess.run(init)
feed_dict = {x_input_1d: data_1d}
print('>>>> 1D Data <<<<')
# Convolution Output
print('Input = array of length %d' % (x_input_1d.shape.as_list()[0]))
print('Convolution w/ filter, length = %d, stride size = %d, results in an array of length %d:' %
(conv_size,stride_size,my_convolution_output.shape.as_list()[0]))
print(sess.run(my_convolution_output, feed_dict=feed_dict))
# Activation Output
print('\nInput = above array of length %d' % (my_convolution_output.shape.as_list()[0]))
print('ReLU element wise returns an array of length %d:' % (my_activation_output.shape.as_list()[0]))
print(sess.run(my_activation_output, feed_dict=feed_dict))
# Max Pool Output
print('\nInput = above array of length %d' % (my_activation_output.shape.as_list()[0]))
print('MaxPool, window length = %d, stride size = %d, results in the array of length %d' %
(maxpool_size,stride_size,my_maxpool_output.shape.as_list()[0]))
print(sess.run(my_maxpool_output, feed_dict=feed_dict))
# Fully Connected Output
print('\nInput = above array of length %d' % (my_maxpool_output.shape.as_list()[0]))
print('Fully connected layer on the above array with %d outputs:' %
      (my_full_output.shape.as_list()[0]))
print(sess.run(my_full_output, feed_dict=feed_dict))
#---------------------------------------------------|
#-------------------2D-data-------------------------|
#---------------------------------------------------|
# Reset Graph
ops.reset_default_graph()
sess = tf.Session()
# parameters for the run
row_size = 10
col_size = 10
conv_size = 2
conv_stride_size = 2
maxpool_size = 2
maxpool_stride_size = 1
# ensure reproducibility
seed=13
np.random.seed(seed)
tf.set_random_seed(seed)
#Generate 2D data
data_size = [row_size,col_size]
data_2d = np.random.normal(size=data_size)
#--------Placeholder--------
x_input_2d = tf.placeholder(dtype=tf.float32, shape=data_size)
# Convolution
def conv_layer_2d(input_2d, my_filter,stride_size):
# TensorFlow's 'conv2d()' function only works with 4D arrays:
# [batch#, width, height, channels], we have 1 batch, and
# 1 channel, but we do have width AND height this time.
# So next we create the 4D array by inserting dimension 1's.
input_3d = tf.expand_dims(input_2d, 0)
input_4d = tf.expand_dims(input_3d, 3)
# Note the stride difference below!
convolution_output = tf.nn.conv2d(input_4d, filter=my_filter,
strides=[1,stride_size,stride_size,1], padding="VALID")
# Get rid of unnecessary dimensions
conv_output_2d = tf.squeeze(convolution_output)
return(conv_output_2d)
# Create Convolutional Filter
my_filter = tf.Variable(tf.random_normal(shape=[conv_size,conv_size,1,1]))
# Create Convolutional Layer
my_convolution_output = conv_layer_2d(x_input_2d, my_filter,stride_size=conv_stride_size)
#--------Activation--------
def activation(input_1d):
return(tf.nn.relu(input_1d))
# Create Activation Layer
my_activation_output = activation(my_convolution_output)
#--------Max Pool--------
def max_pool(input_2d, width, height,stride):
# Just like 'conv2d()' above, max_pool() works with 4D arrays.
# [batch_size=1, width=given, height=given, channels=1]
input_3d = tf.expand_dims(input_2d, 0)
input_4d = tf.expand_dims(input_3d, 3)
# Perform the max pooling with strides = [1,1,1,1]
# If we wanted to increase the stride on our data dimension, say by
# a factor of '2', we put strides = [1, 2, 2, 1]
pool_output = tf.nn.max_pool(input_4d, ksize=[1, height, width, 1],
strides=[1, stride, stride, 1],
padding='VALID')
# Get rid of unnecessary dimensions
pool_output_2d = tf.squeeze(pool_output)
return(pool_output_2d)
# Create Max-Pool Layer
my_maxpool_output = max_pool(my_activation_output,
width=maxpool_size, height=maxpool_size,stride=maxpool_stride_size)
#--------Fully Connected--------
def fully_connected(input_layer, num_outputs):
    # In order to connect our whole W by H 2D array, we first flatten it out to
    # a W*H 1D array.
flat_input = tf.reshape(input_layer, [-1])
# We then find out how long it is, and create an array for the shape of
# the multiplication weight = (WxH) by (num_outputs)
weight_shape = tf.squeeze(tf.stack([tf.shape(flat_input),[num_outputs]]))
# Initialize the weight
weight = tf.random_normal(weight_shape, stddev=0.1)
# Initialize the bias
bias = tf.random_normal(shape=[num_outputs])
# Now make the flat 1D array into a 2D array for multiplication
input_2d = tf.expand_dims(flat_input, 0)
# Multiply and add the bias
full_output = tf.add(tf.matmul(input_2d, weight), bias)
# Get rid of extra dimension
full_output_2d = tf.squeeze(full_output)
return(full_output_2d)
# Create Fully Connected Layer
my_full_output = fully_connected(my_maxpool_output, 5)
# Run graph
# Initialize Variables
init = tf.global_variables_initializer()
sess.run(init)
feed_dict = {x_input_2d: data_2d}
print('\n>>>> 2D Data <<<<')
# Convolution Output
print('Input = %s array' % (x_input_2d.shape.as_list()))
print('%s Convolution, stride size = [%d, %d] , results in the %s array' %
(my_filter.get_shape().as_list()[:2],conv_stride_size,conv_stride_size,my_convolution_output.shape.as_list()))
print(sess.run(my_convolution_output, feed_dict=feed_dict))
# Activation Output
print('\nInput = the above %s array' % (my_convolution_output.shape.as_list()))
print('ReLU element wise returns the %s array' % (my_activation_output.shape.as_list()))
print(sess.run(my_activation_output, feed_dict=feed_dict))
# Max Pool Output
print('\nInput = the above %s array' % (my_activation_output.shape.as_list()))
print('MaxPool, stride size = [%d, %d], results in %s array' %
(maxpool_stride_size,maxpool_stride_size,my_maxpool_output.shape.as_list()))
print(sess.run(my_maxpool_output, feed_dict=feed_dict))
# Fully Connected Output
print('\nInput = the above %s array' % (my_maxpool_output.shape.as_list()))
print('Fully connected layer on all %d rows results in %s outputs:' %
(my_maxpool_output.shape.as_list()[0],my_full_output.shape.as_list()[0]))
print(sess.run(my_full_output, feed_dict=feed_dict))
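# Quick shape check (a small sketch added for clarity): with VALID padding,
# output length is (input_len - window) // stride + 1, matching the 1D run above.
def valid_out_len(input_len, window, stride):
    return (input_len - window) // stride + 1
print(valid_out_len(25, 5, 1))   # 21: the 1D convolution output length
print(valid_out_len(21, 5, 1))   # 17: the 1D max-pool output length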
|
[
"weiyinfu.weiyinfu@bytedance.com"
] |
weiyinfu.weiyinfu@bytedance.com
|
3ff8152a850d6ffbc613fa30ea6cf48730ec12db
|
cc528121b8e56e6cac317f6b918ec52691f03d67
|
/app.py
|
f8761da1a61678f27b3cc47001ce33ccdde05c53
|
[] |
no_license
|
AhmosGUC/fyyur
|
313b3533964efa2efcb3d12ed6f2a516add3348a
|
d33ecf47cd020e9d0c4e2edb139b3d5795fda465
|
refs/heads/master
| 2023-02-15T22:40:35.700046
| 2021-01-14T12:24:11
| 2021-01-14T12:24:11
| 326,627,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,646
|
py
|
#----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
import sys
import json
import dateutil.parser
import babel
from flask import Flask, render_template, request, Response, flash, redirect, url_for
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
from flask_wtf import Form
from forms import *
from flask_migrate import Migrate
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
app = Flask(__name__)
moment = Moment(app)
app.config.from_object('config')
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# TODODONE: connect to a local postgresql database
#----------------------------------------------------------------------------#
# Models.
#----------------------------------------------------------------------------#
# shows_table = db.Table('shows',
# db.Column('venue_id', db.Integer, db.ForeignKey(
# 'venues.id'), primary_key=True),
# db.Column('artist_id', db.Integer, db.ForeignKey(
# 'artists.id'), primary_key=True),
# db.Column('start_time', db.DateTime)
# )
class Show(db.Model):
__tablename__ = 'shows'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
venue_id = db.Column(db.Integer, db.ForeignKey(
'venues.id', ondelete="CASCADE"), nullable=False)
artist_id = db.Column(db.Integer, db.ForeignKey(
'artists.id', ondelete="CASCADE"), nullable=False)
start_time = db.Column(db.DateTime, nullable=False)
artist = db.relationship("Artist", back_populates="venues")
venue = db.relationship("Venue", back_populates="artists")
def __repr__(self):
return f'<Show {self.id},{self.venue_id}, {self.artist_id},{self.start_time}>'
class Venue(db.Model):
__tablename__ = 'venues'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
city = db.Column(db.String(120), nullable=False)
state = db.Column(db.String(120), nullable=False)
address = db.Column(db.String(120), nullable=False)
phone = db.Column(db.String(120), nullable=False)
genres = db.Column(db.String(120))
image_link = db.Column(db.String(500))
facebook_link = db.Column(db.String(120))
website_link = db.Column(db.String(120))
seeking_talent = db.Column(db.Boolean, nullable=False)
seeking_description = db.Column(db.String(120))
artists = db.relationship(
"Show", back_populates="venue", cascade="all, delete")
def __repr__(self):
return f'<Venue {self.id}, {self.name},{self.state}>'
# TODODONE: implement any missing fields, as a database migration using Flask-Migrate
class Artist(db.Model):
__tablename__ = 'artists'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
city = db.Column(db.String(120), nullable=False)
state = db.Column(db.String(120), nullable=False)
phone = db.Column(db.String(120), nullable=False)
genres = db.Column(db.String(120))
image_link = db.Column(db.String(500))
facebook_link = db.Column(db.String(120))
website_link = db.Column(db.String(120))
seeking_venue = db.Column(db.Boolean, nullable=False)
seeking_description = db.Column(db.String(120))
available_days = db.Column(db.Integer, nullable=False)
venues = db.relationship(
"Show", back_populates="artist", cascade="all, delete")
# TODODONE: implement any missing fields, as a database migration using Flask-Migrate
def __repr__(self):
return f'<Artist {self.id}, {self.name},{self.state}>'
# TODODONE Implement Show and Artist models, and complete all model relationships and properties, as a database migration.
#----------------------------------------------------------------------------#
# Filters.
#----------------------------------------------------------------------------#
def format_datetime(value, format='medium'):
value = str(value)
date = dateutil.parser.parse(value)
if format == 'full':
format = "EEEE MMMM, d, y 'at' h:mma"
elif format == 'medium':
format = "EE MM, dd, y h:mma"
return babel.dates.format_datetime(date, format)
app.jinja_env.filters['datetime'] = format_datetime
#----------------------------------------------------------------------------#
# misc func.
#----------------------------------------------------------------------------#
def days_to_num(lst):
days = 0
S = 1 << 6
Su = 1 << 5
M = 1 << 4
T = 1 << 3
W = 1 << 2
Th = 1 << 1
F = 1
for d in lst:
if d == "Saturday":
days = days | S
elif d == "Sunday":
days = days | Su
elif d == "Monday":
days = days | M
elif d == "Tuesday":
days = days | T
elif d == "Wednesday":
days = days | W
elif d == "Thursday":
days = days | Th
elif d == "Friday":
days = days | F
return days
def num_to_days(x):
    # bit layout (MSB to LSB): S Su M T W Th F
b = x
S = 1 << 6
Su = 1 << 5
M = 1 << 4
T = 1 << 3
W = 1 << 2
Th = 1 << 1
F = 1
days = []
if b & S > 0:
days.append("Saturday")
if b & Su > 0:
days.append("Sunday")
if b & M > 0:
days.append("Monday")
if b & T > 0:
days.append("Tuesday")
if b & W > 0:
days.append("Wednesday")
if b & Th > 0:
days.append("Thursday")
if b & F > 0:
days.append("Friday")
return days
# now = datetime.datetime.now()
# print(now.strftime("%A"))
# print(num_to_days(int('001100100', 2)))
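# Round-trip sketch of the helpers above:
# days_to_num(["Monday", "Friday"]) == 0b0010001 == 17
# num_to_days(17) == ["Monday", "Friday"]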
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route('/')
def index():
topNewVns = db.session.query(Venue.id, Venue.name).order_by(
Venue.id.desc()).limit(10).all()
topNewArts = db.session.query(Artist.id, Artist.name).order_by(
Artist.id.desc()).limit(10).all()
artists = []
venues = []
for v in topNewVns:
venues.append({"id": v.id, "name": v.name})
for a in topNewArts:
artists.append({"id": a.id, "name": a.name})
return render_template('pages/home.html', artists=artists, venues=venues)
# Venues
# ----------------------------------------------------------------
@app.route('/venues')
def venues():
# TODODONE: replace with real venues data.
# num_shows should be aggregated based on number of upcoming shows per venue.
# data = [{
# "city": "San Francisco",
# "state": "CA",
# "venues": [{
# "id": 1,
# "name": "The Musical Hop",
# "num_upcoming_shows": 0,
# }, {
# "id": 3,
# "name": "Park Square Live Music & Coffee",
# "num_upcoming_shows": 1,
# }]
# }, {
# "city": "New York",
# "state": "NY",
# "venues": [{
# "id": 2,
# "name": "The Dueling Pianos Bar",
# "num_upcoming_shows": 0,
# }]
# }]
stateNcity = db.session.query(Venue.state, Venue.city).distinct().all()
data = []
for pair in stateNcity:
elm = {"state": pair[0], "city": pair[1], "venues": []}
data.append(elm)
allVenues = Venue.query.all()
for elm in data:
for v in allVenues:
if elm["state"] == v.state and elm["city"] == v.city:
num_show = Show.query.filter(Show.venue_id == v.id).filter(
Show.start_time > datetime.now()).count()
print(num_show)
details = {"id": v.id, "name": v.name,
"num_upcoming_shows": num_show}
elm["venues"].append(details)
return render_template('pages/venues.html', areas=data)
@app.route('/venues/search', methods=['POST'])
def search_venues():
# TODODONE: implement search on artists with partial string search. Ensure it is case-insensitive.
# seach for Hop should return "The Musical Hop".
# search for "Music" should return "The Musical Hop" and "Park Square Live Music & Coffee"
search_term = request.form.get('search_term', '')
search_term = '%'+search_term+'%'
res = Venue.query.filter(Venue.name.ilike(search_term)).all()
data = []
for r in res:
tmp = {"id": r.id, "name": r.name}
data.append(tmp)
response = {
"count": len(res),
"data": data
}
return render_template('pages/search_venues.html', results=response, search_term=request.form.get('search_term', ''))
@app.route('/venues/<int:venue_id>')
def show_venue(venue_id):
# shows the venue page with the given venue_id
# TODODONE: replace with real venue data from the venues table, using venue_id
v = Venue.query.get(venue_id)
joinQry = db.session.query(Show, Artist).filter(Show.venue_id == v.id).join(
Artist, Show.artist_id == Artist.id).all()
past_shows = []
upcoming_shows = []
for a in joinQry:
tmpArtist = {
"artist_id": a[1].id,
"artist_name": a[1].name,
"artist_image_link": a[1].image_link,
"start_time": a[0].start_time
}
if tmpArtist["start_time"] < datetime.now():
past_shows.append(tmpArtist)
else:
upcoming_shows.append(tmpArtist)
data = {
"id": v.id,
"name": v.name,
"genres": v.genres.split(','),
"address": v.address,
"city": v.city,
"state": v.state,
"phone": v.phone,
"website": v.website_link,
"facebook_link": v.facebook_link,
"seeking_talent": v.seeking_talent,
"seeking_description": v.seeking_description,
"image_link": v.image_link,
"past_shows": past_shows,
"upcoming_shows": upcoming_shows,
"past_shows_count": len(past_shows),
"upcoming_shows_count": len(upcoming_shows),
}
return render_template('pages/show_venue.html', venue=data)
# Create Venue
# ----------------------------------------------------------------
@app.route('/venues/create', methods=['GET'])
def create_venue_form():
form = VenueForm()
return render_template('forms/new_venue.html', form=form)
@app.route('/venues/create', methods=['POST'])
def create_venue_submission():
# TODODONE: insert form data as a new Venue record in the db, instead
# TODODONE: modify data to be the data object returned from db insertion
error = False
try:
f = request.form
vf = VenueForm(f)
if vf.validate_on_submit():
newVenue = Venue()
newVenue.name = f.get('name')
newVenue.state = f.get('state')
newVenue.city = f.get('city')
newVenue.address = f.get('address')
newVenue.phone = f.get('phone')
newVenue.genres = ','.join(request.form.getlist("genres"))
            newVenue.seeking_talent = True if f.get(
                'seeking_talent', 'n') == 'y' else False
newVenue.seeking_description = f.get('seeking_description', '')
newVenue.facebook_link = f.get('facebook_link', None)
newVenue.website_link = f.get('website_link', None)
newVenue.image_link = f.get('image_link', None)
db.session.add(newVenue)
db.session.commit()
else:
for e in vf:
print(e.errors)
return render_template('forms/new_venue.html', form=vf)
    except:
        error = True
        db.session.rollback()
finally:
db.session.close()
if error:
# TODODONE: on unsuccessful db insert, flash an error instead.
flash('An error occurred. Venue ' +
request.form['name'] + ' could not be listed.')
else:
# on successful db insert, flash success
flash('Venue ' + request.form['name'] + ' was successfully listed!')
# e.g., flash('An error occurred. Venue ' + data.name + ' could not be listed.')
# see: http://flask.pocoo.org/docs/1.0/patterns/flashing/
return render_template('pages/home.html')
@app.route('/venues/<venue_id>', methods=['DELETE'])
def delete_venue(venue_id):
# TODODONE: Complete this endpoint for taking a venue_id, and using
# SQLAlchemy ORM to delete a record. Handle cases where the session commit could fail.
try:
Venue.query.filter_by(id=venue_id).delete()
db.session.commit()
except:
db.session.rollback()
print(sys.exc_info())
finally:
db.session.close()
# BONUS CHALLENGE: Implement a button to delete a Venue on a Venue Page, have it so that
# clicking that button delete it from the db then redirect the user to the homepage
return redirect(url_for('index'))
# Artists
# ----------------------------------------------------------------
@app.route('/artists')
def artists():
# TODODONE: replace with real data returned from querying the database
allArtist = db.session.query(Artist.id, Artist.name).all()
data = allArtist
# data = [{
# "id": 4,
# "name": "Guns N Petals",
# }, {
# "id": 5,
# "name": "Matt Quevedo",
# }, {
# "id": 6,
# "name": "The Wild Sax Band",
# }]
return render_template('pages/artists.html', artists=data)
@app.route('/artists/search', methods=['POST'])
def search_artists():
# TODODONE: implement search on artists with partial string search. Ensure it is case-insensitive.
# seach for "A" should return "Guns N Petals", "Matt Quevado", and "The Wild Sax Band".
# search for "band" should return "The Wild Sax Band".
search_term = request.form.get('search_term', '')
search_term = '%'+search_term+'%'
res = Artist.query.filter(Artist.name.ilike(search_term)).all()
data = []
for r in res:
tmp = {"id": r.id, "name": r.name}
data.append(tmp)
response = {
"count": len(res),
"data": data
}
return render_template('pages/search_artists.html', results=response, search_term=request.form.get('search_term', ''))
@app.route('/artists/<int:artist_id>')
def show_artist(artist_id):
# shows the venue page with the given venue_id
# TODODONE: replace with real venue data from the venues table, using venue_id
a = Artist.query.get(artist_id)
joinQry = db.session.query(Show, Venue).filter(Show.artist_id == a.id).join(
Venue, Show.venue_id == Venue.id).all()
past_shows = []
upcoming_shows = []
for v in joinQry:
tmpVenue = {
"venue_id": v[1].id,
"venue_name": v[1].name,
"venue_image_link": v[1].image_link,
"start_time": v[0].start_time
}
if tmpVenue["start_time"] < datetime.now():
past_shows.append(tmpVenue)
else:
upcoming_shows.append(tmpVenue)
data = {
"id": a.id,
"name": a.name,
"genres": a.genres.split(','),
"available_days": num_to_days(a.available_days),
"city": a.city,
"state": a.state,
"phone": a.phone,
"website": a.website_link,
"facebook_link": a.facebook_link,
"seeking_venue": a.seeking_venue,
"seeking_description": a.seeking_description,
"image_link": a.image_link,
"past_shows": past_shows,
"upcoming_shows": upcoming_shows,
"past_shows_count": len(past_shows),
"upcoming_shows_count": len(upcoming_shows),
}
return render_template('pages/show_artist.html', artist=data)
# Update
# ----------------------------------------------------------------
@app.route('/artists/<int:artist_id>/edit', methods=['GET'])
def edit_artist(artist_id):
a = Artist.query.get(artist_id)
artist = {
"id": a.id,
"name": a.name,
"genres": a.genres.split(','),
"available_days": num_to_days(a.available_days),
"city": a.city,
"state": a.state,
"phone": a.phone,
"website_link": a.website_link,
"facebook_link": a.facebook_link,
"seeking_venue": a.seeking_venue,
"seeking_description": a.seeking_description,
"image_link": a.image_link
}
form = ArtistForm(data=artist)
# TODODONE: populate form with fields from artist with ID <artist_id>
return render_template('forms/edit_artist.html', form=form, artist=artist)
@app.route('/artists/<int:artist_id>/edit', methods=['POST'])
def edit_artist_submission(artist_id):
# TODODONE: take values from the form submitted, and update existing
# artist record with ID <artist_id> using the new attributes
try:
f = request.form
a = Artist.query.get(artist_id)
af = ArtistForm(f)
if af.validate_on_submit():
a.name = f.get('name')
a.city = f.get('city')
a.state = f.get('state')
a.phone = f.get('phone')
a.genres = ','.join(request.form.getlist("genres"))
a.available_days = days_to_num(
request.form.getlist("available_days"))
a.seeking_venue = True if f.get(
'seeking_venue', 'n') == 'y' else False
a.seeking_description = f.get('seeking_description', "")
a.facebook_link = f.get('facebook_link', None)
a.website_link = f.get('website_link', None)
a.image_link = f.get('image_link', None)
db.session.commit()
else:
return render_template('forms/edit_artist.html', form=af, artist=a)
except:
db.session.rollback()
finally:
db.session.close()
return redirect(url_for('show_artist', artist_id=artist_id))
@app.route('/venues/<int:venue_id>/edit', methods=['GET'])
def edit_venue(venue_id):
v = Venue.query.get(venue_id)
venue = {
"id": v.id,
"name": v.name,
"genres": v.genres.split(','),
"city": v.city,
"state": v.state,
"address": v.address,
"phone": v.phone,
"website_link": v.website_link,
"facebook_link": v.facebook_link,
"seeking_talent": v.seeking_talent,
"seeking_description": v.seeking_description,
"image_link": v.image_link
}
form = VenueForm(data=venue)
# TODODONE: populate form with values from venue with ID <venue_id>
return render_template('forms/edit_venue.html', form=form, venue=venue)
@app.route('/venues/<int:venue_id>/edit', methods=['POST'])
def edit_venue_submission(venue_id):
# TODODONE: take values from the form submitted, and update existing
# venue record with ID <venue_id> using the new attributes
try:
f = request.form
vf = VenueForm(f)
v = Venue.query.get(venue_id)
if vf.validate_on_submit():
v.name = f.get('name')
v.city = f.get('city')
v.state = f.get('state')
v.phone = f.get('phone')
v.address = f.get('address')
v.genres = ','.join(request.form.getlist("genres"))
v.seeking_talent = True if f.get(
'seeking_talent', 'n') == 'y' else False
v.seeking_description = f.get('seeking_description', "")
v.facebook_link = f.get('facebook_link', None)
v.website_link = f.get('website_link', None)
v.image_link = f.get('image_link', None)
db.session.commit()
else:
return render_template('forms/edit_venue.html', form=vf, venue=v)
except:
print(sys.exc_info())
db.session.rollback()
finally:
db.session.close()
return redirect(url_for('show_venue', venue_id=venue_id))
# Create Artist
# ----------------------------------------------------------------
@app.route('/artists/create', methods=['GET'])
def create_artist_form():
form = ArtistForm()
return render_template('forms/new_artist.html', form=form)
@app.route('/artists/create', methods=['POST'])
def create_artist_submission():
# called upon submitting the new artist listing form
# TODODONE: insert form data as a new Venue record in the db, instead
# TODODONE: modify data to be the data object returned from db insertion
error = False
try:
f = request.form
af = ArtistForm(f)
if af.validate_on_submit():
newArtist = Artist()
newArtist.name = f.get('name')
newArtist.city = f.get('city')
newArtist.state = f.get('state')
newArtist.phone = f.get('phone')
newArtist.genres = ','.join(request.form.getlist("genres"))
newArtist.available_days = days_to_num(
request.form.getlist("available_days"))
newArtist.seeking_venue = True if f.get(
'seeking_venue', 'n') == 'y' else False
newArtist.seeking_description = f.get('seeking_description', "")
newArtist.facebook_link = f.get('facebook_link', None)
newArtist.website_link = f.get('website_link', None)
newArtist.image_link = f.get('image_link', None)
db.session.add(newArtist)
db.session.commit()
else:
return render_template('forms/new_artist.html', form=af)
except:
db.session.rollback()
error = True
finally:
db.session.close()
if error:
flash('An error occurred. Artist ' +
request.form['name'] + ' could not be listed.')
else:
# on successful db insert, flash success
flash('Artist ' + request.form['name'] + ' was successfully listed!')
# TODODONE: on unsuccessful db insert, flash an error instead.
# e.g., flash('An error occurred. Artist ' + data.name + ' could not be listed.')
return render_template('pages/home.html')
# Shows
# ----------------------------------------------------------------
@app.route('/shows')
def shows():
# displays list of shows at /shows
# TODODONE: replace with real venues data.
# num_shows should be aggregated based on number of upcoming shows per venue.
joinQry = db.session.query(Show, Venue, Artist).join(
Venue, Show.venue_id == Venue.id).join(
Artist, Show.artist_id == Artist.id).order_by(Show.start_time).all()
past_shows = []
next_shows = []
for s in joinQry:
tmpShow = {
"venue_id": s[1].id,
"venue_name": s[1].name,
"artist_id": s[2].id,
"artist_name": s[2].name,
"artist_image_link": s[2].image_link,
"start_time": s[0].start_time
}
if(tmpShow["start_time"] > datetime.now()):
next_shows.append(tmpShow)
else:
past_shows.append(tmpShow)
return render_template('pages/shows.html', past_shows=past_shows, next_shows=next_shows)
@app.route('/shows/create')
def create_shows():
# renders form. do not touch.
form = ShowForm()
return render_template('forms/new_show.html', form=form, error=[])
@app.route('/shows/create', methods=['POST'])
def create_show_submission():
# called to create new shows in the db, upon submitting new show listing form
# TODODONE: insert form data as a new Show record in the db, instead
error = False
errors = []
try:
f = request.form
sf = ShowForm(f)
if sf.validate_on_submit():
artist_id = f.get("artist_id")
venue_id = f.get("venue_id")
a = Artist.query.get(artist_id)
v = Venue.query.get(venue_id)
start_time = f.get('start_time')
x = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
if a is None:
errors.append('No artist with this id')
if v is None:
errors.append('No venue with this id')
if a:
day = x.strftime("%A")
days = num_to_days(a.available_days)
if not day in days:
errors.append('Artist is not available this day')
if len(errors) == 0:
newShow = Show(start_time=start_time)
newShow.artist = a
newShow.venue = v
db.session.add(newShow)
db.session.commit()
# on successful db insert, flash success
flash('Show was successfully listed!')
else:
error = True
else:
error = True
for e in sf:
for err in e.errors:
errors.append(err)
if error:
print(errors)
return render_template('forms/new_show.html', form=sf, all_errors=errors)
except:
error = True
print(sys.exc_info())
db.session.rollback()
finally:
db.session.close()
if error:
flash('Internal Server Error try again later!')
# TODODONE: on unsuccessful db insert, flash an error instead.
# e.g., flash('An error occurred. Show could not be listed.')
# see: http://flask.pocoo.org/docs/1.0/patterns/flashing/
return render_template('pages/home.html')
@app.errorhandler(404)
def not_found_error(error):
return render_template('errors/404.html'), 404
@app.errorhandler(500)
def server_error(error):
return render_template('errors/500.html'), 500
if not app.debug:
file_handler = FileHandler('error.log')
file_handler.setFormatter(
Formatter(
'%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
)
app.logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info('errors')
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
if __name__ == '__main__':
app.run()
# Or specify port manually:
'''
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
'''
|
[
"hesham.ahmos.y@gmail.com"
] |
hesham.ahmos.y@gmail.com
|
4cc8dcf83aa34c2bb350263fc43c9198dcd14932
|
b4df53f3156423fb5f5f371d674f2d03c8d39cd8
|
/HW03/distortion-removal.py
|
4c8536053eb388845df75772520ce24f0da38ab9
|
[] |
no_license
|
lifangda01/ECE661-ComputerVision
|
c3d7dd2845c00e47ca4237941fbda02733903136
|
ce7a2bd677c8b79646adcdfc3d5292c78d8a9393
|
refs/heads/master
| 2020-09-24T03:36:03.027085
| 2017-01-23T22:04:28
| 2017-01-23T22:04:28
| 67,304,969
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,788
|
py
|
#!/usr/bin/python
import numpy as np
import cv2
import matplotlib.pyplot as plt
from matplotlib.image import AxesImage
RESIZE_RATIO = 1.0
BACKGROUND_COLOR = (0, 0, 0)
# line(1,2) is parallel to line(3,4)
IMAGE_HC_A = [(190,120), (69,540), (236,108), (120,546)]
IMAGE_HC_A_1 = [(287,229), (280,250), (332,220), (326,255)]
FLATIRON_PAIRS = [(157,98), (136,165), (157,98), (185,117), (190,120), (69,540), (190,120), (236,108),
(332,220), (287,229), (332,220), (326,255), (391,414), (302,416), (391,414), (408,312),
(354,364), (322,320), (354,364), (302,416)]
def resize_image_by_ratio(image, ratio):
'''
Resize an image by a given ratio, used for faster debug.
'''
return cv2.resize(image, (int(image.shape[1]*ratio),int(image.shape[0]*ratio)))
def get_bounding_box_after_transformation(image, H):
'''
Given an image and the transformation matrix to be applied,
calculate the bounding box of the image after transformation.
@image: image to transform
@H: transformation matrix to be applied
@return: (num_row, num_col, off_row, off_col)
'''
(h, w, c) = image.shape
corners_1 = [(0,0), (0,w), (h,0), (h,w)]
corners_2_row = []
corners_2_col = []
for corner in corners_1:
(r,c) = corner
p_1 = np.array([[r,c,1]])
(r_2, c_2, z_2) = H * p_1.T
corners_2_row.append( int(r_2 / z_2) )
corners_2_col.append( int(c_2 / z_2) )
return (max(corners_2_row)-min(corners_2_row)+1, max(corners_2_col)-min(corners_2_col)+1,
min(corners_2_row), min(corners_2_col))
def get_line(p1, p2):
p1 = np.array([p1[0], p1[1], 1.])
p2 = np.array([p2[0], p2[1], 1.])
return np.cross(p1, p2)
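# e.g. get_line((0, 0), (1, 1)) = cross([0,0,1], [1,1,1]) = [-1, 1, 0],
# i.e. the homogeneous line r = c (points are (row, col) pairs here).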
def get_lines(points):
lines = []
for i in range(len(points)/2):
lines.append( get_line(points[2*i], points[2*i+1]) )
return lines
def get_pixel_by_nearest_neighbor(image, row_f, col_f):
'''
Get the pixel value based on float row and column numbers.
@image: image to be find pixels in
@row_f, col_f: float row and column numbers
@return: pixel value from image
'''
row = int(round(row_f))
col = int(round(col_f))
return image[row][col]
def get_projective_transformation_matrix(line1, line2, line3, line4):
'''
Given two pairs of lines that are supposed to be parallel in world plane,
find the homography matrix.
If successful, return a 3x3 numpy matrix that can be used to remove projective distortion.
@line1..4: line 1,2 and line 3,4 should be parallel in world plane,
each line is a tuple of its HC representation
@return: 3x3 numpy matrix H
'''
# Calculate the two vanishing points first
vp1 = np.cross(line1, line2)
vp2 = np.cross(line3, line4)
# Calculate the vanishing line
vl = np.cross(vp1, vp2)
vl = vl / vl[2]
H = np.matrix(np.identity(3))
H[2] = vl
return H
def get_affine_transformation_matrix(lines):
'''
Given two pairs of lines that are supposed to be orthogonal in world plane,
find the homography matrix.
If successful, return a 3x3 numpy matrix that can be used to remove affine distortion.
@line1..4: line 1,2 and line 3,4 should be orthogonal in world plane,
each line is a tuple of its HC representation
@return: 3x3 numpy matrix H
'''
# Get S first
# Ms = c --> s = inv(M)c
M = np.matrix( np.zeros((2,2)) )
c = np.matrix( np.zeros((2,1)) )
line1 = lines[0]
line2 = lines[1]
line3 = lines[2]
line4 = lines[3]
    # Orthogonality constraint: l1*m1*s1 + (l1*m2 + l2*m1)*s2 = -l2*m2
    M[0] = [line1[0]*line2[0], line1[0]*line2[1] + line1[1]*line2[0]]
    M[1] = [line3[0]*line4[0], line3[0]*line4[1] + line3[1]*line4[0]]
c[0] = -line1[1]*line2[1]
c[1] = -line3[1]*line4[1]
try:
s = M.I * c
except np.linalg.LinAlgError:
print "ERROR: M is singular!"
return None
s = np.asarray(s).reshape(-1)
S = np.array([[s[0], s[1]],
[s[1], 1. ]])
# Perform singular value decomposition
# s, U = np.linalg.eig(S)
# D = np.sqrt(np.abs(np.diag(s)))
U, s, V = np.linalg.svd(S)
D = np.sqrt(np.diag(s))
H = np.matrix( np.zeros((3,3)) )
# np.dot is equivalent to matrix multiplication for 2D arrays
H[:2, :2] = np.dot(np.dot(U, D), U.T)
H[:2, :2] = H[:2, :2] / H[1,1]
H[2,2] = 1.
return H
def get_projective_and_affine_transformation_matrix(lines):
'''
Given five pairs of lines that are supposed to be orthogonal in world plane,
find the homography matrix that eliminates both projective and affine distortion.
@lines: list of 10 lines forming five orthogonal pairs
@return: 3x3 numpy matrix H
'''
# We need at least five pairs of lines
if len(lines) < 10:
print "ERROR: not enough orthogonal line pairs!"
return None
# Mc = d --> c = inv(M)d
M = np.matrix( np.zeros((5,5)) )
d = np.matrix( np.zeros((5,1)) )
# print lines
for i in range( 5 ):
l = lines[2*i]
m = lines[2*i+1]
M[i] = [l[0]*m[0], (l[0]*m[1]+l[1]*m[0])/2., l[1]*m[1], (l[0]*m[2]+l[2]*m[0])/2., (l[1]*m[2]+l[2]*m[1])/2.]
d[i] = [-l[2]*m[2]]
try:
c = M.I * d
except np.linalg.LinAlgError:
print "ERROR: M is singular!"
return None
c = np.asarray(c).reshape(-1)
C_p = np.matrix([[c[0] , c[1]/2., c[3]/2.],
[c[1]/2., c[2] , c[4]/2.],
[c[3]/2., c[4]/2., 1.]])
# Similar to the previous step
print C_p
S = C_p[:2, :2]
# S = S / S[1,1]
U, s, V = np.linalg.svd(S)
D = np.sqrt(np.diag(s))
# np.dot is equivalent to matrix multiplication for 2D arrays
A = np.dot(np.dot(U, D), U.T)
v = np.dot(A.I, C_p[:2,2])
H = np.matrix( np.zeros((3,3)) )
H[:2,:2] = A
# H[:2, :2] = H[:2, :2] / H[1,1]
H[2,:2] = v.T
H[2,2] = 1.
return H
def get_affine_transformation_matrix_from_points(points):
return get_affine_transformation_matrix([
get_line(points[0], points[1]), get_line(points[0], points[2]),
get_line(points[3], points[4]), get_line(points[3], points[5]) ])
def get_projective_transformation_matrix_from_points(points):
    return get_projective_transformation_matrix( *get_lines(points) )
def get_projective_and_affine_transformation_matrix_from_points(points):
    return get_projective_and_affine_transformation_matrix( get_lines(points) )
def apply_transformation_on_image(image, H):
'''
Given a transformation matrix, apply it to the input image to obtain a transformed image.
@image: np.ndarray of input image
@H: the tranformation matrix to be applied
@return: np.ndarray of the transformed image
'''
# First determine the size of the transformed image
(num_row, num_col, off_row, off_col) = get_bounding_box_after_transformation(image, H)
print "New image size:", (num_row, num_col)
try:
trans_img = np.ndarray( (num_row, num_col, image.shape[2]) )
except IndexError:
trans_img = np.ndarray( (num_row, num_col, 1) )
H_inv = H.I
for r in range(trans_img.shape[0]):
for c in range(trans_img.shape[1]):
p_2 = np.array([[r+off_row,c+off_col,1]])
(r_1, c_1, z_1) = H_inv * p_2.T
r_1 = r_1 / z_1
c_1 = c_1 / z_1
if 0 <= r_1 < image.shape[0]-1 and 0 <= c_1 < image.shape[1]-1:
trans_img[r][c] = get_pixel_by_nearest_neighbor(image, r_1, c_1)
else:
trans_img[r][c] = BACKGROUND_COLOR
return trans_img
def apply_transformation_on_points(points, H):
'''
Apply the given transformation matrix on all points from input.
@coords: list of input points, each is represented by (row,col)
@return: list of points after transformation, each is represented by (row,col)
'''
l = []
for point in points:
p = np.array([point[0], point[1], 1.])
p = np.asarray(np.dot(H, p)).reshape(-1)
p = p / p[-1]
l.append( (p[0], p[1]) )
return l
def task1():
'''
Use two-step method to remove affine distortion
'''
image_img_a = cv2.imread('images/flatiron_1.jpg')
image_img_a = resize_image_by_ratio(image_img_a, RESIZE_RATIO)
image_hc_a = [( int(x*RESIZE_RATIO), int(y*RESIZE_RATIO) ) for (x,y) in IMAGE_HC_A]
image_hc_a_1 = [( int(x*RESIZE_RATIO), int(y*RESIZE_RATIO) ) for (x,y) in IMAGE_HC_A_1]
H_ap = get_projective_transformation_matrix_from_points(image_hc_a)
trans_img = apply_transformation_on_image(image_img_a, H_ap)
cv2.imwrite('images/task1_flatiron_per.jpg', trans_img)
image_hc_a = apply_transformation_on_points(image_hc_a, H_ap)
image_hc_a_1 = apply_transformation_on_points(image_hc_a_1, H_ap)
H_aa = get_affine_transformation_matrix_from_points(image_hc_a[:3] + image_hc_a_1[:3])
trans_img = apply_transformation_on_image(trans_img, H_aa.I)
cv2.imwrite('images/task1_flatiron_affine.jpg', trans_img)
def task2():
'''
Use one-step method to remove affine distortion
'''
image = cv2.imread('images/flatiron_1.jpg')
image = resize_image_by_ratio(image, RESIZE_RATIO)
flatiron_pairs = [( int(x*RESIZE_RATIO), int(y*RESIZE_RATIO) ) for (x,y) in FLATIRON_PAIRS]
H_pa = get_projective_and_affine_transformation_matrix_from_points(flatiron_pairs)
trans_img = apply_transformation_on_image(image, H_pa)
cv2.imwrite('images/task2_flatiron.jpg', trans_img)
def main():
task1()
task2()
if __name__ == '__main__':
main()
|
[
"lifangda02@gmail.com"
] |
lifangda02@gmail.com
|
9187ea2881390a09c88fcc726707525e4c45254f
|
9f7848ee3bd64e5b831e893d52f69912e540d6e4
|
/Snippet Search/bargrapg.py
|
001f7a0a87dcdd5d639d623406cacc52c8337559
|
[
"MIT"
] |
permissive
|
Ufatima/Twitter-Event-detection
|
e7f9f93d566b7fc2b161cf1ffd39f293fc0eb104
|
86c3ec5f6db9cedaaf1945938deca465ab818c57
|
refs/heads/master
| 2021-03-24T09:47:01.532986
| 2019-12-26T22:01:59
| 2019-12-26T22:01:59
| 119,052,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
import matplotlib.pyplot as plt
normalization_score_list = [0.66,0.33,0.66,0.66,0.33]
no_snippets_list = [10,6,6,5,3]
wup_similarity = [0.8571428571428571, 0.5238095238095238, 0.8571428571428571,0.8571428571428571,0.7321428571428571]
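# NOTE: wup_similarity is defined here but not used in the plot below.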
plt.scatter(no_snippets_list,normalization_score_list)
plt.title('Normalization Score vs. Number of Snippets')
plt.xlabel('number of snippets')
plt.ylabel('normalization score')
plt.show()
|
[
"hoory1020@gmail.com"
] |
hoory1020@gmail.com
|
9dee51aa3f19139230bc9821668cd83ef899a51c
|
cc4d19b685d77f9cdfb2a34492dd5ef84f0ab49e
|
/videosharing/wsgi.py
|
9bd71e9654384f98e7d09a7b5182f907b94a8634
|
[] |
no_license
|
Zoli1212/videosharing
|
1eb424525b97286650e771f1b5e13b515d716fb2
|
7e1b2504f0faeab953ac4a48379af464dba35f57
|
refs/heads/master
| 2023-01-03T01:17:47.226223
| 2020-10-30T17:10:40
| 2020-10-30T17:10:40
| 308,698,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for videosharing project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'videosharing.settings')
application = get_wsgi_application()
|
[
"43321960+Zoli1212@users.noreply.github.com"
] |
43321960+Zoli1212@users.noreply.github.com
|
7498fe6a8b6b6c43e6405131fea035d35a6742fa
|
f90a65159a2dcd6bb39479a9f38f45690f801f82
|
/TheoInf/Exercises/Sheet06/Task1/main.py
|
5f939cfafc992c1a00f3eba237fef9daabacc226
|
[] |
no_license
|
BUCKFAE/Semester06
|
a0471fd4edec2f8e21ff3fcbb5572a53c6b7915f
|
ef9098d2f534d14815cadd854971d91b3a196833
|
refs/heads/master
| 2023-06-24T05:06:49.400571
| 2021-07-20T08:47:16
| 2021-07-20T08:47:16
| 359,149,507
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
def is_prime(x):
    # Trial division: x < 2 is not prime; otherwise x is prime iff no i in [2, x-2] divides it
    return False if x < 2 else all(x % i != 0 for i in range(2, x - 1))
def check(n):
for p in range(2, n):
# Skipping if p is not a prime
if not is_prime(p):
continue
for q in range(2, n):
# Skipping if q is not a prime
if not is_prime(q):
continue
i = 1
j = 1
            # Increase i and j until the result gets too big
            while (p ** i) * (q ** j) <= n:
# Checking if we found a valid solution
if (p ** i) * (q ** j) == n:
return (p, q, i, j)
i += 1
# Ensuring we test every valid combination of i and j
if j == i:
j += 1
i = 1
return None
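# Example: check(12) returns (2, 3, 2, 1), since 2**2 * 3**1 == 12.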
# Validating results
for i in range(50):
res = check(i)
    sol = (res[0] ** res[2]) * (res[1] ** res[3]) if res is not None else ""
print(f"{i}: {res}: {sol}")
|
[
"informatik.scubert@gmail.com"
] |
informatik.scubert@gmail.com
|
70133f9d2aa4f9ddaf403ec7b297c7a03651b038
|
af02f85c4c18c864d8b5f7bcfce61da871d134bd
|
/python/dataRetriever/ckan1.py
|
6401f6615d8d44102173702a18906184a94cdfb5
|
[] |
no_license
|
akpatankar/info1
|
24341b968435b3e4f601a4c2d67b994499d16ee7
|
65d1072c115a27b7e226d7ab3a0128a38d6400e8
|
refs/heads/master
| 2021-03-24T10:20:57.268410
| 2014-09-10T04:50:00
| 2014-09-10T04:50:00
| 22,318,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
#!/usr/bin/env python
import urllib2
import urllib
import json
import pprint
import re
import StringIO
# Use the json module to dump a dictionary to a string for posting.
data_string = urllib.quote(json.dumps({'id': 'data-explorer'}))
# Make the HTTP request.
response = urllib2.urlopen('http://catalog.data.gov/api/3/action/package_list',
data_string)
assert response.code == 200
# Use the json module to load CKAN's response into a dictionary.
response_dict = json.loads(response.read())
# Check the contents of the response.
assert response_dict['success'] is True
#pprint.pprint(response_dict)
result = response_dict['result']
pprint.pprint(result)
#url1 = result[0]["url"]
#url2 = response_dict["url"]
response = urllib2.urlopen('http://catalog.data.gov/api/3/action/package_list?rows=25',
data_string)
assert response.code == 200
response_dict2 = json.loads(response.read())
print response_dict2['result']['results'][24]['resources']
#print re.search("http", result).group()
|
[
"a_k_patankar@yahoo.com"
] |
a_k_patankar@yahoo.com
|
ad15cca3830c540ce40efe51b48e661a584727a3
|
d183806b214af3bf83cd97dd6b5a1768197fdc29
|
/Conversely_Frontend/Server.py
|
fcedf32ccc7a0beb259db23a9d54bcafe7e833ee
|
[
"MIT"
] |
permissive
|
sam-aldis/Conversely
|
ea09f1a8bb2d7f5c58dc706e1544a52a864fdc62
|
1fc30d6b768cc03f727229a52e0879fac3af1e3a
|
refs/heads/master
| 2021-01-21T20:39:28.631318
| 2020-03-09T04:17:49
| 2020-03-09T04:17:49
| 92,267,805
| 0
| 1
| null | 2017-05-24T23:47:33
| 2017-05-24T08:17:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,257
|
py
|
#!/usr/bin/python
import tornado.web
import tornado.ioloop
import os
import ukjp.config
import ukjp.fbactions
import ukjp.database
import thread
import json
import hashlib
class UkJPRequestHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.add_header("Server","UKJP-Server")
class ApiHandler(UkJPRequestHandler):
actions = {
"300" : "SET",
"200" : "GET",
"100" : "INIT"
}
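    # The client sends a numeric "type" argument; this table maps it to the
    # verb that selects the branch taken in handleApi() below.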
def handleApi(self):
try:
postType = self.get_argument("type")
atype = self.actions[postType]
#TYPE IS SET
if atype == "SET":
action = self.get_argument("action")
#SET ACCESS TOKEN
if action == "accessToken":
details = json.JSONDecoder().decode(self.get_argument("details"))
accessToken = self.get_argument("accessToken")
self.set_secure_cookie("UKJP_FB_COOKIE",accessToken)
db = ukjp.database.Database(accessToken=accessToken)
userId = db.getUserId()
session_id = hashlib.md5(userId).hexdigest()
self.set_secure_cookie("UKJP_SESSION",session_id)
details['session'] = session_id
try:
db.addUser(details)
self.write("200")
except ukjp.database.DBException as e:
self.write(e.message)
#SET PAGEID
if action == "pageid":
pageid = self.get_argument("pageid")
accessToken = self.get_secure_cookie("UKJP_FB_COOKIE")
db = ukjp.database.Database(accessToken=accessToken)
try:
db.selectPage(pageid)
self.write("200")
except ukjp.database.DBException as e:
self.write(e.message)
#TYPE IS GET
if atype == "GET":
action = self.get_argument("action")
#GET USER PAGE DETAILS
if action == "userPage":
accessToken = self.get_secure_cookie("UKJP_FB_COOKIE")
session_id = self.get_secure_cookie("UKJP_SESSION")
print(accessToken)
db = ukjp.database.Database(accessToken=accessToken)
pageDetails = db.getPageDetails(session_id)
self.set_header("Content-Type","application/json")
self.write(pageDetails)
except Exception as e:
print(e)
self.finish()
def get(self):
self.handleApi()
def post(self):
self.handleApi()
class IndexHandler(UkJPRequestHandler):
def get(self):
try:
cookie = self.get_secure_cookie("UKJP_FB_COOKIE")
if cookie != None:
print(cookie)
self.redirect("/app")
else:
self.render("./pages/index.html")
except:
self.render("./pages/index.html")
class ApplicationHandler(UkJPRequestHandler):
def get(self):
try:
cookie = self.get_secure_cookie("UKJP_FB_COOKIE")
if cookie == None:
self.redirect("/")
else:
self.render("./pages/app.html")
except:
self.redirect("/")
class App(tornado.web.Application):
def __init__(self):
handlers = [
tornado.web.url(r"/",IndexHandler),
tornado.web.url(r"/app",ApplicationHandler),
tornado.web.url(r"/api",ApiHandler),
]
settings = {
"static_path" : os.path.join(os.path.dirname(__file__),"static"),
"cookie_secret" : "Ve41Cp4#1sf^fjqpDasg%$sdff",
"debug" : True,
"autoreload" : True,
"compress_response" : True
}
tornado.web.Application.__init__(self, handlers, **settings)
facebook_updater = thread.start_new_thread(ukjp.fbactions.updater_process,())
server = App()
server.listen(8989)
tornado.ioloop.IOLoop.current().start()
|
[
"samaldis@gmail.com"
] |
samaldis@gmail.com
|
e865f68f746423d35e830ef8d6adef3f3cf61878
|
256dbe8e710f7750dc3185f1b4f67a2d36a020fa
|
/bsp/stm32f411-nucleo/drivers/SConscript
|
31cff2824baa868bb5d2c1cbb480a34ab660c539
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
sundm75/rt-thread
|
8a0a7cd3d107c090d226cfefb0c62aba273cec15
|
82b62e3b08e28968125e6825ea29256215069a5c
|
refs/heads/master
| 2021-05-12T08:41:45.631706
| 2019-01-04T02:44:09
| 2019-01-04T02:44:09
| 117,292,652
| 1
| 1
|
Apache-2.0
| 2019-01-04T02:44:10
| 2018-01-12T22:10:04
|
C
|
UTF-8
|
Python
| false
| false
| 259
|
Import('RTT_ROOT')
Import('rtconfig')
from building import *
cwd = os.path.join(str(Dir('#')), 'drivers')
# add the general drivers.
src = Glob('*.c')
CPPPATH = [cwd]
group = DefineGroup('Drivers', src, depend = [''], CPPPATH = CPPPATH)
Return('group')
|
[
"pengfei.fu@gmail.com"
] |
pengfei.fu@gmail.com
|
|
7219e6780a35336396bd45cdb0c93cc918ba27e5
|
ea371e791764593d9dfffd0eafcbce0cb3b76135
|
/scripts/build-rosinstall.py
|
68ed3d4b578a796b2a3a0019db51c086514a14ab
|
[] |
no_license
|
ipa-hsd/robust
|
a1862df5fba84346e895c8cc00ef01ace124752c
|
c6ed28a29b4da4957b8aa9b21e458511717511ed
|
refs/heads/master
| 2021-11-04T02:58:19.595036
| 2019-02-27T08:34:56
| 2019-02-27T08:34:56
| 130,993,317
| 0
| 0
| null | 2018-04-25T11:02:16
| 2018-04-25T11:02:16
| null |
UTF-8
|
Python
| false
| false
| 4,550
|
py
|
import sys
import os
import argparse
import logging
import subprocess
import shutil
import docker
import yaml
import requests
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
DIR_HERE = os.path.dirname(__file__)
DOCKER_IMAGE_TIME_MACHINE = 'robust-rosin/rosinstall_generator_time_machine:03'
BIN_TIME_MACHINE = 'rosinstall_generator_tm.sh'
DESCRIPTION = "build-rosinstall"
def find_bug_descriptions(d):
buff = []
for root, _, files in os.walk(d):
for fn in files:
if fn.endswith('.bug'):
fn = os.path.join(root, fn)
buff.append(fn)
return buff
def gh_issue_to_datetime(url_issue):
# type: (str) -> str
prefix = "https://github.com/"
owner, repo, _, number = url_issue[len(prefix):].split('/')
url_api = 'https://api.github.com/repos/{}/{}/issues/{}'
url_api = url_api.format(owner, repo, number)
r = requests.get(url_api)
created_at = r.json()['created_at']
return created_at
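# Example (hypothetical URL): gh_issue_to_datetime('https://github.com/owner/repo/issues/1')
# queries https://api.github.com/repos/owner/repo/issues/1 and returns the issue's
# 'created_at' field, an ISO 8601 string such as '2019-02-27T08:34:56Z'.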
def build_file(fn_bug_desc, overwrite=False):
logger.info("building rosinstall file for file: %s", fn_bug_desc)
bug_id = os.path.basename(fn_bug_desc)[:-4]
dir_bug = os.path.dirname(fn_bug_desc)
fn_rosinstall = os.path.join(dir_bug, 'deps.rosinstall')
# check for existence of Docker image for time machine
client_docker = docker.from_env()
try:
client_docker.images.get(DOCKER_IMAGE_TIME_MACHINE)
except docker.errors.ImageNotFound:
logger.warning("Docker image for time machine not found: %s",
DOCKER_IMAGE_TIME_MACHINE)
sys.exit(1)
if not shutil.which(BIN_TIME_MACHINE):
logger.warning("could not find time machine binary in PATH: %s",
BIN_TIME_MACHINE)
sys.exit(1)
if os.path.isfile(fn_rosinstall):
if overwrite:
logger.warning("overwriting file: %s", fn_rosinstall)
else:
logger.info("skipping existing file: %s", fn_rosinstall)
return
with open(fn_bug_desc, 'r') as f:
        d = yaml.safe_load(f)
ros_pkgs = d['time-machine']['ros_pkgs']
missing_deps = d['time-machine'].get('missing-dependencies', [])
if 'issue' in d['time-machine']:
url_issue = d['time-machine']['issue']
try:
dt = gh_issue_to_datetime(url_issue)
except Exception:
m = "failed to convert GitHub issue to ISO 8601 timestamp: {}"
m = m.format(url_issue)
raise Exception(m)
elif 'datetime' in d['time-machine']:
dt = d['time-machine']['datetime'].isoformat()
if dt[-1] != 'Z':
dt += 'Z'
else:
raise Exception("expected 'issue' or 'datetime' in 'time-machine'")
cmd = [BIN_TIME_MACHINE, dt, d['time-machine']['ros_distro']]
cmd += ros_pkgs + missing_deps
cmd += ['--deps', '--deps-only', '--tar']
logger.debug("executing command: %s", ' '.join(cmd))
try:
res = subprocess.run(cmd,
check=True,
stdout=subprocess.PIPE)
contents = res.stdout.decode('utf-8')
except subprocess.CalledProcessError as err:
logger.warning("time machine failed (return code: %d) for bug [%s]",
err.returncode, fn_bug_desc)
return
# updated repository names
contents = contents.replace('geometry_experimental', 'geometry2')
# write to rosinstall file
with open(fn_rosinstall, 'w') as f:
f.write(contents)
def build_dir(d, overwrite=False):
logger.info("building rosinstall files for directory: %s", d)
files = find_bug_descriptions(d)
for fn in files:
try:
build_file(fn, overwrite=overwrite)
except Exception:
logger.exception("failed to create rosinstall file for bug: %s", fn)
def main():
log_to_stdout = logging.StreamHandler()
log_to_stdout.setLevel(logging.DEBUG)
logger.addHandler(log_to_stdout)
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('file_or_dir', type=str)
parser.add_argument('--overwrite', action='store_true')
args = parser.parse_args()
fn_or_dir = args.file_or_dir
if os.path.isdir(fn_or_dir):
build_dir(fn_or_dir, overwrite=args.overwrite)
elif os.path.isfile(fn_or_dir):
build_file(fn_or_dir, overwrite=args.overwrite)
else:
logger.error("ERROR: expected a filename or directory.")
sys.exit(1)
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
ipa-hsd.noreply@github.com
|
6a7f856c53af86a496f98a4a42d08881a273fcd3
|
41777201b100f24cea0427c8ed9cd5fb2e976b49
|
/Section 1/tornado-test.py
|
e0e31988a76214fe9ba548ce40b0310e57bc1060
|
[
"MIT"
] |
permissive
|
PacktPublishing/Reactive-Programming-in-Python
|
a611adfd231c4448f3c4c4873949a67c28753700
|
bfdee56ae38b092f010ac42af31796bc0416747d
|
refs/heads/master
| 2022-11-05T05:40:35.623146
| 2022-10-28T13:50:39
| 2022-10-28T13:50:39
| 128,192,589
| 40
| 11
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write('Hello, world\n Awesome')
def make_app():
return tornado.web.Application([
(r'/', MainHandler)
])
if __name__ == '__main__':
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
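# With the server running, the handler can be exercised with, e.g.:
#   curl http://localhost:8888/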
|
[
"susant@packtpub.com"
] |
susant@packtpub.com
|
78f3d3a4eca1d9fa8d0ef24b5ae0b2d9ac9d4177
|
d0bf0f5d3d1d44c8a26a25fdac14af35084d3307
|
/rl_rpsr/policy.py
|
a8fca1364eccd1a9219760ddab284fdd352985dc
|
[
"MIT"
] |
permissive
|
abaisero/rl-rpsr
|
73f842f01b4246d11b2e961b31f99709762a87ed
|
ac3d1162f6629133b7f3f88a0d49e4f9fd6f2e69
|
refs/heads/main
| 2023-05-13T01:00:24.674446
| 2021-06-07T19:18:16
| 2021-06-07T19:18:16
| 373,312,207
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
from __future__ import annotations
import abc
class Policy(metaclass=abc.ABCMeta):
@abc.abstractmethod
def reset(self) -> int:
raise NotImplementedError
@abc.abstractmethod
def step(self, action: int, observation: int) -> int:
raise NotImplementedError
class RandomPolicy(Policy):
def __init__(self, model):
super().__init__()
self.model = model
def _action(self) -> int:
return self.model.action_space.sample()
def reset(self) -> int:
return self._action()
def step(self, action: int, observation: int) -> int:
return self._action()
class ModelPolicy(Policy):
def __init__(self, model, vf):
super().__init__()
self.model = model
self.vf = vf
self.state = None
def _action(self) -> int:
return self.vf.policy(self.state)
def reset(self) -> int:
self.state = self.model.start.copy()
return self._action()
def step(self, action: int, observation: int) -> int:
self.state = self.model.dynamics(self.state, action, observation)
return self._action()
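# Minimal usage sketch (assumes hypothetical `model` and `vf` objects providing
# the attributes used above: action_space, start, dynamics, and vf.policy):
#   policy = ModelPolicy(model, vf)
#   action = policy.reset()
#   action = policy.step(action, observation)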
|
[
"andrea.baisero@gmail.com"
] |
andrea.baisero@gmail.com
|
501e858309d7ff8772dc34f3500c3dc28a84a09a
|
af3ec207381de315f4cb6dddba727d16d42d6c57
|
/dialogue-engine/test/programytest/parser/template/graph_tests/test_log.py
|
986504fd189497acc1a83296c9272ee75bacd2ad
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mcf-yuichi/cotoba-agent-oss
|
02a5554fe81ce21517f33229101013b6487f5404
|
ce60833915f484c4cbdc54b4b8222d64be4b6c0d
|
refs/heads/master
| 2023-01-12T20:07:34.364188
| 2020-11-11T00:55:16
| 2020-11-11T00:55:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,931
|
py
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.log import TemplateLogNode
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphLogTests(TemplateGraphTestClient):
def test_log_node_from_xml_default_values(self):
template = ET.fromstring("""
<template>
<log>Text</log>
</template>
""")
root = self._graph.parse_template_expression(template)
self.assertIsNotNone(root)
self.assertIsInstance(root, TemplateNode)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 1)
node = root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, TemplateLogNode)
|
[
"cliff@cotobadesign.com"
] |
cliff@cotobadesign.com
|