Dataset columns (name: type, value range):
- blob_id: stringlengths 40–40
- directory_id: stringlengths 40–40
- path: stringlengths 2–616
- content_id: stringlengths 40–40
- detected_licenses: listlengths 0–69
- license_type: stringclasses, 2 values
- repo_name: stringlengths 5–118
- snapshot_id: stringlengths 40–40
- revision_id: stringlengths 40–40
- branch_name: stringlengths 4–63
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 2.91k–686M (⌀ nulls present)
- star_events_count: int64, 0–209k
- fork_events_count: int64, 0–110k
- gha_license_id: stringclasses, 23 values
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: stringclasses, 213 values
- src_encoding: stringclasses, 30 values
- language: stringclasses, 1 value
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 2–10.3M
- extension: stringclasses, 246 values
- content: stringlengths 2–10.3M
- authors: listlengths 1–1
- author_id: stringlengths 0–212
ceb3aeda4643e1231491fcb39994da2829556c49
|
feec3e5576b2d54f59480ae99d30c7c814b1b66d
|
/mysite/polls/views.py
|
e72c41b6846b941879fb7fa44cfa49c0a1184935
|
[] |
no_license
|
Macielyoung/Seq2Seq-Chatbot
|
be32c18925d902b25b880906f76dace42a5a6449
|
dae00f1001bf5033335890f16581ddddf96209b1
|
refs/heads/master
| 2020-04-16T01:11:49.994953
| 2019-01-22T08:12:20
| 2019-01-22T08:12:20
| 165,164,044
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from infer import SeqModel
import json
seq2seq = SeqModel()
metadata = seq2seq.load_data()
seq2seq.load_model(metadata)
# Create your views here.
def index(request):
query = request.GET.get('query', '')
qa_pair = {}
qa_pair['query'] = query
answer = seq2seq.infer(query)
qa_pair['answer'] = answer
qa_json = json.dumps(qa_pair)
response = HttpResponse(qa_json)
response.__setitem__("Access-Control-Allow-Origin", "*")
return response
|
[
"maciel@yangwenyandeMacBook-Pro.local"
] |
maciel@yangwenyandeMacBook-Pro.local
|
5b08a8497e67c4ad8f4ea6744c7a8bec7de04b04
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_08_01/aio/operations/_ssh_public_keys_operations.py
|
3272fd669e75a7dc41ea48ede8265410af966dc1
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852
| 2023-02-15T13:30:47
| 2023-02-15T13:30:47
| 157,927,277
| 0
| 0
|
MIT
| 2022-07-19T08:05:23
| 2018-11-16T22:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 30,872
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._ssh_public_keys_operations import (
build_create_request,
build_delete_request,
build_generate_key_pair_request,
build_get_request,
build_list_by_resource_group_request,
build_list_by_subscription_request,
build_update_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SshPublicKeysOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2022_08_01.aio.ComputeManagementClient`'s
:attr:`ssh_public_keys` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_subscription(self, **kwargs: Any) -> AsyncIterable["_models.SshPublicKeyResource"]:
"""Lists all of the SSH public keys in the subscription. Use the nextLink property in the response
to get the next page of SSH public keys.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SshPublicKeyResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2022_08_01.models.SshPublicKeyResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-08-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))
cls: ClsType[_models.SshPublicKeysGroupListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SshPublicKeysGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_subscription.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/sshPublicKeys"}
@distributed_trace
def list_by_resource_group(
self, resource_group_name: str, **kwargs: Any
) -> AsyncIterable["_models.SshPublicKeyResource"]:
"""Lists all of the SSH public keys in the specified resource group. Use the nextLink property in
the response to get the next page of SSH public keys.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SshPublicKeyResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2022_08_01.models.SshPublicKeyResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-08-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))
cls: ClsType[_models.SshPublicKeysGroupListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SshPublicKeysGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys"
}
@overload
async def create(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: _models.SshPublicKeyResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.SshPublicKeyResource:
"""Creates a new SSH public key resource.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to create the SSH public key. Required.
:type parameters: ~azure.mgmt.compute.v2022_08_01.models.SshPublicKeyResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_08_01.models.SshPublicKeyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.SshPublicKeyResource:
"""Creates a new SSH public key resource.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to create the SSH public key. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_08_01.models.SshPublicKeyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: Union[_models.SshPublicKeyResource, IO],
**kwargs: Any
) -> _models.SshPublicKeyResource:
"""Creates a new SSH public key resource.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to create the SSH public key. Is either a model type or
a IO type. Required.
:type parameters: ~azure.mgmt.compute.v2022_08_01.models.SshPublicKeyResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_08_01.models.SshPublicKeyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-08-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.SshPublicKeyResource] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "SshPublicKeyResource")
request = build_create_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("SshPublicKeyResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("SshPublicKeyResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
create.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}"
}
@overload
async def update(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: _models.SshPublicKeyUpdateResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.SshPublicKeyResource:
"""Updates a new SSH public key resource.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to update the SSH public key. Required.
:type parameters: ~azure.mgmt.compute.v2022_08_01.models.SshPublicKeyUpdateResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_08_01.models.SshPublicKeyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.SshPublicKeyResource:
"""Updates a new SSH public key resource.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to update the SSH public key. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_08_01.models.SshPublicKeyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: Union[_models.SshPublicKeyUpdateResource, IO],
**kwargs: Any
) -> _models.SshPublicKeyResource:
"""Updates a new SSH public key resource.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to update the SSH public key. Is either a model type or
a IO type. Required.
:type parameters: ~azure.mgmt.compute.v2022_08_01.models.SshPublicKeyUpdateResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_08_01.models.SshPublicKeyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-08-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.SshPublicKeyResource] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "SshPublicKeyUpdateResource")
request = build_update_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SshPublicKeyResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}"
}
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, ssh_public_key_name: str, **kwargs: Any
) -> None:
"""Delete an SSH public key.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-08-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}"
}
@distributed_trace_async
async def get(
self, resource_group_name: str, ssh_public_key_name: str, **kwargs: Any
) -> _models.SshPublicKeyResource:
"""Retrieves information about an SSH public key.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_08_01.models.SshPublicKeyResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-08-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))
cls: ClsType[_models.SshPublicKeyResource] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SshPublicKeyResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}"
}
@distributed_trace_async
async def generate_key_pair(
self, resource_group_name: str, ssh_public_key_name: str, **kwargs: Any
) -> _models.SshPublicKeyGenerateKeyPairResult:
"""Generates and returns a public/private key pair and populates the SSH public key resource with
the public key. The length of the key will be 3072 bits. This operation can only be performed
once per SSH public key resource.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key. Required.
:type ssh_public_key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyGenerateKeyPairResult or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_08_01.models.SshPublicKeyGenerateKeyPairResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-08-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))
cls: ClsType[_models.SshPublicKeyGenerateKeyPairResult] = kwargs.pop("cls", None)
request = build_generate_key_pair_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.generate_key_pair.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SshPublicKeyGenerateKeyPairResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
generate_key_pair.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}/generateKeyPair"
}
|
[
"noreply@github.com"
] |
kurtzeborn.noreply@github.com
|
7c25ff18b341cd872a8a25f0dcfbf1023a780010
|
48ca6f9f041a1e9f563500c8a7fa04dbb18fa949
|
/pygears/typing/qround.py
|
ea79fe0998313278f899a4b014df440c38f3cbb8
|
[
"MIT"
] |
permissive
|
bogdanvuk/pygears
|
71404e53d4689ec9cdd9db546bfc0f229a7e02da
|
705b11ab6de79868b25753fa9d0ce7128791b346
|
refs/heads/master
| 2023-07-08T11:38:54.625172
| 2022-03-07T12:29:00
| 2022-03-07T12:29:00
| 124,890,922
| 146
| 16
|
MIT
| 2022-08-15T07:57:08
| 2018-03-12T13:10:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,286
|
py
|
from .cast import value_cast, type_cast
from .fixp import Fixp, Ufixp
from .uint import Uint, Bool, Int, code
def get_out_type(val_type, fract):
if get_cut_bits(val_type, fract) <= 0:
raise TypeError(
f'Cannot qround type "{val_type}" with "{val_type.fract}" '
f'fractional bits, to produce the type with more fractional '
f'bits "fract={fract}"'
)
if fract != 0:
return val_type.base[val_type.integer + 1, val_type.integer + fract + 1]
else:
return (Int if val_type.signed else Uint)[val_type.integer + 1]
def get_cut_bits(val_type, fract):
return val_type.fract - fract
def qround(val, fract=0):
cut_bits = get_cut_bits(type(val), fract)
out_type = get_out_type(type(val), fract)
val_coded = code(val, Int) if type(val).signed else code(val)
res = val_coded + (Bool(1) << (cut_bits - 1))
return out_type.decode(res[cut_bits:])
def qround_even(val, fract=0):
cut_bits = get_cut_bits(type(val), fract)
out_type = get_out_type(type(val), fract)
val_coded = code(val, Int) if type(val).signed else code(val)
round_bit = val_coded[cut_bits]
res = val_coded + Uint([round_bit] + [~round_bit] * (cut_bits - 1))
return out_type.decode(res[cut_bits:])
|
[
"bogdan.vukobratovic@gmail.com"
] |
bogdan.vukobratovic@gmail.com
|
1e8c67d8c6ce32961276b4ea876788f030175bf7
|
d9b2805a8b39f147bd77e35c8e96e0cbd5eaa726
|
/flask공부/flaskTest/bin/pip
|
7eb65fc06f5c5c461cfe88d74e5a3c61d6549aab
|
[] |
no_license
|
LeeInHaeng/Study
|
ca8e3e2d4111dc3f742eefea541a67739d729e75
|
96bdb1d224702cebb8a6de6bbd596b075ee33f7b
|
refs/heads/master
| 2020-03-28T11:03:03.848316
| 2019-04-20T08:33:26
| 2019-04-20T08:33:26
| 148,172,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
#!/home/lih/pythonTest/flaskTest/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"lih0420@naver.com"
] |
lih0420@naver.com
|
|
b5ffffc779bb9a663018d7d61a89415f02b6c32a
|
3f9163cc45befbc20b19a19bf1fd875b483c2965
|
/python/paddle/distributed/passes/auto_parallel_gradient_merge.py
|
bc40dad8ac0d9a69f37ef6d6704bd644b87522cc
|
[
"Apache-2.0"
] |
permissive
|
forschumi/Paddle
|
27926b1ddb76be08dc3f768df787fc9a4078f8e4
|
58d2949d6d2a1689e17527fb501d69c3501adf9f
|
refs/heads/develop
| 2022-07-04T03:07:52.446858
| 2022-06-05T11:44:04
| 2022-06-05T11:44:04
| 83,878,029
| 0
| 0
| null | 2017-03-04T08:11:50
| 2017-03-04T08:11:49
| null |
UTF-8
|
Python
| false
| false
| 14,250
|
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from collections import OrderedDict
from typing import List, Tuple, Dict, Any
import paddle
from paddle.framework import core
from paddle.fluid.framework import program_guard, device_guard
from paddle.fluid import unique_name, layers
from paddle.fluid.clip import append_gradient_clip_ops
from .pass_base import PassBase, PassType, register_pass
from paddle.distributed.auto_parallel.utils import set_var_dist_attr
from paddle.distributed.auto_parallel.utils import naive_set_dist_op_attr_for_program_by_mesh_and_mapping
from paddle.distributed.auto_parallel.process_group import get_world_process_group
world_process_group = get_world_process_group()
def _is_the_backward_op(op):
OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()
OpRole = core.op_proto_and_checker_maker.OpRole
return OP_ROLE_KEY in op.attr_names and \
int(op.all_attrs()[OP_ROLE_KEY]) & int(OpRole.Backward)
def _is_the_optimizer_op(op):
OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()
OpRole = core.op_proto_and_checker_maker.OpRole
return OP_ROLE_KEY in op.attr_names and \
int(op.all_attrs()[OP_ROLE_KEY]) & int(OpRole.Optimize)
def _remove_and_get_optimizer_op(main_program, dist_context):
# 1 create tmp block
# 2 mv optimizer op from global program to tmp block
# 3 del the op from dist_context
from paddle.distributed.fleet.meta_optimizers.common import OpRole
main_block = main_program.global_block()
temp_block = main_program._create_block()
removed_op_idx = []
optimize_ops_desc = []
for idx, op in enumerate(main_block.ops):
if _is_the_optimizer_op(op):
# append optimizer op to tmp block
new_op_desc = temp_block.desc.append_op()
new_op_desc.copy_from(op.desc)
optimize_ops_desc.append(new_op_desc)
removed_op_idx.append(idx)
# del op from dist_context
if dist_context:
dist_context.del_dist_op_for_program(op)
for idx in removed_op_idx[::-1]:
main_block._remove_op(idx)
return optimize_ops_desc
def _remove_op_role_var(param, grad):
op_maker = core.op_proto_and_checker_maker
op = grad.op
if op.has_attr(op_maker.kOpRoleVarAttrName()):
op._remove_attr(op_maker.kOpRoleVarAttrName())
def _get_gm_cond_var(main_program, k_steps, dist_context):
main_block = main_program.global_block()
# Add const var
k_step_var = layers.create_global_var(name="gradient_merge_k",
shape=[1],
value=int(k_steps),
dtype='int32',
persistable=True,
force_cpu=True)
set_var_dist_attr(dist_context, k_step_var, [-1], world_process_group.ranks)
zero_var = layers.create_global_var(name="gradient_merge_zero",
shape=[1],
value=int(0),
dtype='int32',
persistable=True,
force_cpu=True)
set_var_dist_attr(dist_context, zero_var, [-1], world_process_group.ranks)
# Add step var & cond var
step_var = layers.create_global_var(name="gradient_merge_step",
shape=[1],
value=int(0),
dtype='int32',
persistable=True,
force_cpu=True)
set_var_dist_attr(dist_context, step_var, [-1], world_process_group.ranks)
cond_var = main_block.create_var(name="gradient_merge_cond",
shape=[1],
dtype='bool')
set_var_dist_attr(dist_context, cond_var, [-1], world_process_group.ranks)
with device_guard("cpu"):
# step_var = (step_var + 1) % k_step
layers.increment(x=step_var, value=1.0, in_place=True)
elementwise_mod_op = main_block.append_op(type='elementwise_mod',
inputs={
'X': step_var,
'Y': k_step_var
},
outputs={'Out': step_var},
attrs={
'axis': -1,
'use_mkldnn': False
})
naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
elementwise_mod_op, world_process_group.ranks, [-1], dist_context)
# cond_var = (step_var == 0)
equal_op = main_block.append_op(type='equal',
inputs={
'X': step_var,
'Y': zero_var
},
outputs={'Out': cond_var})
naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
equal_op, world_process_group.ranks, [-1], dist_context)
return cond_var
def _append_gradient_merge_backward_op(
main_program, startup_program, params_grads: List[Tuple[Any, Any]],
cond_var_name: str,
dist_context) -> Tuple[List[Tuple[Any, Any]], Dict[str, Any]]:
main_block = main_program.global_block()
startup_block = startup_program.global_block()
# step1: remove grad.op's op_role_var
for param, grad in params_grads:
assert (
param.type != core.VarDesc.VarType.SELECTED_ROWS
), "SELECTED_ROWS is not supported in GradientMergeOptimizer for now"
_remove_op_role_var(param, grad)
param_to_gradient_merge = {}
new_params_to_grads = []
# step2: create gradient_merge var and init with 0
for param, grad in params_grads:
param_name = param.name
param_var = main_block.var(param_name)
assert (param_var is not None)
ref_dist_attr = dist_context.get_tensor_dist_attr_for_program(param_var)
assert ref_dist_attr is not None
gradient_merge_var = main_block.create_var(name=param_name +
"@GRAD@GradientMerge",
shape=param_var.shape,
dtype=param_var.dtype,
persistable=True)
param_to_gradient_merge[param_name] = gradient_merge_var
ref_process_mesh = ref_dist_attr.process_mesh
ref_dims_mapping = ref_dist_attr.dims_mapping
set_var_dist_attr(dist_context, gradient_merge_var, ref_dims_mapping,
ref_process_mesh)
startup_gradient_merge_var = startup_block.create_var(
name=param_name + "@GRAD@GradientMerge",
shape=param_var.shape,
dtype=param_var.dtype,
persistable=True)
startup_block.append_op(type="fill_constant",
outputs={"Out": startup_gradient_merge_var},
attrs={
"shape": param_var.shape,
"dtype": param_var.dtype,
"value": float(0),
})
# grad_merge += grad
new_grad_op = main_block.append_op(type="elementwise_add",
inputs={
'X': grad,
'Y': gradient_merge_var
},
outputs={'Out': gradient_merge_var},
attrs={
'axis': -1,
'use_mkldnn': False
})
new_params_to_grads.append([param, gradient_merge_var])
naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
new_grad_op, ref_process_mesh, ref_dims_mapping, dist_context)
return new_params_to_grads, param_to_gradient_merge
def _create_cond_block_and_update_optimizer(
main_program, cond_var, new_params_to_grads: List[Tuple[Any, Any]],
param_to_gradient_merge: Dict[str, Any], optimize_ops_desc: List[Any],
k_steps, avg):
def true_apply_gradient():
cur_block_idx = main_program.current_block_idx
cur_block = main_program.current_block()
# cur_block's forward_block & backward_block is itself
cur_block._set_forward_block_idx(cur_block_idx)
op_maker = core.op_proto_and_checker_maker
if avg:
for param, new_grad in new_params_to_grads:
# grad /= k_steps
cur_block.append_op(type='scale',
inputs={'X': new_grad},
outputs={'Out': new_grad},
attrs={
'scale': 1.0 / k_steps,
'bias': 0.0,
'bias_after_scale': False
})
new_grad.op._set_attr(op_maker.kOpRoleAttrName(),
op_maker.OpRole.Optimize)
# append optimizer ops
for op_desc in optimize_ops_desc:
new_op_desc = cur_block.desc.append_op()
new_op_desc.copy_from(op_desc)
#update input/output
for input_name in new_op_desc.input_arg_names():
if input_name in new_params_to_grads:
new_op_desc._rename_input(input_name,
new_params_to_grads[input_name])
for output_name in new_op_desc.output_arg_names():
if output_name in new_params_to_grads:
new_op_desc._rename_output(output_name,
new_params_to_grads[output_name])
# remove op_role_var
if new_op_desc.has_attr(op_maker.kOpRoleVarAttrName()):
new_op_desc.remove_attr(op_maker.kOpRoleVarAttrName())
# op's update Grad
if core.grad_var_suffix() in new_op_desc.input_arg_names():
grad_value = new_op_desc.input("Grad")[0]
# TODO FIXME(xym) support fp16
grad_merge_value = grad_value + '@GradientMerge'
new_op_desc.set_input("Grad", [grad_merge_value])
main_program.global_block()._sync_with_cpp()
cur_block._sync_with_cpp()
# clear gradient_merge_vars
for param, new_grad in new_params_to_grads:
layers.fill_constant(shape=new_grad.shape,
dtype=new_grad.dtype,
value=0.0,
out=new_grad)
new_grad.op._set_attr(op_maker.kOpRoleAttrName(),
op_maker.OpRole.Optimize)
layers.cond(cond_var, true_fn=true_apply_gradient, false_fn=None)
def parse_program(main_program, startup_program, params_grads, k_steps, avg,
dist_context):
# 1 create gradient_merge_cond
cond_var = _get_gm_cond_var(main_program, k_steps, dist_context)
# 2 remove optimizer_op from main_program
optimize_ops_desc = _remove_and_get_optimizer_op(main_program, dist_context)
# back to block 0
main_program._rollback()
# 3 append gradient merge backward op to main_program
new_params_to_grads, param_to_gradient_merge = _append_gradient_merge_backward_op(
main_program, startup_program, params_grads, cond_var.name,
dist_context)
# 4 create ConditionalBlock and append gradient merge optimizer ops
_create_cond_block_and_update_optimizer(main_program, cond_var,
new_params_to_grads,
param_to_gradient_merge,
optimize_ops_desc, k_steps, avg)
@register_pass("auto_parallel_gradient_merge_pass")
class GradientMergePass(PassBase):
def __init__(self):
super(GradientMergePass, self).__init__()
self.set_attr("k_steps", -1)
self.set_attr("avg", True)
self.set_attr("inner_optimizer", None)
def _check_self(self):
if self.get_attr("k_steps") < 1:
return False
return True
def _check_conflict(self, other_pass):
return True
def _type(self):
return PassType.COMM_OPT
def _apply_single_impl(self, main_program, startup_program, context):
k_steps = self.get_attr("k_steps", -1)
avg = self.get_attr("avg", False)
dist_context = self.get_attr("dist_context")
params_grads = self.get_attr("params_grads")
with paddle.static.program_guard(main_program, startup_program):
parse_program(main_program, startup_program, params_grads, k_steps,
avg, dist_context)
main_program._sync_with_cpp()
|
[
"noreply@github.com"
] |
forschumi.noreply@github.com
|
db2053ff6088956d37ca5a3a875a95d76e48daa4
|
e08f10a7469868f44a3cc5278033d4fd19b36598
|
/collection-old.py
|
f9b812733fa7253152c1f837adeda1807b1fea90
|
[
"MIT"
] |
permissive
|
neal-rogers/baseball-card-inventory
|
4498cadd444755afcf8b3d7c1ee2825264de40ce
|
9940ba746072892961b7ade586e63f7deb26d2e6
|
refs/heads/master
| 2020-12-25T23:18:48.513129
| 2017-02-19T00:45:32
| 2017-02-19T00:45:32
| 34,287,900
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,874
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a simple baseball card inventory system."""
import graphx, csv, json, os
def check_file(self):
"""Args:
None.
Returns:
something() = stuff.
Examples:
>>>
"""
if os.path.isfile('data\cards.json'):
print 'Loading database.'
jsoncards = open('data\cards.json', 'a')
else:
print 'No file found, importing from CSV...'
csvcards = open('data\cards.csv', 'r')
fields = (
"UID",
"PLAYER",
"POSITION",
"BRAND",
"CID",
"YEAR",
"CONDITION",
"SOLD",
"VALUE"
)
csv_rdr = csv.DictReader(csvcards, fields)
jsoncards = open('data\cards.json', 'a')
with jsoncards:
for row in csv_rdr:
json.dump(row, jsoncards)
jsoncards.write('\n')
csvcards.close()
print 'Import complete.'
#csvfile = open('cards.csv', 'r')
#jsonfile = open('cards.json', 'w')
ask_1 = raw_input('What would you like to do?, Update, Search, or Report?: ').title()
if ask_1 == 'Update':
reply_1 = 'Add'
elif ask_1 == 'Search' or 'Report':
reply_1 = 'Search By'
ask_2 = raw_input('{A}? '.format(A=reply_1)).title()
if ask_2 == 'Yes' or 'Add':
raw_input('Enter player name, position, team: ')
elif ask_2 == 'Search' or 'Report':
reply_2 = 'Search By'
uid += len()
# new_card = {'uid': , 'player': , 'position': , 'cid': , 'year': , 'condition':}
sreply = 'Add'
elif fask == 'Search' or 'Report':
sreply = 'Search By'
nask = raw_input('{A}? '.format(A=freply)).title()
"""
"""
if ask_3 == 'Report':
pass
# def c_sold ():
def main():
print graphx.BANNER
if __name__ == '__main__':
main()
|
[
"neal.rogers@spsmail.cuny.edu"
] |
neal.rogers@spsmail.cuny.edu
|
6e3b1401ef59aa4c7b7b60c95643e0850ea06e69
|
c9f710b342e0a2504f02ab8487d686ab9f2cff2b
|
/app/models/user.py
|
c93629ba2366c57f16871fb659b49cf4f3b6d62d
|
[] |
no_license
|
kalsmic/SendIT_API_endpoints
|
4c00394577a6fef1e8e88aa4943c35ae35343a55
|
91ba3e6258ae323f234e2f1aa1346bb4e061f31d
|
refs/heads/develop
| 2022-12-18T01:39:11.557734
| 2018-11-19T05:38:48
| 2018-11-19T05:38:48
| 156,332,638
| 0
| 0
| null | 2022-12-08T01:23:14
| 2018-11-06T05:50:50
| null |
UTF-8
|
Python
| false
| false
| 738
|
py
|
# users.py
""" Module contains Data structure for a User"""
class User:
"""class defines the user data structure"""
def __init__(self, id, user_name):
self.id = id
self.userName = user_name
def __str__(self):
return "User(id='%s',username='%s')" % (self.id, self.userName)
users = [
User(1, 'user1'),
User(2, 'user2'),
User(3, 'admin')
]
user_name_table = {user.userName: user for user in users}
user_id_table = {user.id: user for user in users}
def verify_user_id(user_id):
"""verify user id
Expects id of type int
Returns:
True if id exists
False if id does not exist"""
if user_id not in user_id_table.keys():
return False
return True
|
[
"kalulearthur@gmail.com"
] |
kalulearthur@gmail.com
|
805d18e7ddecffd215d64fb87ffd6e37dd42915a
|
4e3099dc20145814e35d81cc7c58996d72bf0b32
|
/venv/bin/django-admin.py
|
8c2150c19370424ca7cfabcd728dfc1875d8cb3f
|
[] |
no_license
|
eashwerks/evillage
|
65e0c2a01cb7898e2c1873eb28fad2adf7ab7f23
|
5482765498ead600145bcf80c343a80657386140
|
refs/heads/master
| 2022-12-03T17:03:25.084567
| 2020-06-05T13:04:43
| 2020-06-05T13:04:43
| 266,114,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
#!/home/eashwerks/PycharmProjects/evillage/venv/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"eashwerks@gmail.com"
] |
eashwerks@gmail.com
|
2f1f274dd1ad0310608a42872e14fff7fbf05b1f
|
c65dfb808e23263b8f3f703a4f31ea7e153b4efd
|
/tockstats.py
|
62fa54d729eb6e180e8686f52ea5921fa2030dd9
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
18F/quick-stats
|
68fcd3bc9fde390f1a74a370f232dd8086865b07
|
400b48bcebea242ac574dd30f870ed1687c3b863
|
refs/heads/master
| 2021-01-17T06:08:00.304550
| 2016-07-27T16:09:22
| 2016-07-27T16:09:22
| 64,323,703
| 0
| 2
| null | 2016-08-08T15:25:00
| 2016-07-27T16:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,030
|
py
|
"""Hours statistics from Tock exports"""
from collections import Counter
from csv import DictReader
import sys
def file_to_counter(filename):
"""Read CSV, convert it to a counter of hours by project"""
counter = Counter()
with open(filename) as csvfile:
reader = DictReader(csvfile)
for row in reader:
counter[row['Project']] += float(row['Number of Hours'])
return counter
def merge_counters(counters):
totals = Counter()
for counter in counters:
for key, value in counter.items():
totals[key] += value
return totals
def print_totals(totals):
total = sum(totals.values())
for project, amount in totals.most_common(20):
print("{}: {}/{} = {}".format(project, amount, total, amount/total))
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python tockstats.py FILE.csv [FILE2.csv ...]")
else:
counters = [file_to_counter(f) for f in sys.argv[1:]]
print_totals(merge_counters(counters))
|
[
"christopher.lubinski@gsa.gov"
] |
christopher.lubinski@gsa.gov
|
560407d9be25f531ac76d887c37f045f78cda5f9
|
3636ccf637805558a841517fc85cb54c6ddd6985
|
/virtual/bin/wheel
|
3589919ddf4d44525cf25520ab3598c6ddc4fc20
|
[
"MIT"
] |
permissive
|
Chiuri254/pitch
|
db5497390241c15411c85c1eb454cef859c8d04d
|
fd49e9454a636f449fd4f109d4d55c5db5520325
|
refs/heads/master
| 2020-04-22T18:58:04.178490
| 2019-02-14T16:16:10
| 2019-02-14T16:16:10
| 169,743,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
#!/home/joseph/Videos/oneminite-master/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"joseshiory@gmail.com"
] |
joseshiory@gmail.com
|
|
89e8ab9227a0a50a720b819e974157d583e35762
|
b5012bfee90c3974176c76bd55c89652be441d9c
|
/hackers.py
|
59014634ed3ccebbec4acbb81d8f7ea453eaad14
|
[] |
no_license
|
Tuoppi1/School-work
|
5affeb751d46df6a584236f62f6e6fb24eb22f9a
|
d5ce3be273c644013ae0341a7776792f86e1ffed
|
refs/heads/master
| 2020-08-12T23:53:30.492774
| 2019-12-15T02:18:07
| 2019-12-15T02:18:07
| 214,868,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,097
|
py
|
import re
import time
import requests
import matplotlib.pyplot as plt
# This program uses a Russian proxy IP address
# because I got banned from www.iplocate.io.
# The current proxy worked for me at least.
# Use your default IP address by removing the proxies parameter from requests.get(url, proxies).json() (line 30)
file = open("logins.txt")
hackList = []
ipAddressList = []
countryDict = {}
proxies = {"https":"195.239.86.102:45871"}
hackTimeDict = {"00":0,"01":0,"02":0,"03":0,"04":0,"05":0,"06":0,"07":0,"08":0,\
"09":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,\
"18":0,"19":0,"20":0,"21":0,"22":0,"23":0}
countryList = []
for line in file:
if "Tue Oct 2" in line:
ipAddress = re.search("\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}",line).group()
hackTime = re.search("\d+:\d+",line).group()
if ipAddress not in ipAddressList:
ipAddressList.append(ipAddress)
time.sleep(1)
hacker = requests.get("https://www.iplocate.io/api/lookup/{}".format(ipAddress)).json()
hackList.append(hacker)
try:
#print(hacker["country"])
if hacker["country"] in countryDict:
countryDict[hacker["country"]] += 1
else:
countryDict[hacker["country"]] = 1
hackTimeDict[hackTime[:-3]] += 1
except:
pass
#if len(hackList) > 15: # <-- uncomment this and break for testing purposes
#break
countryList = sorted(countryDict.items(),key=lambda x: x[1],reverse=True)
n = []
label = []
for i in countryList:
if len(n) == 10:
break
n.append(i[1])
label.append(i[0])
sortedHackTimeList = sorted(hackTimeDict.items(),key=lambda x: x[0])
n1 = []
label1 = []
for i in sortedHackTimeList:
n1.append(i[1])
label1.append(i[0])
plt.figure(figsize=(5,5))
plt.figure(1)
plt.pie(n,labels = label)
plt.savefig("hackpie.png")
plt.figure(2)
plt.figure(figsize=(10,6))
plt.bar(label1,n1)
plt.savefig("hackbar.png")
plt.show()
|
[
"noreply@github.com"
] |
Tuoppi1.noreply@github.com
|
97fe4b22a0d5bd7822f3f5e943e4fad93fc6b66d
|
9de9e636cf845c681fdbf1c6c058cc69d5d05da5
|
/IO/Writer.py
|
5ed136de1753e4ebcc60d562cf59aef0e316b217
|
[] |
no_license
|
dxcv/Portfolio-Management-1
|
4278eebb5c91a3a02ea76398b681ef9dc5beeb1f
|
9f188aeab3177d0a13bae32e3a318a4f18642a3c
|
refs/heads/master
| 2020-12-05T21:48:37.690004
| 2019-01-03T01:34:41
| 2019-01-03T01:34:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,408
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 12:51:44 2018
Write to excel function
@author: ACM05
"""
import pandas as pd
import IO.IO_Tools_Func as IO_TF
class Writer():
def __init__( self,
f_name ):
""" Writer object for
defined format
"""
self.f_name = f_name
self.writer = pd.ExcelWriter( f_name,
engine='xlsxwriter',
options={'nan_inf_to_errors': True})
self.book = self.writer.book
""" Loading all format settings
"""
self.header_format = self.book.add_format(IO_TF.get_format())
self.ticker_format = self.book.add_format(IO_TF.get_ticker_format())
self.thousand_format = self.book.add_format(IO_TF.get_num_k_format())
self.bold_format = self.book.add_format(IO_TF.get_format_bold())
self.pct_format = self.book.add_format(IO_TF.get_num_pct_format())
self.BPS_format = self.book.add_format(IO_TF.get_num_BPS_format())
def add_sheet( self,
s_name ):
""" Add sheets into this workbook
Please pre define all worksheet names
"""
workbook = self.writer.book
worksheet = workbook.add_worksheet( s_name )
self.writer.sheets[s_name] = worksheet
def write_ticker( self,
s_name,
i_row,
i_col,
i_string ):
""" Write tickers with defined formatting
"""
worksheet = self.writer.sheets[s_name]
worksheet.write( i_row, i_col,
i_string, self.ticker_format )
def write_raw( self,
s_name,
i_row,
i_col,
i_string ):
""" Write string into given file with sheet name
raw data without design
"""
worksheet = self.writer.sheets[s_name]
worksheet.write( i_row, i_col,
i_string, self.bold_format )
def write_df( self,
i_row,
i_col,
df,
s_name ):
""" Write to excel given
file name and sheet name
"""
""" Step one load formatting
"""
worksheet = self.writer.sheets[s_name]
""" Step two write df into this work sheet
"""
df = df.reset_index()
df = IO_TF.Add_Sum_Row_df(df, "ALL")
df.to_excel( self.writer,
s_name,
startrow = i_row,
startcol = i_col,
index = False )
for col, value in enumerate(df.columns.values):
worksheet.write( i_row, col+i_col,
value, self.header_format )
for col, value in enumerate(df.iloc[-1]):
if value == value:
worksheet.write( i_row+df.shape[0], col+i_col,
value, self.bold_format )
else:
worksheet.write( i_row+df.shape[0], col+i_col,
"", self.bold_format )
def close( self ):
self.writer.save()
|
[
"noreply@github.com"
] |
dxcv.noreply@github.com
|
2a6a3d870dc598862aae7dc106fe09e8a905e679
|
c52b749a1f41f43881cb53c02982c2379497ecb2
|
/ecommerce/upstreet/store/admin.py
|
3675c71296c7c82f09bae59a0fa4b708752e7dfb
|
[] |
no_license
|
sureshkokkiri/CIDM6325
|
4349226e70d3e0dea382a7a4c76ad029570271c1
|
f38c3657779b25ccbd9a53e904da5d9093d0848b
|
refs/heads/main
| 2023-01-24T08:20:16.470959
| 2020-11-30T05:21:33
| 2020-11-30T05:21:33
| 301,221,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
from django.contrib import admin
#from parler.admin import TranslatableAdmin
from .models import Category, Product
# Register your models here.
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = ['name', 'slug']
prepopulated_fields={'slug': ('name',)}
# return {'slug': ('name',)}
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
list_display = ['name', 'slug', 'price',
'available', 'created', 'updated']
list_filter = ['available', 'created', 'updated']
list_editable = ['price', 'available']
prepopulated_fields={'slug': ('name',)}
# return {'slug': ('name',)}
|
[
"westgree4@gmail.com"
] |
westgree4@gmail.com
|
16e6aad20191bccd6294c9578cb217ce79720b51
|
a6beb7c6901d49493fe8c2bd7e001464af799a19
|
/jd_goods/jd_goods/spiders/get_goods
|
67742aab9a5aecacc3c624e207892303cc41b2c3
|
[] |
no_license
|
RokieProgramer/Scrapy_demo
|
72b5b4f1ffa3abcf5cf7f5f138f1b1d4eda81fb3
|
3f5ebb70f975aa86494339f5ad43a9a3ff11bc35
|
refs/heads/master
| 2020-05-30T09:40:26.703200
| 2019-05-31T23:49:33
| 2019-05-31T23:49:33
| 189,652,087
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,572
|
# -*- coding: utf-8 -*-
import scrapy
from jd_goods.items import JdGoodsItem
from urllib.parse import urlencode
import re
import json
class GetGoodsSpider(scrapy.Spider):
name = 'get_goods'
# allowed_domains = ['jd.com']
# https://search.jd.com/Search?keyword=huwwei_p30pro
def start_requests(self):
        # keyword = input("Enter the product keyword to search for: ")
        # max_page = int(input('Enter the maximum number of pages to crawl: '))
keyword = 'iphone X'
max_page = 1
url = 'https://search.jd.com/Search?'
for page in range(1, max_page+1):
url += urlencode({'keyword':keyword, 'enc': 'utf-8', 'page': page})
yield scrapy.Request(url)
def parse(self, response):
node_list = response.xpath('//div[@id="J_goodsList"]//div[@class="p-name p-name-type-2"]')
# print('***'*20)
# print(len(node_list))
# print('***' * 20)
for index, node in enumerate(node_list):
            # link to the product detail page
goods_url = 'https:' + node.xpath('./a/@href').extract_first()
goods_url = response.urljoin(goods_url)
yield scrapy.Request(url=goods_url,callback=self.parse_goods_page)
#
def parse_goods_page(self, response):
'''
        Parse the product detail page, extract the available colours, and request the page for each colour variant.
:param response:
:return:
'''
item = JdGoodsItem()
describition = response.xpath(
'//div[@class="sku-name"]/text()').extract()
describition = ''.join(describition)
item['describition'] = describition.strip()
node_list = response.xpath('//div[@id="choose-attr-1"]/div/div')
for node in node_list:
item['color'] = node.xpath('./a/i/text()').extract_first().strip()
            # page link corresponding to this colour
data_sku = node.xpath('./@data-sku').extract_first()
next_url = 'https://item.jd.com/{data_sku}.html'.format(data_sku=data_sku)
next_url = response.urljoin(next_url)
yield scrapy.Request(url=next_url,
meta={'item': item},
callback=self.parse_goods_page_1)
def parse_goods_page_1(self, response):
'''
        Page for the memory + ROM variants available for the current colour.
:param response:
:return:
'''
item = response.meta['item']
node_list = response.xpath('//div[@id="choose-attr-2"]/div/div')
for node in node_list:
item['memories'] = node.xpath('./a/text()').extract_first().strip()
            # page link corresponding to this memory + ROM variant
data_sku = node.xpath('./@data-sku').extract_first()
next_url = 'https://item.jd.com/{data_sku}.html'.format(
data_sku=data_sku)
next_url = response.urljoin(next_url)
yield scrapy.Request(next_url,
meta={'item': item},
callback=self.parse_goods_page_2)
def parse_goods_page_2(self, response):
'''
        With colour, memory and ROM fixed, fetch the page links for the bundle versions.
:param response:
:return:
'''
item = response.meta['item']
node_list = response.xpath('//div[@id="choose-attr-3"]/div/div')
for node in node_list:
item['version'] = node.xpath('./a/text()').extract_first().strip()
            # page link corresponding to this bundle version
data_sku = node.xpath('./@data-sku').extract_first()
item['id'] = data_sku
next_url = 'https://item.jd.com/{data_sku}.html'.format(
data_sku=data_sku)
next_url = response.urljoin(next_url)
yield scrapy.Request(next_url,
meta={'item': item},
callback=self.parse_goods_page_3)
def parse_goods_page_3(self, response):
'''
        Colour, RAM+ROM and bundle version are all determined; get the product image links.
:param response:
:return:
'''
item = response.meta['item']
        # Product price as displayed on the page
        price = response.xpath('//span[@class="p-price"]/span/text()').extract()
        # All product images for the current colour
img_node_list = response.xpath('//ul[@class="lh"]/li')
img_list = []
for node in img_node_list:
img_source = node.xpath('./img/@src').extract_first()
            img_url = re.sub(r'\d{1}\/\w{1}\d{2,3}\w{1}\d{2,3}\w{1}', '0/', img_source)
img_url = 'http:' + img_url
img_list.append(img_url)
item['img_url'] = img_list
url = "https://p.3.cn/prices/mgets?callback=&skuIds=J_" + item['id']
url = response.urljoin(url)
yield scrapy.Request(url, meta={'item': item}, callback=self.parse_price)
def parse_price(self, response):
'''
        Fetch the price from the price API.
:param response:
:return:
'''
item = response.meta['item']
price_str = response.body
price_str = price_str.decode('utf-8')
pattern = re.compile('"p":"(.*)"}')
result = re.findall(pattern, price_str)
        print('Price:', result[0])
item['price'] = '¥' + result[0]
yield item
|
[
"noreply@github.com"
] |
RokieProgramer.noreply@github.com
|
|
ca06edde0b5cfd9aac6613d1fe1529f2a76675e1
|
ad23346ae1a2c47446c7ef3363ac28192a7598ad
|
/rover_ws/build/catkin_tools_prebuild/catkin_generated/pkg.installspace.context.pc.py
|
a9c0976f9c91a358d3ddc0f7899fed4f0abe8126
|
[] |
no_license
|
michaeldibono/mini-rover-backup
|
5c24e307e9a546bbdf7371b2a684f6cc006af4ab
|
1d9fb172c75289eb69fc22d2bc28030d1a138e5e
|
refs/heads/master
| 2021-01-18T18:36:43.353374
| 2017-08-16T18:15:20
| 2017-08-16T18:15:20
| 100,519,468
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "catkin_tools_prebuild"
PROJECT_SPACE_DIR = "/home/robotics/git/rover_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"yetan@fiu.edu"
] |
yetan@fiu.edu
|
204a504cca4f179ed50d214b89f4d43b0f58d0eb
|
9a8878b48bafaac3877a2354e070b85dd7ec00f0
|
/EDPC/B_frog2.py
|
77d8b927688d8ba2a9cdf4b512d7e3a9e2ea5864
|
[] |
no_license
|
ikikohei/atcoder
|
5505506a20f15cb32821d43b592e3fc9ee44dafe
|
9fe585e8554cd19a9d04aa4d9c2e7531e6586fcb
|
refs/heads/master
| 2023-05-31T22:01:01.734021
| 2021-06-26T14:13:02
| 2021-06-26T14:13:02
| 311,360,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
def DPmethodForFrog2(N,h,K):
max_val = 10000000000
dp = [max_val]*(10**5 + 1)
dp[0] = 0
for i in range(N-1):
# if i+1 <= K:
# for j in range(i+1):
# if dp[i+1] > dp[j]+abs(h[i+1] - h[j]):
# dp[i+1] = dp[j] + abs(h[i+1]-h[j])
# else:
t = min(i+1,K)
for l in range(i+1-t,i+1):
if dp[i+1] > dp[l] + abs(h[i+1] - h[l]):
dp[i+1] = dp[l] + abs(h[i+1]-h[l])
return dp
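# Quick sanity check, doctest-style (sample values assumed from the AtCoder
# "Frog 2" problem statement, not taken from this repository):
# >>> DPmethodForFrog2(5, [10, 30, 40, 50, 20], 3)[4]
# 30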
def main():
N, K = map(int,input().split())
    height = list(map(int,input().split()))
    dp = DPmethodForFrog2(N,height,K)
print(dp[N-1])
if __name__=='__main__':
main()
|
[
"i.k0712r@gmail.com"
] |
i.k0712r@gmail.com
|
3ba25a861b7cc7b205249f2f68cddee66bca052e
|
879c73341649ffe8e4df11ca40e78c9a76280f97
|
/cityengine-mmk/scripts/mmkcegraph.py
|
ac20882941064cab4bb3599b3e87c07c4d560faa
|
[] |
no_license
|
zhechkoz/MMK
|
8f93ad0bbf74a9f9000f1d2993685a2108b95706
|
a5675091dea43170992980a71f29e11afea5bfad
|
refs/heads/master
| 2021-03-27T11:43:54.463922
| 2018-05-20T17:56:28
| 2018-05-20T20:19:01
| 99,523,207
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,591
|
py
|
'''
Created on 03.11.2017
@author: zhechkoz
'''
import sys
import datetime
class CEGraph(object):
def __init__(self, project, author):
self.author = author
self.date = datetime.date.today()
self.project = project
self.nodes = {}
self.segments = {}
self.OSMOID = {}
self.buildings = {}
self.parkings = {}
def appendNodes(self, items):
for item in items:
self.nodes[item.id] = item
if item.osmID in self.OSMOID:
raise ValueError('More than one node have the same OSM ID: ' + item.osmID)
self.OSMOID.setdefault(item.osmID, []).append(item.id)
def appendSegments(self, items):
for item in items:
self.segments[item.id] = item
self.OSMOID.setdefault(item.osmID, []).append(item.id)
def getNode(self, OID):
return self.nodes.get(OID, None)
def getSegment(self, OID):
return self.segments.get(OID, None)
def getNodeByOSMID(self, osmID):
return self.getNode(self.OSMOID.get(osmID, [''])[0])
def getSegmentsByOSMID(self, osmID):
ids = self.OSMOID.get(osmID, [])
segments = [self.getSegment(oid) for oid in ids]
return segments
def collectLanesFromSUMOItems(self, SUMOedges, SUMOnodes):
validLanes = {}
for (id, nodes) in SUMOnodes.iteritems():
osmID = int(id)
ceNode = self.getNodeByOSMID(osmID)
if not ceNode:
# Node was deleted during CE cleanup process so
# try to find the nearest node which exists and append the
# lanes information.
# IMPORTANT: User may have to correct the CE model to correspond
# to SUMO shapes manually.
missingNode = nodes[0]
minDistance = sys.maxint
for node in self.nodes.values():
abs = missingNode.vertices[0].distanceTo(node.vertices[0])
if abs < minDistance:
minDistance = abs
ceNode = node
for node in nodes:
validLanes.update(node.lanes)
ceNode.appendLanes(node.lanes.keys())
ceNode.hierarchy = node.hierarchy
for (id, edges) in SUMOedges.iteritems():
if len(id) > 1 and id[0:2] == '--':
# Edge's OSM ID is negative and direction is negative, so
# this is a backwards segment
osmID = int(id[2:])
forward = False
elif len(id) > 0 and id[0] == '-':
# If there is an edge with a positive sign then this is a
# backwards direction segment, else this is a forward
# segment with a negative OSM ID (according to OSM IDs can be negative)
forward = not SUMOedges.has_key(id[1:])
osmID = int(id) if forward else int(id[1:])
elif len(id) > 0:
# Normal forward edge
osmID = int(id)
forward = True
else:
print("Not valid OSM ID format found " + str(id))
continue
segments = self.getSegmentsByOSMID(osmID)
if len(segments) == 0:
print('WARNING: Segment with OSM ID ' + str(id) + ' found in SUMO is missing!')
continue
for segment in segments:
for edge in edges:
validLanes.update(edge.lanes)
segment.appendLanes(edge.lanes.keys(), forward=forward)
return validLanes
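    # Illustrative examples of the edge-ID handling above (assumed IDs, not from real data):
    #   '--4242' -> osmID 4242, forward False (backward segment of edge 4242)
    #   '-4242'  -> backward segment of 4242 if an edge '4242' also exists,
    #               otherwise a forward segment with the negative OSM ID -4242
    #   '4242'   -> osmID 4242, forward True (normal forward edge)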
def translateCoordinates(self, dx, dy, dz):
for subList in [self.nodes.values(), self.segments.values(), self.buildings.values(), self.parkings.values()]:
for item in subList:
item.transform(dx, dy, dz)
def reprJSON(self):
dict = {'author' : self.author,
'date' : str(self.date),
'project' : self.project
}
dict.update({'nodes' : self.nodes.values()} if self.nodes else {})
dict.update({'segments' : self.segments.values()} if self.segments else {})
sceneObjects = {}
sceneObjects.update({'buildings' : self.buildings.values()} if self.buildings else {})
sceneObjects.update({'parkings' : self.parkings.values()} if self.parkings else {})
dict.update({'sceneObjects' : sceneObjects} if sceneObjects else {})
return dict
|
[
"jechkoj@gmail.com"
] |
jechkoj@gmail.com
|
0a9807695a6b48465c8bcda0fe72456b4e3a34b2
|
284d0cfb0d62bcbd1b5ec42cdafef0ff0c776231
|
/Imaging_Jellies/src/camera_test/camera_noise_test.py
|
0d7d13e7237949eebc310b9cdc222eec31e9cf3b
|
[] |
no_license
|
nicolewxu/BE107_ProjectGroup3_Jellyfish
|
d7e082f2279fa44e024d69be48cb9f2c046db0df
|
631b59fdd02d18acbbdd9d90161fbed829def94f
|
refs/heads/master
| 2020-12-24T13:20:59.944454
| 2015-05-26T06:50:47
| 2015-05-26T06:50:47
| 35,862,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,912
|
py
|
import cv2
import os
import numpy as np
import cPickle
def main():
#move_pics()
#make_avg_background()
#find_contours_with_background()
find_contours_with_adaptive_thresholding()
find_contours_with_otsu_thresholding()
#find_contours_vanilla()
def move_pics():
pics = dict()
for folder in ['fly_with_food_stills', 'larvae_stills']:
pics[folder] = []
img_folder = 'videos_for_tracking/' + folder
for root, dirs, filenames in os.walk(img_folder):
for f in filenames:
filename = os.path.join('videos_for_tracking', folder, f)
larva_img = cv2.imread(filename, 0)
pics[folder].append(larva_img)
output_filename = os.path.join('output', folder, f)
cv2.imwrite(output_filename, larva_img)
cPickle.dump(pics, open('output/all_imgs.pickle', 'wb'))
return pics
def make_avg_background():
pics = cPickle.load(open('output/all_imgs.pickle', 'r'))
for scene in pics.keys():
# initialize accumulator destination
cumulative_img = np.float64(pics[scene][0])
cumulative_img /= 255.0
cv2.imshow('init cumulative', cumulative_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
for n in range(100):
for i in range(len(pics[scene])):
#pics[scene][i] = 255 - pics[scene][i]
#cv2.imshow('img', pics[scene][i])
#cv2.waitKey(0)
#pics[scene][i] = cv2.cvtColor(pics[scene][i], cv2.COLOR_BGR2GRAY)
cv2.accumulateWeighted(np.float64(pics[scene][i])/255.0, cumulative_img, .01)
cv2.imshow('accumulating?', cumulative_img)
cv2.waitKey(0)
output_filename = os.path.join('output', scene, 'background.jpg')
cv2.imwrite(output_filename, cumulative_img*255.0)
def find_centroids(centroid_filename, contours, img):
with open(centroid_filename, 'w') as centroid_file:
centroid_file.write("X\tY\n")
centroids = []
for fly in contours:
max_coordinates = np.amax(fly, axis=0)
min_coordinates = np.amin(fly, axis=0)
centroid = (max_coordinates + min_coordinates)/2
centroid_file.write(str(centroid[0,0]) + "\t" + str(centroid[0,1]) + "\n")
cv2.circle(img, (centroid[0,0], centroid[0,1]), 2, (0, 255, 0), -1)
centroids.append(centroid)
return centroids, img
def find_contours_with_background():
pics = cPickle.load(open('output/all_imgs.pickle', 'r'))
for scene in pics.keys():
background_filename = os.path.join('output', scene, 'background.jpg')
background_img = cv2.imread(background_filename, 0)
x = 0
for larva_grey in pics[scene]:
x += 1
no_background_img = cv2.absdiff(larva_grey, background_img)
#cv2.imshow('background-corrected', no_background_img)
#cv2.waitKey(0)
blurred = cv2.GaussianBlur(no_background_img, (5,5), 0)
ret, thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#cv2.imshow('thresholded?', thresh)
#cv2.waitKey(0)
cv2.imwrite(os.path.join('output', scene, 'thresholded', 'thresholded_img_' + str(x) + '.jpg'), thresh)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
centroid_filename = os.path.join('output', scene, 'contours', 'background_subtraction', 'centroids_' + str(x) + '.txt')
centroids, larva_grey = find_centroids(centroid_filename, contours, larva_grey)
cv2.drawContours(larva_grey, contours, -1, (0,255,0), 1)
cv2.imshow('contours', larva_grey)
cv2.waitKey(0)
cv2.imwrite(os.path.join('output', scene, 'contours', 'background_subtraction', 'contour_of_img_' + str(x) + ".jpg"), larva_grey)
def find_contours_with_adaptive_thresholding():
pics = cPickle.load(open('output/all_imgs.pickle', 'r'))
for scene in pics.keys():
background_filename = os.path.join('output', scene, 'background.jpg')
background_img = cv2.imread(background_filename, 0)
x = 0
for larva_grey in pics[scene]:
x += 1
blurred = cv2.GaussianBlur(larva_grey, (5,5), 0)
thresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 3, 2)
#ret, thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imshow('thresholded?', thresh)
#cv2.waitKey(0)
cv2.imwrite(os.path.join('output', scene, 'thresholded', 'thresholded_img_' + str(x) + '.jpg'), thresh)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
centroid_filename = os.path.join('output', scene, 'contours', 'adaptive_threshold', 'centroids_' + str(x) + '.txt')
centroids, larva_grey = find_centroids(centroid_filename, contours, larva_grey)
cv2.drawContours(larva_grey, contours, -1, (0,255,0), 1)
cv2.imshow('contours', larva_grey)
#cv2.waitKey(0)
cv2.imwrite(os.path.join('output', scene, 'contours', 'adaptive_threshold', 'contour_of_img_' + str(x) + ".jpg"), larva_grey)
def find_contours_with_otsu_thresholding():
pics = cPickle.load(open('output/all_imgs.pickle', 'r'))
for scene in pics.keys():
background_filename = os.path.join('output', scene, 'background.jpg')
background_img = cv2.imread(background_filename, 0)
x = 0
for larva_grey in pics[scene]:
x += 1
#thresh = cv2.adaptiveThreshold(larva_grey, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 3, 2)
blurred = cv2.GaussianBlur(larva_grey, (5,5), 0)
ret, thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imshow('thresholded?', thresh)
#cv2.waitKey(0)
cv2.imwrite(os.path.join('output', scene, 'thresholded', 'thresholded_img_' + str(x) + '.jpg'), thresh)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
centroid_filename = os.path.join('output', scene, 'contours', 'otsu_threshold', 'centroids_' + str(x) + '.txt')
centroids, larva_grey = find_centroids(centroid_filename, contours, larva_grey)
cv2.drawContours(larva_grey, contours, -1, (0,255,0), 1)
cv2.imshow('contours', larva_grey)
#cv2.waitKey(0)
cv2.imwrite(os.path.join('output', scene, 'contours', 'otsu_threshold', 'contour_of_img_' + str(x) + ".jpg"), larva_grey)
def find_contours_vanilla():
pics = cPickle.load(open('output/all_imgs.pickle', 'r'))
for scene in pics.keys():
x = 0
for larva_grey in pics[scene]:
x += 1
larva_grey = 255-larva_grey
ret, thresh = cv2.threshold(larva_grey, 127, 255, cv2.THRESH_BINARY)
cv2.imshow('original', larva_grey)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.imshow('original 2', larva_grey)
centroid_filename = os.path.join('output', scene, 'contours', 'vanilla', 'centroids_' + str(x) + '.txt')
centroids, larva_grey = find_centroids(centroid_filename, contours, larva_grey)
cv2.drawContours(larva_grey, contours, -1, (0,255,0), 1)
cv2.imshow('contours', larva_grey)
cv2.waitKey(0)
cv2.imwrite(os.path.join('output', scene, 'contours', 'vanilla', 'contour_of_img_' + str(x) + ".jpg"), larva_grey)
main()
|
[
"nicolewxu@dhcp-7-245.caltech.edu"
] |
nicolewxu@dhcp-7-245.caltech.edu
|
a596bb3c9d9652d2ae7143f07e2a8838190af2e0
|
08b9e2128236a37df530d6afa5d127b7d2a28b95
|
/test/test.py
|
85bff6c475551aa6698d5c9525f433377c736b53
|
[] |
no_license
|
glebLit/Chat
|
c1889679ae3989efdc313a4fede5cadba94a91c0
|
271866dd1841235ba5e856cd14cb32627a6e91ac
|
refs/heads/master
| 2021-08-23T22:06:12.105637
| 2017-12-06T19:42:59
| 2017-12-06T19:42:59
| 109,710,943
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,149
|
py
|
#!/usr/bin/env python
import unittest
import urllib2
import time
from flask import Flask
from flask_socketio import SocketIO, emit
from socketIO_client import SocketIO, LoggingNamespace
from socketio import packet
import socket
app = Flask(__name__)
class FlaskTests(unittest.TestCase):
global result
result = urllib2.urlopen("http://localhost:5000/")
def setUp(self):
self.app = app.test_client()
self.app.testing = True
def test_home_status_code200(self):
self.assertEqual(result.code,200)
def test_html (self):
result = urllib2.urlopen("http://localhost:5000/")
self.assertEqual(result.read()[1:46], '!DOCTYPE html>\n<html lang="en">\n <head>\n ')
def test_send_msg(self):
socketIO = SocketIO('localhost', 5000)
socketIO.emit('my event', {'message': 'test', "user_name": 'test'})
pkt = packet.Packet()
self.assertEqual(pkt.packet_type, packet.EVENT)
self.assertTrue(socketIO.connected)
def test_connected(self):
socketIO = SocketIO('localhost', 5000)
self.assertTrue(socketIO.connected)
def test_disconnectsd(self):
socketIO = SocketIO('localhost', 5000)
socketIO.disconnect()
self.assertFalse(socketIO.connected)
def test_wait(self):
socketIO = SocketIO('localhost', 5000)
socketIO.emit('wait_with_disconnect')
timeout_in_seconds = 5
start_time = time.time()
socketIO.wait(timeout_in_seconds)
self.assertTrue(time.time() - start_time-1 < timeout_in_seconds)
def test_recieve_msg(self):
global msg
def on_aaa_response(*args):
global msg
msg = args[0]
def on_connect():
print('connect')
socketIO = SocketIO('localhost', 5000)
socketIO.on('connect', on_connect)
socketIO.on('my response', on_aaa_response)
socketIO.emit('my event', {'message': 'test', "user_name": 'test'})
socketIO.wait(seconds=1)
self.assertEqual(msg, {u'message': u'test', u'user_name': u'test'})
def test_delete_client(self):
global msg
def on_aaa_response(*args):
global msg
msg = args[0]
def on_connect():
print('connect')
socketIO = SocketIO('localhost', 5000)
socketIO.on('connect', on_connect)
socketIO.on('my response', on_aaa_response)
socketIO.emit('my event', {'data': u'Anonimus Disconnected'})
socketIO.wait(seconds=1)
self.assertEqual(msg, {u'message': u'Anonimus left chat room', u'user_name': u'server'})
def test_new_client(self):
global msg
def on_aaa_response(*args):
global msg
msg = args[0]
def on_connect():
print('connect')
socketIO = SocketIO('localhost', 5000)
socketIO.on('connect', on_connect)
socketIO.on('my response', on_aaa_response)
socketIO.emit('my event', {u'data': u'User Connected'})
socketIO.wait(seconds=1)
self.assertEqual(msg, {u'message': u'One more user!', u'user_name': u'server'})
    def tearDown(self):
        pass
if __name__== '__main__':
unittest.main()
|
[
"gleb19960@yandex.ru"
] |
gleb19960@yandex.ru
|
c7e5ffa9443909c4bd27e126568a6a404fd4a262
|
3e35a6b2d6ad60d62083081bbaed4e85fc3d29d0
|
/hello.py
|
502345209d8ab7c8b1088fbde830d67012ad09c0
|
[] |
no_license
|
issderek/first_rsp
|
535d573bdc0da9189158ef43e9738344d73d11b0
|
7356ef71be080ef26eaf9fcf5227546005a978b6
|
refs/heads/master
| 2021-01-22T04:15:09.330104
| 2017-02-15T02:26:08
| 2017-02-15T02:26:08
| 81,522,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,275
|
py
|
#coding:utf-8
import pandas as pd
import sys
reload(sys)
sys.setdefaultencoding('utf8')
ori_qa = pd.read_csv("qa.csv",header=None, sep=",")
querys = list(ori_qa[0])
answers = list(ori_qa[1])
you_str = "#".join(list(pd.read_csv("you.csv",header=None, sep=",")[0]))
# Replace all forms of address
for i in range(0,len(querys)):
querys[i] = querys[i].replace("$you","[@you:call_you]")
#拆query的string成list用于后续处理
querys_list = []
for item in querys:
l = item.split("|")
querys_list.append([l])
def cut(list):
r_list = []
for a_list in list:
for item in a_list:
if "#" in item:
mid_list = item.split("#")
for each_one in mid_list:
l = a_list[:]
l.remove(item)
l.append(each_one)
r_list.append(l)
break
return r_list
def is_finish(list):
for a in list:
for b in a:
if "#" in b:
return False
return True
# Expand the content according to the '#' separators
def expand_list(list):
while not is_finish(list):
list = cut(list)
return list
# Enumerate all orderings and put the results into result
def permutation(result, str, list):
"""
    Compute every permutation of a list.
    list: the input list
    str: pass an empty string on the first call
    result: the list that collects the results
"""
if len(list) == 1:
result.append(str + "[w:0-10]" + list[0] + "[w:0-10]")
else:
for temp_str in list:
temp_list = list[:]
temp_list.remove(temp_str)
permutation(result, str + "[w:0-10]" + temp_str, temp_list)
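# Example of a call (hypothetical, not part of the original script):
#   result = []
#   permutation(result, "", ["a", "b"])
#   # result == ['[w:0-10]a[w:0-10]b[w:0-10]', '[w:0-10]b[w:0-10]a[w:0-10]']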
# Expand the substrings
query_expanded_list = []
answer_expanded_list = []
for i in range(0,len(querys_list)):
#for i in range(0, 2):
ori_query = querys_list[i]
ori_expanded = expand_list(ori_query)
concat_result = []
for each_expanded in ori_expanded:
permutation(concat_result, "", each_expanded)
query_expanded_list += concat_result
for j in range(0,len(concat_result)):
answer_expanded_list.append(answers[i])
result = pd.DataFrame({'query':pd.Series(query_expanded_list),'answer':pd.Series(answer_expanded_list)},columns=['query','answer'])
result.to_csv("result_qa.csv",sep=",")
|
[
"53200690@qq.com"
] |
53200690@qq.com
|
174769dec8a2daec7c96b080ca6fe9f8c1de0b45
|
22af8fd63f53ead6cf92fd1628025d95859f0942
|
/emplay/wsgi.py
|
bc70109ee95562f413bf5ce87b1880b3b69f8b6e
|
[] |
no_license
|
alok162/EMPLAY-PRO
|
dd2e076ce381182719f599c23f5bc402c4629d93
|
345fae1c41067c674de542450b011967b9f2328c
|
refs/heads/master
| 2020-03-19T17:04:47.598527
| 2018-06-13T05:11:02
| 2018-06-13T05:11:02
| 136,743,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
WSGI config for emplay project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "emplay.settings")
application = get_wsgi_application()
|
[
"alok@Aloks-MacBook-Pro.local"
] |
alok@Aloks-MacBook-Pro.local
|
bf96a09a63a04c6313e4811b636283950b097e22
|
ef0aed73e99562d86250e13eb9d0bcf2cf6b2229
|
/main.py
|
1d89ca3a79e87bdc03d5f61afbed1c0ec61977d5
|
[] |
no_license
|
HuesodeDurazno/MagicPoemModel
|
38869dfed4ea82515eddf9207693b68facfcbd5d
|
30b3058c342c8051cf1084cf4c208b0b0c5f3576
|
refs/heads/main
| 2023-09-01T10:43:34.852352
| 2021-10-26T22:42:38
| 2021-10-26T22:42:38
| 421,604,862
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
import os
import numpy as np
import re
import spacy
import urllib.request
def poem_generator(file, word, n_sents=4):
nlp = spacy.load("es")
init_str = nlp(word)
|
[
"ivanwhite230@gmail.com"
] |
ivanwhite230@gmail.com
|
458365ada1fd265704f15bcdea4a1b43e2126cfb
|
57c0b710086538c41a119d1866a71705c43ea2d9
|
/Face and Feature Detection/detectfacescircle.py
|
7ca145d58a9f47d937dfedf8341858d2ad7b91d5
|
[] |
no_license
|
AlokRanjanBhoi/pencv-For-Python
|
3bf171790f5c0bb50553eba6b0cd3a0252bdcd67
|
087150f6c1c486e3b136e8d71e140241d83c67cf
|
refs/heads/master
| 2020-04-08T02:00:31.737585
| 2018-12-27T16:41:10
| 2018-12-27T16:41:10
| 158,918,641
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
import numpy as np
import cv2
img = cv2.imread("faces.jpg",1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
path = "haarcascade_eye.xml"
eye_cascade = cv2.CascadeClassifier(path)
eyes = eye_cascade.detectMultiScale(gray, scaleFactor=1.02,minNeighbors=20,minSize=(10,10))
print(len(eyes))
for (x,y,w,h) in eyes:
xc = (x+x+w)/2
yc = (y+y+h)/2
radius =w/2
cv2.circle(img,(int(xc),int(yc)), int(radius), (255,0,0),2)
cv2.imshow("Eyes",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
AlokRanjanBhoi.noreply@github.com
|
f502a1ab4fbd3fb3c402deb9bcb0a91171c04ca9
|
3ea75e35408de10bba250f52120b5424bd50fdd9
|
/py/plotSigzFunc.py
|
21812552da7cc7dfcdf7898e0e091f012d051cf2
|
[] |
no_license
|
jobovy/segue-maps
|
9848fe59ee24a11a751df4f8855c40f2480aef23
|
ed20b1058a98618700a20da5aa9b5ebd2ea7719b
|
refs/heads/main
| 2022-11-30T15:27:08.079999
| 2016-12-20T04:28:26
| 2016-12-20T04:28:26
| 40,663,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,199
|
py
|
import os, os.path
import math
import numpy
import cPickle as pickle
from matplotlib import pyplot
from optparse import OptionParser
from scipy import optimize, special
from galpy.util import bovy_coords, bovy_plot
def plotSigzFunc(parser):
(options,args)= parser.parse_args()
if len(args) == 0:
parser.print_help()
return
if os.path.exists(args[0]):#Load savefile
savefile= open(args[0],'rb')
params1= pickle.load(savefile)
samples1= pickle.load(savefile)
savefile.close()
else:
print "Need to give filename ..."
if os.path.exists(args[1]):#Load savefile
savefile= open(args[1],'rb')
params1= pickle.load(savefile)
samples2= pickle.load(savefile)
savefile.close()
else:
print "Need to give filename ..."
#First one
zs= numpy.linspace(0.3,1.2,1001)
xrange= [0.,1.3]
yrange= [0.,60.]
#Now plot the mean and std-dev from the posterior
zmean= numpy.zeros(len(zs))
nsigs= 3
zsigs= numpy.zeros((len(zs),2*nsigs))
fs= numpy.zeros((len(zs),len(samples1)))
ds= zs-0.5
for ii in range(len(samples1)):
thisparams= samples1[ii]
fs[:,ii]= math.exp(thisparams[1])+thisparams[2]*ds+thisparams[3]*ds**2.
#Record mean and std-devs
zmean[:]= numpy.mean(fs,axis=1)
bovy_plot.bovy_print()
xlabel=r'$|z|\ [\mathrm{kpc}]$'
ylabel=r'$\sigma_z\ [\mathrm{km\ s}^{-1}]$'
bovy_plot.bovy_plot(zs,zmean,'k-',xrange=xrange,yrange=yrange,
xlabel=xlabel,
ylabel=ylabel)
for ii in range(nsigs):
for jj in range(len(zs)):
thisf= sorted(fs[jj,:])
thiscut= 0.5*special.erfc((ii+1.)/math.sqrt(2.))
zsigs[jj,2*ii]= thisf[int(math.floor(thiscut*len(samples1)))]
thiscut= 1.-thiscut
zsigs[jj,2*ii+1]= thisf[int(math.floor(thiscut*len(samples1)))]
colord, cc= (1.-0.75)/nsigs, 1
nsigma= nsigs
pyplot.fill_between(zs,zsigs[:,0],zsigs[:,1],color='0.75')
while nsigma > 1:
pyplot.fill_between(zs,zsigs[:,cc+1],zsigs[:,cc-1],
color='%f' % (.75+colord*cc))
pyplot.fill_between(zs,zsigs[:,cc],zsigs[:,cc+2],
color='%f' % (.75+colord*cc))
        cc+= 1
nsigma-= 1
bovy_plot.bovy_plot(zs,zmean,'k-',overplot=True)
#Second one
zmean= numpy.zeros(len(zs))
zsigs= numpy.zeros((len(zs),2*nsigs))
fs= numpy.zeros((len(zs),len(samples2)))
for ii in range(len(samples2)):
thisparams= samples2[ii]
fs[:,ii]= math.exp(thisparams[1])+thisparams[2]*ds+thisparams[3]*ds**2.
#Record mean and std-devs
zmean[:]= numpy.mean(fs,axis=1)
for ii in range(nsigs):
for jj in range(len(zs)):
thisf= sorted(fs[jj,:])
thiscut= 0.5*special.erfc((ii+1.)/math.sqrt(2.))
zsigs[jj,2*ii]= thisf[int(math.ceil(thiscut*len(samples2)))]
thiscut= 1.-thiscut
zsigs[jj,2*ii+1]= thisf[int(math.ceil(thiscut*len(samples2)))]
colord, cc= (1.-0.75)/nsigs, 1
nsigma= nsigs
pyplot.fill_between(zs,zsigs[:,0],zsigs[:,1],color='0.75')
while nsigma > 1:
pyplot.fill_between(zs,zsigs[:,cc+1],zsigs[:,cc-1],
color='%f' % (.75+colord*cc))
pyplot.fill_between(zs,zsigs[:,cc],zsigs[:,cc+2],
color='%f' % (.75+colord*cc))
        cc+= 1
nsigma-= 1
bovy_plot.bovy_plot(zs,zmean,'k-',overplot=True)
bovy_plot.bovy_text(r'$-0.4 < [\mathrm{Fe/H}] < 0.5\,, \ \ -0.25 < [\alpha/\mathrm{Fe}] < 0.2$',bottom_right=True)
bovy_plot.bovy_text(r'$-1.5 < [\mathrm{Fe/H}] < -0.5\,, \ \ 0.25 < [\alpha/\mathrm{Fe}] < 0.5$',top_left=True)
bovy_plot.bovy_end_print(options.plotfile)
return None
def get_options():
usage = "usage: %prog [options] <savefilename>\n\nsavefilename= name of the file that the fit/samples will be saved to"
parser = OptionParser(usage=usage)
parser.add_option("-o",dest='plotfile',
help="Name of file for plot")
return parser
if __name__ == '__main__':
plotSigzFunc(get_options())
|
[
"jo.bovy@gmail.com"
] |
jo.bovy@gmail.com
|
d0fd9ae97dd8894464641a2387bc5db31a6ea3a3
|
04bd3387ed96a9856c14f76e3022171305203a72
|
/GetPopuler.py
|
348fc46c31c5691ec2af8fdeaedfdaec2f02e79d
|
[] |
no_license
|
Yashwant94308/ATG-Selenium
|
bb3fff41b642951db3b5ab605d524ddcee4794f1
|
39424bee93e49f752105dd35311c2569e1a2de43
|
refs/heads/master
| 2023-05-26T04:36:58.998935
| 2021-05-29T08:34:26
| 2021-05-29T08:34:26
| 371,921,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
import requests, json
response = requests.get(
'https://www.flickr.com/services/rest/?method=flickr.photos.getPopular&api_key=22a1377a56b4c384b61b723a80a73492'
'&user_id=193065083%40N04&format=json&nojsoncallback=1')
print(response.json())
|
[
"yashwant94308@gmail.com"
] |
yashwant94308@gmail.com
|
6c5fcdc365a2d9524863e685c0e18db9824f2d75
|
4856abe62782bbd1c1f56758ad949aea785b65af
|
/game_functions.py
|
3c52d29a2ad1738466abe44261960c24e91cad60
|
[] |
no_license
|
theVidVid/Alien_Invasion
|
fc8196d67b3658403297ca3b39133b4c5c73f511
|
6b47032e540be96d16af0f1f9b60a9f6f3f60543
|
refs/heads/master
| 2020-04-18T20:29:55.248742
| 2019-03-13T21:11:00
| 2019-03-13T21:11:00
| 167,739,600
| 4
| 1
| null | 2019-03-06T19:49:48
| 2019-01-26T21:14:10
|
Python
|
UTF-8
|
Python
| false
| false
| 11,928
|
py
|
import sys
from time import sleep
import pygame
from bullet import Bullet
from alien import Alien
# The sys module is used to exit the game when the player quits.
# The pygame module contains the functionality needed to make a game.
def check_events(ai_settings, screen, stats, sb, play_button, ship, aliens,
bullets):
"""Respond to keypresses and mouse events."""
for event in pygame.event.get():
if event.type == pygame.QUIT:
save_high_score(stats, sb)
pygame.quit()
sys.exit()
# KEYDOWN signifies the pressing of a key
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ai_settings, screen, stats, sb,
ship, bullets)
# KEYUP signifies the release of a pressed key
elif event.type == pygame.KEYUP:
check_keyup_events(event, ship)
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_x, mouse_y = pygame.mouse.get_pos()
check_play_button(ai_settings, screen, stats, sb, play_button,
ship, aliens, bullets, mouse_x, mouse_y)
def check_play_button(ai_settings, screen, stats, sb, play_button, ship,
aliens, bullets, mouse_x, mouse_y):
"""Start a new game when the player clicks Play button."""
button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
if button_clicked and not stats.game_active:
# Load the game's song
ai_settings.game_song = pygame.mixer.music.load("GameSong/Grind.ogg")
ai_settings.game_song = pygame.mixer.music.play(-1)
# Reset the game settings.
ai_settings.initialize_dynamic_settings()
# Hide the mouse cursor.
pygame.mouse.set_visible(False)
# Reset the game statistics.
stats.reset_stats()
stats.game_active = True
# Reset the scoreboard images.
sb.prep_score()
sb.prep_high_score()
sb.prep_level()
sb.prep_ships()
# Empty the list of aliens and bullets.
aliens.empty()
bullets.empty()
# Create a new fleet and center the ship.
create_fleet(ai_settings, screen, ship, aliens)
ship.center_ship()
def check_keydown_events(event, ai_settings, screen, stats, sb, ship, bullets):
"""Respond to keypresses."""
if event.key == pygame.K_RIGHT:
# Move the ship to the right.
ship.moving_right = True
elif event.key == pygame.K_LEFT:
# Move the ship to the left.
ship.moving_left = True
elif event.key == pygame.K_SPACE:
# Fires a bullet.
fire_bullet(ai_settings, screen, ship, bullets)
elif event.key == pygame.K_p:
# Entering the letter 'p' pauses the game.
pause_game(event, ai_settings, screen, stats, sb, ship, bullets)
elif event.key == pygame.K_u:
# Entering the letter 'u' unpauses the game.
unpause_game(event, ai_settings, screen, stats, sb, ship, bullets)
elif event.key == pygame.K_q:
# Entering the letter 'q' quits the game.
save_high_score(stats, sb)
pygame.quit()
sys.exit()
def fire_bullet(ai_settings, screen, ship, bullets):
"""Fire a bullet if limit hasn't been reached yet."""
# Create a new bullet and add it to bullets group.
if len(bullets) < ai_settings.bullets_allowed:
new_bullet = Bullet(ai_settings, screen, ship)
bullets.add(new_bullet)
ai_settings.bullet_sound.play()
def pause_game(event, ai_settings, screen, stats, sb, ship, bullets):
"""Puts the game in a pause state."""
if stats.game_active == True:
# Load pause sound clip.
ai_settings.pause_in.play()
# Pause the game's song
ai_settings.game_song = pygame.mixer.music.load("GameSong/Grind.ogg")
ai_settings.game_song = pygame.mixer.music.pause()
# Set game state to False.
stats.game_active = False
def unpause_game(event, ai_settings, screen, stats, sb, ship, bullets):
"""Returns the game back to previous state."""
if stats.game_active == False:
# Set game state to True
stats.game_active = True
# Load the unpause sound clip.
ai_settings.pause_out.play()
# Play the game's song
ai_settings.game_song = pygame.mixer.music.load("GameSong/Grind.ogg")
ai_settings.game_song = pygame.mixer.music.play(-1)
def check_keyup_events(event, ship):
"""Respond to key releases."""
if event.key == pygame.K_RIGHT:
# The ship stops moving to the right.
ship.moving_right = False
elif event.key == pygame.K_LEFT:
# The ship stops moving to the left.
ship.moving_left = False
def update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""Update position of bullets and get rid of old bullets"""
# Update bullet positions.
bullets.update()
# Get rid of bullets that have disappeared.
for bullet in bullets.copy():
if bullet.rect.bottom <= 0:
bullets.remove(bullet)
check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship,
aliens, bullets)
def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship,
aliens, bullets):
"""Respond to bullet-alien collisions."""
# Remove any bullets and aliens that have collided.
collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
if collisions:
for aliens in collisions.values():
stats.score += ai_settings.alien_points * len(aliens)
sb.prep_score()
ai_settings.destroyed_alien.play()
check_high_score(stats, sb)
if len(aliens) == 0:
# Load level cleared sound clip
ai_settings.new_level.play()
# If the entire fleet is destroyed, start a new level.
bullets.empty()
ai_settings.increase_speed()
# Increase level.
stats.level += 1
sb.prep_level()
create_fleet(ai_settings, screen, ship, aliens)
def update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets,
play_button):
"""Update images on the screen and flip to the new screen."""
# Redraw the screen during each pass through the loop.
screen.fill(ai_settings.bg_color)
# Redraw all bullets behind ship and aliens.
for bullet in bullets.sprites():
bullet.draw_bullet()
ship.blitme()
aliens.draw(screen)
# Draw the score information.
sb.show_score()
# Draw the play button if the game is inactive.
if not stats.game_active:
play_button.draw_button()
# Make the most recently drawn screen visible.
pygame.display.flip()
def get_number_aliens_x(ai_settings, alien_width):
    """Determine the number of aliens that fit in a row."""
available_space_x = ai_settings.screen_width - 2 * alien_width
number_aliens_x = int(available_space_x / (2 * alien_width))
return number_aliens_x
def get_number_rows(ai_settings, ship_height, alien_height):
"""Determine the number of rows of aliens that fit on the screen."""
available_space_y = (ai_settings.screen_height -
(3 * alien_height) - ship_height)
number_rows = int(available_space_y / (2 * alien_height))
return number_rows
def create_alien(ai_settings, screen, aliens, alien_number, row_number):
"""Create an alien and place it in the row."""
alien = Alien(ai_settings, screen)
# Spacing between each alien is equal to one alien width.
alien_width = alien.rect.width
alien.x = alien_width + 2 * alien_width * alien_number
alien.rect.x = alien.x
alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
aliens.add(alien)
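# Worked example of the spacing formula in create_alien (the 60 px sprite width is
# a hypothetical value, not taken from this project's settings):
#   alien.x = alien_width + 2 * alien_width * alien_number
#   with alien_width == 60, alien_number 0, 1, 2 -> x = 60, 180, 300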
def create_fleet(ai_settings, screen, ship, aliens):
"""Create a full fleet of aliens."""
# Create an alien and find the number of aliens in a row.
alien = Alien(ai_settings, screen)
number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)
number_rows = get_number_rows(ai_settings, ship.rect.height,
alien.rect.height)
# Create the fleet of aliens.
for row_number in range(number_rows):
for alien_number in range(number_aliens_x):
create_alien(ai_settings, screen, aliens, alien_number,
row_number)
def check_fleet_edges(ai_settings, aliens):
"""Respond appropriately if any aliens have reached an edge."""
for alien in aliens.sprites():
if alien.check_edges():
change_fleet_direction(ai_settings, aliens)
break
def change_fleet_direction(ai_settings, aliens):
"""Drop the entire fleet and change the fleet's direction."""
for alien in aliens.sprites():
alien.rect.y += ai_settings.fleet_drop_speed
ai_settings.fleet_direction *= -1
def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""Respond to ship being hit by alien."""
if stats.ships_left > 0:
# Reduce the number of ships left
stats.ships_left -= 1
# Load ship collision sound
ai_settings.ship_collision.play()
# Stop game music
ai_settings.game_song = pygame.mixer.music.load("GameSong/Grind.ogg")
ai_settings.game_song = pygame.mixer.music.stop()
# Update scoreboard
sb.prep_ships()
# Empty the list of aliens and bullets.
aliens.empty()
bullets.empty()
# Plays level refresh sound clip.
ai_settings.level_refresh.play()
# Plays the game song again from the beginning.
ai_settings.game_song = pygame.mixer.music.play(-1)
# Create a new fleet and center the ship.
create_fleet(ai_settings, screen, ship, aliens)
ship.center_ship()
# Pause.
sleep(1.5)
else:
ai_settings.game_song = pygame.mixer.music.stop()
ai_settings.game_over.play()
stats.game_active = False
pygame.mouse.set_visible(True)
def check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens,
bullets):
"""Check if any aliens have reached the bottom of the screen."""
screen_rect = screen.get_rect()
for alien in aliens.sprites():
if alien.rect.bottom >= screen_rect.bottom:
#Treat this the same as if the ship got hit.
ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
break
def update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""
Check if the fleet is at an edge,
and then update the positions of all aliens in the fleet.
"""
check_fleet_edges(ai_settings, aliens)
aliens.update()
# Look for alien-ship collisions
if pygame.sprite.spritecollideany(ship, aliens):
ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
# Look for aliens hitting the bottom of the screen
check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets)
def check_high_score(stats, sb):
"""Check to see if there's a new high score."""
if stats.score > stats.high_score:
stats.high_score = stats.score
sb.prep_high_score()
def save_high_score(stats, sb):
"""Saves the highest score to a .txt file."""
save_file = "GameSaveData/high_score.txt"
high_score = round(stats.high_score, -1)
saved_high_score = str(high_score)
with open(save_file, 'w') as f_obj:
f_obj.write(saved_high_score)
|
[
"ianjvidaurre@gmail.com"
] |
ianjvidaurre@gmail.com
|
c088978c79af6296316fa521727b2ebf9bb34d07
|
291c7ed825161973a2d8742f2f180f04ce6e2689
|
/website/__init__.py
|
a55432fa42f2f76214b396a4257247662bb44e9c
|
[] |
no_license
|
opoweiluo/E-commerce-for-small-business
|
ec1f870d7cadaffdcfe449e5b51e09a05c3a8f5b
|
2ab617e1814259aac689579025c6af4775e4181b
|
refs/heads/main
| 2022-12-27T00:33:14.023612
| 2020-10-13T01:42:51
| 2020-10-13T01:42:51
| 302,741,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
import pymysql
pymysql.install_as_MySQLdb()
app = Flask(__name__)
app.secret_key = 'david'
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:root@localhost/data'
app.config['SQLALCHEMY_TRACK_MODIFICATION'] = False
bcrypt = Bcrypt(app)
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
login_manager = LoginManager(app)
login_manager.init_app(app)
login_manager.login_view = "login"
login_manager.session_protection = "strong"
from website import routes
|
[
"noreply@github.com"
] |
opoweiluo.noreply@github.com
|
bcf521a2c46390f05edb564682e5fe27c896efd1
|
7a1085aa658114bacc3889c1c7d92d1d6e411159
|
/thread_cls.py
|
06dbbfe502d83a277367c7634480f01650c20699
|
[] |
no_license
|
huahuijay/class3
|
cc66956ba13006e832a722aff560225df9c5b3f2
|
0551f4a64f5748d396d44f0915e5f9fea110f966
|
refs/heads/master
| 2021-01-11T02:00:20.122088
| 2016-10-13T15:03:01
| 2016-10-13T15:03:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import Queue
import threading
import time
ip_queue = Queue.Queue()
class Ping(object):
def __call__(self):
while 1:
if not ip_queue.qsize():
break
ip = ip_queue.get()
time.sleep(1)
print ip
if __name__ == '__main__':
for i in range(254):
ip = '192.168.1.%s' % (i + 1)
ip_queue.put(ip)
threads = []
for i in range(50):
t = threading.Thread(target=Ping())
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
|
[
"peng.luo@idiaoyan.com"
] |
peng.luo@idiaoyan.com
|
d47dec240304cb46afa238c6d1e46db1d150dbf4
|
8d9e9f5e3246ea3c682b81c6a8c9578dd4e0666b
|
/max_soft_function.py
|
272135a948561debcd22f51eb1c589849c64aa25
|
[] |
no_license
|
defunSM/python-machine-learning
|
f9df9e32b8c28b58410203a2300f31935f6477b5
|
21eb73491e0526b6b654df00a70fe66869837b23
|
refs/heads/master
| 2021-01-12T08:58:07.509854
| 2017-06-01T11:22:15
| 2017-06-01T11:22:15
| 76,738,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
#!/usr/bin/env python
import sys, os
import numpy as np
def maxsoftfunc(x):
return np.exp(x) / np.sum(np.exp(x))
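# Note: for large inputs the exponentials can overflow; a numerically safer variant
# (an assumption, not part of the original script) subtracts the max first:
#   np.exp(x - np.max(x)) / np.sum(np.exp(x - np.max(x)))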
def main():
array = [0.1, 1.1, 2.1]
ans = maxsoftfunc(array)
print(ans)
total = 0
for i in ans:
total += i
print(total)
if __name__=="__main__":
main()
|
[
"sxh8083@g.rit.edu"
] |
sxh8083@g.rit.edu
|
b6f35c1a1ea83e61d21732c084d936b0869d821b
|
401f694fd838a88b7782ab3fb80fe687a0268082
|
/InterpreterDebugger/demo_script.py
|
9a30fc19b36c28932359006b406607b0405fef40
|
[
"MIT",
"Python-2.0"
] |
permissive
|
rockobonaparte/cloaca
|
8871f46ebcd1d018be2a61b5775daa65444ee0eb
|
f2b28cb77faaeca736bb0cf286361dd96f362789
|
refs/heads/master
| 2022-10-22T08:46:36.872543
| 2022-10-08T19:21:06
| 2022-10-08T19:21:06
| 175,341,337
| 4
| 4
| null | 2020-05-15T11:57:02
| 2019-03-13T03:42:17
|
C#
|
UTF-8
|
Python
| false
| false
| 65
|
py
|
import heapq
heapq.heapify([0, 9, 1, 8, 7, 3])
print("Done!")
|
[
"adam.preble@gmail.com"
] |
adam.preble@gmail.com
|
13cd0c8c3642448ab20d30e377b9836c2e2b3b0f
|
7a4ed01a40e8d79126b26f5e8fca43c8e61e78fd
|
/Python Built-in Modules/Python Itertools Module/1.Infinite Iterators/1.3.repeat()/1.Example_Of_repeat.py
|
b1c087d52f7e27726d3536cbc8a9c1f00b84432f
|
[] |
no_license
|
satyam-seth-learnings/python_learning
|
5a7f75bb613dcd7fedc31a1567a434039b9417f8
|
7e76c03e94f5c314dcf1bfae6f26b4a8a6e658da
|
refs/heads/main
| 2023-08-25T14:08:11.423875
| 2021-10-09T13:00:49
| 2021-10-09T13:00:49
| 333,840,032
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
from itertools import repeat
print ("Printing the numbers repeatedly : ")
print (list(repeat(25, 4)))
|
[
"satyam1998.1998@gmail.com"
] |
satyam1998.1998@gmail.com
|
633f383970cec190025bf934ec1d356e6c1d9a65
|
3823bfba09bbb1bc0f525a241283df3cb6b97615
|
/scrapyprj/replay/ZipUtility.py
|
742983d59edfb0b637490abcd6254bdb2f2d1a06
|
[] |
no_license
|
alexmason528/torforum_crawler
|
ed3a585dd7b454ad71b021f3014f40a48b38c1e9
|
70964905d79818501c9fc32d3e6417dcc6eaa828
|
refs/heads/master
| 2020-03-19T01:04:25.914342
| 2018-05-29T18:24:36
| 2018-05-29T18:24:36
| 135,520,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,584
|
py
|
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
try:
import zlib
del zlib
_ZLIB_SUPPORTED = True
except ImportError:
_ZLIB_SUPPORTED = False
try:
import bz2
del bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
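# NOTE: the archive helpers below call _get_uid/_get_gid and raise ExecError,
# neither of which is defined in this file. A minimal sketch, assuming the same
# behaviour as the Python 2.7 shutil helpers this module appears to be based on:
class ExecError(EnvironmentError):
    """Raised when a command could not be executed."""
def _get_gid(name):
    """Return a gid given a group name, or None if it cannot be resolved."""
    if getgrnam is None or name is None:
        return None
    try:
        result = getgrnam(name)
    except KeyError:
        result = None
    if result is not None:
        return result[2]
    return None
def _get_uid(name):
    """Return a uid given a user name, or None if it cannot be resolved."""
    if getpwnam is None or name is None:
        return None
    try:
        result = getpwnam(name)
    except KeyError:
        result = None
    if result is not None:
        return result[2]
    return None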
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", or ".bz2").
Returns the output filename.
"""
if compress is None:
tar_compression = ''
elif _ZLIB_SUPPORTED and compress == 'gzip':
tar_compression = 'gz'
elif _BZ2_SUPPORTED and compress == 'bzip2':
tar_compression = 'bz2'
else:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
compress_ext = '.' + tar_compression if compress else ''
archive_name = base_name + '.tar' + compress_ext
archive_dir = os.path.dirname(archive_name)
if archive_dir and not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
import tarfile # late import so Python build itself doesn't break
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression)
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# XXX see if we want to keep an external call here
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
try:
spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise ExecError, \
("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if archive_dir and not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zlib
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
with zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED,
allowZip64=True) as zf:
path = os.path.normpath(base_dir)
if path != os.curdir:
zf.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in sorted(dirnames):
path = os.path.normpath(os.path.join(dirpath, name))
zf.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zf.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
return zip_filename
_ARCHIVE_FORMATS = {
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [], "ZIP file")
}
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "gztar",
or "bztar". Or any other registered format.
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError, "unknown archive format '%s'" % format
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
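# Example usage (illustrative only; the paths below are hypothetical):
#   make_archive('/tmp/site-backup', 'zip', root_dir='/var/www/site')
#   # -> creates /tmp/site-backup.zip containing everything under /var/www/site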
|
[
"steven@gilligan.io"
] |
steven@gilligan.io
|
ac8bb2b49f625d413a32f8fef679bc03ce802ab6
|
ade22d64b99e7306eaeaf06684cc9c4f2d539881
|
/oscquintette/tests/v1/test_plugin.py
|
36de987851507a942e89237853e783acf38e25f1
|
[
"Apache-2.0"
] |
permissive
|
dtroyer/osc-quintette
|
59204e4ad2e25be237fb3ec13cbb5087518197d6
|
e37585936b1db9e87ab52e11e714afaf167a0039
|
refs/heads/master
| 2020-04-04T22:57:54.745055
| 2015-01-15T06:42:16
| 2015-01-15T06:42:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,266
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from oscquintette.tests import base
from oscquintette.tests import fakes
from oscquintette.v1 import plugin
# Load the plugin init module for the plugin list and show commands
import oscquintette.plugin
plugin_name = 'oscquintette'
plugin_client = 'oscquintette.plugin'
class FakePluginV1Client(object):
def __init__(self, **kwargs):
#self.servers = mock.Mock()
#self.servers.resource_class = fakes.FakeResource(None, {})
self.auth_token = kwargs['token']
self.management_url = kwargs['endpoint']
class TestPluginV1(base.TestCommand):
def setUp(self):
super(TestPluginV1, self).setUp()
self.app.client_manager.oscquintette = FakePluginV1Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
# Get a shortcut to the Service Catalog Mock
#self.catalog_mock = self.app.client_manager.identity.service_catalog
#self.catalog_mock.reset_mock()
class TestPluginList(TestPluginV1):
def setUp(self):
super(TestPluginList, self).setUp()
self.app.ext_modules = [
sys.modules[plugin_client],
]
# Get the command object to test
self.cmd = plugin.ListPlugin(self.app, None)
def test_plugin_list(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
collist = ('Name', 'Versions', 'Module')
self.assertEqual(columns, collist)
datalist = ((
plugin_name,
oscquintette.plugin.API_VERSIONS.keys(),
plugin_client,
), )
self.assertEqual(tuple(data), datalist)
class TestPluginShow(TestPluginV1):
def setUp(self):
super(TestPluginShow, self).setUp()
self.app.ext_modules = [
sys.modules[plugin_client],
]
# Get the command object to test
self.cmd = plugin.ShowPlugin(self.app, None)
def test_plugin_show(self):
arglist = [
plugin_name,
]
verifylist = [
('name', plugin_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
collist = ('1', 'module', 'name')
self.assertEqual(columns, collist)
datalist = (
oscquintette.plugin.API_VERSIONS['1'],
plugin_client,
plugin_name,
)
self.assertEqual(data, datalist)
|
[
"dtroyer@gmail.com"
] |
dtroyer@gmail.com
|
40a3d067d1e3b7a8dc8e422b14866b6111bd77a8
|
3e9ac661325657664f3f7fa26ff2edf5310a8341
|
/python/demo100/15.py
|
82e514ce0d7a6957012d7aafb52d784906df006e
|
[] |
no_license
|
JollenWang/study
|
47d1c22a6e15cb82d0ecfc6f43e32e3c61fbad36
|
660a47fd60dd1415f71da362232d710b322b932f
|
refs/heads/master
| 2020-06-15T23:53:37.625988
| 2017-04-21T11:18:20
| 2017-04-21T11:18:20
| 75,257,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
#author : Jollen Wang
#date : 2016/05/10
#version: 1.0
'''
Problem: use nested conditional logic to grade scores: a score >= 90 is graded A, 60-89 is graded B, and below 60 is graded C.
'''
def main():
score = int(raw_input("$>Enter the score:"))
print "grade=",
if score >= 90:
print "A"
elif score >= 60:
print "B"
else:
print "C"
if __name__ == "__main__":
main()
|
[
"jollen_wang@163.com"
] |
jollen_wang@163.com
|
dd84a0764d1cd38b85cddd32caf67859a5427497
|
4ac77337083c7fdb28a901831003cfd0e0ef7bf1
|
/any_urlfield/models/fields.py
|
2d6a67b84cb2f887d661bd1a22600a432304957f
|
[
"Apache-2.0"
] |
permissive
|
borgstrom/django-any-urlfield
|
deb6a10b87c26f53bb3ca5085d486238ab6c2a6c
|
3f97bfd628a5770268b715ee8f796aaab89cf841
|
refs/heads/master
| 2020-12-11T02:13:14.725873
| 2013-12-12T21:55:12
| 2013-12-12T21:55:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,655
|
py
|
"""
Custom model fields to link to CMS content.
"""
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.db import models
from any_urlfield.models.values import AnyUrlValue
from any_urlfield.registry import UrlTypeRegistry
class AnyUrlField(models.CharField):
"""
A CharField that can either refer to a CMS page ID, or external URL.
.. figure:: /images/anyurlfield1.*
:width: 363px
:height: 74px
:alt: AnyUrlField, with external URL input.
.. figure:: /images/anyurlfield2.*
:width: 290px
:height: 76px
:alt: AnyUrlField, with internal page input.
By default, the ``AnyUrlField`` only supports linking to external pages.
To add support for your own models (e.g. an ``Article`` model),
include the following code in :file:`models.py`:
.. code-block:: python
from any_urlfield.models import AnyUrlField
AnyUrlField.register_model(Article)
Now, the ``AnyUrlField`` offers users a dropdown field to directly select an article.
By default, it uses a :class:`django.forms.ModelChoiceField` field with a :class:`django.forms.Select` widget
to render the field. This can be customized using the ``form_field`` and ``widget`` parameters:
.. code-block:: python
from any_urlfield.models import AnyUrlField
from any_urlfield.forms import SimpleRawIdWidget
AnyUrlField.register_model(Article, widget=SimpleRawIdWidget(Article))
Now, the ``Article`` model will be displayed as raw input field with a browse button.
"""
__metaclass__ = models.SubfieldBase
_static_registry = UrlTypeRegistry() # Also accessed by AnyUrlValue as internal field.
def __init__(self, *args, **kwargs):
if not kwargs.has_key('max_length'):
kwargs['max_length'] = 300
super(AnyUrlField, self).__init__(*args, **kwargs)
@classmethod
def register_model(cls, ModelClass, form_field=None, widget=None, title=None, prefix=None):
"""
Register a model to use in the URL field.
This function needs to be called once for every model
that should be selectable in the URL field.
:param ModelClass: The model to register.
:param form_field: The form field class used to render the field.
:param widget: The widget class, can be used instead of the form field.
:param title: The title of the model, by default it uses the models ``verbose_name``.
:param prefix: A custom prefix for the model in the serialized database format. By default it uses "appname.modelname".
"""
cls._static_registry.register(ModelClass, form_field, widget, title, prefix)
def formfield(self, **kwargs):
# Associate formfield.
# Import locally to avoid circular references.
from any_urlfield.forms.fields import AnyUrlField as AnyUrlFormField
kwargs['form_class'] = AnyUrlFormField
kwargs['url_type_registry'] = self._static_registry
if kwargs.has_key('widget'):
del kwargs['widget']
return super(AnyUrlField, self).formfield(**kwargs)
def to_python(self, value):
if isinstance(value, AnyUrlValue):
return value
# Convert the string value
if value is None:
return None
return AnyUrlValue.from_db_value(value, self._static_registry)
def get_prep_value(self, value):
if isinstance(value, basestring):
# Happens with south migration
return value
elif value is None:
return None if self.null else ''
else:
# Convert back to string
return value.to_db_value()
def value_to_string(self, obj):
# For dumpdata
value = self._get_val_from_obj(obj)
return self.get_prep_value(value)
def validate(self, value, model_instance):
# Final validation of the field, before storing in the DB.
super(AnyUrlField, self).validate(value, model_instance)
if value:
if value.type_prefix == 'http':
validate_url = URLValidator()
validate_url(value.type_value)
elif value.type_value:
if not value.exists():
raise ValidationError(self.error_messages['invalid_choice'] % value.type_value)
# Tell South how to create custom fields
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], [
"^" + __name__.replace(".", "\.") + "\.AnyUrlField",
])
except ImportError:
pass
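# Illustrative sketch (not part of the original module): how a project might
# declare a model that uses AnyUrlField. The ``Banner`` model and its app are
# hypothetical; only AnyUrlField itself comes from this module.
#
#     from django.db import models
#     from any_urlfield.models import AnyUrlField
#
#     class Banner(models.Model):
#         url = AnyUrlField("Link")  # serialized via AnyUrlValue.to_db_value() using the registered prefix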
|
[
"vdboor@edoburu.nl"
] |
vdboor@edoburu.nl
|
6a71b1c6d6048d65ec434b7db6b8ca6701671ae5
|
8d1237874cceee240d605edc0caa4ac5255d040e
|
/HW4/example-ptt(version dict).py
|
b51264fe5da721ebace4a993db6d7de471b097f7
|
[] |
no_license
|
fatwinnie/Sam_Test
|
545f9c7f4782d7e279a2aafd3278e58b8272ee23
|
3ba99bd3d21baeb74b94c703edcad1d480b99661
|
refs/heads/master
| 2022-12-27T16:06:47.323058
| 2020-10-16T09:51:44
| 2020-10-16T09:51:44
| 287,498,760
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,342
|
py
|
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
url = "https://www.ptt.cc/bbs/joke/index.html"
response = requests.get(url)
html_doc = response.text  # the text attribute holds the raw HTML document
soup = BeautifulSoup(response.text, "html.parser")
post_author = []  # an empty list for the author ids
recommend = []    # push (recommendation) counts
post_title = []   # post titles
post_date = []    # post dates
posts = soup.find_all("div", class_ = "r-ent")
for post in posts:
try:
post_author.append(post.find("div", class_ = "author").string)
except:
post_author.append(np.nan)
try:
post_title.append(post.find("a").string)
except:
        post_title.append(np.nan)
try:
post_date.append(post.find("div", class_ = "date").string)
except:
        post_date.append(np.nan)
find_recommend = soup.find_all("div", class_ = "nrec")
for rec in find_recommend:
try:
#recommend.append(post.find("span").string)
recommend.append(int(rec.find("span").string))
except:
recommend.append(np.nan)
joke_dict = {"author": post_author,
"recommends": recommend,
"title": post_title,
"date": post_date
}
joke_df = pd.DataFrame(joke_dict)
#joke_df
joke_df.to_csv('beatifulsoup.csv',encoding='utf_8_sig')
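# Note added for clarity: the 'utf_8_sig' encoding writes a UTF-8 byte-order mark,
# so spreadsheet tools such as Excel detect the encoding and display the Chinese
# titles correctly when the CSV is opened.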
|
[
"noreply@github.com"
] |
fatwinnie.noreply@github.com
|
7b1ce87dae5357070654d64c13e0421c247e54da
|
51f886e82c1b670d2202e9e2dee9ae7e776f55ad
|
/HITB-XCTF 2018/WEB/Python's revenge/run.py
|
ff9357baaba52f64c3568293733c5bb94d5dd212
|
[] |
no_license
|
kankanhua/CTFS
|
e2249699c0c8096b3362fd7b79add6fa897fffa9
|
2ea1cedbd124e9136e28ad77d5173524a90ee0b3
|
refs/heads/master
| 2020-04-03T01:19:05.281002
| 2018-07-09T13:20:57
| 2018-07-09T13:20:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
#!flask/bin/python
from app import *
if __name__ == "__main__":
app.run()
|
[
"523279238@qq.com"
] |
523279238@qq.com
|
3df1a403fdfd729c21e8a16868b681af9bfc772f
|
7f8ac39fa2d83a6a12cbb7ac26996dc465f57bcb
|
/Analysis/susyAnalysis.py
|
3f1324ab35c40586a4a7a5d10f4c283c9fbe1a80
|
[] |
no_license
|
ingridavh/SUSY
|
7450ce7c2bec21e78158c2448eb7d016fddbb875
|
8a9b3a6eae1720b5674a06d01ffda91b87234554
|
refs/heads/master
| 2021-01-20T09:36:55.930940
| 2017-06-01T17:42:43
| 2017-06-01T17:42:43
| 90,267,226
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,706
|
py
|
import AnalysisHelpers as AH
import ROOT
import Analysis
import math
#======================================================================
class susyAnalysis(Analysis.Analysis):
"""Semileptonic susyAnalysis loosely based on the ATLAS analyses of top pair events where
one W boson decays to leptons and one decays to hadrons.
"""
def __init__(self, store):
super(susyAnalysis, self).__init__(store)
def initialize(self):
self.hist_WtMass = self.addStandardHistogram("WtMass")
self.hist_leptn = self.addStandardHistogram("lep_n")
self.hist_leptpt = self.addStandardHistogram("lep_pt")
self.hist_lepteta = self.addStandardHistogram("lep_eta")
self.hist_leptE = self.addStandardHistogram("lep_E")
self.hist_leptphi = self.addStandardHistogram("lep_phi")
self.hist_leptch = self.addStandardHistogram("lep_charge")
self.hist_leptID = self.addStandardHistogram("lep_type")
self.hist_leptptc = self.addStandardHistogram("lep_ptconerel30")
self.hist_leptetc = self.addStandardHistogram("lep_etconerel20")
self.hist_lepz0 = self.addStandardHistogram("lep_z0")
self.hist_lepd0 = self.addStandardHistogram("lep_d0")
self.hist_njets = self.addStandardHistogram("n_jets")
self.hist_jetspt = self.addStandardHistogram("jet_pt")
self.hist_jetm = self.addStandardHistogram("jet_m")
self.hist_jetJVF = self.addStandardHistogram("jet_jvf")
self.hist_jeteta = self.addStandardHistogram("jet_eta")
self.hist_jetmv1 = self.addStandardHistogram("jet_MV1")
self.hist_jet1pt = self.addStandardHistogram("jet1_pt")
self.hist_jet2pt = self.addStandardHistogram("jet2_pt")
self.hist_jet3pt = self.addStandardHistogram("jet3_pt")
self.hist_etmiss = self.addStandardHistogram("etmiss")
self.hist_vxp_z = self.addStandardHistogram("vxp_z")
self.hist_pvxp_n = self.addStandardHistogram("pvxp_n")
#Self-added
self.hist_mt = self.addStandardHistogram("mt")
self.hist_meff = self.addStandardHistogram("meff")
self.hist_meratio = self.addStandardHistogram("meratio")
def analyze(self):
# retrieving objects
eventinfo = self.Store.getEventInfo()
weight = eventinfo.scalefactor()*eventinfo.eventWeight() if not self.getIsData() else 1
self.countEvent("all", weight)
# apply standard event based selection
if not AH.StandardEventCuts(eventinfo): return False
self.countEvent("EventCuts", weight)
# neutrinos are expected, so cut on missing transverse momentum
#-----------------------------------Change cut according to signal
etmiss = self.Store.getEtMiss()
#self.countEvent("MET", weight)
# one good lepton from one of the W boson decays is expected, so require exactly one good lepton
goodLeptons = AH.selectAndSortContainer(self.Store.getLeptons(), AH.isGoodLepton, lambda p: p.pt())
if not (len(goodLeptons) == 1): return False
self.countEvent("1 Lepton", weight)
leadlepton = goodLeptons[0]
# two jets from one of the W boson decays as well as two b-jets from the top pair decays are expected
#-------------------------------------------------------
        # Here you can choose the number of jets
goodJets = AH.selectAndSortContainer(self.Store.getJets(), AH.isGoodJet, lambda p: p.pt())
#Require at least 3 jets
if not (len(goodJets) >= 3) : return False
self.countEvent("Jets", weight)
#PT requirement on leading lepton
if not (leadlepton.pt() > 25) : return False
#lepton veto
if len(goodLeptons) > 1 :
if not (goodLeptons[-1].pt() > 10): return False
#----------------------------------------------------
#PT requirement on leading jets
if not goodJets[0].pt() > 80: return False
if not goodJets[1].pt() > 80: return False
if not goodJets[2].pt() > 30: return False
#Jet veto
if len(goodJets) > 4:
if goodJets[4].pt() > 40: return False
        # Cut on missing transverse energy (currently 500 GeV; 400 GeV was considered for the plot range)
        if not (etmiss.et() > 500.0): return False
# apply the b-tagging requirement using the MV1 algorithm at 80% efficiency
#-------------------------Not looking for b-jets
btags = sum([1 for jet in goodJets if jet.mv1() > 0.7892])
#if not (btags >= 2): return False
#self.countEvent("btags", weight)
# apply a cut on the transverse mass of the W boson decaying to leptons
#-----------------Change transverse mass mass according to sample
#if not (AH.WTransverseMass(leadlepton, etmiss) > 30.0): return False
#trasverse mass
mt = math.sqrt( 2*leadlepton.pt() * etmiss.et() * (1-math.cos(leadlepton.phi()) ) )
#inclusive effective mass and exclusive effective mass
meffincl = etmiss.et()
meffexcl = etmiss.et()
for i in range(len(goodLeptons)) :
meffincl += goodLeptons[i].pt()
meffexcl += goodLeptons[i].pt()
for j in range(len(goodJets)) : meffincl += goodJets[j].pt()
for k in range(3) : meffexcl += goodJets[k].pt()
#Ratio
emratio = 0
if not meffexcl == 0 :
emratio = etmiss.et()/float(meffexcl)
# cut on transverse mass
if not (mt > 150) : return False
#cut on ratio
if not (emratio > 0.3) : return False
#cut on effective inclusive mass
if not (meffincl > 1400) : return False
# Histograms detailing event information
self.hist_vxp_z.Fill(eventinfo.primaryVertexPosition(), weight)
self.hist_pvxp_n.Fill(eventinfo.numberOfVertices(), weight)
# histograms for the W boson properties
self.hist_WtMass.Fill(AH.WTransverseMass(leadlepton, etmiss), weight)
# histograms for missing et
self.hist_etmiss.Fill(etmiss.et(),weight)
# histograms detailing lepton information
self.hist_leptn.Fill(len(goodLeptons), weight)
self.hist_leptpt.Fill(leadlepton.pt(), weight)
self.hist_lepteta.Fill(leadlepton.eta(), weight)
self.hist_leptE.Fill(leadlepton.e(), weight)
self.hist_leptphi.Fill(leadlepton.phi(), weight)
self.hist_leptch.Fill(leadlepton.charge(), weight)
self.hist_leptID.Fill(leadlepton.pdgId(), weight)
self.hist_lepz0.Fill(leadlepton.z0(), weight)
self.hist_lepd0.Fill(leadlepton.d0(), weight)
self.hist_leptptc.Fill(leadlepton.isoptconerel30(), weight)
self.hist_leptetc.Fill(leadlepton.isoetconerel20(), weight)
# histograms detailing jet information
self.hist_njets.Fill(len(goodJets), weight)
[self.hist_jetm.Fill(jet.m(), weight) for jet in goodJets]
[self.hist_jetspt.Fill(jet.pt(), weight) for jet in goodJets]
[self.hist_jetJVF.Fill(jet.jvf(), weight) for jet in goodJets]
[self.hist_jeteta.Fill(jet.eta(), weight) for jet in goodJets]
[self.hist_jetmv1.Fill(jet.mv1(), weight) for jet in goodJets]
self.hist_jet1pt.Fill(goodJets[0].pt(), weight)
self.hist_jet2pt.Fill(goodJets[1].pt(), weight)
self.hist_jet3pt.Fill(goodJets[2].pt(), weight)
#Histograms detailing self-added
self.hist_mt.Fill(mt, weight)
self.hist_meff.Fill(meffincl, weight)
self.hist_meratio.Fill(emratio, weight)
return True
def finalize(self):
pass
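# Summary of the quantities computed above (a restatement of the code, not an
# addition to the analysis): the transverse mass is
#     mt = sqrt(2 * pT(lepton) * ETmiss * (1 - cos(phi_lepton))),
# the inclusive effective mass meffincl sums ETmiss with the pT of all good
# leptons and jets, the exclusive meffexcl uses only the three leading jets,
# and emratio = ETmiss / meffexcl is the ratio that the final cuts act on.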
|
[
"ingridavh@hotmail.com"
] |
ingridavh@hotmail.com
|
3a5877287dae4f76c16aa6d37576640de2e97602
|
127cdebd60c1c439f559c1f711d071c0d920d131
|
/leilei/cainiao.py
|
d88ffe7d7fa247568fd80458973cfb4e750a4fe7
|
[] |
no_license
|
fei2yi/mytoos
|
b13660af90b2a4029bbb9fd292961d8007e856c8
|
9901e33a0a19f1ac0bf19f7b7c7b2eb0e58b96b0
|
refs/heads/master
| 2020-04-08T18:27:59.262780
| 2019-05-16T03:45:02
| 2019-05-16T03:45:02
| 159,609,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,127
|
py
|
import xlrd
from win32com import client as wc
from docx import Document
kinship,amend = {},{}
name,num,mynum,col_index,margin,mf_list,ROW = [],[],[],[],[],[],[]
# def doSaveAas():
# word = wc.Dispatch('Word.Application')
# doc = word.Documents.Open(u'aaaa.doc') # the source file under the target path
# doc.SaveAs(u'E:\\bin\\tuokoulv\\aaaa.docx', 12, False, "", True, "", False, False, False, False) # the converted file under the output path
# doc.Close()
# word.Quit()
path = 'aaaa.docx'
document = Document(path)  # read in the document
tables = document.tables  # get the collection of tables in the document
table1 = tables[2]
for row in range(1,len(table1.rows)):
kinship[table1.cell(row,0).text] = table1.cell(row,2).text
print('kinship',kinship)
table = tables[-2]  # get the second-to-last table in the document
for row in range(0, len(table.rows)):  # loop over the table rows and read the data
if row == 0:
for col in range(0, len(table.columns)):
name.append(table.cell(row,col).text)
for col in range(3, len(table.columns)):
if table.cell(row,col).text != '':
num.append(table.cell(row,col).text)
mynum.append(table.cell(row,col).text)
elif table.cell(row,col).text == '':
num.append(num[-1])
pre = ''
lis_ind = []
print('num',num)
for i, vvv in enumerate(num):
if kinship[vvv] != '胚胎':
continue
elif vvv != pre:
lis_ind = []
lis_ind.append(i + 3)
col_index.append(lis_ind)
else:
lis_ind.append(i + 3)
pre = vvv
temp = []
# print('col_index1',col_index)
for clid, cl in enumerate(col_index):
if clid == len(col_index) - 1:
temp.append(cl)
else:
temp.append(cl[:-1])
margin.append(cl[-2])
amend [cl[-2]] = mynum[clid]
col_index = temp
print('col_index',col_index)
        # cell(i,0) holds the value in row (i+1), column 1, and so on
elif row == 1:
for col in range(0, len(table.columns)):
if col < 3:
ROW.append('')
else:
ROW.append(table.cell(row,col).text)
print('ROW', ROW)
MF_ = []
for i, vvv in enumerate(ROW):
if vvv:
MF_.append(vvv[0])
elif not vvv and MF_:
mf_list.append(MF_)
MF_ = []
if i == len(ROW) - 1:
mf_list.append(MF_)
print('mf_list',mf_list)
else:
rews_h = []
xf = []
for h, vh in enumerate(ROW):
# print('vh',vh)
# print('rows',rows)
xf = table.cell(row,h)
print(type(xf))
print(xf.background)
# bgx = xfx.background.pattern_colour_index
# if bgx == 10:
# rews_h.append(1)
# elif vh == '?':
# rews_h.append(2)
# else:
# rews_h.append(0)
# base_.append(rews_h)
# doSaveAas()
|
[
"415923506@qq.com"
] |
415923506@qq.com
|
28bf8e32b2fc71691571cc473c7d4d6c7cefcf3a
|
fe98f7502a5724be0ec7ec3ae73ff4703d299d6e
|
/Neural Tree/data.py
|
1b85e274b45f66319d308125e39f23e90bf4375f
|
[] |
no_license
|
SoumitraAgarwal/BTP
|
92ab095aacf3dd374148f40b9e777bb49c4253f1
|
07df960ad7e8680680a9d3494c8a860b394867d1
|
refs/heads/master
| 2020-03-16T12:39:13.548988
| 2018-05-09T06:09:11
| 2018-05-09T06:09:11
| 132,671,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,513
|
py
|
import random
import math
import pandas as pd
import numpy as np
from sklearn import preprocessing
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d, Axes3D
plt.switch_backend('agg')
random.seed(311)
def generate(radius, centre):
alpha = 2 * math.pi * random.random()
r = radius*random.random()
x = r*math.cos(alpha) + centre[0]
y = r*math.sin(alpha) + centre[1]
return [x,y]
k = 10
n = 600
ranger = 500
C = []
X = []
Y = []
for j in range(k):
T = [random.uniform(0, ranger), random.uniform(0, ranger)]
temp = []
C.append([[j*ranger + random.uniform(0, ranger), ranger*random.uniform(0, k)], 400*random.uniform(0, 1)])
for i in range(n):
temp.append(generate(C[j][1], C[j][0]))
temp = np.asarray(temp)
Y.append(np.matmul(temp,T))
X.append(temp)
X = np.asarray(X)
Y = np.asarray(Y)
fig = plt.figure()
ax1 = fig.add_subplot(111)
colors = cm.rainbow(np.linspace(0, 1, len(Y)))
for i in range(k):
x1, y1 = X[i].T
ax1.scatter( x1,y1, s = 3, marker="o", label='target', color=colors[i])
plt.savefig('Data.png')
X1 = []
X2 = []
for i in range(k):
x1,x2 = X[i].T
X1.append(x1)
X2.append(x2)
X1 = np.asarray(X1)
X2 = np.asarray(X2)
Y = Y.ravel()
X1 = X1.ravel()
X2 = X2.ravel()
X1 = preprocessing.scale(X1)
X2 = preprocessing.scale(X2)
Y = preprocessing.scale(Y)
data = pd.DataFrame(data = {
'X1':X1,
'X2':X2,
'Y' :Y
})
data = data.sample(frac=1).reset_index(drop=True)
data.to_csv('data.csv', index = False)
|
[
"agarwalsoumitra1504@gmail.com"
] |
agarwalsoumitra1504@gmail.com
|
5ddbda28127ab2fb18249701f06df9c1649219a4
|
8fe781f8ac5b1c1d5214ac5a87c5ad855f791a6d
|
/src/clean_data.py
|
90720e0134fea7776aa816fbd08598bb52e51b1b
|
[] |
no_license
|
ternaus/kaggle_ultrasound
|
fabf45b89f5ab0888bb22e9b5205d90b14ce8f06
|
2d688d0cea8e2b1651980e972b1d6400b797c70b
|
refs/heads/master
| 2021-01-11T15:48:19.835115
| 2016-08-20T01:47:37
| 2016-08-20T01:47:37
| 64,818,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,349
|
py
|
from __future__ import division
"""
This script makes train data less noisy in a way:
Finds similar images assigns to these clusters of images max mask
"""
import networkx as nx
import os
import pandas as pd
from tqdm import tqdm
from PIL import Image
import glob
import pandas as pd
import cv2
import os
import numpy as np
from pylab import *
from tqdm import tqdm
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import pdist, squareform
image_rows = 420
image_cols = 580
data_path = '../data'
train_data_path = os.path.join(data_path, 'train')
images = os.listdir(train_data_path)
total = len(images) / 2
imgs = np.ndarray((total, 1, image_rows, image_cols), dtype=np.uint8)
imgs_mask = np.ndarray((total, 1, image_rows, image_cols), dtype=np.uint8)
i = 0
print('-'*30)
print('Creating training images...')
print('-'*30)
for image_name in tqdm(images):
if 'mask' in image_name:
continue
image_mask_name = image_name.split('.')[0] + '_mask.tif'
img = cv2.imread(os.path.join(train_data_path, image_name), cv2.IMREAD_GRAYSCALE)
img_mask = cv2.imread(os.path.join(train_data_path, image_mask_name), cv2.IMREAD_GRAYSCALE)
img = np.array([img])
img_mask = np.array([img_mask])
imgs[i] = img
imgs_mask[i] = img_mask
i += 1
print('Loading done.')
train_ids = [x for x in images if 'mask' not in x]
train = pd.DataFrame()
train['subject'] = map(lambda x: int(x.split('_')[0]), train_ids)
train['filename'] = train_ids
train['image_num'] = map(lambda x: int(x.split('.')[0].split('_')[1]), train_ids)
imgs_flat = np.reshape(imgs, (5635, 420*580))
for subject in train['subject'].unique():
a = imgs_flat[(train['subject'] == subject).astype(int).values == 1]
b = squareform(pdist(a))
graph = []
for i in range(1, 2000):
for j in range(i + 1, 120):
if (b < 5000)[(i, j)]:
graph += [(i, j)]
G = nx.Graph()
G.add_edges_from(graph)
connected_components = list(map(list, nx.connected_component_subgraphs(G)))
    clusters = pd.DataFrame(zip(range(len(connected_components)), connected_components),
                            columns=['cluster_name', 'components'])
temp = pd.DataFrame()
temp['image_num'] = train.loc[(train['subject'] == subject), 'image_num']
temp['subject'] = subject
|
[
"iglovikov@gmail.com"
] |
iglovikov@gmail.com
|
95a196583e24ca85981351ed0bf9c3a4ece3c9bb
|
f6644453d0a228cec214d09551468105134c5da3
|
/mobilenetv2.py
|
84c1b802d29d0097fc7043eceb38466e66aa5a92
|
[] |
no_license
|
ibadami/MobileNet_v2
|
97a49692edce291631b8d123ae2031c8179ff395
|
b10065448bf65e007032e0e04a170045c4a0b8b2
|
refs/heads/master
| 2020-04-17T22:02:34.702865
| 2019-01-22T10:53:33
| 2019-01-22T10:53:33
| 166,977,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,710
|
py
|
"""MobileNet v2 model for TFLearn.
# Reference
[MobileNetV2: Inverted Residuals and Linear Bottlenecks]
(https://arxiv.org/abs/1801.04381)
__author__ = "Ishrat Badami, badami@nevisq.com"
"""
from tflearn.layers.conv import conv_2d, grouped_conv_2d, global_avg_pool
from tflearn.layers.core import dropout, reshape, input_data
from tflearn.layers.merge_ops import merge
from tflearn.layers.normalization import batch_normalization
def _conv_block(input_net, filters, kernel, strides):
"""Convolution Block
This function defines a 2D convolution operation with BN and relu6.
Parameters
----------
input_net: Tensor, input tensor of convolution layer.
filters: Integer, the dimensionality of the output space.
kernel: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
# Returns
Output tensor.
"""
net = conv_2d(input_net, filters, kernel, strides, activation='relu6', weights_init='xavier')
net = batch_normalization(net)
return net
def _bottleneck(input_net, filters, kernel, t, s, r=False):
"""Bottleneck
This function defines a basic bottleneck structure.
Parameters
----------
input_net: Tensor, input tensor of conv layer.
filters: Integer, the dimensionality of the output space.
kernel: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
t: Integer, expansion factor.
t is always applied to the input size.
s: An integer or tuple/list of 2 integers,specifying the strides
of the convolution along the width and height.Can be a single
integer to specify the same value for all spatial dimensions.
r: Boolean, Whether to use the residuals.
# Returns
Output tensor.
"""
t_channel = input_net.shape[3] * t # channel expansion
net = _conv_block(input_net, t_channel, (1, 1), (1, 1))
net = grouped_conv_2d(net, channel_multiplier=1, filter_size=kernel, strides=(s, s), padding='same',
activation='relu6', weights_init='xavier')
net = batch_normalization(net)
net = conv_2d(net, filters, (1, 1), strides=(1, 1), padding='same')
net = batch_normalization(net)
if r:
net = merge([net, input_net], 'elemwise_sum')
return net
def _inverted_residual_block(input_net, filters, kernel, t, strides, n):
"""Inverted Residual Block
This function defines a sequence of 1 or more identical layers.
Parameters
----------
input_net: Tensor, input tensor of conv layer.
filters: Integer, the dimensionality of the output space.
kernel: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
t: Integer, expansion factor.
t is always applied to the input size.
    strides: An integer or tuple/list of 2 integers, specifying the strides
        of the convolution along the width and height. Can be a single
        integer to specify the same value for all spatial dimensions.
n: Integer, layer repeat times.
# Returns
Output tensor.
"""
net = _bottleneck(input_net, filters, kernel, t, strides)
for i in range(1, n):
net = _bottleneck(net, filters, kernel, t, 1, True)
return net
def mobile_net_v2(input_shape, n_classes, img_prep=None, img_aug=None):
"""MobileNetv2
This function defines a MobileNetv2 architectures.
Parameters
----------
input_shape: An integer or tuple/list of 3 integers, shape
of input tensor.
n_classes: Number of classes.
img_prep: Function handle for image pre-processing
img_aug: Function handle for image augmentation
# Returns
MobileNetv2 model.
"""
inputs = input_data(shape=input_shape, data_preprocessing=img_prep, data_augmentation=img_aug)
x = reshape(inputs, [-1, input_shape[0], input_shape[1], 1])
x = _conv_block(x, 32, (3, 3), strides=(2, 2))
x = _inverted_residual_block(x, 16, (3, 3), t=1, strides=1, n=1)
x = _inverted_residual_block(x, 24, (3, 3), t=6, strides=2, n=2)
x = _inverted_residual_block(x, 32, (3, 3), t=6, strides=2, n=3)
x = _inverted_residual_block(x, 64, (3, 3), t=6, strides=2, n=4)
x = _inverted_residual_block(x, 96, (3, 3), t=6, strides=1, n=3)
x = _inverted_residual_block(x, 160, (3, 3), t=6, strides=2, n=3)
x = _inverted_residual_block(x, 320, (3, 3), t=6, strides=1, n=1)
x = _conv_block(x, 1280, (1, 1), strides=(1, 1))
x = global_avg_pool(x)
x = reshape(x, [-1, 1, 1, 1280])
x = dropout(x, 0.3, name='Dropout')
x = conv_2d(x, n_classes, (1, 1), padding='same', activation='softmax', weights_init='xavier')
output = reshape(x, [-1, n_classes])
return output
|
[
"noreply@github.com"
] |
ibadami.noreply@github.com
|
015abfc9ea79a4582643af75018f4ff2b5c66e16
|
97b87984b70021730df04bac0b5cf1d076bf4592
|
/quartile.py
|
cbbe94b69dd22d55559ef291addce60772d64294
|
[] |
no_license
|
ayush-ruel/Statistics
|
0bcede4691e861ec45e4b4d2f3d9274e071b6b78
|
1f2ba99d685c8a0a224d41b9cb46dc6b255f5a8f
|
refs/heads/master
| 2020-04-23T18:02:54.940588
| 2019-02-26T21:59:03
| 2019-02-26T21:59:03
| 171,353,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
from statistics import median
n=int(input())
arr=[int(x) for x in input().split()]
arr.sort()
t=int(len(arr)/2)
if len(arr)%2==0:
L=arr[:t]
U=arr[t:]
else:
L=arr[:t]
U=arr[t+1:]
print(int(median(L)))
print(int(median(arr)))
print(int(median(U)))
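# Worked example (input format assumed from the reads above): for n=9 and the
# values "3 7 8 5 12 14 21 13 18", the sorted list is [3, 5, 7, 8, 12, 13, 14, 18, 21];
# the lower half [3, 5, 7, 8] gives Q1 = 6, the overall median is 12, and the
# upper half [13, 14, 18, 21] gives Q3 = 16, so the script prints 6, 12, 16.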
|
[
"noreply@github.com"
] |
ayush-ruel.noreply@github.com
|
e9c684dd4c93afc3e080a8243908e154b3b2ebeb
|
ff4fc5ffab9143e5440ae78415b1128429530a5f
|
/knowledge_files/函数列表.py
|
ee836d26dae015ce05eb4a8b6fc3c47366260076
|
[] |
no_license
|
LyuuuuXinwei/knowledge
|
4ee2a4cdc5007430ca6ef09a1368604b89deead0
|
218f1c903bc8113b70fd4997e5f5349a86263153
|
refs/heads/master
| 2021-01-20T01:50:04.702084
| 2018-02-08T16:18:48
| 2018-02-08T16:18:48
| 101,301,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
match_  # regex check: does this noun need the pluralisation rule applied?
apply_  # apply the replacement via a regex substitution
import re
def match_sxz(noun):
return re.search('[sxz]$', noun)
def apply_sxz(noun):
return re.sub('$', 'es', noun)
'''rules: a list of (match, apply) function pairs'''
rules = ((match_sxz, apply_sxz),
(match_h, apply_h),
(match_y, apply_y),
(match_default, apply_default)
)
def plural(noun):
for matches_rule, apply_rule in rules:
if matches_rule(noun):
return apply_rule(noun)
'''Appreciate how concise a high-level language can be.'''
'''Closures: dynamically built functions.'''
def build_match_and_apply_functions(pattern, search, replace):
def matches_rule(word):
return re.search(pattern, word)
    def apply_rule(word):
return re.sub(search, replace, word)
return(matches_rule, apply_rule)
'''Build a tuple of four parameter groups; constructing the arguments this way stays tidy and concise.'''
patterns=(('[sxz]$', '$', 'es'),('[^aeioudgkprt]h$', '$', 'es'),('(qu|[^aeiou])y$', 'y$', 'ies'),('$', '$', 's'))
'''The for clause unpacks each parameter group; the result stays concise.'''
rules = [build_match_and_apply_functions(pattern, search, replace) for (pattern, search, replace) in patterns]
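# Hypothetical usage of the closure-built rules above (outputs derived from the
# regular expressions, not taken from the original notes):
#     plural('box')     # -> 'boxes'    via the '[sxz]$' rule
#     plural('cherry')  # -> 'cherries' via the '(qu|[^aeiou])y$' rule
#     plural('day')     # -> 'days'     via the default '$' -> 's' rule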
|
[
"416626614@qq.com"
] |
416626614@qq.com
|
2129c72f2a51f1fe14350be0ad03e9f24634a8fb
|
ccb7633dae1393b546a189e0fd513665fae3e49f
|
/lambda/lambda_function.py
|
56e022fca38257a6b11f88d467cc74fffe8ec3e3
|
[] |
no_license
|
rahulsinha036/Weather-API-Alexa-Skill
|
65003976b119aaa0f154a9df52fcd0fa816f39eb
|
d07bf560f62a3c351e5d78111a310ff2b68ce46b
|
refs/heads/master
| 2022-12-25T05:34:51.151714
| 2020-09-27T10:35:09
| 2020-09-27T10:35:09
| 299,008,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,801
|
py
|
# -*- coding: utf-8 -*-
# This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK for Python.
# Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,
# session persistence, api calls, and more.
# This sample is built using the handler classes approach in skill builder.
import logging
import ask_sdk_core.utils as ask_utils
import requests
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class LaunchRequestHandler(AbstractRequestHandler):
"""Handler for Skill Launch."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("LaunchRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Welcome to RS weather."
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class GetWeatherIntentHandler(AbstractRequestHandler):
"""Handler for Get Weather Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("GetWeatherIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
slots = handler_input.request_envelope.request.intent.slots
city = slots['city'].value
speak_output = ''
api_address = "http://api.openweathermap.org/data/2.5/weather?appid=d2926362ecd564ba1863bb2df0772161&units=metric&q="
url = api_address + city
json_data = requests.get(url).json()
formatted_json = json_data['weather'][0]['main']
temp = json_data['main']['temp']
name = json_data['name']
sys = json_data['sys']['country']
description = json_data['weather'][0]['description']
speak_output = "The weather is {}, {} and temp is {} of {} in {}.".format(formatted_json, description, temp, name, sys)
        repromptOutput = "Do you want to know the weather of another city?"
return (
handler_input.response_builder
.speak(speak_output)
.ask(repromptOutput)
.response
)
class HelpIntentHandler(AbstractRequestHandler):
"""Handler for Help Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("AMAZON.HelpIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Hello,How we can help you? You can say that tell me new york tipsy weather."
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class CancelOrStopIntentHandler(AbstractRequestHandler):
"""Single handler for Cancel and Stop Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return (ask_utils.is_intent_name("AMAZON.CancelIntent")(handler_input) or
ask_utils.is_intent_name("AMAZON.StopIntent")(handler_input))
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Goodbye! see you soon."
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class SessionEndedRequestHandler(AbstractRequestHandler):
"""Handler for Session End."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("SessionEndedRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
# Any cleanup logic goes here.
return handler_input.response_builder.response
class IntentReflectorHandler(AbstractRequestHandler):
"""The intent reflector is used for interaction model testing and debugging.
It will simply repeat the intent the user said. You can create custom handlers
for your intents by defining them above, then also adding them to the request
handler chain below.
"""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("IntentRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
intent_name = ask_utils.get_intent_name(handler_input)
speak_output = "You just triggered " + intent_name + "."
return (
handler_input.response_builder
.speak(speak_output)
# .ask("add a reprompt if you want to keep the session open for the user to respond")
.response
)
class CatchAllExceptionHandler(AbstractExceptionHandler):
"""Generic error handling to capture any syntax or routing errors. If you receive an error
stating the request handler chain is not found, you have not implemented a handler for
the intent being invoked or included it in the skill builder below.
"""
def can_handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> bool
return True
def handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> Response
logger.error(exception, exc_info=True)
speak_output = "Sorry, I had trouble doing what you asked. Please try again."
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
# The SkillBuilder object acts as the entry point for your skill, routing all request and response
# payloads to the handlers above. Make sure any new handlers or interceptors you've
# defined are included below. The order matters - they're processed top to bottom.
sb = SkillBuilder()
sb.add_request_handler(LaunchRequestHandler())
sb.add_request_handler(GetWeatherIntentHandler())
sb.add_request_handler(HelpIntentHandler())
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(SessionEndedRequestHandler())
sb.add_request_handler(IntentReflectorHandler()) # make sure IntentReflectorHandler is last so it doesn't override your custom intent handlers
sb.add_exception_handler(CatchAllExceptionHandler())
lambda_handler = sb.lambda_handler()
|
[
"noreply@github.com"
] |
rahulsinha036.noreply@github.com
|
8bf5be3241894ee6b7a5f6ec3ba7cdc9946a56f7
|
57e1c170b3721e2c4f9529415b0c015d0de91ff1
|
/utils.py
|
675248601b525ba2ea93060751236375ab0ed512
|
[] |
no_license
|
sobadrush/DockerDev-ReadCsv2Excel
|
3b849222f7cd0a64fed423df1ff179cdfc5f9794
|
4e1d1c088f6da9a34473987f8c671965b3829d7e
|
refs/heads/main
| 2023-09-05T10:00:37.796979
| 2021-11-22T07:50:41
| 2021-11-22T07:50:41
| 417,730,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 886
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def plus(a, b):
return a + b
def handleMemo(_list):
if len(_list) == 1 or _list[0] == '':
return
# for idx, val in enumerate(tempLine):
# if idx >= 4:
# tempLine[idx] = val.replace(" ", ",")
# return tempLine
# ref. https://www.geeksforgeeks.org/python-replace-elements-greater-than-k/
tempList = [elem.replace(" ", ",") if idx >= 4 else elem for idx, elem in enumerate(_list)]
# 1. Google: python clear list
# 2. Concate list: ref. https://stackoverflow.com/questions/1720421/how-do-i-concatenate-two-lists-in-python
_list[:] = tempList[0:5] + [''.join(tempList[5:])]
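    # Illustrative before/after (sample values assumed): the list
    # ["A1", "B2", "C3", "D4", "E 5", "F 6"] becomes
    # ["A1", "B2", "C3", "D4", "E,5", "F,6"] -- spaces are replaced by commas
    # from index 4 onward and everything past the fifth element is joined into
    # one trailing string.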
if __name__ == '__main__':
print(f'My Answer is: {plus(3, 4)}')
# myList = ["A1", "B2", "C3", "D4", "E 5", "F 6"]
# handleMemo(myList)
# print(f'handleMemo is: {myList}')
|
[
"sobadrush@icloud.com"
] |
sobadrush@icloud.com
|
8c4a5643db05aa582d6890691f9259ba39448975
|
455ce0c304e4a9f080862cb8459066ac741f3d38
|
/day06/funcAsVar.py
|
abdfe02a2e1454e4e58302d9b697a4725542a345
|
[] |
no_license
|
venkatram64/python3_work
|
23a835b0f8f690ca167e74bbbe94f46e3bd8c99f
|
754f42f6fa2be4446264a8e2532abd55213af4df
|
refs/heads/master
| 2021-06-15T22:41:31.031925
| 2021-02-16T04:56:50
| 2021-02-16T04:56:50
| 153,217,220
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 125
|
py
|
def square(num):
return num * num
s = square  # the function is assigned to a variable and can be executed later
print(s(5))
|
[
"venkat.veerareddy@hotmail.com"
] |
venkat.veerareddy@hotmail.com
|
d476c12d19016fedb10bf55bbe245feb207b93ac
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_Lag1Trend_NoCycle_NoAR.py
|
ff7ef11eee723d83fe871324617d9665f298f2bc
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 154
|
py
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['Lag1Trend'] , ['NoCycle'] , ['NoAR'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
ef3d990361a736c2c8243ef71653066e995e9f04
|
a1c7b21d96d6326790831b2b3115fcd2563655a4
|
/pylidc/__init__.py
|
95c187456f43a5b9aafdc6d2673def316432c058
|
[
"MIT"
] |
permissive
|
jovsa/pylidc
|
3837b17fbe02bc60817081a349681612f24b2f81
|
bd378a60a4b0e6dfb569afb25c3dfcbbcd169550
|
refs/heads/master
| 2021-06-13T02:45:41.359793
| 2017-03-10T23:14:57
| 2017-03-10T23:14:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,068
|
py
|
"""
--------------------------------------------------------
Author: Matt Hancock, not.matt.hancock@gmail.com
--------------------------------------------------------
This python module implements an (ORM) object relational mapping
to an sqlite database containing the annotation information from
the XML files provided by the LIDC dataset. The purpose of this
module is to make for easier data querying and to include
functional aspects of the data models in addition to pure
attribute information, e.g., computing nodule centroids from
contour attributes.
The ORM is implemented using sqlalchemy. There are three data models:
Scan, Annotation, and Contour
The relationships are "one to many" for each model going left
to right, i.e., scans have many annotations and annotations
have many contours.
For more information, see the model classes themselves.
"""
from __future__ import print_function as _pf
__version__ = '0.1.2'
# Hidden stuff.
import os as _os
import pkg_resources as _pr
from sqlalchemy import create_engine as _create_engine
from sqlalchemy.orm import sessionmaker as _sessionmaker
_dbpath = _pr.resource_filename('pylidc', 'pylidc.sqlite')
_engine = _create_engine('sqlite:///'+_dbpath)
_session = _sessionmaker(bind=_engine)()
# Public stuff.
from .Scan import Scan
from .Scan import dicompath
from .Annotation import Annotation
from .Contour import Contour
def query(*args):
"""
Wraps the sqlalchemy session object. Some example usage:
>>> import pylidc as pl
>>> qu = pl.query(pl.Scan).filter(pl.Scan.slice_thickness <= 1.)
>>> print qu.count()
>>> # => 97
>>> scan = qu.first()
>>> print len(scan.annotations)
>>> # => 11
>>> qu = pl.query(pl.Annotation).filter((pl.Annotation.malignancy > 3), (pl.Annotation.spiculation < 3))
>>> print qu.count()
>>> # => 1083
>>> annotation = qu.first()
>>> print annotation.estimate_volume()
>>> # => 5230.33874999
"""
return _session.query(*args)
|
[
"mhancock743@gmail.com"
] |
mhancock743@gmail.com
|
64a5d750a89fb054c3e4071ed1214595cb22f90c
|
5acc62ab7c37bc2aa5b2e7eedc8c0960b37bda7b
|
/setup.py
|
ef25940cc4b5a049843c0c1d65b07ed48d136ecb
|
[
"MIT"
] |
permissive
|
DanWertheimer/pyJDBCConnector
|
6476a3039ec0ba7a957e28744a869a0cfe859c41
|
41d7121fefb51692f1cec9bfc464de448bfc159f
|
refs/heads/master
| 2022-08-29T00:30:42.744437
| 2020-05-20T10:19:34
| 2020-05-20T10:19:34
| 264,123,305
| 2
| 0
|
MIT
| 2020-05-19T14:14:32
| 2020-05-15T07:12:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,675
|
py
|
import setuptools
from pyjdbcconnector import __version__
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setuptools.setup(
name='pyjdbcconnector',
version=__version__,
description='A high level JDBC API',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/DanWertheimer/pyJDBCConnector',
download_url='https://github.com/DanWertheimer/pyJDBCConnector/archive/v0.2.1.tar.gz',
author='Daniel Wertheimer',
author_email='danwertheimer@gmail.com',
packages=setuptools.find_packages(),
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.6',
],
zip_safe=False,
python_requires='>=3.6',
install_requires=[
'JPype1 == 0.6.3',
'JayDeBeApi >= 1.1.1',
'PyHive == 0.6.2'
],
project_urls={
'Documentation': 'https://pyjdbcconnector.readthedocs.io/en/latest/',
'Source': 'https://github.com/DanWertheimer/pyJDBCConnector',
'Say Thanks!': 'https://saythanks.io/to/danwertheimer%40gmail.com'
},
)
|
[
"Daniel.Wertheimer@absa.africa"
] |
Daniel.Wertheimer@absa.africa
|
6e1df05610ec9c28d74938442be383a7185cc1d9
|
d1820a2b617f0ebb1c60f5f258bd8a844bd660d3
|
/schedule/models/route.py
|
b6edf5c88fd2f300488236fc29d6f820544d1c52
|
[] |
no_license
|
sfrieson/subway
|
0db4a8b9ef60bcb9c62803c0c9a28e6dbb8e6fc1
|
7f4252d12b7d5a935188dba359579849da008397
|
refs/heads/master
| 2020-03-26T04:36:27.977293
| 2018-09-09T11:35:17
| 2018-09-09T11:35:17
| 144,512,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 709
|
py
|
from lib import db
def get(route_id):
return db.get_one("SELECT * FROM routes where route_id = '%s';" % route_id)
def get_paths(route_id):
return db.get_many("""
SELECT
shape_id, points.point_id, shape_pt_sequence, point_lon, point_lat
FROM
shapes
JOIN points on shapes.point_id = points.point_id
WHERE
shape_id IN (
SELECT DISTINCT
shapes.shape_id
FROM
shapes
JOIN trips ON trips.shape_id = shapes.shape_id
WHERE route_id = '%s'
)
ORDER BY
shape_id, shape_pt_sequence
""" % route_id)
|
[
"sfrieson@gmail.com"
] |
sfrieson@gmail.com
|
9207c0d477fcdff3fdfc7421a7da2c788d2c36af
|
38b5f3ad3ff93342c817b74e7b315f06dd077119
|
/exec27.py
|
14ae0f84658618befe9a7b0257f2663c1df63854
|
[] |
no_license
|
xXxDevBR/Python-Exercicios
|
abff6e2fcc48b27db11d68ba375916413ca90edc
|
aaa2c1608f258023adb1d56370a9d8c596ef458e
|
refs/heads/master
| 2020-04-16T10:50:38.294074
| 2019-01-13T15:17:28
| 2019-01-13T15:17:28
| 165,518,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
nome = str(input('Digite seu nome: ')).strip().split()
print('Seu primeiro nome é {}'.format(nome[0]))
print('Seu último nome é {}'.format(nome[len(nome)-1]))
|
[
"xXxDevBR@clashoverdrive245@protonmail.com"
] |
xXxDevBR@clashoverdrive245@protonmail.com
|
5be51dbb88aa58f10058062d78de161544e789e6
|
cc2fcc1a0c5ea9789f98ec97614d7b25b03ba101
|
/st2common/tests/unit/test_configs_registrar.py
|
e23dfe74262ed4c55b95ba299c1a0f50fbeb08c9
|
[
"Apache-2.0"
] |
permissive
|
Junsheng-Wu/st2
|
6451808da7de84798641882ca202c3d1688f8ba8
|
c3cdf657f7008095f3c68b4132b9fe76d2f52d81
|
refs/heads/master
| 2022-04-30T21:32:44.039258
| 2020-03-03T07:03:57
| 2020-03-03T07:03:57
| 244,301,363
| 0
| 0
|
Apache-2.0
| 2022-03-29T22:04:26
| 2020-03-02T06:53:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,577
|
py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from st2common.content import utils as content_utils
from st2common.bootstrap.configsregistrar import ConfigsRegistrar
from st2common.persistence.pack import Pack
from st2common.persistence.pack import Config
from st2tests.api import SUPER_SECRET_PARAMETER
from st2tests.base import CleanDbTestCase
from st2tests import fixturesloader
__all__ = [
'ConfigsRegistrarTestCase'
]
PACK_1_PATH = os.path.join(fixturesloader.get_fixtures_packs_base_path(), 'dummy_pack_1')
PACK_6_PATH = os.path.join(fixturesloader.get_fixtures_packs_base_path(), 'dummy_pack_6')
class ConfigsRegistrarTestCase(CleanDbTestCase):
def test_register_configs_for_all_packs(self):
# Verify DB is empty
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 0)
self.assertEqual(len(config_dbs), 0)
registrar = ConfigsRegistrar(use_pack_cache=False)
registrar._pack_loader.get_packs = mock.Mock()
registrar._pack_loader.get_packs.return_value = {'dummy_pack_1': PACK_1_PATH}
packs_base_paths = content_utils.get_packs_base_paths()
registrar.register_from_packs(base_dirs=packs_base_paths)
# Verify pack and schema have been registered
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 1)
self.assertEqual(len(config_dbs), 1)
config_db = config_dbs[0]
self.assertEqual(config_db.values['api_key'], '{{st2kv.user.api_key}}')
self.assertEqual(config_db.values['api_secret'], SUPER_SECRET_PARAMETER)
self.assertEqual(config_db.values['region'], 'us-west-1')
def test_register_all_configs_invalid_config_no_config_schema(self):
        # verify_configs is on, but the ConfigSchema for the pack doesn't exist,
        # so validation should proceed normally
# Verify DB is empty
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 0)
self.assertEqual(len(config_dbs), 0)
registrar = ConfigsRegistrar(use_pack_cache=False, validate_configs=False)
registrar._pack_loader.get_packs = mock.Mock()
registrar._pack_loader.get_packs.return_value = {'dummy_pack_6': PACK_6_PATH}
packs_base_paths = content_utils.get_packs_base_paths()
registrar.register_from_packs(base_dirs=packs_base_paths)
# Verify pack and schema have been registered
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 1)
self.assertEqual(len(config_dbs), 1)
def test_register_all_configs_with_config_schema_validation_validation_failure(self):
# Verify DB is empty
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 0)
self.assertEqual(len(config_dbs), 0)
registrar = ConfigsRegistrar(use_pack_cache=False, fail_on_failure=True,
validate_configs=True)
registrar._pack_loader.get_packs = mock.Mock()
registrar._pack_loader.get_packs.return_value = {'dummy_pack_6': PACK_6_PATH}
# Register ConfigSchema for pack
registrar._register_pack_db = mock.Mock()
registrar._register_pack(pack_name='dummy_pack_5', pack_dir=PACK_6_PATH)
packs_base_paths = content_utils.get_packs_base_paths()
expected_msg = ('Failed validating attribute "regions" in config for pack "dummy_pack_6" '
'(.*?): 1000 is not of type u\'array\'')
self.assertRaisesRegexp(ValueError, expected_msg,
registrar.register_from_packs,
base_dirs=packs_base_paths)
|
[
"wei.ying@easystack.cn"
] |
wei.ying@easystack.cn
|
2da6036ab876d2232b76b7ef2f9869daa5258513
|
1d3c4449b6884578dbd9b8d0cdcf183c844c2801
|
/1106.py
|
13f7dcb81326f394a13899b3001195962f6aa552
|
[] |
no_license
|
thanos73k/CoTe
|
fa5e84048c6405daa712445391b14e9160fb7507
|
e96754b970ed33269c50fef00973aa066d9739a6
|
refs/heads/main
| 2023-07-13T10:42:51.845553
| 2021-08-23T14:31:08
| 2021-08-23T14:31:08
| 383,343,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
goal, city= map(int, input().split())
info=[]
INF=987654321
dp=[0] + [INF]* 2000
for _ in range(city):
tmp =list( map(int,input().split()))
info.append(tmp)
for cost, people in info:
for i in range(people, goal + people):
dp[i]= min(dp[i],dp[i-people]+cost)
print(min(dp[goal:]))
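# Assumed reading of the recurrence above (illustration only): dp[i] is the
# minimum cost to recruit exactly i people, and the answer is the cheapest way
# to reach at least `goal`. For example, goal=2 with a single city offering
# 1 person for cost 4 gives dp[1]=4, dp[2]=8, so min(dp[2:]) prints 8.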
|
[
"59728699+thanos73k@users.noreply.github.com"
] |
59728699+thanos73k@users.noreply.github.com
|
45b764bb9a978270fd4f4004643081df442084c4
|
8531e93ea9a499804da4606499dfb258efc4ed3c
|
/speechToText.py
|
5d0edc8dad0fe274ac651a8c210f34be183b1a8c
|
[] |
no_license
|
marblefactory/Speech-to-Text
|
a09ce4fe9276af89200ee1dcc1a85f6ef68a07b5
|
8888561bbbe27a41594ab27da64f6281d9dc0fc0
|
refs/heads/master
| 2021-07-20T23:57:10.949806
| 2017-10-30T12:47:22
| 2017-10-30T12:47:22
| 108,848,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
import speech_recognition as sr
r = sr.Recognizer()
m = sr.Microphone()
try:
print("Geting ambinent noise")
with m as source: r.adjust_for_ambient_noise(source)
print("Energy threshold established")
while True:
print("Say something:")
with m as source: audio = r.listen(source)
print("Will no convert to text...")
try:
text = r.recognize_google(audio)
if str is bytes:
print(u"You said {}".format(text).encode("utf-8"))
else:
print("You said {}".format(text))
except sr.UnknownValueError:
print("Oops! Didn't catch that")
except sr.RequestError as e:
print("Uh oh! Couldn't request results from Google Speech Recognition service; {0}".format(e))
except KeyboardInterrupt:
pass
|
[
"c.leathart@hotmail.co.uk"
] |
c.leathart@hotmail.co.uk
|
debd832aa46efe641d1cd44a23481c762f2593d0
|
eb874c1b263a1db2f7beb04b08d51a5b0f6ad8e8
|
/dcl_main.py
|
d8d844d6508f216dc70b4286a3389fafcbe761c9
|
[] |
no_license
|
freekang/DCL-1
|
2c89d5b07767864819709118f06f1e34465c5c5d
|
63f30e282716a22a922c29e4f3b5e5f81696dbb0
|
refs/heads/main
| 2023-04-03T14:29:45.764790
| 2021-04-11T02:53:20
| 2021-04-11T02:53:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,652
|
py
|
import os
import sys
import torch
import yaml
# from torchvision import datasets
from models.mlp_head import MLPHead
from models.bert_base_network import BertNet
from trainer import BertBYOLTrainer
from dataclasses import dataclass, field
from typing import Optional
import logging
# os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'  # order the GPU devices by PCI bus id, starting from 0
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # restrict the visible GPUs to device 0 only
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
BYOLDataCollatorForLanguageModeling,
HfArgumentParser,
LineByLineTextDataset,
PreTrainedTokenizer,
TrainingArguments,
set_seed,
RobertaModel,
RobertaTokenizer,
RobertaForMaskedLM,
BertForMaskedLM,
BertTokenizer,
AutoConfig,
AutoModelForSequenceClassification,
glue_processors
)
from utils.util import GlueContrastDataset
from transformers import GlueDataset
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
data_dir: str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
)
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
mlm: bool = field(
default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
plm_probability: float = field(
default=1 / 6,
metadata={
"help": "Ratio of length of a span of masked tokens to surrounding context length for permutation language modeling."
},
)
max_span_length: int = field(
default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
)
block_size: int = field(
default=-1,
metadata={
"help": "Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def __post_init__(self):
self.task_name = self.task_name.lower()
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
norm_type: Optional[str] = field(
default=None, metadata={"help": "norm type in contrast_model"}
)
def prepare_bert(model_path):
if model_path == 'bert-base-uncased':
model_class = BertForMaskedLM
tokenizer_class = BertTokenizer
pretrained_weights = 'bert-base-uncased'
else:
model_class = RobertaForMaskedLM
tokenizer_class = RobertaTokenizer
pretrained_weights = 'roberta-base'
    # load the pretrained tokenizer and model
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights)
# model1 = RobertaModel.from_pretrained(pretrained_weights)
# id = tokenizer.encode("Here is some text to encode", add_special_tokens=True)
# input_ids = torch.tensor([tokenizer.encode("Here is some text to encode", add_special_tokens=True)])
# with torch.no_grad():
# last_hidden_states = model(input_ids)[0] # Models outputs are now tuples
return tokenizer,model
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
byol_config = yaml.load(open("./config/config.yaml", "r"), Loader=yaml.FullLoader)
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
    # load the roberta-related pretrained models
tokenizer, bertformlm = prepare_bert(model_args.model_name_or_path)
bertformlm.resize_token_embeddings(len(tokenizer))
if data_args.block_size <= 0:
data_args.block_size = tokenizer.max_len
else:
data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
train_dataset = (
GlueContrastDataset(data_args, tokenizer=tokenizer,
cache_dir=model_args.cache_dir) if training_args.do_train else None
)
data_collator = BYOLDataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
)
dataset_num = len(train_dataset)
training_args.save_steps = dataset_num // training_args.per_device_train_batch_size//2
training_args.logging_steps = training_args.save_steps
print('train_dataset.len:', len(train_dataset))
print('save_steps:', training_args.save_steps)
print('eval_steps:', training_args.eval_steps)
print('logging_steps:', training_args.logging_steps)
# online encoder
online_network = BertNet(bert=bertformlm,norm_type=model_args.norm_type, model_name=model_args.model_name_or_path,**byol_config['network'])
# predictor network
predictor = MLPHead(norm_type=model_args.norm_type,in_channels=online_network.projetion.net[-1].out_features,
**byol_config['network']['projection_head'],layer=2)
# target encoder
target_network = BertNet(bert=bertformlm,norm_type=model_args.norm_type, model_name=model_args.model_name_or_path, **byol_config['network'])
# load pre-trained model if defined
if model_args.model_name_or_path:
try:
# load pre-trained parameters
# online2 = BertNet(bert=bertformlm,norm_type='power',model_name=model_args.model_name_or_path, **byol_config['network'])
# load_params = torch.load(os.path.join(os.path.join('/home/chenxiang/pretrained_model/PyTorch-BYOL/new_result/bert_power_pretrain_glue/MNLI/checkpoint-epoch1', 'pytorch_model.bin')),
# map_location=torch.device(torch.device(training_args.device)))
#
# online2.load_state_dict(load_params['online_network_state_dict'])
# online_network.roberta.load_state_dict(online2.roberta.state_dict())
# del load_params,online2
logger.info("Training online_network parameters from %s", model_args.model_name_or_path)
except FileNotFoundError:
logger.info("Pre-trained weights not found. Training from scratch.")
trainer = BertBYOLTrainer(
args = training_args,
online_network=online_network,
target_network=target_network,
# optimizer=optimizer,
predictor=predictor,
data_collator=data_collator,
train_dataset=train_dataset,
**byol_config['trainer'])
# Training
if training_args.do_train:
model_path = model_args.model_name_or_path
trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
if __name__ == '__main__':
main()
|
[
"852975133@qq.com"
] |
852975133@qq.com
|
aa187842f546ec38434033435a6d0116e9184da1
|
d8127d11c7294796a4d85a8ed0dba43cb6fce715
|
/basic_knowledge/10_2_3.py
|
fd0e314d26afb2b1df2080fef79e4910693c7a57
|
[] |
no_license
|
TophTab/Learning_python_firsttime
|
77e6a33069fa6718c878bcf6b7925fa3f703d16e
|
4e30d6d2c120a911c8e6cc1f0918128bf7fb372b
|
refs/heads/main
| 2023-01-06T22:23:39.482131
| 2020-11-07T13:34:55
| 2020-11-07T13:34:55
| 310,820,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
with open('programming.txt','a') as file_object:
file_object.write("I also love finding\n")
file_object.write('I love creating\n')
with open('programming.txt','r') as file_object:
print(file_object.read())
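# Expected behaviour (prior contents of programming.txt assumed): each run
# appends the two lines above, because mode 'a' appends to the file, and the
# second block re-opens it in mode 'r' and prints the accumulated contents.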
|
[
"TophTab@github.com"
] |
TophTab@github.com
|
21ac7b8bd278492a0c7324f8fe0d272953855df9
|
67dbbe74262b95ddce44287967d7dd7af393a60b
|
/web_deploy/__init__.py
|
bfa0f3ffe8e23bb2df5f913dedc9ed52dfdb19f2
|
[
"Apache-2.0"
] |
permissive
|
ygavenchuk/web-deploy
|
444b93e70ffa8ba848992f7cfd2a68cae0f5fb01
|
2a1cc0931a931a68114c535af3b508fc01f00684
|
refs/heads/master
| 2021-01-01T19:21:17.094038
| 2015-07-06T12:59:11
| 2015-07-06T12:59:11
| 38,040,524
| 0
| 2
| null | 2015-07-01T10:07:38
| 2015-06-25T09:31:04
|
Python
|
UTF-8
|
Python
| false
| false
| 830
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Yuriy Gavenchuk aka murminathor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import *
from .daemon import *
from .db import *
from .project import *
from .system import *
from .vcs import *
__author__ = 'y.gavenchuk'
__version__ = '0.0.3'
VERSION = __version__.split('.')
|
[
"y.gavenchuk@bvblogic.com"
] |
y.gavenchuk@bvblogic.com
|
15a49c1e93ac6f6575ef74ea8af48334dbb97f8f
|
f6f9ca202b70e93a324de89b8316f389bf838508
|
/src/gui/archat.py
|
9c95974953d8bd0a0d132a0a6ee09cafd8c5017e
|
[
"MIT"
] |
permissive
|
180D-FW-2020/Team8
|
4e6d46870041d5015f2d5094acd44f15270017fa
|
ba439a2e65d759d0abd8d4fa827601b16228d841
|
refs/heads/master
| 2023-03-13T20:00:05.024413
| 2021-03-11T17:43:04
| 2021-03-11T17:43:04
| 297,810,211
| 2
| 1
|
MIT
| 2021-03-11T17:41:58
| 2020-09-23T00:46:07
|
Python
|
UTF-8
|
Python
| false
| false
| 7,302
|
py
|
## AR Chat #######################################################################################################
## a class for creating a message board in OpenCV
ROOT = "data/gui/"
import cv2 as cv
'''
An ARChat object outputs an array with a custom UI created by the Butcher Bros (spec. Michael)
Public Functions:
- post: posts a message to the ARChat
- stage: stages a message to the user's staging area
- getPath: returns the path to the ARChat image
'''
class ARChat():
'''
Initialize a new ARChat.
The ARChat always initializes with at least one board topic, "general"
Inputs:
- topic: the topic associated with each ARChat
- roomIndex: number signifying which chatroom in the list of chatrooms is currently selected
- chatrooms: a list of type str, which contains a string of currently active boards
'''
def __init__(self, topic, roomIndex, chatrooms = []):
self.topic = topic
self.messages = []
self.rooms = chatrooms
self.roomIndex = roomIndex
self.wordlimit = 42
self.fontSize = 1.5
self.boardpath = topic
self.stagedMessage = ""
self.write_messages()
self.write_rooms()
'''
Posts a message to be updated to the currently active board
Inputs:
- user (str): the name of the user who sent the message
- message (str): the message sent by the user
- color (tuple): the RGB tuple associated with the user
    - timestamp (dict): dictionary with hour, minute and second keys
'''
def post(self, user, message, color, timestamp, overflow=False):
h = ""
m = ""
s = ""
if(len(user) + len(message) <= self.wordlimit):
if(overflow == False):
h = str(timestamp["hour"])
m = str(timestamp["minute"])
s = str(timestamp["second"])
if(len(str(timestamp["hour"])) == 1):
h = '0' + str(timestamp["hour"])
if(len(str(timestamp["minute"])) == 1):
m = '0' + str(timestamp["minute"])
if(len(str(timestamp["second"])) == 1):
s = '0' + str(timestamp["second"])
self.messages.insert(0, (user, message, color, "<" + h + ":" + m + ":" + s + ">"))
else:
self.messages.insert(0, ("", message, color, ""))
self.write_messages()
self.write_rooms()
self.write_staged_message()
else:
if(overflow == False):
h = str(timestamp["hour"])
m = str(timestamp["minute"])
s = str(timestamp["second"])
if(len(str(timestamp["hour"])) == 1):
h = '0' + str(timestamp["hour"])
if(len(str(timestamp["minute"])) == 1):
m = '0' + str(timestamp["minute"])
if(len(str(timestamp["second"])) == 1):
s = '0' + str(timestamp["second"])
msg = self.process_msg(message, self.wordlimit)
self.messages.insert(0, (user, msg[0], color, "<" + h + ":" + m + ":" + s + ">"))
self.post(user, msg[1], color, "", True)
else:
msg = self.process_msg(message, self.wordlimit)
self.messages.insert(0, ("", msg[0], color, ""))
self.post(user, msg[1], color, "", True)
'''
Stages a message to the user's staging area
Inputs:
- message (str): the string to place in the user's message box
'''
def stage(self, message, index=0):
if(len(message) <= (self.wordlimit + 20)):
self.stagedMessage = message
if(index == 0):
self.write_staged_message()
else:
self.write_staged_message(index)
else:
msg = self.process_msg(message, self.wordlimit + 20)
self.stagedMessage = msg[0]
self.write_staged_message()
index += 1
self.stage(msg[1], index)
'''
Returns a path to the saved ARChat .jpg
'''
def getPath(self):
return ROOT + self.boardpath
def addRoom(self, topic):
self.rooms.append(topic)
self.write_messages()
self.write_rooms()
# message overflow processing
    def process_msg(self, message, lim):
        for i in range(lim-1, 0, -1):
            if(message[i] == ' '):
                return (message[:i], message[i+1:])
        # fall back to a hard split when the first `lim` characters contain no
        # space, so callers never receive None
        return (message[:lim], message[lim:])
# post messages to chatboard
def write_messages(self):
im = cv.imread(ROOT + 'chat.png', 1)
index = 0
for message in self.messages:
if len(message[3]) != 0:
cv.putText(im, message[3] + " " + message[0] + ": " + message[1], (int(im.shape[1]/4), im.shape[0]-200-80*index), cv.FONT_HERSHEY_SIMPLEX, self.fontSize, message[2], 2, cv.LINE_AA)
else:
cv.putText(im, message[0] + message[1], (int(im.shape[1]/4), im.shape[0]-200-80*index), cv.FONT_HERSHEY_SIMPLEX, self.fontSize, message[2], 2, cv.LINE_AA)
index += 1
cv.imwrite(str(self.getPath()) + '.jpg', im)
# post message rooms to chatboard
def write_rooms(self):
im = cv.imread(str(self.getPath()) + '.jpg', 1)
index = 0
for room in self.rooms:
cv.putText(im, room, (50, im.shape[0]-1000+100*index), cv.FONT_HERSHEY_SIMPLEX, self.fontSize, (255,255,255), 2, cv.LINE_AA)
if(index == self.roomIndex):
cv.putText(im, room, (50, im.shape[0]-1000+100*index), cv.FONT_HERSHEY_SIMPLEX, self.fontSize, (255,0,255), 20, cv.LINE_AA)
cv.putText(im, room, (50, im.shape[0]-1000+100*index), cv.FONT_HERSHEY_SIMPLEX, self.fontSize, (255,255,255), 2, cv.LINE_AA)
index += 1
cv.imwrite(str(self.getPath()) + '.jpg', im)
# post queued message to chatboard
def write_staged_message(self, index=0):
im = cv.imread(str(self.getPath()) + '.jpg', 1)
cv.putText(im, self.stagedMessage, (int(im.shape[1]/4), im.shape[0]-90+50*index), cv.FONT_HERSHEY_SIMPLEX, self.fontSize, [255,255,255], 2, cv.LINE_AA)
cv.imwrite(str(self.getPath()) + '.jpg', im)
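
# --- Illustrative sketch (not part of the original file) ---------------------
# post() zero-pads hours/minutes/seconds by hand; this helper is a hedged
# restatement of the same "<HH:MM:SS>" formatting with str.zfill, assuming the
# timestamp dict uses the "hour"/"minute"/"second" keys shown above. It is not
# called anywhere in this file.
def format_timestamp_sketch(timestamp):
    h = str(timestamp["hour"]).zfill(2)
    m = str(timestamp["minute"]).zfill(2)
    s = str(timestamp["second"]).zfill(2)
    return "<" + h + ":" + m + ":" + s + ">"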
if __name__ == '__main__':
masterRooms = ["chat1", "chat2", "chat3"]
chat1 = ARChat("chat1", 0, masterRooms)
chat2 = ARChat("chat2", 1, masterRooms)
chat3 = ARChat("chat3", 2, masterRooms)
chat1.post("Nico", "some bullshit hey this is a bullshit space", [255,124,255], {"hour": 12, "minute": 31, "second": 22})
chat1.post("Nate", "this is a tester message really long please overflow due to character count", [0,0,255], {"hour": 12, "minute": 32, "second": 11})
chat1.post("Tommy", "this is a tester message again", [255,0,0], {"hour": 12, "minute": 33, "second": 1})
chat1.stage("I don't think this overflow works as intended yet so why don't I keep testing this stuff and see when it breaks")
chat2.post("Michael", "testing testing 123", [0,255,0], {"hour": 1, "minute": 5, "second": 2})
chat3.post("Walter", "Let's cook", [125,125,255], {"hour": 1, "minute": 5, "second": 2})
chat3.post("Jesse", "Hell yeah", [255,125,125], {"hour": 1, "minute": 5, "second": 56})
|
[
"noreply@github.com"
] |
180D-FW-2020.noreply@github.com
|
196964f8812712d14c761353096cc995312f630d
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/containsDuplicate_20200907093833.py
|
a13711fe822495f778880bcdac9e84cd2d398e7d
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
def duplicate(nums,k,t):
number = None
for i in range(len(nums)):
for j in range(i+1,len(nums)):
            # the original file is truncated here; the check below is a hedged
            # completion of the intended "nearby almost-duplicate" test
            if abs(nums[i] - nums[j]) <= t and abs(i - j) <= k:
                return True
    return False
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
8f580dc6cac81d892c88c452ae7a6db718851418
|
ded0a8f68a945d40aaeabdd298a08117cbe79e3c
|
/Projets/fonction.py
|
ea482df223a7f40553a74440ee21358e51c510cf
|
[] |
no_license
|
Stephanie-Njike/Python
|
cd3931af29851f3bc89ccf468123dcb8c01b496b
|
f22f3a5ddb19bd27404a532781802ef7d33af8bc
|
refs/heads/main
| 2023-07-05T15:39:41.744830
| 2021-08-09T15:28:25
| 2021-08-09T15:28:25
| 394,320,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,242
|
py
|
"""
def list_in_param(*params):
return params[0]+params[1]+params[2]
value = list_in_param(1, 2, 3, 4, 5, 8)
print(value)
value2 = list_in_param('1', '2', '3', '4')
print(value2)
def list_in_param2(a, *params):
return params[0]+params[1]+params[2]+a
value = list_in_param2(1, 2, 3, 4, 5, 8)
print(value)
def dico_in_param(**params):
return params
print(dico_in_param(nom="Stephanie", prenom="olive"))
def maliste(listeParam):
listeParam.append('moi')
return listeParam
liste = [1, 2, 3]
print(maliste(liste))
"""
"""
def triangle(n):
nombre_espace_gauche = n
nombre_etoiles_milieu = 1
nombre_espace_droit = n
for i in range(n):
for j in range (nombre_espace_gauche):
print(' ', end = '')
for k in range (nombre_etoiles_milieu):
print('*', end = '')
for l in range (nombre_espace_droit):
print(' ', end = '')
# print(' => ',i)
print('')
nombre_espace_gauche = nombre_espace_gauche - 1
nombre_etoiles_milieu = nombre_etoiles_milieu + 2
nombre_espace_droit = nombre_espace_droit - 1
return " "
print(triangle(n = int(input("saisir la hauteur du triangle : "))))
"""
"""
def triangleNewMethod(n):
espace = n
nombreEtoile = 1
for i in range(n):
print(' '*espace, '*'*nombreEtoile, ' '*espace)
espace = espace - 1
nombreEtoile = nombreEtoile + 2
triangleNewMethod(n = int(input("saisir la hauteur du triangle : ")))
"""
"""
def maximum(nb1,nb2,nb3):
max1 = nb1
if nb1 > nb2 and nb1 > nb3:
max1 = nb1
if nb3 > nb1 and nb3 > nb2:
max1 = nb3
else:
max1 = nb2
return max1
print(maximum(nb1 = int(input("saisir le premier nombre : ")),
nb2 = int(input("saisir le deuxieme nombre : ")),
nb3 = int(input("saisir le troisieme nombre : "))))
"""
"""
def triangleAvecChiffres(n):
espace = n
nombreEtoile = 2
valeurAAfficher = 1
for i in range(n):
if valeurAAfficher < n:
print(' '*espace, str(valeurAAfficher)*nombreEtoile, ' '*espace)
espace = espace - 1
nombreEtoile = nombreEtoile + 2
valeurAAfficher = valeurAAfficher + 1
triangleAvecChiffres(n = int(input("saisir la hauteur du triangle : ")))
"""
"""
def triangleNewMethod(n):
espace = n
nombreEtoile = 2
for i in range(1, n):
print(' '*espace, str(i)*nombreEtoile, ' '*espace)
espace = espace - 1
nombreEtoile = nombreEtoile + 2
triangleNewMethod(n = int(input("saisir la hauteur du triangle : ")))
"""
"""
def chiffre_porte_bonheur (nb):
nouveau = nb
while nouveau >= 10:
resultat = str(nouveau)
somme = 0
for i in range(len(resultat)):
chiffre = int(resultat[i])
print(chiffre,"**2 = ",chiffre**2)
somme = somme + chiffre**2
nouveau = somme
if nouveau == 1:
print("le chiffre ", nb, " est un chiffre porte bonheur")
else:
print ("le chiffre ", nb, " n'est pas un chiffre porte bonheur")
print(chiffre_porte_bonheur (nb = int(input("saisir le nombre : "))))
"""
"""
def compte_mot(phrase):
liste_mot = phrase.split(':')
print(liste_mot)
nombre_mot = len(liste_mot)
return nombre_mot
print(compte_mot('Bonjour:toi:et:moi:aussi'))
"""
"""
def NbCMin(passe):
liste = [i.islower() for i in passe]
print(liste)
return sum(liste)
"""
"""
def NbCMin(passe):
som = 0
for i in passe:
if i.islower():
som = som + 1
return som
print(NbCMin('paREDsse'))
"""
"""
def longMaj(passe):
longMajMax = 0
longTmp = 0
for i in passe:
if i.isupper():
longTmp = longTmp + 1
else:
longTmp = 0
if longTmp > longMajMax:
longMajMax = longTmp
return longMajMax
print(longMaj('ERRRiiuooiiAAfgghrrhhhDDDDR'))
"""
def premier_nb(borninf, bornsup):
    # start at 2 (1 is not prime); returning inside the loop would stop after
    # the first prime instead of listing them all
    for nbr1 in range(max(borninf, 2), bornsup):
        nbrpre = 1
        for div1 in range(2, nbr1):
            if nbr1 % div1 == 0:
                nbrpre = 0
                break
        if nbrpre == 1:
            print("Le nombre - ", nbr1, " - est premier")
premier_nb(1, 100)
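
# --- Illustrative sketch (not part of the original exercise file) ------------
# A slightly faster variant of the same idea: trial division only needs to go
# up to the square root of the candidate. Purely an illustration, not used by
# the exercises above.
def est_premier_sketch(n):
    if n < 2:
        return False
    for div in range(2, int(n ** 0.5) + 1):
        if n % div == 0:
            return False
    return True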
|
[
"pair_one@smart-coders.guru"
] |
pair_one@smart-coders.guru
|
a25ee7b2a7ac37922e741c367dfb32c3a7c44e00
|
dbe4e7b18e611907201f2d46a1d3df5834cae14f
|
/splunk_add_on_ucc_framework/start_alert_build.py
|
d05cfedf7930e2f14bad9b2898621ee42d26788d
|
[
"Apache-2.0"
] |
permissive
|
JasonConger/addonfactory-ucc-generator
|
d6198e0d0e8b1113034403d3e2df67c9e00f4652
|
17548eee55ee270d8954e9b3df3ebc9da2d90300
|
refs/heads/main
| 2023-04-04T20:44:55.629207
| 2021-04-12T07:12:52
| 2021-04-14T18:57:49
| 358,644,617
| 1
| 0
| null | 2021-04-16T15:35:03
| 2021-04-16T15:35:02
| null |
UTF-8
|
Python
| false
| false
| 2,621
|
py
|
# SPDX-FileCopyrightText: 2020 2020
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from . import normalize
import logging
import os
from .modular_alert_builder.build_core import generate_alerts
import traceback
class LoggerAdapter(logging.LoggerAdapter):
def __init__(self, prefix, logger):
super(LoggerAdapter, self).__init__(logger, {})
self.prefix = prefix
def process(self, msg, kwargs):
return '[%s] %s' % (self.prefix, msg), kwargs
def validate(alert, logger):
try:
fields = []
if alert.get("entity"):
for entity in alert.get("entity"):
if entity.get("field") in fields:
raise Exception("Field names should be unique")
else:
fields.append(entity.get("field"))
if entity.get("type") in ["radio", "singleSelect"]:
if not entity.get("options"):
raise Exception(
"{} type must have options parameter".format(entity.get("type")))
elif entity.get("options"):
raise Exception(
"{} type must not contain options parameter".format(entity.get("type")))
if entity.get("type") in ["singleSelectSplunkSearch"]:
if not all([entity.get("search"), entity.get("valueField"), entity.get("labelField")]):
raise Exception("{} type must have search, valueLabel and valueField parameters".format(
entity.get("type")))
elif any([entity.get("search"), entity.get("valueField"), entity.get("labelField")]):
raise Exception("{} type must not contain search, valueField or labelField parameter".format(
entity.get("type")))
except:
logger.error(traceback.format_exc())
raise
def alert_build(schema_content, product_id, short_name, output_dir,sourcedir):
# Initializing logger
logging.basicConfig()
logger = logging.getLogger('Alert Logger')
logger = LoggerAdapter('ta="{}" Creating Alerts'.format(short_name),
logger)
# Validation
for alert in schema_content['alerts']:
validate(alert, logger)
# Get the alert schema with required structure
envs = normalize.normalize(schema_content, product_id, short_name)
pack_folder = os.path.join(sourcedir, 'arf_dir_templates', 'modular_alert_package')
# Generate Alerts
generate_alerts(pack_folder, output_dir, logger, envs)
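
# --- Illustrative sketch (not part of the original module) -------------------
# A minimal alert stanza that would pass validate() above, assuming the layout
# implied by the checks: unique field names, "options" only for radio /
# singleSelect entities, and search/valueField/labelField reserved for
# singleSelectSplunkSearch. The names here are invented for illustration.
EXAMPLE_ALERT_SKETCH = {
    "name": "example_alert",
    "entity": [
        {"field": "account", "type": "singleSelect", "options": ["dev", "prod"]},
        {"field": "message", "type": "text"},
    ],
}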
|
[
"rfaircloth@splunk.com"
] |
rfaircloth@splunk.com
|
00ccd496ce7dea6f368a321332faf326dce7f540
|
a38eb3c692ea213b9db90148edb2052468ff7bc3
|
/datasets/__init__.py
|
da8a68bfb78cbd159bf9a7a64efa3cf0917df160
|
[] |
no_license
|
VictorTao1998/ActiveStereoRui
|
cc5cc5d282d2935dab5c7ecb34e691a88682ec4e
|
cd5325932b52ff4090e6e40403a8710a99723a6d
|
refs/heads/main
| 2023-08-24T03:59:05.904458
| 2021-10-28T17:42:31
| 2021-10-28T17:42:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 45
|
py
|
"""
Author: Isabella Liu 8/8/21
Feature:
"""
|
[
"jit124@ucsd.edu"
] |
jit124@ucsd.edu
|
4ce5322003186f1ce33d14ae98702957ae346789
|
0cbe53436e0e753c41b039a3050f77a8b7f2e0e1
|
/data_pro/Queries_pkl.py
|
e2f6e2feee71993a08e39c38d504c278388b8a1d
|
[] |
no_license
|
s1530129650/Code_Recommendation
|
9848d95ec3edc7c61a48178b9e9e06348b2e2e66
|
1d77e1a35a1ab92a717e0435e521cf4ff520cec6
|
refs/heads/master
| 2020-05-14T23:51:11.111958
| 2019-05-28T12:08:56
| 2019-05-28T12:08:56
| 182,002,981
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,380
|
py
|
#!/usr/bin/env python
#!-*-coding:utf-8 -*-
"""
@version: python3.7
@author: v-enshi
@license: Apache Licence
@contact: 123@qq.com
@site:
@software: PyCharm
@file: Queries_pkl.py
@time: 2019/4/25 9:46
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import json
import random
import numpy as np
import time
torch.manual_seed(1)
use_gpu = False
use_gpu = True
if use_gpu:
device = torch.device("cuda")
max_vocab_size = 50000
CONTEXT_WINDOW = 100
else:
device = torch.device("cpu")
max_vocab_size = 100
CONTEXT_WINDOW = 100
time_start = time.time()
##1. data loading {"type":xxx, "children":XXX} or {"type":xxx, "value":XXX}
def data_loading(filepath):
data = []
with open(filepath, 'r') as load_f:
data1 = load_f.readlines()
for i in range(len(data1)):
content = json.loads(data1[i])
data.append(content)
return data
if use_gpu:
training_path = r"../data/python/python100k_train.json"
else:
str = r"D:\v-enshi\Language model\suggestion\Code Completion with Neural Attention and Pointer Networks"
training_path = str + r"\data\python\f10_.json"
training_data = data_loading(training_path)
now = time.time()
print("data loading",now-time_start)
## 2. build vocabulary
def build_vocab(data):
type_to_ix = {"EOF": 0,"UNK":1}
word_to_ix = {}
for i in range(len(data)):
for item in data[i]:
if item["type"] not in type_to_ix:
type_to_ix[item["type"]] = len(type_to_ix)
if "value" in item.keys():
if item["value"] in word_to_ix:
word_to_ix[item["value"]] = word_to_ix[item["value"]] + 1
else:
word_to_ix[item["value"]] = 1
# 1k 10k 50k vocabulary
L = sorted(word_to_ix.items(), key=lambda item: item[1], reverse=True)
print("L len",len(L),L[max_vocab_size][1])
value_to_ix = {"EOF": 0,"UNK":1}
for i in range(max_vocab_size):
value_to_ix[L[i][0]] = len(value_to_ix)
return type_to_ix, value_to_ix
type_vocab,value_vocab = build_vocab(training_data)
now = time.time()
print("build vocabulary",now-time_start)
# 3. make the queries
def Queries(data):
data_rd = []
random = np.random.RandomState(0)
for i in range(len(data)):
length = len(data[i])
if length <= CONTEXT_WINDOW + 2:
continue
rd = random.randint(CONTEXT_WINDOW + 1, length - 1)
while "value" not in data[i][rd].keys(): # 1.look for leaf node
rd = rd + 1
if rd >= length:
break
if rd >= length:
continue
query = []
# find same node in the context
for j in range(rd - 1, rd - CONTEXT_WINDOW - 1,
-1): # whether the remove node in the context.if the node in context,we remeber the position in context
if data[i][rd]["type"] == data[i][j]["type"] and "value" in data[i][j].keys() and data[i][rd]["value"] == \
data[i][j]["value"]:
#print("j$$$$$$$$$$$",rd - 1, rd - CONTEXT_WINDOW - 1,j,rd - j - 1)
query = [data[i][:rd], [data[i][rd]], rd, rd - j - 1]
break
if j == rd - CONTEXT_WINDOW: # there is no same node in context
continue
# add parents node
for j in range(rd - 1, rd - CONTEXT_WINDOW - 1, -1):
if "children" in data[i][j].keys() and rd in data[i][j]["children"]:
query.append(rd - j - 1)
break
if j == rd - CONTEXT_WINDOW:
query.append(CONTEXT_WINDOW - 1)
break
# query = [context,predict_node,position, same_node_position,parent_node_position]
data_rd.append(query)
return data_rd
training_queries = Queries(training_data)
training_queries.sort( key=lambda x: x[2],reverse=True) # sort
#print(training_queries)
now = time.time()
print("make the queries",now-time_start)
#4 text -> index
def prepare_sequence(seq, val_to_ix, type_to_ix): # trans code to idex
idxs_ty = []
idxs_vl = []
UNK = 1
for node in seq:
value_str = node.get('value', 'UNK')
idxs_vl.append(val_to_ix.get(value_str, UNK))
idxs_ty.append(type_to_ix[node.get('type', 'UNK')])
#print("np.array([idxs_ty, idxs_vl])",np.array([idxs_ty, idxs_vl]))
return torch.tensor([idxs_vl, idxs_ty],dtype = torch.long)
input_value = []
input_type = []
parent = []
target = []
for i in range(len(training_queries)):
sequence = training_queries[i][0]
[input_val, input_ty] = prepare_sequence(sequence, value_vocab, type_vocab)
par = torch.tensor(training_queries[i][4],dtype = torch.long)
targ = torch.tensor(training_queries[i][3],dtype = torch.long)
input_value.append(input_val)
input_type.append(input_ty)
parent.append(par)
target.append(targ)
now = time.time()
print("text -> index",now-time_start)
#5 padding and save
import torch.nn.utils.rnn as rnn_utils
from torch.utils.data import DataLoader
import torch.utils.data as data
class MyData(data.Dataset):
def __init__(self,data_seq, input_value, input_type, target, parent):
self.input_value = input_value
self.input_type = input_type
self.target = target
self.parent = parent
self.length = len(self.target)
self.data_length = [len(sq) for sq in data_seq]
def __len__(self):
return self.length
def __getitem__(self, idx):
return self.input_type[idx],self.input_value[idx], self.data_length[idx], self.target[idx], self.parent[idx]
x = rnn_utils.pad_sequence(input_value, batch_first=True)
y = rnn_utils.pad_sequence(input_type, batch_first=True)
dataAll = MyData(input_value,x,y,target,parent)
#print(dataAll.data_length)
now = time.time()
print("5. padding ",now-time_start)
# save
import pickle
with open('../data/python/training_50k.pickle', 'wb') as f:
pickle.dump(dataAll, f, protocol=pickle.HIGHEST_PROTOCOL)
np.savez('../data/python/vocabulary_50k.npz',value_vocab = value_vocab,type_vocab = type_vocab)
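
# --- Illustrative sketch (not part of the original script) -------------------
# Hedged example of reading the artifacts written above back in; it reuses the
# paths and the MyData class defined in this file and is not executed here.
def load_saved_queries_sketch():
    with open('../data/python/training_50k.pickle', 'rb') as f:
        dataset = pickle.load(f)
    vocab = np.load('../data/python/vocabulary_50k.npz', allow_pickle=True)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    return loader, vocab['value_vocab'].item(), vocab['type_vocab'].item()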
|
[
"noreply@github.com"
] |
s1530129650.noreply@github.com
|
4a18ef0719c0058c463c0200d66e76acbe62ccfd
|
e49b654d3db99773390c5b9686df9c99fbf92b2a
|
/linked_lists/is_palindrome.py
|
703a4960dc6cfbe7e741efde1dd056a7ede1b2cc
|
[] |
no_license
|
hao89/diary_of_programming_puzzles
|
467e8264d0ad38768ba5ac3cfb45301293d79943
|
0e05d3716f28075f99bbd7b433d16a383209e57c
|
refs/heads/master
| 2021-01-16T00:49:38.956102
| 2015-08-25T13:44:53
| 2015-08-25T13:44:53
| 41,692,587
| 1
| 0
| null | 2015-08-31T18:20:38
| 2015-08-31T18:20:36
|
Python
|
UTF-8
|
Python
| false
| false
| 831
|
py
|
"""
Implement a function to check if a linked list is a palindrome
"""
import random
from linked_list import LinkedListNode
def is_palindrome1(linked_list):
    # reverse and compare: collect the values, then compare the sequence with
    # its reverse (one straightforward reading of the hint above)
    values = []
    current_node = linked_list
    while current_node is not None:
        values.append(current_node.data)
        current_node = current_node.next
    return values == values[::-1]
def build_palindrome_list():
root = LinkedListNode(5)
previous_node = root
for i in range(0, 2):
new_node = LinkedListNode(random.randint(0, 9))
previous_node.next = new_node
previous_node = new_node
stack = []
current_node = root
while current_node.next != None: # all but the last one
stack.append(current_node.data)
current_node = current_node.next
while len(stack) != 0:
data = stack.pop()
new_node = LinkedListNode(data)
previous_node.next = new_node
previous_node = new_node
return root
def build_random_list():
    # build a short list of random digits, mirroring build_palindrome_list above
    root = LinkedListNode(random.randint(0, 9))
    previous_node = root
    for i in range(0, 4):
        new_node = LinkedListNode(random.randint(0, 9))
        previous_node.next = new_node
        previous_node = new_node
    return root
|
[
"me@davidadamojr.com"
] |
me@davidadamojr.com
|
69709d69ea014f6f4dfe7ed425332532be7c41f0
|
617c294d421f8abbd7f85e627bf662430b7a28bc
|
/easy_web_py/core/mainHandler.py
|
d08aba816b8ec4082b3093df6830c42b896bf31a
|
[] |
no_license
|
Gustavovaler/easy_web_py
|
a6a9df7b3f30e9c9226921fd6205b8918c6fb7af
|
695e8713c06dbbabcab5bb584f53dbf0820dc95f
|
refs/heads/master
| 2022-09-21T03:08:21.632406
| 2020-06-04T06:39:53
| 2020-06-04T06:39:53
| 256,370,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
from http.server import BaseHTTPRequestHandler
from ..resources.routes import routes_list
class MainHandler(BaseHTTPRequestHandler):
print(routes_list)
def parse_path(self):
for route in routes_list:
if self.path in route["path"]:
self.controller_request = route["controller"]
return self.controller_request
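
# --- Illustrative sketch (not part of the original module) -------------------
# Hedged example of the entries parse_path() expects in routes_list: each item
# carries a "path" and a "controller". The values below are invented purely
# for illustration.
EXAMPLE_ROUTES_SKETCH = [
    {"path": "/", "controller": "home_controller"},
    {"path": "/about", "controller": "about_controller"},
]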
|
[
"gustavodevaler@gmail.com"
] |
gustavodevaler@gmail.com
|
286901b3a6a7ec15eaad0c29b53851f0e00a3e81
|
599db5e2e3c4d6c296de25a8ef8e95a862df032b
|
/OpenAI Gym/(clear)breakout-ramdeterministic-v4/model6/14000epi/modelPlay.py
|
7e5537269ecb2dd11378115b992616635625fad7
|
[] |
no_license
|
wantyouring/ML-practice-code
|
bb7577e99f22587c7ca016c1c4d067175e5ce9d9
|
a3efbb4d252bacc831c5d7a01daf6476e7a755e4
|
refs/heads/master
| 2020-05-14T17:45:17.735081
| 2019-06-30T14:43:25
| 2019-06-30T14:43:25
| 181,898,074
| 0
| 1
| null | 2019-06-15T05:52:44
| 2019-04-17T13:31:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,233
|
py
|
# Play the trained model and compare it against random actions.
import gym
import pylab
import numpy as np
import gym.wrappers as wrappers
from doubleDQN2 import DoubleDQNAgent
EPISODES = 1 # first run with random actions, then with the trained model
global_step = 0
def change_action(action):
if action == 0:
return 0
elif action == 1:
return 2
elif action == 2:
return 3
elif action == 3:
return 3
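
# --- Illustrative note (not part of the original script) ---------------------
# In Breakout-ram the ALE action set is usually 0=NOOP, 1=FIRE, 2=RIGHT,
# 3=LEFT, so change_action() above folds the agent's three actions onto
# NOOP/RIGHT/LEFT. The dict below is only a hedged reference mapping and is
# not used by the script.
BREAKOUT_ACTION_MEANINGS_SKETCH = {0: "NOOP", 1: "FIRE", 2: "RIGHT", 3: "LEFT"}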
if __name__ == "__main__":
env = gym.make('Breakout-ramDeterministic-v4')
env = wrappers.Monitor(env,"./results",force = True)
state_size = 128
action_size = 3
agent = DoubleDQNAgent(state_size, action_size)
agent.load_model()
    agent.epsilon = -1 # act greedily from Q values only
agent.render = True
scores, episodes = [], []
random_success_cnt = 0
model_success_cnt = 0
    # episodes with random actions
for e in range(EPISODES):
done = False
score = 0
life = 5
env.reset()
for i in range(5):
            env.step(1) # initial action (FIRE)
while not done:
action = env.action_space.sample()
_, reward, done, info = env.step(change_action(action))
score += reward
if done:
if score > 0:
random_success_cnt += 1
print("episode:", e, " score:", score)
scores.append(score)
episodes.append(e)
break
if life != info['ale.lives']:
life = info['ale.lives']
for i in range(5):
state, _, _, _ = env.step(1)
state = np.reshape(state, [1, 128])
    # episodes with the trained model
for e in range(EPISODES,EPISODES*2):
done = False
life = 5
score = 0
state = env.reset()
for i in range(5):
            state, _, _, _ = env.step(1) # initial action (FIRE)
state = np.reshape(state,[1,128])
while not done:
global_step += 1
if agent.render:
env.render()
            # take action a in the current state s and observe s', r, done
action = agent.get_action(state)
next_state, reward, done, info = env.step(change_action(action))
score += reward
state = next_state
state = np.reshape(state, [1, 128])
if done:
if score > 0 :
model_success_cnt += 1
print("episode:", e, " score:", score)
scores.append(score)
episodes.append(e)
if e % 5 == 0:
pylab.plot(episodes, scores)
pylab.savefig("./play_score.png")
break
if life != info['ale.lives']:
life = info['ale.lives']
for i in range(5):
state, _, _, _ = env.step(1)
state = np.reshape(state, [1, 128])
env.close()
print("random : {}/{} success. rate : {}".format(random_success_cnt,EPISODES,random_success_cnt/EPISODES))
print("model : {}/{} success. rate : {}".format(model_success_cnt,EPISODES,model_success_cnt/EPISODES))
|
[
"pwc99@naver.com"
] |
pwc99@naver.com
|
7ae19c87ae0a743f37e10842eb6d7fe795206b0d
|
4afa76df7f5f42fa97fd90f8da01374654d0d891
|
/train.py
|
4365b894e7248fe4525d2627f482f451ef9b658d
|
[] |
no_license
|
hantek/zmlp
|
923b8ce11c93dca620fe4bae0be5a5d341a3f64f
|
0fce920850f84631f6996e50df2a6089a958a172
|
refs/heads/master
| 2021-01-22T02:34:12.575698
| 2014-12-06T03:58:25
| 2014-12-06T03:58:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,025
|
py
|
import numpy
import numpy.random
import theano
import theano.tensor as T
SharedCPU = theano.tensor.sharedvar.TensorSharedVariable
try:
SharedGPU = theano.sandbox.cuda.var.CudaNdarraySharedVariable
except:
SharedGPU=SharedCPU
class GraddescentMinibatch(object):
def __init__(self, varin, data, cost, params,
truth=None, truth_data=None, supervised=False,
batchsize=100, learningrate=0.1, momentum=0.9,
rng=None, verbose=True):
"""
Using stochastic gradient descent with momentum on data in a minibatch
update manner.
"""
# TODO: check dependencies between varin, cost, and param.
assert isinstance(varin, T.TensorVariable)
if (not isinstance(data, SharedCPU)) and \
(not isinstance(data, SharedGPU)):
raise TypeError("\'data\' needs to be a theano shared variable.")
assert isinstance(cost, T.TensorVariable)
assert isinstance(params, list)
self.varin = varin
self.data = data
self.cost = cost
self.params = params
if supervised:
if (not isinstance(truth_data, SharedCPU)) and \
(not isinstance(truth_data, SharedGPU)):
raise TypeError("\'truth_data\' needs to be a theano " + \
"shared variable.")
assert isinstance(truth, T.TensorVariable)
self.truth_data = truth_data
self.truth = truth
self.verbose = verbose
self.batchsize = batchsize
self.numbatches = self.data.get_value().shape[0] / batchsize
self.momentum = momentum
self.supervised = supervised
if rng is None:
rng = numpy.random.RandomState(1)
assert isinstance(rng, numpy.random.RandomState), \
"rng has to be a random number generater."
self.rng = rng
self.epochcount = 0
self.index = T.lscalar('batch_index_in_sgd')
self.incs = dict([(
p,
theano.shared(value=numpy.zeros(p.get_value().shape,
dtype=theano.config.floatX),
name='inc_' + p.name,
broadcastable=p.broadcastable)
) for p in self.params])
self.grad = T.grad(self.cost, self.params)
self.set_learningrate(learningrate)
def set_learningrate(self, learningrate):
"""
TODO: set_learningrate() is not known to be working after
initialization. Not checked. A unit test should be written on it.
"""
self.learningrate = learningrate
        self.inc_updates = []  # updates the parameter increments (i.e. the
                               # values in the self.incs dictionary). Due to
                               # momentum, the increment itself changes
                               # between epochs. It is updated:
                               # from (key) inc_params
                               # to (value) momentum * inc_params - lr * grad
self.updates = [] # updates the parameters of model during each epoch.
# from (key) params
# to (value) params + inc_params
for _param, _grad in zip(self.params, self.grad):
self.inc_updates.append(
(self.incs[_param],
self.momentum * self.incs[_param] - self.learningrate * _grad
)
)
self.updates.append((_param, _param + self.incs[_param]))
if not self.supervised:
self._updateincs = theano.function(
inputs = [self.index],
outputs = self.cost,
updates = self.inc_updates,
givens = {
self.varin : self.data[self.index * self.batchsize: \
(self.index+1)*self.batchsize]
}
)
else:
self._updateincs = theano.function(
inputs = [self.index],
outputs = self.cost,
updates = self.inc_updates,
givens = {
self.varin : self.data[self.index * self.batchsize: \
(self.index+1)*self.batchsize],
self.truth : self.truth_data[self.index * self.batchsize: \
(self.index+1)*self.batchsize]
}
)
self.n = T.scalar('n')
self.noop = 0.0 * self.n
self._trainmodel = theano.function([self.n], self.noop,
updates = self.updates)
def step(self):
# def inplaceclip(x):
# x[:,:] *= x>0.0
# return x
# def inplacemask(x, mask):
# x[:,:] *= mask
# return x
stepcount = 0.0
cost = 0.
for batch_index in self.rng.permutation(self.numbatches - 1):
stepcount += 1.0
# This is Roland's way of computing cost, still mean over all
# batches. It saves space and don't harm computing time...
# But a little bit unfamilliar to understand at first glance.
cost = (1.0 - 1.0/stepcount) * cost + \
(1.0/stepcount) * self._updateincs(batch_index)
self._trainmodel(0)
self.epochcount += 1
if self.verbose:
print 'epoch: %d, lr: %f, cost: %f' % (
self.epochcount, self.learningrate, cost
)
return cost
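
# --- Illustrative sketch (not part of the original file) ---------------------
# Hedged usage example for GraddescentMinibatch. The `model` argument is
# assumed to expose `varin`, a scalar `cost`, and a `params` list; those names
# are invented here for illustration and do not come from this repository.
def train_unsupervised_sketch(model, train_x, n_epochs=10):
    trainer = GraddescentMinibatch(varin=model.varin, data=train_x,
                                   cost=model.cost, params=model.params,
                                   batchsize=100, learningrate=0.1)
    for epoch in range(n_epochs):
        trainer.step()
    return trainer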
class FeedbackAlignment(object):
def __init__(self, model, data, truth_data,
batchsize=100, learningrate=0.1, rng=None, verbose=True):
"""
It works for both linear and nonlinear layers.
        Cost is defined intrinsically as the MSE between the target y vector and
real y vector at the top layer.
Parameters:
------------
model : StackedLayer
data : theano.compile.SharedVariable
truth_data : theano.compile.SharedVariable
"""
if (not isinstance(data, SharedCPU)) and \
(not isinstance(data, SharedGPU)):
raise TypeError("\'data\' needs to be a theano shared variable.")
if (not isinstance(truth_data, SharedCPU)) and \
(not isinstance(truth_data, SharedGPU)):
raise TypeError("\'truth_data\' needs to be a theano shared variable.")
self.varin = model.models_stack[0].varin
self.truth = T.lmatrix('trurh_fba')
self.data = data
self.truth_data = truth_data
self.model = model
self.output = model.models_stack[-1].output()
self.verbose = verbose
self.batchsize = batchsize
self.numbatches = self.data.get_value().shape[0] / batchsize
if rng is None:
rng = numpy.random.RandomState(1)
assert isinstance(rng, numpy.random.RandomState), \
"rng has to be a random number generater."
self.rng = rng
self.error = (self.truth - self.output) * \
self.model.models_stack[-1].activ_prime()
# set fixed random matrix
self.fixed_B = [None, ]
for imod in self.model.models_stack[1:]:
i_layer_B = []
for ipar in imod.params:
rnd = numpy.asarray(
self.rng.uniform(
low = -4 * numpy.sqrt(6. / (imod.n_in + imod.n_out)),
high = 4 * numpy.sqrt(6. / (imod.n_in + imod.n_out)),
size = ipar.get_value().shape
),
dtype=ipar.dtype
)
i_layer_B.append(
theano.shared(value = rnd, name=ipar.name + '_fixed',
borrow=True)
)
self.fixed_B.append(i_layer_B)
self.epochcount = 0
self.index = T.lscalar('batch_index_in_fba')
self._get_cost = theano.function(
inputs = [self.index],
outputs = T.sum(self.error ** 2),
givens = {
self.varin : self.data[self.index * self.batchsize: \
(self.index+1)*self.batchsize],
self.truth : self.truth_data[self.index * self.batchsize: \
(self.index+1)*self.batchsize]
}
)
self.set_learningrate(learningrate)
def set_learningrate(self, learningrate):
self.learningrate = learningrate
layer_error = self.error
self.layer_learning_funcs = []
for i in range(len(self.model.models_stack) - 1, -1, -1):
iupdates = []
iupdates.append((
self.model.models_stack[i].w,
self.model.models_stack[i].w + self.learningrate * \
T.dot(self.model.models_stack[i].varin.T, layer_error)
)) # w
iupdates.append((
self.model.models_stack[i].b,
self.model.models_stack[i].b + self.learningrate * \
T.mean(layer_error, axis=0)
)) # b
if i > 0: # exclude the first layer.
layer_error = T.dot(layer_error, self.fixed_B[i][0].T) * \
self.model.models_stack[i-1].activ_prime()
self.layer_learning_funcs.append(
theano.function(
inputs = [self.index],
outputs = self.model.models_stack[i].output(),
updates = iupdates,
givens = {
self.varin : self.data[
self.index * self.batchsize: \
(self.index+1)*self.batchsize
],
self.truth : self.truth_data[
self.index * self.batchsize: \
(self.index+1)*self.batchsize
]
}
)
)
def step(self):
stepcount = 0.
cost = 0.
for batch_index in self.rng.permutation(self.numbatches - 1):
stepcount += 1.
cost = (1.0 - 1.0/stepcount) * cost + \
(1.0/stepcount) * self._get_cost(batch_index)
for layer_learn in self.layer_learning_funcs:
layer_learn(batch_index)
self.epochcount += 1
if self.verbose:
print 'epoch: %d, lr: %f, cost: %f' % (
self.epochcount, self.learningrate, cost
)
return cost
|
[
"lin.zhouhan@gmail.com"
] |
lin.zhouhan@gmail.com
|
dcb6d1f9b8f18e4873a300301df53135071597f8
|
853f628139c5accdb14a2bcbcb2ea11df5e2ea82
|
/3-5.py
|
3e90e66f83d69f658ce7c67025617164fef60c0b
|
[
"MIT"
] |
permissive
|
MasazI/python-r-stan-bayesian-model
|
cdcc49348a55bbd615925da8de5f260a6235620b
|
05a224958a3f5cbea207001465ac12b6862d9d9f
|
refs/heads/master
| 2022-02-28T13:21:32.779730
| 2019-11-03T13:36:04
| 2019-11-03T13:36:04
| 218,285,214
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,098
|
py
|
###############
#
# Transform R to Python Copyright (c) 2016 Masahiro Imai Released under the MIT license
#
###############
import os
import numpy as np
import pystan
import pandas
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
import arviz as az
file_beer_sales_2 = pandas.read_csv('3-2-1-beer-sales-2.csv')
print(file_beer_sales_2.head())
# sns.pairplot(file_beer_sales_2)
# plt.show()
print(len(file_beer_sales_2))
sample_num = len(file_beer_sales_2)
temperature_pred = range(11,31)
stan_data = {
'N': sample_num,
'sales': file_beer_sales_2['sales'],
'temperature': file_beer_sales_2['temperature'],
'N_pred': len(temperature_pred),
'temperature_pred': temperature_pred
}
if os.path.exists('3-3-1-simple-lm-pred.pkl'):
sm = pickle.load(open('3-3-1-simple-lm-pred.pkl', 'rb'))
else:
# a model using prior for mu and sigma.
sm = pystan.StanModel(file='3-3-1-simple-lm-pred.stan')
mcmc_result = sm.sampling(
data=stan_data,
seed=1,
chains=4,
iter=2000,
warmup=1000,
thin=1
)
print(mcmc_result)
# mcmc_result.plot()
# plt.show()
mcmc_sample = mcmc_result.extract(permuted=True)
print(mcmc_sample['sales_pred'].shape)
az.plot_forest([mcmc_sample['beta'], mcmc_sample['Intercept']])
plt.show()
# visualization of regression line
df = pandas.DataFrame(mcmc_sample['sales_pred'])
col = np.arange(11,31)
df.columns = col
qua = [0.025, 0.25, 0.50, 0.75, 0.975]
d_est = pandas.DataFrame()
# df.quantile(qu) already returns one value per temperature column, so a single
# loop over the quantile levels is enough
for qu in qua:
    d_est[qu] = df.quantile(qu)
print(d_est)
x = d_est.index
y1 = d_est[0.025].values
y2 = d_est[0.25].values
y3 = d_est[0.5].values
y4 = d_est[0.75].values
y5 = d_est[0.975].values
plt.fill_between(x,y1,y5,facecolor='blue',alpha=0.1)
plt.fill_between(x,y2,y4,facecolor='blue',alpha=0.5)
plt.plot(x,y3,'k-')
plt.scatter(file_beer_sales_2["temperature"],file_beer_sales_2["sales"],c='b')
plt.show()
# saving compiled model
if not os.path.exists('3-3-1-simple-lm-pred.pkl'):
with open('3-3-1-simple-lm-pred.pkl', 'wb') as f:
pickle.dump(sm, f)
|
[
"masaz.dream@gmail.com"
] |
masaz.dream@gmail.com
|
17bb28c909cb81a12d6e09aa97eceda48b4c70d7
|
2ac2dc7550d4e08d56dff220c65d78799e221dc9
|
/users/forms.py
|
a28532794be55c34c3d0c00b0f403baddb94d9a9
|
[] |
no_license
|
Vladimir-1909/blog_news
|
b152883962ac3521c3294bd1e590f4144dc18966
|
d5a45e70bb44c7004824b83e1e3771c25760c975
|
refs/heads/main
| 2023-03-07T15:47:43.969174
| 2021-02-22T08:37:44
| 2021-02-22T08:37:44
| 341,130,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,474
|
py
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile, SendMail
class UserRegisterForm(UserCreationForm):
email = forms.EmailField(
label='Введите Почту',
required=True,
widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Введите Почту',
'autocomplete': 'off'
})
)
username = forms.CharField(
label='Введите Логин',
required=True,
help_text='Нельзя вводить символы: @, /, _',
widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Введите Логин',
'autocomplete': 'off'
})
)
password1 = forms.CharField(
label='Введите Пароль',
required=True,
        help_text='Пароль не должен быть маленьким и простым',
widget=forms.PasswordInput(attrs={
'class': 'form-control',
'placeholder': 'Введите Пароль',
'autocomplete': 'off'
})
)
password2 = forms.CharField(
label='Подтвердите пароль',
required=True,
widget=forms.PasswordInput(attrs={
'class': 'form-control',
'placeholder': 'Подтвердите пароль',
'autocomplete': 'off'
})
)
class Meta:
model = User
fields = ['email', 'username', 'password1', 'password2']
class UserUpdateForm(forms.ModelForm):
username = forms.CharField(
label='Имя пользователя',
required=False,
help_text='Обязательное поле. Не более 150 символов. Только буквы, цифры и символы: @/./+/-/_.',
widget=forms.TextInput(attrs={
'class': 'form-control help-text',
'placeholder': 'Введите Логин'
})
)
email = forms.EmailField(
label='Email',
required=False,
widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Введите Почту'
})
)
class Meta:
model = User
fields = ['username', 'email']
class ProfileImageForm(forms.ModelForm):
img = forms.ImageField(
label='Изображение профиля',
required=True
)
class Meta:
model = Profile
fields = ['img']
class UpdateGenderSelection(forms.ModelForm):
selections = (
(' ', '-'),
('m', 'Мужской пол.'),
('w', 'Женский пол.')
)
gender = forms.ChoiceField(
choices=selections,
label='Выберите пол',
widget=forms.Select(attrs={'class': 'form-control'})
)
class Meta:
model = Profile
fields = ['gender']
class MailAgreement(forms.ModelForm):
agreement = forms.BooleanField(
label='Соглашение про отправку уведомлений на почту',
label_suffix='',
required=False,
widget=forms.CheckboxInput()
)
class Meta:
model = Profile
fields = ['agreement']
class SendMailUserForm(forms.ModelForm):
subject = forms.CharField(
label='Тема письма',
required=True,
widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Форма отправки сообщений',
})
)
from_email = forms.EmailField(
label='Ваша почта',
required=True,
help_text='Обязательное поле',
widget=forms.TextInput(attrs={
'class': 'form-control',
'autocomplete': 'off'
})
)
plain_message = forms.CharField(
label='Введите текст сообщения',
required=True,
widget=forms.Textarea(attrs={
'class': 'form-control',
'placeholder': 'Все поля проходят проверки. Если что-то не заполнено, то отображается ошибка',
'autocomplete': 'off'
})
)
class Meta:
model = SendMail
fields = ['subject', 'from_email', 'plain_message']
|
[
"vtgk12ak@gmail.com"
] |
vtgk12ak@gmail.com
|
dc2ea8bc1a8d47ea00f0731625419d553fe1c45e
|
1081b4b2a575b4b786db71289f6e06497da4ae27
|
/PSO/Microgrid optimization/In objective approach/Multiprocessing/MicrogridPSO_module_PSO.py
|
3a081a2119588e545b5c45c775954a51d5982771
|
[] |
no_license
|
cycle13/Miniconda
|
6e468f6d87b8424f662cf3aefdf250c1ec81f97b
|
0d589134a9d65dccad05bb968406c98df26f29ab
|
refs/heads/master
| 2022-01-06T21:51:16.291238
| 2019-07-09T02:11:04
| 2019-07-09T02:11:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,507
|
py
|
import numpy as np
import copy
from MicrogridPSO_module_flowchart import loop_flowchart
# * Read the PSO input parameters (iterations, particles, w, c1, c2)
def initialize_PSO_parameters(PSO):
# * input initialize parametes
n_iterations = int(PSO.Multiprocessing_parameters["n_iterations"])
n_particles = int(PSO.Multiprocessing_parameters["n_particles"])
w = float(PSO.Multiprocessing_parameters["w"])
c1 = float(PSO.Multiprocessing_parameters["c1"])
c2 = float(PSO.Multiprocessing_parameters["c2"])
return n_iterations, n_particles, w, c1, c2
# * Create the initial particles, given the number of particles as an argument
#   (4-component position vectors)
def initialize__PSO_particles(n_particles):
"""
    Create random initial vectors, one per particle
"""
velocity_vector = ([np.array([0, 0, 0, 0]) for _ in range(n_particles)])
previous_velocity_vector = ([np.array([0, 0, 0, 0])
for _ in range(n_particles)])
iteration = 0
personal_best_fitness_value = np.array(
[float('inf') for _ in range(n_particles)])
global_best_fitness_value = float('inf')
range_vector = [10000, 20, 100, 30]
particle_position_vector = \
np.array([np.array([np.random.rand() * range_vector[0], np.random.rand() * range_vector[1],
np.random.rand() * range_vector[2], np.random.rand() * range_vector[3]])
for _ in range(n_particles)])
personal_best_position = particle_position_vector
global_best_position = personal_best_position
particle = {"particle_position_vector": particle_position_vector,
"personal_best_position": personal_best_position,
"personal_best_fitness_value": personal_best_fitness_value,
"global_best_fitness_value": global_best_fitness_value,
"global_best_position": global_best_position,
"velocity_vector": velocity_vector,
"previous_velocity_vector": previous_velocity_vector,
"iteration": iteration,
"range_vector": range_vector
}
return particle
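
# --- Illustrative sketch (not part of the original module) -------------------
# Hedged restatement of the canonical PSO update applied further down in
# iterations_PSO(); this helper is not called by the module:
#   v_new = w*v + c1*r1*(personal_best - x) + c2*r2*(global_best - x)
#   x_new = x + v_new
def pso_update_sketch(x, v, personal_best, global_best, w, c1, c2):
    r1, r2 = np.random.rand(), np.random.rand()
    v_new = w * v + c1 * r1 * (personal_best - x) + c2 * r2 * (global_best - x)
    return x + v_new, v_new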
# * Constraint function
def constrained(position):
x = position[0]
y = position[1]
z = position[2]
v = position[3]
if 20000 > x > 0 and 40 > y > 0 and 200 > z > 0 and 60 > v > 0:
judge = True
else:
judge = False
return judge
# Find optimized cost configurations.
def iterations_PSO(PSO):
# initialized particle
n_iterations, n_particles, w, c1, c2 = initialize_PSO_parameters(PSO)
PSO.particle = initialize__PSO_particles(n_particles)
print(
PSO.fitness_variable_parameters,
"\niterations:",
n_iterations,
"n_particles:",
n_particles,
"w:",
w,
"c1:",
c1,
"c2:",
c2,
"particle:",
PSO.particle)
# * PSO calc
PSO.all_particle_data = np.zeros(shape=(n_iterations, n_particles, 4))
PSO.all_particle_data_with_cost = np.zeros(shape=(n_iterations, n_particles, 6))
PSO.particle_data = np.zeros(shape=(n_particles, 4))
PSO.particle_data_with_cost = np.zeros(shape=(n_particles, 6))
particle_position_vector = PSO.particle["particle_position_vector"]
PSO.personal_best_position = PSO.particle["personal_best_position"]
PSO.personal_best_fitness_value = PSO.particle["personal_best_fitness_value"]
PSO.global_best_fitness_value = PSO.particle["global_best_fitness_value"]
PSO.global_best_position = PSO.particle["global_best_position"]
previous_velocity_vector = PSO.particle["previous_velocity_vector"]
iteration = PSO.particle["iteration"]
original_particle_position_vector = np.array([0, 0, 0, 0])
original_previous_velocity_vector = np.array([0, 0, 0, 0])
PSO.global_best_list = []
PSO.iteration_list = []
PSO.best_cost_list = []
for iteration in range(n_iterations):
print('-------iteration', '=', str(iteration), '-----------')
print(str(particle_position_vector))
for i in range(n_particles):
            # * store the particle values into the equipment capacities
PSO.update_fitness_variable_parameters(
{'pv_cap_max': particle_position_vector[i][0],
'wind_cap_max': particle_position_vector[i][1],
'battery_cap_max': particle_position_vector[i][2],
'diesel_max': particle_position_vector[i][3]})
            # * reset total_check
total_check = True
            # * run the flowchart loop and collect the calculation results
df, total_check, variables, total_cost, PSO.SCL, PSO.SEL, success_loops, failed_loops = loop_flowchart(
PSO)
            # * reset the while-loop counter
loop_number = 1
            # * if the flowchart fails or the constraints are violated, reset the particle position randomly
while total_check is False or constrained(
particle_position_vector[i]) is False:
print(' *particle_position_vector is error.loop=',
loop_number, particle_position_vector[i])
'''
particle_position_vector[i] = [np.random.rand()*PSO.particle["range_vector"][0], np.random.rand()*PSO.particle["range_vector"][1],\
np.random.rand()*PSO.particle["range_vector"][2], np.random.rand()*PSO.particle["range_vector"][3]]
'''
if iteration == 0:
particle_position_vector[i] = [
np.random.rand() * PSO.particle["range_vector"][0],
np.random.rand() * PSO.particle["range_vector"][1],
np.random.rand() * PSO.particle["range_vector"][2],
np.random.rand() * PSO.particle["range_vector"][3]]
loop_number += 1
else:
if loop_number == 1:
original_particle_position_vector = particle_position_vector[i]
original_previous_velocity_vector = previous_velocity_vector[i]
print(
' *original_particle_position_vector',
original_particle_position_vector)
print(
' *original_previous_velocity_vector',
original_previous_velocity_vector)
new_velocity = (
w * original_previous_velocity_vector) + (
c1 * np.random.rand()) * (
PSO.personal_best_position[i] - original_particle_position_vector) + (
c2 * np.random.rand()) * (
PSO.global_best_position - original_particle_position_vector)
particle_position_vector[i] = new_velocity + \
original_particle_position_vector
                    # reset the position if the loop has run 3 or more times
if loop_number >= 3:
particle_position_vector[i] = [
np.random.rand() * PSO.particle["range_vector"][0],
np.random.rand() * PSO.particle["range_vector"][1],
np.random.rand() * PSO.particle["range_vector"][2],
np.random.rand() * PSO.particle["range_vector"][3]]
print(
' *loop=',
loop_number,
'new_velocity:',
new_velocity,
'original_particle_position_vector',
original_particle_position_vector)
loop_number += 1
print(
' *particle_position_vector is updated by error.',
particle_position_vector[i])
                # * store the particle values into the equipment capacities
PSO.update_fitness_variable_parameters(
{'pv_cap_max': particle_position_vector[i][0],
'wind_cap_max': particle_position_vector[i][1],
'battery_cap_max': particle_position_vector[i][2],
'diesel_max': particle_position_vector[i][3]})
                # * reset total_check
total_check = True
                # * run the flowchart loop and collect the calculation results
df, total_check, variables, total_cost, PSO.SCL, PSO.SEL, success_loops, failed_loops \
= loop_flowchart(PSO)
            # * proceed with the PSO step only when the flowchart ran without errors
            # * and the constraints are satisfied
if total_check and constrained(particle_position_vector[i]):
fitness_cadidate = total_cost
print(
'-----particle_position[',
str(i),
'] ',
fitness_cadidate,
'[yen]. ',
particle_position_vector[i])
if (PSO.personal_best_fitness_value[i] > fitness_cadidate):
print(
' *personal best is updated.particle[',
i,
']:',
particle_position_vector[i])
                    # ! lists are passed by reference, so deep-copy the value to keep it from being modified unexpectedly
# http://amacbee.hatenablog.com/entry/2016/12/07/004510
# https://rcmdnk.com/blog/2015/07/08/computer-python/
PSO.personal_best_fitness_value[i] = copy.deepcopy(
fitness_cadidate)
PSO.personal_best_position[i] = copy.deepcopy(
particle_position_vector[i])
if (PSO.global_best_fitness_value > fitness_cadidate):
print(
' *global best is updated.',
particle_position_vector[i])
print('variables: ', variables)
                    # ! lists are passed by reference, so deep-copy the value to keep it from being modified unexpectedly
PSO.global_best_fitness_value = copy.deepcopy(
fitness_cadidate)
PSO.global_best_position = copy.deepcopy(
particle_position_vector[i])
PSO.best = {
'iterations': iteration,
'particle_number': i,
'global_best_position': PSO.global_best_position,
'global_best_fitness_value': PSO.global_best_fitness_value,
'table': df,
"variables": variables,
"SCL": PSO.SCL,
"SEL": PSO.SEL}
            # * store this particle's position
PSO.particle_data[i] = particle_position_vector[i]
PSO.particle_data_with_cost[i] = np.append(np.append(particle_position_vector[i], total_cost), PSO.global_best_fitness_value)
if iteration != 0:
previous_velocity_vector[i] = new_velocity
new_velocity = (
w * previous_velocity_vector[i]) + (
c1 * np.random.rand()) * (
PSO.personal_best_position[i] - particle_position_vector[i]) + (
c2 * np.random.rand()) * (
PSO.global_best_position - particle_position_vector[i])
new_position = new_velocity + particle_position_vector[i]
particle_position_vector[i] = new_position
print(
' *previous_velocity_vector[',
i,
']',
previous_velocity_vector[i])
print(' *new_velocity', new_velocity)
previous_velocity_vector[i] = new_velocity
print(' *particle_position_vector[',
str(i),
'] is updated. particle_position:',
str(particle_position_vector[i]))
        # * save this iteration's values to the lists
PSO.global_best_list.append(PSO.global_best_position)
PSO.iteration_list.append(iteration)
PSO.best_cost_list.append(PSO.global_best_fitness_value)
PSO.all_particle_data[iteration] = PSO.particle_data
PSO.all_particle_data_with_cost[iteration] = PSO.particle_data_with_cost
iteration = iteration + 1
    # * export all particle data as .npy files
np.save("Result/" + str(PSO.Multiprocessing_parameters["state_name"]) + "all_particle_data.npy", PSO.all_particle_data)
np.save("Result/" + str(PSO.Multiprocessing_parameters["state_name"]) + "all_particle_data_with_cost.npy", PSO.all_particle_data_with_cost)
print(
"The best position is ",
PSO.best["global_best_position"],
" and global_best_fitness_value is",
PSO.best["global_best_fitness_value"],
"Each particles best is:",
PSO.personal_best_position,
" in iteration number ",
iteration,
"and ",
n_particles,
"particles.")
return PSO
|
[
"insideman02468@gmail.com"
] |
insideman02468@gmail.com
|
8f7329745bd74286e1d3df0ea9914dfa18347a3b
|
0e0e755dcee24259fff3223a8f3579e6f1717e52
|
/Final Project/final project.py
|
2d98f59c616276085d2f0eb8e5a7fe6aaa72f1ff
|
[] |
no_license
|
anwittin/Python
|
0e25eb4cf7822d338e9fd64da1d7daa058d249c0
|
1a9eeba502dc74b0279fab8466f6d088a6fc425f
|
refs/heads/master
| 2016-08-12T04:27:13.080084
| 2016-01-01T22:40:11
| 2016-01-01T23:16:36
| 43,617,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,384
|
py
|
from sys import exit
import time
def kitchen():
print "This is the kitchen, there is food to cook and a small door at the back too."
print "You're hungry. what do you do?"
print "cook | enter door"
choice = raw_input("> ").lower()
if choice == 'cook':
print "Nice, hopefully you know what you're doing!\n"
print "Smells good, I hope you enjoy it"
print "Here come the zombies!"
dead("You shouldn't have started cooking...")
else:
print "Smart choice zombies like the smell of food."
print "This door leads to a safe room."
safe_room()
def front_room():
print "You're in the living room, there is a body here."
time.sleep(.75)
print "The body looks worse for wear. It may be a zombie."
time.sleep(.75)
print "The body starts moving and is blocking the next room."
time.sleep(.75)
print "What are you going to do? Punch | Stab | Run"
zombie_moved = False
while True:
choice = raw_input("> ").lower()
if choice == "punch":
dead("The zombie is fast and grabs you and eats your face.")
elif choice == "stab":
print "The zombie takes the stabbing and keeps coming at you."
print "What do you do now? Punch | Stab | Run"
elif choice == "run":
print "That was close!"
safe_room()
else:
print "I got no idea what that means."
def bedroom_room():
print "You are in a bedroom."
time.sleep(.75)
print "There is a bed, some clean clothes here and a closet."
time.sleep(.75)
print "What do you want to do? Sleep | Change | Closet | Cry "
choice = raw_input("> ").lower()
if "cry" in choice:
dead("Bummer, your tears brought the zombies.")
elif "closet" in choice:
safe_room()
elif "change" in choice:
tomorrow("A clean set of clothes helps win the smell battle.")
elif "Sleep" in choice:
dead("You gotta find a safer place to sleep!")
else:
print "Type Sleep | Change | Closet | Cry "
def safe_room():
print "You found the safe room, with food, water and weapons."
time.sleep(.75)
print "Good job it looks like you may survive until tomorrow"
time.sleep(.75)
print "What do you want to do? Sleep | Eat | Equip weapons | Go Outside"
while True:
choice = raw_input("> ").lower()
if choice == "sleep":
tomorrow("Sleep until things get better")
elif choice == "eat":
print "Nom Nom Nom"
tomorrow("Full belly to fight again tomorrow")
elif choice == "equip weapons":
print "You're a fighter!"
tomorrow("Mount up")
elif choice == "go outside":
print "That was a bad idea :-/"
dead("The zombies eat your brains.")
else:
print "I got no idea what that means."
def dead(why):
print why,"Good job! You're a zombie now."
print "Play again? Y / N"
choice = raw_input("> ").lower()
if choice == 'y':
start()
else:
print "Thanks for playing"
exit()
def tomorrow(why):
print why, "\nWinner Winner not a zombie dinner!"
print "Play again? Y / N"
choice = raw_input("> ").lower()
if choice == 'y':
start()
else:
print "Thanks for playing"
exit()
def start():
print "You have just escaped from the zombies that are outside."
print "There is a door to the north, east and west."
print "Which one do you take?"
choice = raw_input("> ")
if choice == "west":
front_room()
elif choice == "east":
bedroom_room()
elif choice == "north":
kitchen()
else:
dead("You stumble around the room until you starve.")
start()
|
[
"anwittin@gmail.com"
] |
anwittin@gmail.com
|
b45cc10b7dd6a7c3e4acc055c6c47c9efec8d6f7
|
02e3fca30edaa8d8cc87c21895cb0cf1df112a35
|
/prepare_training_dataset.py
|
d7b4a8c00870b3ae1a809173e5a7253e35f57a78
|
[] |
no_license
|
huster123/DataScience_Project
|
dc151c159d906f5a169c5bdfd7c9da304598aeac
|
6289d49052be64efccaf618553b67b9613207db2
|
refs/heads/master
| 2020-05-01T13:35:36.916908
| 2018-08-13T16:41:37
| 2018-08-13T16:41:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,751
|
py
|
# coding: utf-8
"""
Created on Thu Jun 7 13:07:59 2018
@author: fahad
@purpose : The purpose of this code is to prepare the training dataset and calculate the RUL.
The program will read the required C-MAPSS dataset file and will return the dataset with the calculated RUL
as a dataframe. It will also save the dataframe to a CSV file.
"""
# import packages
import numpy as np
import pandas as pd
# define file column heading
col_header = np.array(['UNIT_ID','CYCLE_ID','CONDITION_1','CONDITION_2','CONDITION_3','SENSOR_01','SENSOR_02','SENSOR_03','SENSOR_04',
'SENSOR_05','SENSOR_06','SENSOR_07','SENSOR_08','SENSOR_09','SENSOR_10','SENSOR_11','SENSOR_12','SENSOR_13',
'SENSOR_14','SENSOR_15','SENSOR_16','SENSOR_17','SENSOR_18','SENSOR_19','SENSOR_20','SENSOR_21'])
# function to calculate a single RUL value
def calcRUL(x,y):
rul = y-x
if rul >= 119:
rul = 119
return rul
# function to calculate RUL for a file
def populateRUL(data1):
data1['RUL']=-1
result = pd.DataFrame()
df_ids = data1['UNIT_ID'].unique()
for ids in df_ids:
df_data1 = data1[data1['UNIT_ID'] == ids].copy()
maxc = df_data1['CYCLE_ID'].max()
df_data1['RUL'] = df_data1[df_data1['UNIT_ID'] == ids]['CYCLE_ID'].apply(lambda x: calcRUL(x,maxc))
result = result.append(df_data1)
return result
# populate training file with RUL column, save results to a csv file and return the training dataframe
def load_Training_data(number):
df = pd.read_csv('CMAPSSData/train_FD00'+str(number)+'.txt', delim_whitespace=True, header=None, names=col_header)
df = populateRUL(df)
df.to_csv('inputData/Train_FD00'+str(number)+'.csv',mode='w', index=False)
return df
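
# --- Illustrative sketch (not part of the original module) -------------------
# Hedged usage example: build the RUL-labelled training frames for the four
# C-MAPSS subsets, assuming the CMAPSSData/ and inputData/ folders used above
# exist. Not executed by this module.
def load_all_training_data_sketch():
    return {number: load_Training_data(number) for number in range(1, 5)}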
|
[
"noreply@github.com"
] |
huster123.noreply@github.com
|
0c41eb9b825513fc3f392c454d24189c96027204
|
c3796ebebb42e55878556a53abad1a2e18fa4020
|
/test/functional/p2p_unrequested_blocks.py
|
f90431fdef055f22d5946052e24c023ed7e313de
|
[
"MIT"
] |
permissive
|
lycion/genex-project
|
a9e54d22138ca81339f76bba166aa9f366fa9dd8
|
fc103e93ee274dc57179d01c32b0235b29e364ca
|
refs/heads/master
| 2020-03-29T02:18:33.445995
| 2018-08-07T23:56:27
| 2018-08-07T23:56:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,842
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Genex Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c. Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import GenexTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(GenexTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
# Node2 will be used for non-whitelisted peers to test the interaction
# with nMinimumChainWork.
self.setup_nodes()
def run_test(self):
# Setup the p2p connections
# test_node connects to node0 (not whitelisted)
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
# min_work_node connects to node1 (whitelisted)
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
test_node.wait_for_verack()
min_work_node.wait_for_verack()
# 1. Have nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
min_work_node.send_message(msg_block(blocks_h2[1]))
for x in [test_node, min_work_node]:
x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
block_time += 1
block_h1f.solve()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
        # 4. Send another two blocks that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
block_time += 1
block_h2f.solve()
test_node.send_message(msg_block(block_h2f))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
block_h3.solve()
test_node.send_message(msg_block(block_h3))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
self.nodes[0].getblock(block_h3.hash)
# But this block should be accepted by node since it has more work.
self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
# 4c. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node (as long as it is not missing any headers)
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
test_node.wait_for_verack()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
block_289f.solve()
block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
block_290f.solve()
block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
# block_291 spends a coinbase below maturity!
block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
block_291.hashMerkleRoot = block_291.calc_merkle_root()
block_291.solve()
block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_message(headers_message)
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_message(msg_block(block_290f))
test_node.sync_with_ping()
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
# At this point we've sent an obviously-bogus block, wait for full processing
# without assuming whether we will be disconnected or not
try:
# Only wait a short while so the test doesn't take forever if we do get
# disconnected
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
test_node.wait_for_verack()
# We should have failed reorg and switched back to 290 (but have block 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
# 9. Connect node1 to node0 and ensure it is able to sync
connect_nodes(self.nodes[0], 1)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
|
[
"40029035+genexcore@users.noreply.github.com"
] |
40029035+genexcore@users.noreply.github.com
|
9620bd4a537d18089f1bbe6c5d247c907e171b40
|
486ea742752f63c65015fb982798a7fef43c8bae
|
/bot/account_manager/browser_automation_utils/webdrivermanager.py
|
848cc683628670552bb1be960c9d49720e99406d
|
[] |
no_license
|
rrosajp/BingRewardsBot
|
6f17ec00a624cc8406bef5f4c0fbf32ec340521f
|
56d4012dfda61ee7303a6942f87635bdb4e4c820
|
refs/heads/master
| 2021-06-01T10:16:33.546077
| 2016-08-14T08:29:50
| 2016-08-14T08:29:50
| 286,825,267
| 1
| 0
| null | 2020-08-11T18:59:02
| 2020-08-11T18:59:01
| null |
UTF-8
|
Python
| false
| false
| 2,327
|
py
|
import os
import browsertypes
_ABS_PATH_TO_THIS_DIR = os.path.realpath(os.path.dirname(__file__))
def _make_path_absolute(path):
'''
@param path
        A relative or absolute path to a webdriver executable file
@return
- If path is absolute or None, then return as is.
- Otherwise, if path is relative, then make it absolute by prepending
_ABS_PATH_TO_THIS_DIR to it.
'''
abs_path = path
if not path or os.path.isabs(path):
return path
return os.path.join(_ABS_PATH_TO_THIS_DIR, path)
def get_selenium_webdriver(browser_type, mobile = False):
'''
@param browser_type
Specifies the type of the target browser (e.g., Chrome, Firefox, etc.).
@param mobile
By default, mobile = False, so the webdriver will be returned as is,
with no extra config.
But, if mobile = True, then the webdriver will be configured to spoof
a mobile browser's user-agent.
@return
- The Selenium webdriver object for the target browser type
        - None if no such object can be constructed (because of a missing driver executable,
          missing config for the specified browser type, etc.)
'''
all_webdriver_config = browsertypes.WEBDRIVER_CONFIG
# If there is no webdriver config for the specified browser type, return None.
driver_config = all_webdriver_config.get(browser_type.value, None)
if not driver_config:
return None
# If the specified browser type is Firefox, then the executable path is not required
# since Selenium offers in-built support for the Firefox webdriver.
if browser_type.value == browsertypes.BrowserType.Firefox.value:
return driver_config[browsertypes.SELENIUM_WEBDRIVER]("", mobile).get_driver()
# For all other browser types, get the path to the webdriver executable that can run
# on the current platform, and create a Selenium webdriver object that uses this path.
# If there is no path for the current platform, then return None.
executable_path = _make_path_absolute(driver_config.get(browsertypes.CURRENT_PLATFORM, None))
return driver_config[browsertypes.SELENIUM_WEBDRIVER](executable_path, mobile).get_driver() if executable_path else None
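# Hedged usage sketch (BrowserType.Chrome below is illustrative; only BrowserType.Firefox is
# referenced above, so adjust to whatever browsertypes actually defines):
# driver = get_selenium_webdriver(browsertypes.BrowserType.Chrome, mobile=True)
# if driver is not None:
#     driver.get("https://www.bing.com")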
|
[
"sairambkrishnan@gmail.com"
] |
sairambkrishnan@gmail.com
|
a96bac8257857719d4e612c36e2dc88f720a5690
|
ad212b92beac17c4d061848c1dcd443d02a168c8
|
/monthly_challenge/202008/19_goat_latin.py
|
e866084a3ba4569f5efdb64fd6aa23d3416e864d
|
[] |
no_license
|
21eleven/leetcode-solutions
|
5ec97e4391c8ebaa77f4404a1155f3ef464953b3
|
35c91e6f5f5ed348186b8641e6fc49c825322d32
|
refs/heads/master
| 2023-03-03T10:22:41.726612
| 2021-02-13T21:02:13
| 2021-02-13T21:02:13
| 260,374,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,420
|
py
|
"""
A sentence S is given, composed of words separated by spaces. Each word consists of lowercase and uppercase letters only.
We would like to convert the sentence to "Goat Latin" (a made-up language similar to Pig Latin.)
The rules of Goat Latin are as follows:
If a word begins with a vowel (a, e, i, o, or u), append "ma" to the end of the word.
For example, the word 'apple' becomes 'applema'.
If a word begins with a consonant (i.e. not a vowel), remove the first letter and append it to the end, then add "ma".
For example, the word "goat" becomes "oatgma".
Add one letter 'a' to the end of each word per its word index in the sentence, starting with 1.
For example, the first word gets "a" added to the end, the second word gets "aa" added to the end and so on.
Return the final sentence representing the conversion from S to Goat Latin.
Example 1:
Input: "I speak Goat Latin"
Output: "Imaa peaksmaaa oatGmaaaa atinLmaaaaa"
"""
class Solution:
def toGoatLatin(self, S: str) -> str:
words = S.split()
vowels = set(["a", "e", "i", "o", "u"])
goat = []
idx = 2
a = 'a'
for w in words:
if w[0].lower() in vowels:
goat.append(f"{w}m{a*idx}")
else:
goat.append(f"{w[1:]}{w[0]}m{a*idx}")
idx += 1
return ' '.join(goat)
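# Quick check (sketch) using the example from the docstring above:
# print(Solution().toGoatLatin("I speak Goat Latin"))
# expected: "Imaa peaksmaaa oatGmaaaa atinLmaaaaa"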
|
[
"noahlidell@gmail.com"
] |
noahlidell@gmail.com
|
c7e654ae56b455cb14bc56c0dedaa2ea56c31dd7
|
f268c12874985a5028150d64cffdcb2470b32dd4
|
/build.py
|
b8f53a09af44c659a9e1befc5a913ad4322ec625
|
[] |
no_license
|
sagarambalam/fsdse-python-assignment-31
|
49b39bb6fc01caece448dda89fba17a45770d535
|
d75be72c5e06592c7c791b203bd2c02753886493
|
refs/heads/master
| 2021-01-21T23:04:24.852859
| 2017-06-23T05:13:43
| 2017-06-23T05:13:43
| 95,184,731
| 0
| 0
| null | 2017-06-23T05:02:25
| 2017-06-23T05:02:25
| null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
def solution(dic1):
list_a = sorted(dic1.values())
list_1 = list_a[0]
list_2 = list_a[1]
final_list=[]
    for i in range(len(list_1)):
        for j in range(len(list_2)):
            final_list.append(list_1[i]+list_2[j])
return final_list
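# Hedged example (assumes the dict holds exactly two values, each a list of numbers):
# solution({'a': [1, 2], 'b': [10, 20]}) -> [11, 21, 12, 22]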
|
[
"sagar.ambalam@gmail.com"
] |
sagar.ambalam@gmail.com
|
6e7000c867060f75c285361bd18e96b9365fac25
|
90ee22848a633676fe0da4a0a061e6d278d318fb
|
/Arrays/leet.py
|
7ce365448183389917db26fee33a9050a4ba9fdd
|
[] |
no_license
|
VinothRajasekar/practice
|
459d33dbe9a0b4f55485baee1389beba3d1abc04
|
c395f2fc9028c33e2d52c2c0c86f0c1077d29c8e
|
refs/heads/master
| 2020-04-18T19:54:11.577376
| 2019-11-07T03:04:12
| 2019-11-07T03:04:12
| 167,724,045
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
def hello():
print("hello world")
val = set()
val.add("1")
val.add("2")
val.remove("2")
print(val)
c = hello()
print(c)
|
[
"vinu.barro@gmail.com"
] |
vinu.barro@gmail.com
|
473edc044398c5b3eca2579faca5a7c518d2a277
|
10e5b1b2e42a2ff6ec998ed900071e8b5da2e74e
|
/array/0509_fibonacci_number/0509_fibonacci_number.py
|
5d815145881d946e9ff8a001d2a66e9ff2dcd44e
|
[] |
no_license
|
zdyxry/LeetCode
|
1f71092d687316de1901156b74fbc03588f0b0a5
|
b149d1e8a83b0dfc724bd9dc129a1cad407dd91f
|
refs/heads/master
| 2023-01-29T11:59:14.162531
| 2023-01-26T03:20:23
| 2023-01-26T03:20:23
| 178,754,208
| 6
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
# -*- coding: utf-8 -*-
class Solution(object):
def fib(self, N):
if N <= 1:
return N
return self.fib(N - 1) + self.fib(N - 2)
    def fib2(self, N):
        if N <= 1:
            return N
        array = [i for i in range(N + 1)]
        for i in range(2, N + 1):
            array[i] = array[i - 1] + array[i - 2]
        return array[-1]
def fib3(self, N):
if N <=1:
return N
left = 0
right =1
for i in range(2,N+1):
left, right = right, left + right
return right
def fib4(self, N):
array =[i for i in range(N+1)]
return self.fibola(array, N)
def fibola(self, array, N):
if N <= 1:
return N
array[N] = self.fibola(array, N-1) + array[N-2]
return array[N]
print(Solution().fib4(6))
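# Quick sanity check (sketch): all four methods should agree for small N,
# e.g. fib(10) == fib2(10) == fib3(10) == fib4(10) == 55.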
|
[
"zdyxry@gmail.com"
] |
zdyxry@gmail.com
|
1350a94623bca2d8ec71d8f2452ae5975888cf01
|
ff3b1f10d179ffae5b63a4a86c46c0d50858c122
|
/env/bin/django-admin.py
|
f8b5cecf0a7d827708532a25d8ff2790739def6f
|
[] |
no_license
|
Petrovich80/urok2
|
de9e50a5d670e0ed836d799b0f24129efcd69cfb
|
0f55ed9c3fc9067372d843f8e451e409ec601c78
|
refs/heads/master
| 2023-05-10T01:41:32.383023
| 2021-06-08T08:40:47
| 2021-06-08T08:40:47
| 375,018,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
#!/home/petrov0803/app2/env/bin/python3.8
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"petrov0803@gmail.com"
] |
petrov0803@gmail.com
|
b5915b18fbbba281d99d4d188ad3de150336d99e
|
dbaad22aa8aa6f0ebdeacfbe9588b281e4e2a106
|
/0423 Pandas/1-複習-pandas/Pandas1_csv_-plot1.py
|
58fb6438f52cd86d1347b1d57e7b87de2c826879
|
[
"MIT"
] |
permissive
|
ccrain78990s/Python-Exercise
|
b4ecec6a653afd90de855a64fbf587032705fa8f
|
a9d09d5f3484efc2b9d9a53b71307257a51be160
|
refs/heads/main
| 2023-07-18T08:31:39.557299
| 2021-09-06T15:26:19
| 2021-09-06T15:26:19
| 357,761,471
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,289
|
py
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
__author__ = "Chen"
# 0423 Exercise
"""
Data source:
公開發行公司股票發行概況統計表_New (statistics of share issuance by publicly held companies)
https://data.gov.tw/dataset/103533
Main columns (kept in the original Chinese because they are the CSV headers):
年月、上市家數、上市資本額_十億元、上市成長率、上市面值_十億元、上市市值_十億元、
上櫃家數、上櫃資本額_十億元、上櫃成長率、上櫃面值_十億元、上櫃市值_十億元、
未上市上櫃家數、未上市上櫃資本額_十億元、公告日期
i.e. year-month, listed and OTC company counts, capital, growth rate, par value and
market value (in NT$ billions), unlisted counts and capital, and the announcement date.
"""
import pandas as pd
df = pd.read_csv('每月_公開發行公司股票發行概況統計表.csv')
print(df.head())
print(type(df))
# Inspect the dataset information
print("===== Data info =====")
print(df.info())
# Data shape
print("===== Data shape =====")
print(df.shape)
print("===== Column names =====")
print(df.columns)
print("===== Index =====")
print(df.index)
print("===== Descriptive statistics =====")
print(df.describe())
print("**小練習***************************************************************")
print("========1.把 [年月,上市家數,上市資本額_十億元] 印出來========")
print(df[['年月','上市家數','上市資本額_十億元']])
#df2=df[['年月','上市家數','上市資本額_十億元']]
print("========2.找出 2019 年的資料========")
print(df[(df['年月']<=201999) & (df['年月']>=201900) ])
print("========3.找出 上市成長率 最高的年月========")
#print(df.上市成長率.max())
#print(df.上市成長率.idxmax())
max1=df.上市成長率.idxmax()
print(df[max1:max1+1])
print("========4.找出 2019 年的[上市成長率] 最高的月份========")
df2=df[(df['年月']<=201999) & (df['年月']>=201900) ]
max2=df2.上市成長率.idxmax()
print(df[max2:max2+1])
print("========5.找出 2018 年的資料========")
print(df[(df['年月']<=201899) & (df['年月']>=201800) ])
"""
Not finished yet; see the instructor's solution for reference.
print("========6. Compare the [上市資本額_十億元] of 2017 and 2018 (difference)========")
df3=df[(df['年月']<=201799) & (df['年月']>=201700) ]
df4=df[(df['年月']<=201899) & (df['年月']>=201800) ]
df5=df3[['年月','上市資本額_十億元']]
df6=df4[['年月','上市資本額_十億元']]
#df7=pd.concat([df5, df6], ignore_index=True)
df7=pd.merge(df5, df6,how='')
print(df7)
"""
|
[
"47476106+ccrain78990s@users.noreply.github.com"
] |
47476106+ccrain78990s@users.noreply.github.com
|
4a29bb1b1e74602ea2d8737fb776aeaa75db8ac5
|
0ccee825e9315b5490496be46706ddfe38b36dad
|
/paddleball.py
|
68363f367699f85e31782e7e94bfb0a034580ad9
|
[] |
no_license
|
Drewleks/python-for-kids
|
dd86bee3f110c779fbfa64aab982e2fec18fd02f
|
2896df13f511456ca1682b077ca82086082e7c27
|
refs/heads/master
| 2020-04-07T15:58:46.274090
| 2019-06-19T20:28:54
| 2019-06-19T20:28:54
| 158,509,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,421
|
py
|
from tkinter import *
import random
import time
class Ball:
def __init__(self, canvas, paddle, color):
self.canvas = canvas
self.paddle = paddle
self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
self.canvas.move(self.id, 245, 100)
starts = [-3, -2, -1, 1, 2, 3]
random.shuffle(starts)
self.x = starts[0]
self.y = -3
self.canvas_height = self.canvas.winfo_height()
self.canvas_width = self.canvas.winfo_width()
self.hit_bottom = False
def hit_paddle(self, pos):
paddle_pos = self.canvas.coords(self.paddle.id)
if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]:
return True
return False
def draw(self):
self.canvas.move(self.id, self.x, self.y)
pos = self.canvas.coords(self.id)
if pos[1] <= 0:
self.y = 3
if pos[3] >= self.canvas_height:
self.hit_bottom = True
if self.hit_paddle(pos) == True:
self.y = -3
if pos[0] <= 0:
self.x = 3
if pos[2] >= self.canvas_width:
self.x = -3
class Paddle:
def __init__(self, canvas, color):
self.canvas = canvas
self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
self.canvas.move(self.id, 200, 300)
self.x = 0
self.canvas_width = self.canvas.winfo_width()
self.canvas.bind_all('<KeyPress-Left>', self.turn_left)
self.canvas.bind_all('<KeyPress-Right>', self.turn_right)
def turn_left(self, evt):
self.x = -3
def turn_right(self, evt):
self.x = 3
def draw(self):
self.canvas.move(self.id, self.x, 0)
pos = self.canvas.coords(self.id)
if pos[0] <= 0:
self.x = 0
elif pos[2] >= self.canvas_width:
self.x = 0
tk = Tk()
tk.title("Игра")
tk.resizable(0, 0)
tk.wm_attributes("-topmost", 1)
canvas = Canvas(tk, width=500, height=400, bd=0, highlightthickness=0)
canvas.pack()
tk.update()
paddle = Paddle(canvas, 'Blue')
ball = Ball(canvas, paddle, 'red')
while 1:
if ball.hit_bottom == False:
ball.draw()
paddle.draw()
tk.update_idletasks()
tk.update()
time.sleep(0.01)
|
[
"noreply@github.com"
] |
Drewleks.noreply@github.com
|
2caa36497292851a2824c6d22461f476df9e29db
|
8d113f0a487dab55c733ff63da5bba9e20f69b69
|
/config/settings.py
|
bca76446204a4d8a3e3373d62517eb9c85a8dc70
|
[
"MIT"
] |
permissive
|
AktanKasymaliev/django-video-hosting
|
c33d341a7709a21869c44a15eb6a3b6e9a783f54
|
b201ed3421025da22b43405452bde617ea26a90f
|
refs/heads/main
| 2023-07-18T08:10:00.289537
| 2021-09-02T20:15:41
| 2021-09-02T20:15:41
| 387,730,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,039
|
py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("DJANGO_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = eval(os.environ.get("DJANGO_DEBUG"))
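# Note (illustrative, not part of the original project): eval() expects DJANGO_DEBUG to hold a
# Python literal such as "True" or "False" and will raise if the variable is unset; a safer
# equivalent pattern would be:
#   DEBUG = os.environ.get("DJANGO_DEBUG", "False") == "True"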
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
"videoApp",
"channels",
"django_cleanup",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
ASGI_APPLICATION = 'config.asgi.application'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
'hosts': [('127.0.0.1', 6379)],
}
}
}
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get("DATABASE_NAME"),
'USER': os.environ.get("DATABASE_USER"),
'PASSWORD': os.environ.get("DATABASE_PASSW"),
'HOST': os.environ.get("DATABASE_HOST"),
'PORT': os.environ.get("DATABASE_PORT"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_DIR = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [STATIC_DIR]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"aktan.kasymaliev@icloud.com"
] |
aktan.kasymaliev@icloud.com
|
7fd01682debf28b599f6a3f86aa54b2e11ffb67d
|
7f392d3d784a95e0ba99118070bdfcf508d225c2
|
/projectManager/sproject_manager/views.py
|
2f5ea3c271647f97cfbee1aef53180bcdf4ff0c1
|
[
"MIT"
] |
permissive
|
ufajardo/task-tracker
|
0656b86edbe98b5e5b3c8dfab43b14f785e43c97
|
5817c6d1b563a9a6d2c1112bcd0b73531a316a8c
|
refs/heads/master
| 2021-08-07T07:26:26.395368
| 2019-05-30T20:44:31
| 2019-05-30T20:44:31
| 186,176,679
| 0
| 0
| null | 2020-06-05T21:03:17
| 2019-05-11T20:04:53
|
Python
|
UTF-8
|
Python
| false
| false
| 3,036
|
py
|
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from rest_framework import generics, viewsets
from .serializers import ProjectSerializer, TaskSerializer
from .models import Project, Task
from .forms import TaskForm, ProjForm
# Create your views here.
def index(request):
project_list = Project.objects.all()
task_list = Task.objects.all()
context = {
'project_list': project_list,
'task_list': task_list,
}
return render(request, 'sproject_manager/index.html', context)
def create_task(request):
task = TaskForm(request.POST or None)
if task.is_valid():
task.save()
return redirect('project_manager:index')
context = {
'taskform': task
}
return render(request, 'sproject_manager/task-create.html', context)
def create_proj(request):
proj = ProjForm(request.POST or None)
if proj.is_valid():
proj.save()
return redirect('project_manager:index')
context = {
'proj': proj
}
return render(request, 'sproject_manager/proj-create.html', context)
def update_task(request, id):
task = get_object_or_404(Task, id=id)
taskform = TaskForm(request.POST or None, instance=task)
if request.POST:
if taskform.is_valid():
data = request.POST.copy()
status = data.get('status')
if status == "ACTIVE":
obj = taskform.save(commit=False)
obj.start_date = timezone.now()
obj.save()
return redirect('project_manager:index')
elif status == "CLOSED":
obj = taskform.save(commit=False)
obj.end_date = timezone.now()
obj.save()
return redirect('project_manager:index')
elif status == "PENDING":
taskform.save()
return redirect('project_manager:index')
context = {
'task': task,
'taskform': taskform
}
return render(request, 'sproject_manager/task-details.html', context)
def proj_details(request, id):
proj = get_object_or_404(Project, id=id)
task_list = Task.objects.filter(project=id)
context = {
'proj': proj,
'task_list': task_list,
}
return render(request, 'sproject_manager/proj-details.html', context)
class ProjectList(viewsets.ModelViewSet):
serializer_class = TaskSerializer
def get_queryset(self):
"""
        This view should return a list of all the tasks for
        the project as determined by the id portion of the URL.
"""
id = self.kwargs['id']
return Task.objects.filter(project=id)
def delete_task(request, id):
task = get_object_or_404(Task, id=id)
if request.POST:
Task.objects.filter(id=id).delete()
return redirect('project_manager:index')
context = {
'task': task,
}
return render(request, 'sproject_manager/task-delete.html', context)
|
[
"ulysses.fajardo214@gmail.com"
] |
ulysses.fajardo214@gmail.com
|
534e606f6d54fae55bbd0424ff0939fd209fb081
|
d75cb7e463247cb88196e90fe3e9c8ed4595775b
|
/SchoolJudgeSummary/delete_db.py
|
2681952472a2f7e0519ae19eeb3537421caf0d27
|
[] |
no_license
|
xscrat/SchoolJudgeSrc
|
7bcd993c5096df52de40117be8b52de893437e28
|
bca646254417232f15fe991ce75f92288590ecc5
|
refs/heads/master
| 2023-03-09T05:27:53.020785
| 2021-03-03T02:54:05
| 2021-03-03T02:54:05
| 343,808,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
# -- coding:utf-8 --
import requests
import globals
r = requests.post('http://%s:%s/delete_db/' % (globals.server_ip, globals.server_port))
if r.status_code == 200:
    print('Database dropped successfully')
else:
    print('Failed to drop the database')
|
[
"xjfscrat@gmail.com"
] |
xjfscrat@gmail.com
|
07f54965bf19a638d7de2870978fd0fccb3c3b59
|
635670997e25d7fd578701995fe0422dd5671528
|
/src/models_VAE/best_models/vae/encoder.py
|
48db109dad68d468093e78e6d9e4cbd35e10fc19
|
[] |
no_license
|
QuangNamVu/thesis
|
5126c0281d93e7a5c2c3a5784363d7f6c6baadfd
|
01a404de2dfb70f13f3e61a9a8f3b73c88d93502
|
refs/heads/master
| 2022-12-24T10:08:33.472729
| 2019-12-21T16:27:07
| 2019-12-21T16:27:07
| 174,741,015
| 0
| 3
| null | 2022-12-14T06:56:36
| 2019-03-09T20:09:03
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,892
|
py
|
import tensorflow as tf
from tensorpack import *
from tf_utils.ar_layers import *
from tf_utils.common import *
def encoder(self, x):
is_training = get_current_tower_context().is_training
# [M, T, D] => [M, T, f0]
fc_l1 = gaussian_dense(name='encode_l1', inputs=x, out_C=self.hps.f[0])
activate_l1 = tf.nn.elu(fc_l1)
out_l1 = tf.layers.dropout(inputs=activate_l1, rate=self.hps.dropout_rate, training=is_training)
# [M, T, f0] => [M, T, f1]
    fc_l2 = gaussian_dense(name='encode_l2', inputs=out_l1, out_C=self.hps.f[1])  # f[1], to match the [M, T, f1] comment and the reshape below
activate_l2 = tf.nn.tanh(fc_l2)
out_l2 = tf.layers.dropout(inputs=activate_l2, rate=self.hps.dropout_rate, training=is_training)
cell = tf.nn.rnn_cell.LSTMCell(num_units=self.hps.lstm_units, state_is_tuple=True)
# z: [M, T, o]
# h: [M, o]
# c: [M, o]
# [M, T, f1] => [M, T, o]
outputs, state = tf.nn.dynamic_rnn(cell, out_l2, sequence_length=[self.hps.T] * self.hps.M, dtype=tf.float32,
parallel_iterations=64)
# [M, T, o] => [M, T * o] => [M, n_z]
next_seq = tf.reshape(outputs, shape=[-1, self.hps.T * self.hps.lstm_units])
state_c = state.c
if self.hps.is_VDE:
# z_lst = tf.contrib.layers.fully_connected(inputs=next_seq, out_C=2 * self.hps.n_z)
z_lst = gaussian_dense(name='encode_fc1', inputs=next_seq, out_C=2 * self.hps.n_z)
else:
rs_l3 = tf.reshape(out_l2, [-1, self.hps.T * self.hps.f[1]])
z_lst = gaussian_dense(name='encode_fc2', inputs=rs_l3, out_C=2 * self.hps.n_z)
z_mu, z_std1 = split(z_lst, split_dim=1, split_sizes=[self.hps.n_z, self.hps.n_z])
z_std = 1e-10 + tf.nn.softplus(z_std1)
if self.hps.is_VAE:
noise = tf.random_normal(shape=tf.shape(z_mu), mean=0.0, stddev=1.0)
z = z_mu + noise * z_std
else:
z = z_mu
return z_mu, z_std, z, state_c
|
[
"you@example.com"
] |
you@example.com
|
aa2d46b8434f07ea0404ccb72de436955c2f5a6f
|
d260346cf55f36c0a97b2b68fa65cabbf6f16564
|
/witheppy/runner.py
|
e8cf1cb86ca49af4d13cd292146c2921fc0ec00c
|
[
"MIT"
] |
permissive
|
pyenergyplus/witheppy
|
d27ba26f761d270b8787166c2c71193fcddb374b
|
cf1870827d43a0068cb63b93634a3deaa862241c
|
refs/heads/master
| 2023-07-07T05:04:28.885150
| 2023-06-29T20:39:26
| 2023-06-29T20:39:26
| 153,138,024
| 10
| 2
|
MIT
| 2023-06-29T20:39:27
| 2018-10-15T15:32:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,468
|
py
|
# Copyright (c) 2019 Santosh Philip
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
"""helpers that enhance idf.run"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
def eplaunch_run(idf):
"""Run the E+ simulation exactly as EPLaunch would run a single file
    with its default settings
EPLaunch does the following by default:
- expands the Template objects
- puts the out output files in the same folder as the idf
- Uses idf filename as the output prefix
- Uses Capitals for the output suffix
Parameters
----------
idf : modelbuilder.IDF
the idf file. The idf file needs a weather file when opened
Returns
-------
NoneType
"""
fname = idf.idfname
weather = None # add weather args after bugfix for issue #236
if not weather:
wfile = idf.epw
else:
wfile = weather
folder = os.path.dirname(fname)
noext = os.path.splitext(os.path.basename(fname))[0]
idf.run(
weather=wfile,
expandobjects=True,
output_directory=folder,
output_prefix=noext,
output_suffix="C",
)
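# Hedged usage sketch (file names are illustrative; assumes eppy is installed and an IDD file
# has been set for the IDF class):
# from eppy.modeleditor import IDF
# IDF.setiddfile("Energy+.idd")
# idf = IDF("building.idf", "weather.epw")
# eplaunch_run(idf)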
|
[
"santosh_philip@yahoo.com"
] |
santosh_philip@yahoo.com
|
0a6f99febf158ce23215714249263dc107358160
|
2724412db1fc69b67b74a7d1c4ca4731962908d3
|
/tests/test_streams.py
|
fd7c66a0bdc95eac88148387db0573a5c90b4496
|
[
"BSD-3-Clause"
] |
permissive
|
Tijani-Dia/websockets
|
a981267685e681df822307bce4ec7eb781e9927d
|
ed9a7b446c7147f6f88dbeb1d86546ad754e435e
|
refs/heads/main
| 2023-08-23T13:10:16.030126
| 2021-10-08T20:18:24
| 2021-10-28T20:17:30
| 425,114,573
| 1
| 0
|
BSD-3-Clause
| 2021-11-05T23:56:39
| 2021-11-05T23:56:39
| null |
UTF-8
|
Python
| false
| false
| 6,055
|
py
|
from websockets.streams import StreamReader
from .utils import GeneratorTestCase
class StreamReaderTests(GeneratorTestCase):
def setUp(self):
self.reader = StreamReader()
def test_read_line(self):
self.reader.feed_data(b"spam\neggs\n")
gen = self.reader.read_line(32)
line = self.assertGeneratorReturns(gen)
self.assertEqual(line, b"spam\n")
gen = self.reader.read_line(32)
line = self.assertGeneratorReturns(gen)
self.assertEqual(line, b"eggs\n")
def test_read_line_need_more_data(self):
self.reader.feed_data(b"spa")
gen = self.reader.read_line(32)
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"m\neg")
line = self.assertGeneratorReturns(gen)
self.assertEqual(line, b"spam\n")
gen = self.reader.read_line(32)
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"gs\n")
line = self.assertGeneratorReturns(gen)
self.assertEqual(line, b"eggs\n")
def test_read_line_not_enough_data(self):
self.reader.feed_data(b"spa")
self.reader.feed_eof()
gen = self.reader.read_line(32)
with self.assertRaises(EOFError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"stream ends after 3 bytes, before end of line",
)
def test_read_line_too_long(self):
self.reader.feed_data(b"spam\neggs\n")
gen = self.reader.read_line(2)
with self.assertRaises(RuntimeError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"read 5 bytes, expected no more than 2 bytes",
)
def test_read_line_too_long_need_more_data(self):
self.reader.feed_data(b"spa")
gen = self.reader.read_line(2)
with self.assertRaises(RuntimeError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"read 3 bytes, expected no more than 2 bytes",
)
def test_read_exact(self):
self.reader.feed_data(b"spameggs")
gen = self.reader.read_exact(4)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spam")
gen = self.reader.read_exact(4)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"eggs")
def test_read_exact_need_more_data(self):
self.reader.feed_data(b"spa")
gen = self.reader.read_exact(4)
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"meg")
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spam")
gen = self.reader.read_exact(4)
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"gs")
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"eggs")
def test_read_exact_not_enough_data(self):
self.reader.feed_data(b"spa")
self.reader.feed_eof()
gen = self.reader.read_exact(4)
with self.assertRaises(EOFError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"stream ends after 3 bytes, expected 4 bytes",
)
def test_read_to_eof(self):
gen = self.reader.read_to_eof(32)
self.reader.feed_data(b"spam")
self.assertGeneratorRunning(gen)
self.reader.feed_eof()
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spam")
def test_read_to_eof_at_eof(self):
self.reader.feed_eof()
gen = self.reader.read_to_eof(32)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"")
def test_read_to_eof_too_long(self):
gen = self.reader.read_to_eof(2)
self.reader.feed_data(b"spam")
with self.assertRaises(RuntimeError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"read 4 bytes, expected no more than 2 bytes",
)
def test_at_eof_after_feed_data(self):
gen = self.reader.at_eof()
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"spam")
eof = self.assertGeneratorReturns(gen)
self.assertFalse(eof)
def test_at_eof_after_feed_eof(self):
gen = self.reader.at_eof()
self.assertGeneratorRunning(gen)
self.reader.feed_eof()
eof = self.assertGeneratorReturns(gen)
self.assertTrue(eof)
def test_feed_data_after_feed_data(self):
self.reader.feed_data(b"spam")
self.reader.feed_data(b"eggs")
gen = self.reader.read_exact(8)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spameggs")
gen = self.reader.at_eof()
self.assertGeneratorRunning(gen)
def test_feed_eof_after_feed_data(self):
self.reader.feed_data(b"spam")
self.reader.feed_eof()
gen = self.reader.read_exact(4)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spam")
gen = self.reader.at_eof()
eof = self.assertGeneratorReturns(gen)
self.assertTrue(eof)
def test_feed_data_after_feed_eof(self):
self.reader.feed_eof()
with self.assertRaises(EOFError) as raised:
self.reader.feed_data(b"spam")
self.assertEqual(
str(raised.exception),
"stream ended",
)
def test_feed_eof_after_feed_eof(self):
self.reader.feed_eof()
with self.assertRaises(EOFError) as raised:
self.reader.feed_eof()
self.assertEqual(
str(raised.exception),
"stream ended",
)
def test_discard(self):
gen = self.reader.read_to_eof(32)
self.reader.feed_data(b"spam")
self.reader.discard()
self.assertGeneratorRunning(gen)
self.reader.feed_eof()
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"")
|
[
"aymeric.augustin@m4x.org"
] |
aymeric.augustin@m4x.org
|
f09d1140e195dbf860ff3a408fbcd67171d8c3f0
|
834d7ea5179414f17d37f3bb58164b8f6ac11b24
|
/bin/MadGraph5_aMCatNLO/cards/production/2017/13TeV/bbDM_2HDMa/Multigrid_producer.py
|
465a227c5e5e12cba4fea295a8cb75925b14a0c1
|
[] |
no_license
|
diracyoon/genproductions
|
aa9ee41ac1dde9e14ed039496c3259328ece7073
|
a7740f4d28c7bfff4e71827dc807d57d974e29b7
|
refs/heads/master
| 2021-01-11T11:22:44.685243
| 2018-11-23T14:05:36
| 2018-11-23T14:05:36
| 72,719,084
| 1
| 0
| null | 2016-11-03T07:21:29
| 2016-11-03T07:21:29
| null |
UTF-8
|
Python
| false
| false
| 3,326
|
py
|
import sys
import os
verbose = False
old_cardsPath = 'Template'
prefix = 'bbDM_2HDMa_MH3_AA_MH4_XX_Mchi_YY'
os.system('mkdir '+ old_cardsPath+'/'+prefix)
os.system('cp '+old_cardsPath+'/'+'run_card.dat'+' '+ old_cardsPath+'/'+prefix+'/bbDM_2HDMa_MH3_AA_MH4_XX_Mchi_YY_run_card.dat' )
os.system('cp '+old_cardsPath+'/'+'proc_card.dat'+' '+ old_cardsPath+'/'+prefix+'/bbDM_2HDMa_MH3_AA_MH4_XX_Mchi_YY_proc_card.dat' )
os.system('cp '+old_cardsPath+'/'+'extramodels.dat'+' '+ old_cardsPath+'/'+prefix+'/bbDM_2HDMa_MH3_AA_MH4_XX_Mchi_YY_extramodels.dat' )
os.system('cp '+old_cardsPath+'/'+'customizecards.dat'+' '+ old_cardsPath+'/'+prefix+'/bbDM_2HDMa_MH3_AA_MH4_XX_Mchi_YY_customizecards.dat' )
os.system('cp '+old_cardsPath+'/'+'cuts.f'+' '+ old_cardsPath+'/'+prefix+'/bbDM_2HDMa_MH3_AA_MH4_XX_Mchi_YY_cuts.f' )
d_cardspath = os.path.join(old_cardsPath, prefix)
if verbose: print (d_cardspath)
d_run_card = os.path.join(d_cardspath,prefix+'_run_card.dat')
d_proc_card = os.path.join(d_cardspath,prefix+'_proc_card.dat')
d_extramodels = os.path.join(d_cardspath,prefix+'_extramodels.dat')
d_customizecards = os.path.join(d_cardspath,prefix+'_customizecards.dat')
d_cutcards = os.path.join(d_cardspath,prefix+'_cuts.f')
if verbose: print (d_run_card, d_proc_card, d_extramodels, d_customizecards)
def change_cards(d_cardname, cardname,MH3, MH4, Mchi):
f = open(d_cardname, 'r')
fout = open (cardname, 'w')
for line in f:
line = line.replace('AA', str(MH3))
line = line.replace('XX', str(MH4))
line = line.replace('YY', str(Mchi))
fout.write(line)
    fout.close()
    f.close()
    print ("Cardname",cardname)
def submitgrid(MH3, MH4, Mchi):
cardspath = d_cardspath.replace("AA",str(MH3)).replace("XX",str(MH4)).replace("YY",str(Mchi))
print ("cardpath",cardspath)
os.system('mkdir '+cardspath)
run_card = d_run_card.replace("AA",str(MH3)).replace("XX",str(MH4)).replace("YY",str(Mchi))
proc_card = d_proc_card.replace("AA",str(MH3)).replace("XX",str(MH4)).replace("YY",str(Mchi))
extramodels = d_extramodels.replace("AA",str(MH3)).replace("XX",str(MH4)).replace("YY",str(Mchi))
customizecards = d_customizecards.replace("AA",str(MH3)).replace("XX",str(MH4)).replace("YY",str(Mchi))
cutcards = d_cutcards.replace("AA",str(MH3)).replace("XX",str(MH4)).replace("YY",str(Mchi))
change_cards(d_run_card, run_card, MH3, MH4, Mchi)
change_cards(d_proc_card, proc_card,MH3 , MH4, Mchi)
change_cards(d_extramodels, extramodels, MH3, MH4, Mchi)
change_cards(d_customizecards, customizecards, MH3, MH4, Mchi)
change_cards(d_cutcards, cutcards, MH3,MH4, Mchi)
outdir = prefix.replace("AA",str(MH3)).replace("XX",str(MH4)).replace("YY",str(Mchi))
print ("output dir",outdir)
os.system('nohup ./submit_cmsconnect_gridpack_generation.sh '+ outdir +' '+ cardspath +' 4 "4 Gb" > mysubmit_'+outdir+'.debug 2>&1 &')
Mchi = [10]
MH3 = [600, 700, 800, 900, 1000, 1100, 1200]
for k in MH3:
for i in Mchi:
for j in range(100, 1200, 100):
if k > 1000 and j > 1000:
break
if j < k:
mh3 = k
mh4 = j
mchi = i
print("MH3=", mh3, "MH4= ", mh4)
submitgrid(mh3, mh4, mchi)
|
[
"tiwariPC@users.noreply.github.com"
] |
tiwariPC@users.noreply.github.com
|
209064d49d8c00a51c9b5b172250822afd75a2ac
|
c88ea9215c3f87c5a06e8f98ea57931144d4c022
|
/tests/strategy/filtering/test_thunk.py
|
9e1bdabd3eadf83a7e9079a3ccbb0d41ec00701e
|
[] |
no_license
|
dylandoamaral/certum
|
980e01dac2febfb1c9da7cb7be5301a5a6a91f3d
|
d8595cb4c3a101f9a78cddc1aa2c51cccac9095a
|
refs/heads/main
| 2023-03-13T23:00:25.733011
| 2021-03-14T10:06:14
| 2021-03-14T10:06:14
| 332,206,245
| 1
| 0
| null | 2021-03-14T10:06:15
| 2021-01-23T12:34:39
|
Python
|
UTF-8
|
Python
| false
| false
| 927
|
py
|
import pytest
from certum.error import Error
from certum.strategy.filtering.thunk import ThunkFiltering
@pytest.mark.parametrize(
"errors, result",
[
([Error(["x"], "my message")], 1),
([Error(["x", "b"], "my message")], 1),
([Error(["x"], "my message"), Error(["x", "b"], "my message")], 1),
(
[
Error(["x"], "my message"),
Error(["x", "b"], "my message"),
Error(["x", "b", "e"], "my message"),
],
1,
),
(
[
Error(["x"], "my message"),
Error(["x", "b"], "my message"),
Error(["y"], "my message"),
],
2,
),
],
)
def test_thunk(errors, result):
"""A thunk filtering strategy should keep the elements with the minimum depth."""
assert len(ThunkFiltering().filter(errors)) == result
|
[
"noreply@github.com"
] |
dylandoamaral.noreply@github.com
|
8dcbda8c71b778dae427d765d9b4621c3b6fc340
|
00af09f4ac6f98203910d86c3791c152184ace9a
|
/Lib/ctypes/test/test_arrays.py
|
ad3451f5cf9457610f9655188ac524bc3ba9104d
|
[] |
no_license
|
orf53975/CarnosOS
|
621d641df02d742a2452fde2f28a28c74b32695a
|
d06849064e4e9f30ef901ad8cf90960e1bec0805
|
refs/heads/master
| 2023-03-24T08:06:48.274566
| 2017-01-05T16:41:01
| 2017-01-05T16:41:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,024
|
py
|
import unittest
from ctypes import *
from ctypes.test import need_symbol
formats = "bBhHiIlLqQfd"
formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \
c_long, c_ulonglong, c_float, c_double, c_longdouble
class ArrayTestCase(unittest.TestCase):
def test_simple(self):
# create classes holding simple numeric types, and check
# various properties.
init = list(range(15, 25))
for fmt in formats:
alen = len(init)
int_array = ARRAY(fmt, alen)
ia = int_array(*init)
# length of instance ok?
self.assertEqual(len(ia), alen)
# slot values ok?
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, init)
# change the items
from operator import setitem
new_values = list(range(42, 42+alen))
[setitem(ia, n, new_values[n]) for n in range(alen)]
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, new_values)
# are the items initialized to 0?
ia = int_array()
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, [0] * len(init))
# Too many initializers should be caught
self.assertRaises(IndexError, int_array, *range(alen*2))
CharArray = ARRAY(c_char, 3)
ca = CharArray(b"a", b"b", b"c")
# Should this work? It doesn't:
# CharArray("abc")
self.assertRaises(TypeError, CharArray, "abc")
self.assertEqual(ca[0], b"a")
self.assertEqual(ca[1], b"b")
self.assertEqual(ca[2], b"c")
self.assertEqual(ca[-3], b"a")
self.assertEqual(ca[-2], b"b")
self.assertEqual(ca[-1], b"c")
self.assertEqual(len(ca), 3)
# cannot delete items
from operator import delitem
self.assertRaises(TypeError, delitem, ca, 0)
def test_numeric_arrays(self):
alen = 5
numarray = ARRAY(c_int, alen)
na = numarray()
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0] * alen)
na = numarray(*[c_int()] * alen)
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0]*alen)
na = numarray(1, 2, 3, 4, 5)
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
na = numarray(*map(c_int, (1, 2, 3, 4, 5)))
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
def test_classcache(self):
self.assertIsNot(ARRAY(c_int, 3), ARRAY(c_int, 4))
self.assertIs(ARRAY(c_int, 3), ARRAY(c_int, 3))
def test_from_address(self):
# Failed with 0.9.8, reported by JUrner
p = create_string_buffer(b"foo")
sz = (c_char * 3).from_address(addressof(p))
self.assertEqual(sz[:], b"foo")
self.assertEqual(sz[::], b"foo")
self.assertEqual(sz[::-1], b"oof")
self.assertEqual(sz[::3], b"f")
self.assertEqual(sz[1:4:2], b"o")
self.assertEqual(sz.value, b"foo")
@need_symbol('create_unicode_buffer')
def test_from_addressW(self):
p = create_unicode_buffer("foo")
sz = (c_wchar * 3).from_address(addressof(p))
self.assertEqual(sz[:], "foo")
self.assertEqual(sz[::], "foo")
self.assertEqual(sz[::-1], "oof")
self.assertEqual(sz[::3], "f")
self.assertEqual(sz[1:4:2], "o")
self.assertEqual(sz.value, "foo")
def test_cache(self):
# Array types are cached internally in the _ctypes extension,
# in a WeakValueDictionary. Make sure the array type is
# removed from the cache when the itemtype goes away. This
# test will not fail, but will show a leak in the testsuite.
# Create a new type:
class my_int(c_int):
pass
# Create a new array type based on it:
t1 = my_int * 1
t2 = my_int * 1
self.assertIs(t1, t2)
def test_subclass(self):
class T(Array):
_type_ = c_int
_length_ = 13
class U(T):
pass
class V(U):
pass
class W(V):
pass
class X(T):
_type_ = c_short
class Y(T):
_length_ = 187
for c in [T, U, V, W]:
self.assertEqual(c._type_, c_int)
self.assertEqual(c._length_, 13)
self.assertEqual(c()._type_, c_int)
self.assertEqual(c()._length_, 13)
self.assertEqual(X._type_, c_short)
self.assertEqual(X._length_, 13)
self.assertEqual(X()._type_, c_short)
self.assertEqual(X()._length_, 13)
self.assertEqual(Y._type_, c_int)
self.assertEqual(Y._length_, 187)
self.assertEqual(Y()._type_, c_int)
self.assertEqual(Y()._length_, 187)
def test_bad_subclass(self):
import sys
with self.assertRaises(AttributeError):
class T(Array):
pass
with self.assertRaises(AttributeError):
class T(Array):
_type_ = c_int
with self.assertRaises(AttributeError):
class T(Array):
_length_ = 13
with self.assertRaises(OverflowError):
class T(Array):
_type_ = c_int
_length_ = sys.maxsize * 2
with self.assertRaises(AttributeError):
class T(Array):
_type_ = c_int
_length_ = 1.87
if __name__ == '__main__':
unittest.main()
|
[
"Weldon@athletech.org"
] |
Weldon@athletech.org
|
c3bb7b99c92804951ac54fb144661450664dff9e
|
5312c056c32595007ca3f35c63088cfb76f2d408
|
/convolutions/sequences.py
|
a859c50dc06ce90146df6163bbc3a1035b974eae
|
[] |
no_license
|
namisama269/COMP3121-3821
|
9a46b3b59fd0b4a8017a0085fabf3e2f5acca801
|
3fd5b94a7a591ea8ce5209ac2af90119f6dc69e8
|
refs/heads/main
| 2023-08-23T04:25:40.814329
| 2021-11-03T04:14:36
| 2021-11-03T04:14:36
| 408,246,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 709
|
py
|
"""
Operations on 2 sequences A and B
A = [a0,a1,...,an], B = [b0,b1,...,bm]
"""
def add(A, B):
    # Stub in the original; element-wise addition of equal-length sequences is the assumed intent.
    if len(A) != len(B):
        raise ValueError("A and B must have same length")
    return [A[i] + B[i] for i in range(len(A))]
def pmul(A, B):
    # Stub in the original; pointwise (element-wise) multiplication is the assumed intent.
    if len(A) != len(B):
        raise ValueError("A and B must have same length")
    return [A[i] * B[i] for i in range(len(A))]
def dot(A, B):
    if len(A) != len(B):
        raise ValueError("A and B must have same length")
    # The original returned the list of pointwise products; a dot product is their sum.
    return sum(A[i]*B[i] for i in range(len(A)))
def concat(A, B):
    # Stub in the original; plain sequence concatenation is the assumed intent.
    return list(A) + list(B)
def convolution(A, B):
"""
Brute force linear convolution of 2 sequences
"""
n = len(A)
m = len(B)
conv = [0] * (n+m-1)
for i in range(n):
for k in range(m):
conv[i+k] += A[i]*B[k]
return conv
def padzero(A, newLen):
    # Pads A with zeros in place until it reaches newLen; returns A so calls can be chained.
    while len(A) < newLen:
        A.append(0)
    return A
if __name__ == "__main__":
A = [1,1]
B = [1,2,1]
print(convolution(A,B))
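    # A quick sanity check: convolving coefficient sequences is the same as multiplying
    # the corresponding polynomials, so [1,1] * [1,2,1] gives the coefficients of (1+x)^3.
    # (1 + x) * (1 + x)^2 = 1 + 3x + 3x^2 + x^3
    assert convolution([1, 1], [1, 2, 1]) == [1, 3, 3, 1]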
|
[
"namwarlockclash@gmail.com"
] |
namwarlockclash@gmail.com
|
7023c1ee2710feaa888de4d2ad13b09a40472335
|
7209bab118b61d6ae36cca7f725226518bb9c0ec
|
/Drivingschool/urls.py
|
53f7eee7661676e0ac01be06f1349a4aae4bdb20
|
[] |
no_license
|
saaid960/driving1
|
a965e117d7b41a038bcf8f73c1b1e3f9dc04e1f2
|
63c1327ba64cb5e0ad1fafb02de86b641e599c07
|
refs/heads/master
| 2023-04-12T04:20:43.062582
| 2021-04-25T06:03:09
| 2021-04-25T06:03:09
| 344,208,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,420
|
py
|
"""Drivingschool URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from home import views
from home.views import home, registerpage, loginpage, logoutuser, video, index, addTodo, deleteTodo
urlpatterns = [
path('admin/', admin.site.urls),
path("Apply/", views.registerpage, name="Apply"),
path('', views.home, name="home"),
path('todo/', views.index, name="index"),
path('add/', addTodo),
path('deletetodo/<int:i_id>/', deleteTodo),
path('register/', registerpage, name="register"),
path('login/', views.loginpage, name="login"),
path('logout/', views.logoutuser, name="logout"),
path('video/', views.video, name="video"),
]
urlpatterns += staticfiles_urlpatterns()
|
[
"saaid7600@gmail.com"
] |
saaid7600@gmail.com
|
47f8ac38fd7d7de07b5f87d9f323644cd57cd32f
|
385f35db47c36e1b8c2927c80cc49376f6dbf42c
|
/chatbot(Bahdanau attention) small dataset/data1.py
|
3f9f94b710709f388694420aec29c40c2ce44187
|
[] |
no_license
|
UraliRana/Chatbot
|
fc0c3f74348158a4b99234d76951dba03b565d57
|
a300e3dd4b1901c74e98f1685db005239ba22191
|
refs/heads/master
| 2023-01-20T18:11:13.215351
| 2023-01-18T03:57:04
| 2023-01-18T03:57:04
| 272,152,672
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,037
|
py
|
import os
import random
#data_dir ="data/custom"
import pandas as pd
import csv
import nltk
from nltk import word_tokenize
from nltk.corpus import wordnet as wn
df=pd.read_csv("ch1.txt.csv")
data_src=[]
data_dest=[]
data_test_ques=[]
data_test_ans=[]
data_train_ques=[]
data_train_ans=[]
data_train_ans1=[]
data_train_ques1=[]
data_test_ques1=[]
data_test_ans1=[]
data_test_ques2=[]
data_test_ans2=[]
data_train_ques2=[]
data_train_ans2=[]
# Tokens that must never be swapped for synonyms during augmentation
# (collected from the repeated inline checks further down).
STOP_TOKENS = {'ssss', 'r', 'i', 'o', 't', 'y', 'af', 's', 'd', 'c', 'ti', 'u', 'da',
               'te', 'si', 'la', 'le', 'el', 'al', 'se', 'e', 'n', 'es'}
a = len(df['source'])
def load_data(string="",robot="",start="", end=""):
for i in range(a):
if df.loc[i].at['source']=='human':
string=string+df.loc[i].at['text']
if robot!="":
data_dest.append(start + robot + end)
robot=""
#print(type(string))
else:
if string!="":
data_src.append(string)
string=""
if df.loc[i].at['source']=='robot':
robot=robot+df.loc[i].at['text']
if robot!="":
data_dest.append(start + robot + end)
robot=""
if string!="":
data_src.append(string)
string=""
print(len(data_src))
#print("human:",data_src)
#print("robot:",data_dest)
print(len(data_dest))
def input1(input1=True):
return data_src
def output1(output1=True):
return data_dest
def word_syns(data_dest1,data_src3):
for idx in range(len(data_dest1)):
answer=data_dest1[idx]
n_answer1=answer
#sentences=[]
words = word_tokenize(answer)
taged_tokens=nltk.pos_tag(words)
#print("Pos tag of word answer:",taged_tokens1)
sentence=data_src3[idx]
for word,tag in taged_tokens:
#print(word)
synonymList ={}
syno=[]
            if word not in STOP_TOKENS:
if tag=='NN'or tag== 'VBN':
wordNetSynset = wn.synsets(word)
if len(wordNetSynset) != 0:
#print("word:",word)
#print("Pos tag of word:",tag)
for synSet in wordNetSynset:
for synWords in synSet.lemma_names():
if synWords not in syno:
syno.append(synWords)
synonymList[word]=syno
#print("list of syno:",syno)
#print("list of syno:",synonymList)
ns='/'.join(syno)
#print(ns)
n_answer1=n_answer1.replace(word,ns)
#print("sentence:",sentence)
#print("augmented_sentence:",n_sentence)
for key in synonymList:
for i in range(len(synonymList[key])):
n_answer=answer
n_answer = n_answer.replace(word,synonymList[word][i])
#sentences.append(n_sentence)
if n_answer not in data_train_ans1:
data_train_ans1.append(n_answer)
data_train_ques1.append(sentence)
else:
if answer not in data_train_ans1:
data_train_ans1.append(answer)
data_train_ques1.append(sentence)
#print(sentence)
#print("lis of sentence:",data_train_ques2)
#print("lis of sentence:",data_train_ans2)
#print(n_sentence)
#data_train_ques1.append(n_sentence)
#print("new list:",data_train_ans1)
print(len(data_train_ques1))
print(len(data_train_ans1))
return data_train_ques1,data_train_ans1
def word_syn1(data_train_ques1,data_train_ans1):
for idx in range(len(data_train_ques1)):
#print(idx)
answer1=data_train_ans1[idx]
question=data_train_ques1[idx]
words1 = word_tokenize(question)
taged_tokens=nltk.pos_tag(words1)
for word,tag in taged_tokens:
#print(word)
synonymList1 ={}
syno1=[]
            if word not in STOP_TOKENS:
if tag=='NN'or tag== 'VBN':
wordNetSynset = wn.synsets(word)
if len(wordNetSynset) != 0:
#print("word:",word)
#print("Pos tag of word:",tag)
for synSet in wordNetSynset:
for synWords in synSet.lemma_names():
if synWords not in syno1:
syno1.append(synWords)
synonymList1[word]=syno1
#print("sentence:",syno1)
#print("augmented_sentence:",n_sentence)
for key in synonymList1:
for i in range(len(synonymList1[key])):
n_sentence=question
n_sentence=n_sentence.replace(word,synonymList1[word][i])
#if question in data_train_ques3:
data_train_ques2.append(n_sentence)
data_train_ans2.append(answer1)
#if question not in data_train_ques3:
else:
if question not in data_train_ques2:
data_train_ques2.append(question)
data_train_ans2.append(answer1)
#print(data_train_ques3)
print(len(data_train_ques2))
print(len(data_train_ans2))
return data_train_ques2,data_train_ans2
def prepare_seq2seq_files(data_train_ques2,data_train_ans2,TESTSET_SIZE =50000):
    # choose TESTSET_SIZE items at random to put into the test set
test_ids = random.sample([i for i in range(len(data_train_ques2))],TESTSET_SIZE)
for i in range(len(data_train_ques2)):
if i in test_ids:
data_test_ques.append(data_train_ques2[i]+'\n')
data_test_ans.append(data_train_ans2[i]+ '\n' )
else:
data_train_ques.append(data_train_ques2[i]+'\n')
data_train_ans.append(data_train_ans2[i]+ '\n' )
#if i%100== 0:
# print ("written lines",i)
def train_encoder(input1=True):
return data_train_ques
def train_decoder(output1=True):
return data_train_ans
def test_encoder(input1=True):
return data_test_ques
def test_decoder(output1=True):
return data_test_ans
def word_syns2(data_dest3,data_src5):
for idx in range(len(data_dest3)):
answer2=data_dest3[idx]
n_answer2=answer2
#sentences=[]
words3 = word_tokenize(answer2)
taged_tokens3=nltk.pos_tag(words3)
#print("Pos tag of word answer:",taged_tokens1)
sentence2=data_src5[idx]
for word,tag in taged_tokens3:
#print(word)
synonymList3 ={}
syno3=[]
            if word not in STOP_TOKENS:
if tag=='NN'or tag== 'VBN':
wordNetSynset = wn.synsets(word)
if len(wordNetSynset) != 0:
#print("word:",word)
#print("Pos tag of word:",tag)
for synSet in wordNetSynset:
for synWords in synSet.lemma_names():
if synWords not in syno3:
syno3.append(synWords)
synonymList3[word]=syno3
#print("list of syno:",syno)
#print("list of syno:",synonymList)
ns3='/'.join(syno3)
#print(ns3)
n_answer2=n_answer2.replace(word,ns3)
#print("sentence:",sentence)
#print("augmented_sentence:",n_sentence)
for key in synonymList3:
for i in range(len(synonymList3[key])):
n_answer3=answer2
n_answer3 = n_answer3.replace(word,synonymList3[word][i])
#sentences.append(n_sentence)
if n_answer3 not in data_test_ans1:
data_test_ans1.append(n_answer3)
data_test_ques1.append(sentence2)
else:
if answer2 not in data_test_ans1:
data_test_ans1.append(answer2)
data_test_ques1.append(sentence2)
#print(sentence)
#print("lis of sentence:",data_train_ques2)
#print("lis of sentence:",data_train_ans2)
#print(n_sentence)
#data_train_ques1.append(n_sentence)
#print("new list:",data_train_ans1)
print(len(data_test_ques1))
print(len(data_test_ans1))
return data_test_ques1,data_test_ans1
def word_syn3(data_test_ques1,data_test_ans1):
for idx in range(len(data_test_ques1)):
#print(idx)
answer2=data_test_ans1[idx]
question1=data_test_ques1[idx]
words1 = word_tokenize(question1)
taged_tokens1=nltk.pos_tag(words1)
for word,tag in taged_tokens1:
#print(word)
synonymList5 ={}
syno5=[]
            if word not in STOP_TOKENS:
if tag=='NN'or tag== 'VBN':
wordNetSynset = wn.synsets(word)
if len(wordNetSynset) != 0:
#print("word:",word)
#print("Pos tag of word:",tag)
for synSet in wordNetSynset:
for synWords in synSet.lemma_names():
if synWords not in syno5:
syno5.append(synWords)
synonymList5[word]=syno5
#print("sentence:",syno5)
#print("augmented_sentence:",n_sentence)
for key in synonymList5:
for i in range(len(synonymList5[key])):
n_sentence1=question1
n_sentence1=n_sentence1.replace(word,synonymList5[word][i])
#if question in data_train_ques3:
data_test_ques2.append(n_sentence1)
data_test_ans2.append(answer2)
#if question not in data_train_ques3:
else:
if question1 not in data_test_ques2:
data_test_ques2.append(question1)
data_test_ans2.append(answer2)
#print(data_train_ques3)
print(len(data_test_ques2))
print(len(data_test_ans2))
return data_test_ques2,data_test_ans2
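# Sketch (editor's note): the module above only defines the augmentation pipeline; the call
# order below is inferred from the function signatures, and the small TESTSET_SIZE is
# illustrative rather than the value used by the author.
if __name__ == "__main__":
    load_data()                               # fill data_src / data_dest from ch1.txt.csv
    q1, a1 = word_syns(output1(), input1())   # synonym-augment the answers
    q2, a2 = word_syn1(q1, a1)                # synonym-augment the questions
    prepare_seq2seq_files(q2, a2, TESTSET_SIZE=50)
    print(len(train_encoder()), len(train_decoder()),
          len(test_encoder()), len(test_decoder()))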
|
[
"noreply@github.com"
] |
UraliRana.noreply@github.com
|
5fdba8f4725b9411b49d8f846a12dd1a5db0e84d
|
37c5ba847d3e466a916d58f8707c0e04704f9144
|
/ariac_behaviors/ariac_flexbe_behaviors/src/ariac_flexbe_behaviors/get_order_sm.py
|
12309fd2f1b750050ce3f829501d711014c49cdb
|
[] |
no_license
|
JustPtrck/roboticsP4
|
32bcf2076c11b51fecc19b8ea4dc21defe209f89
|
5904b11d6981fe2a19aed34cc235ef8f49fbcbc1
|
refs/heads/master
| 2022-09-01T06:37:09.519532
| 2020-05-19T10:50:04
| 2020-05-19T10:50:04
| 261,726,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,454
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from ariac_flexbe_states.start_assignment_state import StartAssignment
from ariac_logistics_flexbe_states.get_part_from_products_state import GetPartFromProductsState
from ariac_flexbe_behaviors.transportparttoagv_sm import TransportPartToAgvSM
from ariac_support_flexbe_states.add_numeric_state import AddNumericState
from ariac_support_flexbe_states.equal_state import EqualState
from ariac_flexbe_behaviors.notify_shipment_ready_sm import notify_shipment_readySM
from ariac_flexbe_states.messageLogger import MessageLogger
from ariac_logistics_flexbe_states.get_products_from_shipment_state import GetProductsFromShipmentState
from ariac_logistics_flexbe_states.get_order_state import GetOrderState
from ariac_support_flexbe_states.replace_state import ReplaceState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Sun Apr 19 2020
@author: Gerard Harkema
'''
class get_orderSM(Behavior):
'''
Tests the starting and stopping of the assignment
'''
def __init__(self):
super(get_orderSM, self).__init__()
self.name = 'get_order'
# parameters of this behavior
# references to used behaviors
self.add_behavior(TransportPartToAgvSM, 'TransportPartToAgv')
self.add_behavior(notify_shipment_readySM, 'notify_shipment_ready')
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:667 y:540, x:729 y:205
_state_machine = OperatableStateMachine(outcomes=['finished', 'fail'])
_state_machine.userdata.shipments = []
_state_machine.userdata.part_type = ''
_state_machine.userdata.material_locations = []
_state_machine.userdata.number_of_shipments = 0
_state_machine.userdata.order_id = ''
_state_machine.userdata.products = []
_state_machine.userdata.number_of_products = 0
_state_machine.userdata.agv_id = ''
_state_machine.userdata.shipment_type = ''
_state_machine.userdata.pose = []
_state_machine.userdata.product_index = 0
_state_machine.userdata.shipment_index = 1
_state_machine.userdata.add_one = 1
_state_machine.userdata.part_pose = []
_state_machine.userdata.zero = 0
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:30 y:40
OperatableStateMachine.add('Start',
StartAssignment(),
transitions={'continue': 'GetOrder'},
autonomy={'continue': Autonomy.Off})
# x:905 y:34
OperatableStateMachine.add('GetProductPose',
GetPartFromProductsState(),
transitions={'continue': 'message_3', 'invalid_index': 'fail'},
autonomy={'continue': Autonomy.Off, 'invalid_index': Autonomy.Off},
remapping={'products': 'products', 'index': 'product_index', 'type': 'part_type', 'pose': 'part_pose'})
# x:1091 y:123
OperatableStateMachine.add('TransportPartToAgv',
self.use_behavior(TransportPartToAgvSM, 'TransportPartToAgv'),
transitions={'finished': 'IncrementProductIndex', 'failed': 'fail'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'agv_id': 'agv_id', 'part_type': 'part_type', 'part_pose': 'part_pose'})
# x:1095 y:229
OperatableStateMachine.add('IncrementProductIndex',
AddNumericState(),
transitions={'done': 'ShipmentReady?'},
autonomy={'done': Autonomy.Off},
remapping={'value_a': 'product_index', 'value_b': 'add_one', 'result': 'product_index'})
# x:1092 y:486
OperatableStateMachine.add('IncrementShipmentIndex',
AddNumericState(),
transitions={'done': 'notify_shipment_ready'},
autonomy={'done': Autonomy.Off},
remapping={'value_a': 'shipment_index', 'value_b': 'add_one', 'result': 'shipment_index'})
# x:1094 y:329
OperatableStateMachine.add('ShipmentReady?',
EqualState(),
transitions={'true': 'ResetProductIndex', 'false': 'GetProductPose'},
autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
remapping={'value_a': 'product_index', 'value_b': 'number_of_products'})
# x:474 y:461
OperatableStateMachine.add('OrderReady?',
EqualState(),
transitions={'true': 'ResetShiptIndex', 'false': 'GetProductsShipment'},
autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
remapping={'value_a': 'shipment_index', 'value_b': 'number_of_shipments'})
# x:857 y:483
OperatableStateMachine.add('notify_shipment_ready',
self.use_behavior(notify_shipment_readySM, 'notify_shipment_ready'),
transitions={'finished': 'OrderReady?', 'failed': 'fail'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:402 y:33
OperatableStateMachine.add('message',
MessageLogger(),
transitions={'continue': 'GetProductsShipment'},
autonomy={'continue': Autonomy.Off},
remapping={'message': 'order_id'})
# x:766 y:40
OperatableStateMachine.add('message_2',
MessageLogger(),
transitions={'continue': 'GetProductPose'},
autonomy={'continue': Autonomy.Off},
remapping={'message': 'shipment_type'})
# x:1090 y:36
OperatableStateMachine.add('message_3',
MessageLogger(),
transitions={'continue': 'TransportPartToAgv'},
autonomy={'continue': Autonomy.Off},
remapping={'message': 'part_type'})
# x:560 y:34
OperatableStateMachine.add('GetProductsShipment',
GetProductsFromShipmentState(),
transitions={'continue': 'message_2', 'invalid_index': 'fail'},
autonomy={'continue': Autonomy.Off, 'invalid_index': Autonomy.Off},
remapping={'shipments': 'shipments', 'index': 'shipment_index', 'shipment_type': 'shipment_type', 'agv_id': 'agv_id', 'products': 'products', 'number_of_products': 'number_of_products'})
# x:203 y:32
OperatableStateMachine.add('GetOrder',
GetOrderState(),
transitions={'continue': 'message'},
autonomy={'continue': Autonomy.Off},
remapping={'order_id': 'order_id', 'shipments': 'shipments', 'number_of_shipments': 'number_of_shipments'})
# x:1090 y:407
OperatableStateMachine.add('ResetProductIndex',
ReplaceState(),
transitions={'done': 'IncrementShipmentIndex'},
autonomy={'done': Autonomy.Off},
remapping={'value': 'zero', 'result': 'product_index'})
# x:338 y:318
OperatableStateMachine.add('ResetShiptIndex',
ReplaceState(),
transitions={'done': 'GetOrder'},
autonomy={'done': Autonomy.Off},
remapping={'value': 'zero', 'result': 'shipment_index'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
|
[
"patrickverwimp@gmail.com"
] |
patrickverwimp@gmail.com
|
9aba1899299285447301d3bd0bbab2728dbffe70
|
2f5e366b7dc799e77ea7a4200e7a491f78a538b9
|
/src/Writer.py
|
84833457bb852ecf7460d82bd22e39d03afacd74
|
[] |
no_license
|
dspray95/swarm-ai-python
|
111e0555b8ec2bd823980691b4e39f5db453da26
|
156b072689badb2542b4cc8032b3733a7d33bc53
|
refs/heads/master
| 2020-03-11T19:02:26.953261
| 2018-04-21T14:27:54
| 2018-04-21T14:27:54
| 130,195,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
class Writer:
def __init__(self, path):
self.path = path + "\\data"
    def write_file(self, aggression, distribution, dead, productivity, time):
        # The original shadowed the 'aggression' list with its loop variable and never closed
        # the file; iterate by index and use a context manager instead.
        with open(self.path + "swarmdata.csv", "w") as csv_file:
            csv_file.write("aggression,distribution,dead,productivity,time\n")
            for index in range(len(aggression)):
                row = ",".join(str(value) for value in (aggression[index], distribution[index],
                                                        dead[index], productivity[index], time[index]))
                csv_file.write(row.replace("\n", "") + "\n")
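# Usage sketch (editor's note): the base path and the column values are purely illustrative.
if __name__ == "__main__":
    writer = Writer("C:\\swarm-output")   # hypothetical output directory
    writer.write_file(aggression=["0.3", "0.7"],
                      distribution=["0.5", "0.4"],
                      dead=["2", "1"],
                      productivity=["0.8", "0.6"],
                      time=["10.0", "12.5"])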
|
[
"dspray95@gmail.com"
] |
dspray95@gmail.com
|
b77ad5adbfe3bdc3c5a57d4185371cc854289ac2
|
a07124716edd86159dff277010132ba9c5cd0f75
|
/Text-Based Browser/task/tests.py
|
3ca883bce21f71b1767281f280b941e8d1d999d1
|
[
"MIT"
] |
permissive
|
drtierney/hyperskill-TextBasedBrowser-Python
|
27a15fa0bd44a927a9552d4815a0b4ab69375710
|
a4f2ac60643559e580b75a02078a679e5f1f0a2c
|
refs/heads/main
| 2023-08-28T04:24:51.693648
| 2021-10-25T17:34:58
| 2021-10-25T17:34:58
| 415,304,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,637
|
py
|
from hstest.stage_test import StageTest
from hstest.test_case import TestCase
from hstest.check_result import CheckResult
import os
import shutil
import sys
if sys.platform.startswith("win"):
import _locale
# pylint: disable=protected-access
_locale._getdefaultlocale = (lambda *args: ['en_US', 'utf8'])
CheckResult.correct = lambda: CheckResult(True, '')
CheckResult.wrong = lambda feedback: CheckResult(False, feedback)
class TextBasedBrowserTest(StageTest):
def generate(self):
return [
TestCase(
stdin='bloomberg.com\nbloomberg\nexit',
attach=('Bloomberg', 'New York Times', 'bloomberg'),
args=['tb_tabs']
),
TestCase(
stdin='nytimes.com\nnytimes\nexit',
attach=('New York Times', 'Bloomberg', 'nytimes'),
args=['tb_tabs']
),
TestCase(
stdin='nytimescom\nexit',
args=['tb_tabs']
),
TestCase(
stdin='blooomberg.com\nexit',
args=['tb_tabs']
),
TestCase(
stdin='blooomberg.com\nnytimes.com\nexit',
attach=(None, 'New York Times', 'Bloomberg', 'nytimes'),
args=['tb_tabs']
),
TestCase(
stdin='nytimescom\nbloomberg.com\nexit',
attach=(None, 'Bloomberg', 'New York Times', 'bloomberg'),
args=['tb_tabs']
),
TestCase(
stdin='bloomberg.com\nnytimes.com\nback\nexit',
attach={
'This New Liquid Is Magnetic, and Mesmerizing': (1, 'New York Times'),
'The Space Race: From Apollo 11 to Elon Musk': (2, 'Bloomberg')
},
args=['tb_tabs']
),
TestCase(
stdin='nytimes.com\nbloomberg.com\nback\nexit',
attach={
'This New Liquid Is Magnetic, and Mesmerizing': (2, 'New York Times'),
'The Space Race: From Apollo 11 to Elon Musk': (1, 'Bloomberg')
},
args=['tb_tabs']
),
]
def _check_files(self, path_for_tabs: str, right_word: str) -> int:
"""
Helper which checks that browser saves visited url in files and
provides access to them.
:param path_for_tabs: directory which must contain saved tabs
:param right_word: Word-marker which must be in right tab
        :return: 1 if right_word is present in a saved tab, 0 if it is not, -1 on a decoding error
"""
for path, dirs, files in os.walk(path_for_tabs):
for file in files:
with open(os.path.join(path_for_tabs, file), 'r') as tab:
try:
content = tab.read()
except UnicodeDecodeError:
return -1
if right_word in content:
return 1
break
return 0
def check(self, reply, attach):
# Incorrect URL
if attach is None:
if 'error' in reply.lower():
return CheckResult.correct()
else:
                return CheckResult.wrong('There was no "error" word, but there should be.')
# Correct URL
if isinstance(attach, tuple):
if len(attach) == 4:
_, *attach = attach
if 'error' not in reply.lower():
                    return CheckResult.wrong('There was no "error" word, but there should be.')
right_word, wrong_word, correct_file_name = attach
path_for_tabs = 'tb_tabs'
if not os.path.isdir(path_for_tabs):
return CheckResult.wrong(
"Can't find a directory \"" + path_for_tabs + "\" "
"in which you should save your web pages.")
check_files_result = self._check_files(path_for_tabs, right_word)
if not check_files_result:
return CheckResult.wrong(
"Seems like you did\'n save the web page "
"\"" + right_word + "\" into the "
"directory \"" + path_for_tabs + "\". "
"This file with page should be named \"" + correct_file_name + "\"")
elif check_files_result == -1:
return CheckResult.wrong('An error occurred while reading your saved tab. '
'Perhaps you used the wrong encoding?')
try:
shutil.rmtree(path_for_tabs)
except PermissionError:
return CheckResult.wrong("Impossible to remove the directory for tabs. Perhaps you haven't closed some file?")
if wrong_word in reply:
                return CheckResult.wrong('It seems like you printed the wrong variable')
if right_word in reply:
return CheckResult.correct()
return CheckResult.wrong('You printed neither bloomberg_com nor nytimes_com')
if isinstance(attach, dict):
for key, value in attach.items():
count, site = value
real_count = reply.count(key)
                if real_count != count:
return CheckResult.wrong(
f'The site "{site}" should be displayed {count} time(s).\n'
f'Actually displayed: {real_count} time(s).'
)
return CheckResult.correct()
TextBasedBrowserTest().run_tests()
|
[
"d.r.tierney@hotmail.co.uk"
] |
d.r.tierney@hotmail.co.uk
|
1d4c1a6a7d097727c67229681c7abf1bfe13d42a
|
05e99875ebdb5b95fe478bc9a0dedfc6adae5442
|
/render.py
|
db6d102a7db6785bf36f50afef50cde267b9eb80
|
[] |
no_license
|
dstathis/renderdocs
|
96d7fea587f7760aa3526eb3c58100e8ea8cc2ba
|
cac27f25f66f1c72ab918b4e00670a211154bf79
|
refs/heads/master
| 2022-12-18T20:00:25.220294
| 2020-09-28T21:13:01
| 2020-09-28T21:13:01
| 296,403,587
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,644
|
py
|
#!/usr/bin/env python3
from __future__ import print_function
import pickle
import os.path
import subprocess
from pathlib import Path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/documents.readonly',
'https://www.googleapis.com/auth/drive.readonly']
# The ID of a sample document.
DOCUMENT_ID = '1jwFlPG_q5lr80NYI19L_3mErAv4Yk2-g6uzK113Qiks'
docdir = Path('docs')
def get_doc(service, doc_id):
# Retrieve the documents contents from the Docs service.
document = service.documents().get(documentId=doc_id).execute()
fname = f'{document["title"]}.md'.replace(' ', '_')
contents = document['body']['content']
text = []
for elt in contents:
if 'paragraph' in elt:
text_elements = elt['paragraph']['elements']
for text_element in text_elements:
chars = text_element.get('textRun')
if chars:
text.append(chars['content'])
with (docdir / fname).open('w') as f:
f.write('\n'.join(text))
return fname
def main():
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
docs_service = build('docs', 'v1', credentials=creds)
drive_service = build('drive', 'v2', credentials=creds)
docdir.mkdir(exist_ok=True)
with Path('id.txt').open() as f:
folder_id = f.read().strip()
items = drive_service.children().list(folderId=folder_id).execute()['items']
with open('home.md', 'w') as f:
for doc in items:
fname = get_doc(docs_service, doc['id'])
f.write(f'[{fname}](docs/{fname}) \n')
subprocess.run(['grip', '-b', 'home.md'])
if __name__ == '__main__':
main()
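# Editor's note, inferred from the code above rather than from separate documentation:
#   credentials.json : OAuth client secrets consumed by InstalledAppFlow
#   id.txt           : the Drive folder id whose documents are exported
#   token.pickle     : created automatically after the first authorised run
# The script writes one Markdown file per document under docs/ plus home.md, then previews
# home.md with `grip -b home.md`.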
|
[
"dylan@theone.ninja"
] |
dylan@theone.ninja
|
4b14a84a25716004baaf55a0e43796fab1a29293
|
a137466dbaa5d704cd5a15ab9dfd17907b24be04
|
/algo2/mrdqn/agent.py
|
21a44e7aca50bb7bc677d14406d87263a932f502
|
[
"Apache-2.0"
] |
permissive
|
xlnwel/g2rl
|
92c15b8b9d0cd75b6d2dc8df20e6717e1a621ff6
|
e1261fdd2ce70724a99ddd174616cf013917b241
|
refs/heads/master
| 2023-08-30T10:29:44.169523
| 2021-11-08T07:50:43
| 2021-11-08T07:50:43
| 422,582,891
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,158
|
py
|
import tensorflow as tf
from utility.tf_utils import softmax, log_softmax, explained_variance
from utility.rl_utils import *
from utility.rl_loss import retrace
from core.decorator import override
from algo.mrdqn.base import RDQNBase, get_data_format, collect
class Agent(RDQNBase):
""" MRDQN methods """
@tf.function
def _learn(self, obs, action, reward, discount, mu, mask,
IS_ratio=1, state=None, prev_action=None, prev_reward=None):
obs, action, mu, mask, target, state, add_inp, terms = \
self._compute_target_and_process_data(
obs, action, reward, discount, mu, mask,
state, prev_action, prev_reward)
with tf.GradientTape() as tape:
x, _ = self._compute_embed(obs, mask, state, add_inp)
qs = self.q(x)
q = tf.reduce_sum(qs * action, -1)
error = target - q
value_loss = tf.reduce_mean(.5 * error**2, axis=-1)
value_loss = tf.reduce_mean(IS_ratio * value_loss)
terms['value_loss'] = value_loss
tf.debugging.assert_shapes([
[q, (None, self._sample_size)],
[target, (None, self._sample_size)],
[error, (None, self._sample_size)],
[IS_ratio, (None,)],
[value_loss, ()]
])
terms['value_norm'] = self._value_opt(tape, value_loss)
if 'actor' in self.model:
with tf.GradientTape() as tape:
pi, logpi = self.actor.train_step(x)
pi_a = tf.reduce_sum(pi * action, -1)
reinforce = tf.minimum(1. / mu, self._loo_c) * error * pi_a
v = tf.reduce_sum(qs * pi, axis=-1)
regularization = -tf.reduce_sum(pi * logpi, axis=-1)
loo_loss = -(self._v_pi_coef * v + self._reinforce_coef * reinforce)
tf.debugging.assert_shapes([
[pi, (None, self._sample_size, self._action_dim)],
[qs, (None, self._sample_size, self._action_dim)],
[v, (None, self._sample_size)],
[reinforce, (None, self._sample_size)],
[regularization, (None, self._sample_size)],
])
loo_loss = tf.reduce_mean(loo_loss, axis=-1)
regularization = tf.reduce_mean(regularization, axis=-1)
actor_loss = loo_loss - self._tau * regularization
actor_loss = tf.reduce_mean(IS_ratio * actor_loss)
terms.update(dict(
reinforce=reinforce,
v=v,
loo_loss=loo_loss,
regularization=regularization,
actor_loss=actor_loss,
ratio=tf.reduce_mean(pi_a / mu),
pi_min=tf.reduce_min(pi),
pi_std=tf.math.reduce_std(pi)
))
terms['actor_norm'] = self._actor_opt(tape, actor_loss)
if self._is_per:
priority = self._compute_priority(tf.abs(error))
terms['priority'] = priority
terms.update(dict(
q=q,
q_std=tf.math.reduce_std(q),
error=error,
error_std=tf.math.reduce_std(error),
mu_min=tf.reduce_min(mu),
mu=mu,
mu_inv=tf.reduce_mean(1/mu),
mu_std=tf.math.reduce_std(mu),
target=target,
explained_variance_q=explained_variance(target, q)
))
return terms
@override(RDQNBase)
def _compute_target(self, obs, action, reward, discount,
mu, mask, state, add_inp):
terms = {}
x, _ = self._compute_embed(obs, mask, state, add_inp, online=False)
if self._burn_in_size:
bis = self._burn_in_size
ss = self._sample_size
_, reward = tf.split(reward, [bis, ss], 1)
_, discount = tf.split(discount, [bis, ss], 1)
_, next_mu_a = tf.split(mu, [bis+1, ss], 1)
_, next_x = tf.split(x, [bis+1, ss], 1)
_, next_action = tf.split(action, [bis+1, ss], 1)
else:
_, next_mu_a = tf.split(mu, [1, self._sample_size], 1)
_, next_x = tf.split(x, [1, self._sample_size], 1)
_, next_action = tf.split(action, [1, self._sample_size], 1)
next_qs = self.target_q(next_x)
regularization = None
if 'actor' in self.model:
next_pi, next_logpi = self.target_actor.train_step(next_x)
if self._probabilistic_regularization == 'entropy':
regularization = tf.reduce_sum(
self._tau * next_pi * next_logpi, axis=-1)
else:
if self._probabilistic_regularization is None:
if self._double: # don't suggest to use double Q here, but implement it anyway
online_x, _ = self._compute_embed(obs, mask, state, add_inp)
                    # tf.split returns a list, so the burn-in part must be discarded explicitly;
                    # mirror the [burn_in+1, sample_size] split applied to the target embedding above.
                    _, next_online_x = tf.split(
                        online_x, [self._burn_in_size+1, self._sample_size], 1)
next_online_qs = self.q(next_online_x)
next_pi = self.q.compute_greedy_action(next_online_qs, one_hot=True)
else:
next_pi = self.target_q.compute_greedy_action(next_qs, one_hot=True)
elif self._probabilistic_regularization == 'prob':
next_pi = softmax(next_qs, self._tau)
elif self._probabilistic_regularization == 'entropy':
next_pi = softmax(next_qs, self._tau)
next_logpi = log_softmax(next_qs, self._tau)
regularization = tf.reduce_sum(next_pi * next_logpi, axis=-1)
terms['next_entropy'] = - regularization / self._tau
else:
raise ValueError(self._probabilistic_regularization)
discount = discount * self._gamma
target = retrace(
reward, next_qs, next_action,
next_pi, next_mu_a, discount,
lambda_=self._lambda,
axis=1, tbo=self._tbo,
regularization=regularization)
return target, terms
|
[
"122134545@qq.com"
] |
122134545@qq.com
|
13be7231187d37ac75c66c3d2fed7e893c559c16
|
e93ba589366452ccb47ccbaf1ec4c613ba8bbc73
|
/clase4_map.py
|
539de9b06741a829ff7b01afd7cef891a524be75
|
[] |
no_license
|
ericksc/pythonbasico
|
d340209683084ca7059767fea39c33987f8fcac2
|
13110acef5b2af02bc11954e38dc3443a0c43419
|
refs/heads/master
| 2023-06-19T05:44:13.349958
| 2021-07-16T16:17:26
| 2021-07-16T16:17:26
| 369,639,836
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 57
|
py
|
# map() applies type() to every element of the list; list() materialises the lazy map object.
a = [1, 2, 3, 'hola', 4, 5, True]
b = list(map(type, a))
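# Expected result of the map(type, ...) call above, shown for clarity:
print(b)
# [<class 'int'>, <class 'int'>, <class 'int'>, <class 'str'>, <class 'int'>, <class 'int'>, <class 'bool'>]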
|
[
"ecsa@gft.com"
] |
ecsa@gft.com
|