| Column | Type |
|---|---|
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 2–616) |
| content_id | string (length 40) |
| detected_licenses | list (length 0–69) |
| license_type | string (2 classes) |
| repo_name | string (length 5–118) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (length 4–63) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (2.91k–686M, nullable) |
| star_events_count | int64 (0–209k) |
| fork_events_count | int64 (0–110k) |
| gha_license_id | string (23 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (213 classes) |
| src_encoding | string (30 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (2–10.3M) |
| extension | string (246 classes) |
| content | string (length 2–10.3M) |
| authors | list (length 1) |
| author_id | string (length 0–212) |
cd2e908fa2051c2286f673c0666bb709854d6945
|
564d6a4d305a8ac6a7e01c761831fb2081c02d0f
|
/sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_06_01/aio/operations/_private_endpoint_connections_operations.py
|
1bc316723af7557d717797d60243efab5a65bdf7
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
paultaiton/azure-sdk-for-python
|
69af4d889bac8012b38f5b7e8108707be679b472
|
d435a1a25fd6097454b7fdfbbdefd53e05029160
|
refs/heads/master
| 2023-01-30T16:15:10.647335
| 2020-11-14T01:09:50
| 2020-11-14T01:09:50
| 283,343,691
| 0
| 0
|
MIT
| 2020-07-28T22:43:43
| 2020-07-28T22:43:43
| null |
UTF-8
|
Python
| false
| false
| 18,249
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
"""PrivateEndpointConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs
) -> "models.PrivateEndpointConnectionListResult":
"""Gets a list of private endpoint connections in the specified managed cluster.
Gets a list of private endpoint connections in the specified managed cluster. The operation
returns properties of each private endpoint connection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnectionListResult, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_06_01.models.PrivateEndpointConnectionListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PrivateEndpointConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections'} # type: ignore
async def get(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> "models.PrivateEndpointConnection":
"""Gets the private endpoint connection.
Gets the details of the private endpoint connection by managed cluster and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_06_01.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def update(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
parameters: "models.PrivateEndpointConnection",
**kwargs
) -> "models.PrivateEndpointConnection":
"""Updates a private endpoint connection.
Updates a private endpoint connection in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:param parameters: Parameters supplied to the Update a private endpoint connection operation.
:type parameters: ~azure.mgmt.containerservice.v2020_06_01.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_06_01.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a private endpoint connection.
Deletes the private endpoint connection in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
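# --- Illustrative usage sketch (editor's addition, not part of the generated
# file). Assumes azure-identity and the multi-API ContainerServiceClient; the
# exact aio import path can vary between package versions.
#
#   import asyncio
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.containerservice.aio import ContainerServiceClient
#
#   async def list_connections():
#       async with ContainerServiceClient(
#               DefaultAzureCredential(), "<subscription-id>") as client:
#           return await client.private_endpoint_connections.list(
#               resource_group_name="my-rg", resource_name="my-aks")
#
#   asyncio.run(list_connections())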
|
[
"noreply@github.com"
] |
paultaiton.noreply@github.com
|
23d85ee54462c005ae289bf372738995317e1cce
|
a17940af2be6e7c67264174faecdefa1a3105f44
|
/stock/strategy/StrategyManager.py
|
b8985f99038058a64856911a1c69267cdb164118
|
[] |
no_license
|
jacegem/stock-py
|
ea1c338ab3415b87f7649168e62d4f2b617fd9d2
|
74febf21206638940312b158543513cbcd8fc73d
|
refs/heads/master
| 2021-01-25T11:44:10.638863
| 2017-06-19T06:55:10
| 2017-06-19T06:55:10
| 93,945,070
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 843
|
py
|
from .BuyRA_5 import BuyRA_5
from .BuyGC_5 import BuyGC_5
from .SellDC_5 import SellDC_5
from .SellMAX_10 import SellMAX_10
from .BuySupportLevel_3M import BuySupportLevel_3M
from .BuyMomentum import BuyMomentum
from .SellMomentum import SellMomentum
class StrategyManager:
def __init__(self):
self.buy_map = {
'MOMENTUM': BuyMomentum(),
'RA_5': BuyRA_5(),
'GC_5': BuyGC_5(),
'SUPPORT_LEVEL_3M': BuySupportLevel_3M(),
}
self.sell_map = {
'MOMENTUM': SellMomentum(),
'MAX_10': SellMAX_10(),
'DC_5': SellDC_5(),
}
def get_strategy(self, buy_code, sell_code):
buy_func = self.buy_map.get(buy_code, BuyRA_5())
sell_func = self.sell_map.get(sell_code, SellMAX_10())
return buy_func, sell_func
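# --- Illustrative usage (editor's addition): codes are looked up in the two
# maps, with BuyRA_5/SellMAX_10 as the dict.get() fallbacks for unknown codes.
if __name__ == '__main__':
    manager = StrategyManager()
    buy, sell = manager.get_strategy('MOMENTUM', 'DC_5')
    print(type(buy).__name__, type(sell).__name__)  # BuyMomentum SellDC_5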
|
[
"jacegem@gmail.com"
] |
jacegem@gmail.com
|
25f5826c6fa77f2baeb027c010aae4dd3814867b
|
439468d3f5f0fdf85f591a37f3dc7cea0ffcc0f5
|
/trade/migrations/0001_initial.py
|
a3e40353ae8464c244c2af2c377f1c069e994d0e
|
[
"MIT"
] |
permissive
|
moewahed/trade_cycle
|
b8b2b371e2ca1a0f4d7af27201838940d81e2c9c
|
8ace51f08781a568ef087234b65a7864236dfcaf
|
refs/heads/master
| 2022-12-02T17:08:01.808874
| 2020-08-17T01:21:25
| 2020-08-17T01:21:25
| 288,055,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,956
|
py
|
# Generated by Django 2.2 on 2020-08-16 16:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('desc', models.TextField()),
('category', models.IntegerField(blank=True, choices=[(1, 'Technology'), (2, 'Cars'), (3, 'Games'), (4, 'Software')], null=True, verbose_name='Category')),
('create_at', models.DateTimeField(auto_now_add=True)),
('update_at', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-create_at'],
},
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('review', models.TextField()),
('create_at', models.DateTimeField(auto_now_add=True)),
('update_at', models.DateTimeField(auto_now=True)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review', to='trade.Item')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-create_at'],
},
),
]
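# Editor's note (illustrative): once applied with `python manage.py migrate
# trade`, the schema above supports, e.g.:
#   item = Item.objects.create(name='Laptop', desc='...', category=1, user=some_user)
#   Review.objects.create(review='Great!', item=item, user=some_user)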
|
[
"moewahed@gmail.com"
] |
moewahed@gmail.com
|
95d7d954bf9140193d8d1c74e59af032b1efdcaa
|
b82af11310120c28b1897bf8f8003198cdeebe05
|
/AIShooter/classes/Game.py
|
51bdadee16d47c76d745e85f541897fa179ae720
|
[
"Apache-2.0"
] |
permissive
|
SanteriHetekivi/AIShooter
|
72fb487ea51a26098e0326f595b5af76d5f032f0
|
f73461c9075cf5b0789679782fe21dee54f2775e
|
refs/heads/main
| 2023-02-05T21:46:27.545650
| 2020-12-19T16:53:26
| 2020-12-19T16:53:26
| 322,720,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,869
|
py
|
from __future__ import annotations
import pygame
import math
# Exceptions
from .Exceptions.End import End
# Helpers
from .Helpers.Cords import Cords
from .Helpers.Timer import Timer
# Elements
from .Elements.Screen import Screen
from .Elements.Player import Player
class Game():
"""Game class that houses main game logic.
"""
def __init__(self: Game, width: int = 1920, height: int = 1080) -> None:
"""Initialise new game.
Args:
self (Game): Itself.
width (int, optional): Window resolution width. Defaults to 1920.
height (int, optional): Window resolution height. Defaults to 1080.
"""
self._fps_cap = 360
self._resolution = Cords(width, height)
self._scale = Cords(
self._resolution.x/1920,
self._resolution.y/1080
)
self._surface = pygame.display.set_mode(
(self._resolution.x, self._resolution.y)
)
self._init_fps()
self._init_frame_limiter()
pygame.init()
def run(self: Game) -> Game:
"""Run the game.
Args:
self (Game): Itself.
Returns:
Game: Itself.
"""
# Initialize counters and timers.
self._init_fps()
self._init_frame_limiter()
timer = Timer()
screen = Screen()
screen.add_child(
Player()
)
# While running.
while True:
# Collect events.
events = []
for event in pygame.event.get():
events.append(event)
try:
screen._frame(
timer.curr(True),
events,
self._surface,
self._scale
)
except End as end:
print("Game ended: {0}".format(end))
break
pygame.display.update()
# Limit framerate to given cap.
self._frame_limiter()
# Count and print FPS every second.
self._fps()
pygame.quit()
return self
def _init_fps(self: Game) -> Game:
"""Initialize FPS counter and timer.
Args:
self (Game): Itself.
Returns:
Game: Itself.
"""
self._frame_counter = 0
self._fps_timer = Timer()
return self
def _fps(self: Game) -> bool:
"""Count FPS and print average every second.
Args:
self (Game): Itself.
Returns:
bool: Was FPS printed.
"""
self._frame_counter += 1
curr = self._fps_timer.curr()
print_fps = (curr > 1)
if print_fps:
print("FPS: ", self._frame_counter/curr)
self._frame_counter = 0
self._fps_timer.start()
return print_fps
def _init_frame_limiter(self: Game) -> Game:
"""Initialize frame limiter variables.
Args:
self (Game): Itself.
Returns:
Game: Itself.
"""
self._frame_timer = Timer()
self._fps_cap_seconds = 1/self._fps_cap
return self
def _frame_limiter(self: Game) -> bool:
"""Handle frame limiter.
Args:
self (Game): Itself.
Returns:
bool: Was frames limited.
"""
limit = (self._frame_timer.curr() < self._fps_cap_seconds)
if limit:
pygame.time.wait(
math.floor(
(
self._fps_cap_seconds
-
self._frame_timer.curr()
)
*
1000
)
)
self._frame_timer.start()
return limit
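# --- Illustrative usage (editor's addition): run the main loop at a custom
# resolution; it exits when screen._frame raises the End exception.
if __name__ == "__main__":
    Game(width=1280, height=720).run()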
|
[
"santeri@hetekivi.com"
] |
santeri@hetekivi.com
|
99d281da6484b8a817d5c5fbe357d4afde32a498
|
6627c834ec5bc1c559866bb5f0cb7b7211521c54
|
/covid/urls.py
|
5acb882d8c120637efb3886ae12ec5bc3646d423
|
[
"MIT"
] |
permissive
|
care2donate/care2donate
|
955c182dd0db617f7b442dd27fd6ff029dab6f70
|
5f99e7169653a96b6e6db44f90afee17758a4480
|
refs/heads/main
| 2023-05-14T08:26:55.745208
| 2021-06-07T18:38:19
| 2021-06-07T18:38:19
| 366,801,553
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
from django.urls import path
from covid.api.home import CovidHomeAPIView
urlpatterns = [
path('', CovidHomeAPIView.as_view(), name='covid home'),
]
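# Editor's note (illustrative): these routes are typically mounted from the
# project-level URLconf, e.g.:
#   from django.urls import include, path
#   urlpatterns = [path('covid/', include('covid.urls'))]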
|
[
"arjuntheprogrammer@gmail.com"
] |
arjuntheprogrammer@gmail.com
|
24564c4712ce48a666376c8554ce77319d247e09
|
bb3f59785ab029ed3545110ef8a2a0462160d7e8
|
/Train/train.py
|
cb73248de7546c4c100ee03a5f74d86565d8ae70
|
[] |
no_license
|
Taoooo9/SMP2020v1
|
638599092ccc6f0fb5df92b015e6a180de6270f0
|
1cc232921bf00557961111d8625ef98bad1f307c
|
refs/heads/master
| 2023-01-06T19:46:02.312998
| 2020-10-29T07:56:29
| 2020-10-29T07:56:29
| 308,256,137
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,604
|
py
|
import json
import torch
import numpy as np
import os
import time
import torch.nn as nn
from transformers import AdamW, get_linear_schedule_with_warmup
from DataProcessing.data_batchiter import create_tra_batch, create_eval_batch
from Model.loss import *
from Model.vat import VATLoss
def train(bert_model, domain_model, usual_tra_data_set, virus_tra_data_set, usual_dev_data_set, virus_dev_data_set,
usual_vat_data, virus_vat_data, tag_vocab, config, domain_vocab, tokenizer, usual_eval_data, virus_eval_data):
usual_tra_data_set.extend(virus_tra_data_set)
usual_vat_data.extend(virus_vat_data)
batch_num = int(np.ceil(len(usual_tra_data_set) / float(config.tra_batch_size)))
bert_no_decay = ['bias', 'LayerNorm.weight']
bert_optimizer_grouped_parameters = [
{'params': [p for n, p in bert_model.named_parameters() if not any(nd in n for nd in bert_no_decay)],
'weight_decay': config.weight_decay},
{'params': [p for n, p in bert_model.named_parameters() if any(nd in n for nd in bert_no_decay)], 'weight_decay': 0.0}
]
domain_no_decay = ['bias', 'LayerNorm.weight']
domain_optimizer_grouped_parameters = [
{'params': [p for n, p in domain_model.named_parameters() if not any(nd in n for nd in domain_no_decay)],
'weight_decay': config.weight_decay},
{'params': [p for n, p in domain_model.named_parameters() if any(nd in n for nd in domain_no_decay)],
'weight_decay': 0.0}
]
domain_optimizer = AdamW(domain_optimizer_grouped_parameters, lr=config.domain_lr, eps=config.epsilon)
domain_scheduler = get_linear_schedule_with_warmup(domain_optimizer, num_warmup_steps=0, num_training_steps=config.epoch * batch_num)
optimizer_bert = AdamW(bert_optimizer_grouped_parameters, lr=config.bert_lr, eps=config.epsilon)
scheduler_bert = get_linear_schedule_with_warmup(optimizer_bert, num_warmup_steps=0, num_training_steps=config.epoch * batch_num)
# Get start!
global_step = 0
best_tra_f1 = 0
best_cla_acc = 0
best_domain_acc = 0
best_dev_usual = 0
best_dev_virus = 0
criterion = LabelSmoothing(config)
vat_loss = VATLoss(config)
for epoch in range(0, config.epoch):
gold_label = []
predict_ids = []
cla_score = 0
domain_score = 0
print('\nThe epoch is starting.')
epoch_start_time = time.time()
batch_iter = 0
print('The epoch is :', str(epoch))
for all_batch in create_tra_batch(usual_tra_data_set, usual_vat_data, tag_vocab, config.tra_batch_size, config,
tokenizer, domain_vocab, shuffle=True):
start_time = time.time()
word_batch = all_batch[0]
vat_word_batch = all_batch[1]
bert_model.train()
domain_model.train()
batch_size = word_batch[0][0].size(0)
input_tensor = word_batch[0]
target = word_batch[1]
domain_target = word_batch[2]
gold_label.extend(target)
ul_input_tensor = vat_word_batch[0]
lds = vat_loss(bert_model, ul_input_tensor)
logits, last_hidden = bert_model(input_tensor)
domain_logits = domain_model(last_hidden)
cla_loss = criterion(logits, target)
correct, predict_id, accuracy = class_loss(logits, target)
domain_loss, domain_correct, domain_accuracy = domain_cla_loss(domain_logits, domain_target)
predict_ids.extend(predict_id)
loss = (cla_loss + domain_loss + config.vat_alpha * lds) / config.update_every
loss.backward()
cla_loss_value = cla_loss.item()
domain_loss_value = domain_loss.item()
vat_loss_value = (config.vat_alpha * lds).item()
during_time = float(time.time() - start_time)
print('Step:{}, Epoch:{}, batch_iter:{}, cla_accuracy:{:.4f}({}/{}),'
'domain_accuracy:{:.4f}({}/{}), time:{:.2f}, '
'cla_loss:{:.6f}, domain_loss:{:.6f}, vat_loss:{:.6f}'.format(global_step, epoch, batch_iter, accuracy,
correct, batch_size, domain_accuracy, domain_correct,
batch_size, during_time, cla_loss_value, domain_loss_value,
vat_loss_value))
batch_iter += 1
if batch_iter % config.update_every == 0 or batch_iter == batch_num:
if config.clip_max_norm_use:
nn.utils.clip_grad_norm_(bert_model.parameters(), max_norm=config.clip)
nn.utils.clip_grad_norm_(domain_model.parameters(), max_norm=config.clip)
optimizer_bert.step()
domain_optimizer.step()
scheduler_bert.step()
domain_scheduler.step()
bert_model.zero_grad()
domain_model.zero_grad()
global_step += 1
cla_score += correct
domain_score += domain_correct
if batch_iter % config.test_interval == 0 or batch_iter == batch_num:
print("now bert lr is {}".format(optimizer_bert.param_groups[0].get("lr")), '\n')
dev_usual_score, weight = evaluate(bert_model, usual_dev_data_set, config, tag_vocab, domain_vocab, tokenizer)
if best_dev_usual < dev_usual_score:
print('the best usual_dev score is: acc:{}'.format(dev_usual_score))
best_dev_usual = dev_usual_score
decoder(bert_model, usual_eval_data, config, tag_vocab, tokenizer, domain_vocab, weight)
if os.path.exists(config.save_model_path):
torch.save(bert_model.state_dict(), config.usual_model_pkl)
else:
os.makedirs(config.save_model_path)
torch.save(bert_model.state_dict(), config.usual_model_pkl)
dev_virus_score, weight = evaluate(bert_model, virus_dev_data_set, config, tag_vocab, domain_vocab,
tokenizer, test=True)
if best_dev_virus < dev_virus_score:
print('the best virus_dev score is: acc:{}'.format(dev_virus_score) + '\n')
best_dev_virus = dev_virus_score
decoder(bert_model, virus_eval_data, config, tag_vocab, tokenizer, domain_vocab, weight, test=True)
if os.path.exists(config.save_model_path):
torch.save(bert_model.state_dict(), config.virus_model_pkl)
else:
os.makedirs(config.save_model_path)
torch.save(bert_model.state_dict(), config.virus_model_pkl)
epoch_time = float(time.time() - epoch_start_time)
tra_score = get_Macro_F1_score(gold_label, predict_ids, tag_vocab)
all_cla_score = 100.0 * cla_score / len(usual_tra_data_set)
all_domain_score = 100.0 * domain_score / len(usual_tra_data_set)
if tra_score > best_tra_f1:
best_tra_f1 = tra_score
print('the best_train F1 is:{:.2f}'.format(best_tra_f1))
if all_cla_score > best_cla_acc:
best_cla_acc = all_cla_score
print('the best_train cla_score is:{}({}/{})'.format(best_cla_acc, cla_score, len(usual_tra_data_set)))
if all_domain_score > best_domain_acc:
best_domain_acc = all_domain_score
print('the best_train domain_score is:{}({}/{})'.format(best_domain_acc, domain_score, len(usual_tra_data_set)))
print("epoch_time is:", epoch_time)
def evaluate(bert_model, dev_data, config, tag_vocab, domain_vocab, tokenizer, test=False):
bert_model.eval()
get_score = 0
start_time = time.time()
all_logit = []
gold_label = []
predict_ids = []
for word_batch in create_eval_batch(dev_data, tag_vocab, config.test_batch_size, config, tokenizer, domain_vocab):
input_tensor = word_batch[0]
target = word_batch[1]
gold_label.extend(target)
logits, _ = bert_model(input_tensor)
new_logits = torch.Tensor(logits.data.tolist())
all_logit.append(new_logits)
correct, predict_id, accuracy = class_loss(logits, target)
predict_ids.extend(predict_id)
get_score += correct
all_logit = torch.cat(all_logit, dim=0)
optimized_f1 = F1Optimized(all_logit, gold_label, tag_vocab, config)
optimized_f1.optimized()
weight = optimized_f1.res
new_f1 = optimized_f1.cau_f1()
if test:
dev_score = get_Macro_F1_score(gold_label, predict_ids, tag_vocab)
print('the current_test virus_score is: F1:{:.2f}'.format(dev_score))
print('the current_test virus_score is: New F1:{:.2f}'.format(new_f1))
else:
dev_score = get_Macro_F1_score(gold_label, predict_ids, tag_vocab)
print('the current_dev usual_score is: F1:{:.2f}'.format(dev_score))
print('the current_dev usual_score is: New F1:{:.2f}'.format(new_f1))
during_time = float(time.time() - start_time)
print('spent time is:{:.4f}'.format(during_time))
return new_f1, weight
def decoder(bert_model, eval_data, config, tag_vocab, tokenizer, domain_vocab, weight, test=False):
bert_model.eval()
data_ids = []
all_logit = []
for word_batch in create_eval_batch(eval_data, tag_vocab, config.test_batch_size, config, tokenizer, domain_vocab):
input_tensor = word_batch[0]
data_id = word_batch[3]
data_ids.extend(data_id)
logits, _ = bert_model(input_tensor)
new_logits = torch.Tensor(logits.data.tolist())
all_logit.append(new_logits)
if test:
path = config.save_virus_path
else:
path = config.save_usual_path
all_logit = torch.cat(all_logit, dim=0)
all_logit = all_logit.data.numpy()
new_logits = torch.from_numpy(all_logit * weight)
predict_ids = smp_eval(new_logits)
json_list = []
for index, predict_id in zip(data_ids, predict_ids):
submit_dic = {}
submit_dic["id"] = index[0]
submit_dic["label"] = tag_vocab.id2word(predict_id)
json_list.append(submit_dic)
json_list = sorted(json_list, key=lambda d: d['id'])
json_str = json.dumps(json_list)
with open(path, 'w', encoding='utf8') as f:
f.write(json_str)
print('Write over.')
|
[
"taoooo9@gmail.com"
] |
taoooo9@gmail.com
|
88a5a1067b423e15e1d15501ce064c6b96938b33
|
5ed3c397cd5ed7969eb4800f3fd24cc237f0ee4a
|
/Face Eye Nose Smile detection/face_eye_smile detection.py
|
39dc666c53ea2ea4dda468198b2eee6ad2c2cb68
|
[] |
no_license
|
Abheenav/Computer-Vision-projects
|
c8b0f4855583725282f42515f5642d4b1c1692d8
|
a9207e21dc61ff2c05a3270e8d200cc94a1c65c7
|
refs/heads/main
| 2023-01-28T01:56:29.023595
| 2020-12-11T15:06:16
| 2020-12-11T15:06:16
| 320,330,021
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
import cv2
import numpy as np
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier("haarcascade_eye.xml")
smile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml')
cap = cv2.VideoCapture(0)
while True:
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(img, (x,y), (x+w, y+h), (255,0,0), 2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color, (ex,ey), (ex+ew, ey+eh), (0,255,0), 2)
smiles = smile_cascade.detectMultiScale(roi_gray, 1.8, 20)
for (sx, sy, sw, sh) in smiles:
cv2.rectangle(roi_color, (sx, sy), ((sx + sw), (sy + sh)), (0, 0, 255), 2)
cv2.imshow('img', img)
k= cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
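# Editor's note (illustrative): the cascade XMLs ship with OpenCV and can be
# loaded portably via the bundled data path, e.g.:
#   face_cascade = cv2.CascadeClassifier(
#       cv2.data.haarcascades + "haarcascade_frontalface_default.xml")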
|
[
"noreply@github.com"
] |
Abheenav.noreply@github.com
|
60607275cfa0745e5d06c72bd59b9cdb435ba236
|
8644a009e5d54f4fe61d2c69709550f6beb6af43
|
/templates/venv/bin/pip3
|
05651f1e028c606f522533377950a467dbd2245c
|
[] |
no_license
|
AmiAsmarino/RegWebApplication
|
3cbbbb783022ead604d8f4e6303a494f881c6b86
|
464cbc614b5a4788fbade15cd22dda2fac816832
|
refs/heads/master
| 2020-04-25T00:23:10.720808
| 2019-03-25T11:59:13
| 2019-03-25T11:59:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
#!/home/amelonbetty/Desktop/lecture0/Taxi-web-application/templates/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"aha176@post.uit.no"
] |
aha176@post.uit.no
|
|
25ecb94f9f0e5d019fb5f3cf19a40f75d0a454f4
|
4565e3096be7a0948419ff7c272d3bd80152f014
|
/contrib/bitrpc/bitrpc.py
|
d8db628ccefd2e605bd689861ffbe55b0f5b7250
|
[
"MIT"
] |
permissive
|
DIBBITSDEV/DIBBITS-src
|
1b8f47051ba12592fc0ebd5d97a16da467948927
|
852e38e40a79f5dacf52def4dda65951cb74d388
|
refs/heads/master
| 2020-12-24T16:43:03.797360
| 2015-06-17T00:06:19
| 2015-06-17T00:06:19
| 37,562,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,835
|
py
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:7332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:7332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
[
"theDIBBITSdev@gmail.com"
] |
theDIBBITSdev@gmail.com
|
af35bbaaee6c3c655a02a6c2c1e4ad644e4079eb
|
a047e29d8ac746e6ff90ee2b46c7ae5ced01b091
|
/src/lightgbm_pred_fare.py
|
4c171d8ad48ab23921208927dc671c625e890771
|
[] |
no_license
|
xf37/nyc-taxi-fare
|
e637948e251bcfcbe1047c2704eff127158d6884
|
89d76acff34fecace6e82ecf2f71fb41c686d2a6
|
refs/heads/master
| 2020-04-26T16:15:35.740797
| 2019-05-16T23:20:59
| 2019-05-16T23:20:59
| 173,672,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,766
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import gc
from sklearn.externals import joblib
###############################################################################
## set up parameter
###############################################################################
# file for saving processed dataset
df_train_file = ('~/taxi_fare_prediction/all/'
'processed_filtered_data/processed_train1_geoid.csv')
###############################################################################
## load dataset
###############################################################################
df_train = pd.read_csv(df_train_file)
df_train = df_train.drop(columns = ['pickup_datetime'])
###############################################################################
## fit model
###############################################################################
# get feature and label
Y = df_train['fare_amount']
X = df_train.drop(columns=['fare_amount'])
# split the data into train and test
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
random_state = 10,
test_size = 0.1)
# remove unused dataset
del df_train
del Y
del X
gc.collect()
# set up lgb parameter
params = {
'boosting_type':'gbdt',
'objective': 'regression',
'nthread': 4,
'num_leaves': 31,
'learning_rate': 0.05,
'max_depth': -1,
'subsample': 0.8,
'bagging_fraction' : 1,
'max_bin' : 5000 ,
'bagging_freq': 20,
'colsample_bytree': 0.6,
'metric': 'rmse',
'min_split_gain': 0.5,
'min_child_weight': 1,
'min_child_samples': 10,
'scale_pos_weight':1,
'zero_as_missing': True,
'seed':0,
'num_rounds':50000
}
# fit the model
def LGBMmodel(X_train,X_test,y_train,y_test,params):
matrix_train = lgb.Dataset(X_train, y_train)
matrix_test = lgb.Dataset(X_test, y_test)
model=lgb.train(params=params,
train_set=matrix_train,
num_boost_round=100000,
early_stopping_rounds=500,
verbose_eval=100,
valid_sets=matrix_test)
return model
# Train the model
model = LGBMmodel(X_train, X_test, Y_train, Y_test, params)
# remove unused dataset
del X_train
del Y_train
del X_test
del Y_test
gc.collect()
###############################################################################
## save result
###############################################################################
# Save to file in the current working directory
pkl_filename = ('~/taxi_fare_prediction/'
'simple_model/lightgbm_train1_geo.pkl')
joblib.dump(model, pkl_filename)
###############################################################################
## prediction
###############################################################################
# file for saving processed dataset
df_test_file = ('~/taxi_fare_prediction/all/'
'processed_filtered_data/processed_test1_geoid.csv')
df_test = pd.read_csv(df_test_file)
X_quiz = df_test.drop(columns=['key'])
# Predicte the 'fare_amount' and save file
prediction = model.predict(X_quiz, num_iteration = model.best_iteration)
submission = pd.DataFrame(
{'key': df_test.key, 'fare_amount': prediction},
columns = ['key', 'fare_amount']
)
submission.to_csv('~/taxi_fare_prediction/all/submission/'
'test_submission/submission_train1_geo.csv',
index = False
)
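# --- Illustrative usage (editor's addition): the persisted model can later be
# reloaded for prediction with the same joblib module used above.
#   reloaded = joblib.load(pkl_filename)
#   preds = reloaded.predict(X_quiz, num_iteration=reloaded.best_iteration)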
|
[
"noreply@github.com"
] |
xf37.noreply@github.com
|
7b6c76290c750c25b1d931dea7a3066b227643ae
|
801ee3ece7b42e5032f68a5a80b7d9145b1dce01
|
/Classification/DecisionTrees/treePlotter.py
|
eeff537f0e08575850ff199fefaee90f2cc76ace
|
[] |
no_license
|
lakshmimohandevt85/MachineLearningInAction
|
a19423dcf2b1fc9fd1b193623f0b3052445f56ab
|
f658c3d957a1207ae15b46ff36c09b6276bb35aa
|
refs/heads/master
| 2020-03-16T09:04:28.701206
| 2014-12-11T23:12:55
| 2014-12-11T23:12:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,412
|
py
|
import matplotlib.pyplot as plt
decisionNode = dict(boxstyle="sawtooth", fc="0.8")
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")
def plotNode(nodeText, centerPt, parentPt, nodeType):
createPlot.ax1.annotate(nodeText, xy=parentPt, xycoords='axes fraction',
xytext=centerPt, textcoords='axes fraction', va='center',
ha='center', bbox=nodeType, arrowprops=arrow_args)
def plotMidText(counterPt, parentPt, textString):
xMid = (parentPt[0] - counterPt[0]) / 2.0 + counterPt[0]
yMid = (parentPt[1] - counterPt[1]) / 2.0 + counterPt[1]
createPlot.ax1.text(xMid, yMid, textString)
def plotTree(myTree, parentPt, nodeText):
numLeaves = getNumLeaves(myTree)
treeDepth = getTreeDepth(myTree)
firstString = list(myTree.keys())[0]
counterPt = (plotTree.xOff + ((1.0 + float(numLeaves)) / 2.0) / plotTree.totalW, plotTree.yOff)
plotMidText(counterPt, parentPt, nodeText)
plotNode(firstString, counterPt, parentPt, decisionNode)
secondDict = myTree[firstString]
plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
plotTree(secondDict[key], counterPt, str(key))
else:
plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalW
plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), counterPt, leafNode)
plotMidText((plotTree.xOff, plotTree.yOff), counterPt, str(key))
plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD
# def createPlot():
# fig=plt.figure(1,facecolor='white')
# fig.clf()
# createPlot.ax1=plt.subplot(111, frameon=False)
# plotNode(' a decision node',(0.5,0.1),(0.1,0.5), decisionNode)
# plotNode('a leaf node',(0.8,0.1),(0.3,0.8),leafNode)
# plt.show()
def getNumLeaves(myTree):
numLeaves = 0
firstStr = list(myTree.keys())[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
numLeaves += getNumLeaves(secondDict[key])
else:
numLeaves += 1
return numLeaves
def getTreeDepth(myTree):
maxDepth = 0
firstStr = list(myTree.keys())[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
thisDepth = 1 + getTreeDepth(secondDict[key])
else:
thisDepth = 1
if thisDepth > maxDepth:
maxDepth = thisDepth
return maxDepth
def retrieveTree(i):
listOfTrees = [{'no surfacing': {0: 'no', 1: {'flippers': \
{0: 'no', 1: 'yes'}}}},
{'no surfacing': {0: 'no', 1: {'flippers': \
{0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}}
]
return listOfTrees[i]
def createPlot(inTree):
fig = plt.figure(1, facecolor='white')
fig.clf()
axprops = dict(xticks=[], yticks=[])
createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)
plotTree.totalW = float(getNumLeaves(inTree))
plotTree.totalD = float(getTreeDepth(inTree))
plotTree.xOff = -0.5 / plotTree.totalW
plotTree.yOff = 1.0
plotTree(inTree, (0.5, 1.0), '')
plt.show()
if __name__ == "__main__":
myTree = retrieveTree(0)
createPlot(myTree)
|
[
"amine.benhalloum@gmail.com"
] |
amine.benhalloum@gmail.com
|
0db7fdafd65677bdcb62704e84103ac5f931f197
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-dms-enterprise/aliyunsdkdms_enterprise/request/v20181101/DeleteProxyAccessRequest.py
|
50c17619b4d0a9effdb2c3f786c49b901fd3ef1d
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,659
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdms_enterprise.endpoint import endpoint_data
class DeleteProxyAccessRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dms-enterprise', '2018-11-01', 'DeleteProxyAccess','dms-enterprise')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ProxyAccessId(self): # Long
return self.get_query_params().get('ProxyAccessId')
def set_ProxyAccessId(self, ProxyAccessId): # Long
self.add_query_param('ProxyAccessId', ProxyAccessId)
def get_Tid(self): # Long
return self.get_query_params().get('Tid')
def set_Tid(self, Tid): # Long
self.add_query_param('Tid', Tid)
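# --- Illustrative usage sketch (editor's addition): requests in this SDK are
# executed through an AcsClient; the credentials and region are placeholders.
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = DeleteProxyAccessRequest()
#   request.set_ProxyAccessId(12345)
#   print(client.do_action_with_exception(request))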
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
9ea2a48ff14efbd8b2f62bffba30f9d5e9e98747
|
4f4846bf58fd1eeaa491326d699a8e07e471faed
|
/cart/migrations/0001_initial.py
|
157dc104032331dc839c3c435ba34d2a06fda0dc
|
[
"MIT"
] |
permissive
|
Code-Institute-Submissions/coachs-plan-1
|
f3d7c1ceccd8b1ce7c6d9a01bec576a748f45ff6
|
f522ed57436970cabf8007de3bd6cd9f2931e752
|
refs/heads/master
| 2023-01-15T10:15:30.409617
| 2020-11-22T18:43:43
| 2020-11-22T18:43:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,811
|
py
|
# Generated by Django 3.1.1 on 2020-10-25 11:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('plans', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CartItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='plans.plan')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_ref', models.UUIDField(default=uuid.uuid4, editable=False)),
('purchased', models.BooleanField(default=False)),
('items', models.ManyToManyField(to='cart.CartItem')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='CompletedOrder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('payment_date', models.DateTimeField(auto_now_add=True)),
('payment_total', models.FloatField()),
('stripe_ref', models.CharField(max_length=150)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cart.order')),
],
),
]
|
[
"ronanmcclelland@gmail.com"
] |
ronanmcclelland@gmail.com
|
02697ca8ee4df1a5ee8dcd1f41edb051ec9f8c45
|
9fcbc1abf7d512d61164f6807c2a3206a9e45aa9
|
/Wolptinger/utils.py
|
0eb78c3b6213d133aac5f9bdbb2bb79e7f7f49e6
|
[
"MIT"
] |
permissive
|
siriusctrl/Storm-auto-scheduler
|
2b862ec662c6284b8d6cbf49cdef9cc6239aa008
|
36fddf50e51b023d346e44b979e50c47f46fb177
|
refs/heads/main
| 2023-08-31T20:26:14.359968
| 2021-07-08T02:51:00
| 2021-07-08T02:51:00
| 359,676,451
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,536
|
py
|
import os
import torch
from torch.autograd import Variable
USE_CUDA = torch.cuda.is_available()
FLOAT = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor
# colorful print functions
def prRed(prt): print(f"\033[91m {prt}\033[00m")
def prGreen(prt): print(f"\033[92m {prt}\033[00m")
def prYellow(prt): print(f"\033[93m {prt}\033[00m")
def prLightPurple(prt): print(f"\033[94m {prt}\033[00m")
def prPurple(prt): print(f"\033[95m {prt}\033[00m")
def prCyan(prt): print(f"\033[96m {prt}\033[00m")
def prLightGray(prt): print(f"\033[97m {prt}\033[00m")
def prBlack(prt): print(f"\033[98m {prt}\033[00m")
def to_numpy(var):
return var.cpu().data.numpy() if USE_CUDA else var.data.numpy()
def to_tensor(ndarray, volatile=False, requires_grad=False, dtype=FLOAT):
return Variable(
torch.from_numpy(ndarray), volatile=volatile, requires_grad=requires_grad
).type(dtype)
def soft_update(target, source, tau=0.05):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - tau) + param.data * tau
)
def hard_update(target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
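# Hypothetical usage sketch (not in the original file): these two helpers
# implement target-network tracking as used in DDPG-style agents, e.g.
#   target = copy.deepcopy(online_net)
#   hard_update(target, online_net)            # exact one-off copy
#   ...after each optimisation step...
#   soft_update(target, online_net, tau=0.05)  # target <- (1-tau)*target + tau*online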
def get_output_folder(parent_dir, env_name):
"""Return save folder.
Assumes folders in the parent_dir have suffix -run{run
number}. Finds the highest run number and sets the output folder
to that number + 1. This is just convenient so that if you run the
same script multiple times tensorboard can plot all of the results
on the same plots with different names.
Parameters
----------
parent_dir: str
Path of the directory containing all experiment runs.
Returns
-------
parent_dir/run_dir
Path to this run's save directory.
"""
os.makedirs(parent_dir, exist_ok=True)
    experiment_id = 0
    for folder_name in os.listdir(parent_dir):
        if not os.path.isdir(os.path.join(parent_dir, folder_name)):
            continue
        try:
            run_id = int(folder_name.split('-run')[-1])
        except ValueError:
            continue
        experiment_id = max(experiment_id, run_id)
experiment_id += 1
parent_dir = os.path.join(parent_dir, env_name)
parent_dir = parent_dir + '-run{}'.format(experiment_id)
os.makedirs(parent_dir, exist_ok=True)
return parent_dir
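# Worked example (illustrative names): if parent_dir already holds
# 'cartpole-run1' and 'cartpole-run3', the highest run number found is 3, so
# get_output_folder(parent_dir, 'cartpole') creates and returns
# parent_dir/cartpole-run4.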
if __name__ == '__main__':
prLightPurple(FLOAT([1,2,3]))
|
[
"morryniu123@gmail.com"
] |
morryniu123@gmail.com
|
8fedd093e85de40992e82add29e5672a2ed72652
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/datamgr/datamanager/dmonitor/monitor/process_delay_alert.py
|
26e1cc4e376be900f097e0a1a7f157b7052bad35
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 17,748
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import logging
import time
import gevent
from gevent import monkey
from dmonitor.alert.alert_codes import AlertCode, AlertLevel, AlertStatus, AlertType
from dmonitor.base import BaseDmonitorTaskGreenlet
from dmonitor.metrics.base import DataRelativeDelay
from dmonitor.settings import DMONITOR_TOPICS
monkey.patch_all()
def process_delay_alert():
logging.info("Start to execute process delay monitor task")
task_configs = {
"consumer_configs": {
"type": "kafka",
"alias": "op",
"topic": "data_delay_metric",
"partition": False,
"group_id": "dmonitor_process_delay",
"batch_message_max_count": 100000,
"batch_message_timeout": 5,
},
"task_pool_size": 50,
}
try:
task = ProcessDelayAlertTaskGreenlet(configs=task_configs)
task.start()
task.join()
except Exception as e:
logging.error(
"Raise exception({error}) when init process delay alert task".format(
error=e
),
exc_info=True,
)
class ProcessDelayAlertTaskGreenlet(BaseDmonitorTaskGreenlet):
DETECT_INTERVAL = 60
PENDING_TIME = 60
CACHE_REFRESH_INTERVAL = 60
ALERT_CODE = AlertCode.PROCESS_TIME_DELAY.value
ALERT_MESSAGE = (
"{entity_display}持续{lasted_time_display}处理时间延迟超过{max_delay_time_display},"
"当前处理时间延迟时间为{cur_delay_time_display}"
)
ALERT_MESSAGE_EN = (
"Process Time about {entity_display_en} had been delayed more than {max_delay_time_display_en} and "
"lasted for {lasted_time_display_en}. The current process time delay is {cur_delay_time_display_en}"
)
ALERT_FULL_MESSAGE = (
"{entity_display}持续{lasted_time_display}处理时间延迟超过{max_delay_time_display}, "
"当前处理时间延迟时间为{cur_delay_time_display}"
)
ALERT_FULL_MESSAGE_EN = (
"Process Time about {entity_display_en} had been delayed more than {max_delay_time_display_en} "
"and lasted for {lasted_time_display_en}. The current process time delay is {cur_delay_time_display_en}"
)
def __init__(self, *args, **kwargs):
"""初始化生成延迟指标的任务
:param task_configs: 缓存同步任务配置
{
'consumer_configs': {
'type': 'kafka',
'alias': 'op',
'topic': 'bkdata_data_monitor_metrics591',
'partition': False,
'group_id': 'dmonitor',
'batch_message_max_count': 5000,
'batch_message_timeout': 0.1,
},
'task_pool_size': 100,
}
"""
configs = kwargs.pop("configs", {})
super(ProcessDelayAlertTaskGreenlet, self).__init__(*args, **kwargs)
self.init_consumer(configs.get("consumer_configs"))
self.init_task_pool(configs.get("task_pool_size"))
now = time.time()
self._metric_cache = {}
self._flow_infos = {}
self._alert_configs = []
self._last_detect_time = now + self.PENDING_TIME
self._cache_last_refresh_time = None
self.refresh_metadata_cache(now)
def refresh_metadata_cache(self, now):
"""刷新处理延迟监控依赖的元数据信息
:param now: 当前刷新缓存的时间
"""
if (
self._cache_last_refresh_time
and now - self._cache_last_refresh_time < self.CACHE_REFRESH_INTERVAL
):
return
gevent.joinall(
[
gevent.spawn(
self.refresh_metadata,
self._flow_infos,
self.fetch_flow_infos_from_redis,
update=False,
),
gevent.spawn(
self.refresh_metadata,
self._alert_configs,
self.fetch_alert_configs,
update=False,
),
]
)
self.generate_metrics_slots(now)
if self._cache_last_refresh_time:
            self.clear_metrics_slots(int(now - self._cache_last_refresh_time))
self._cache_last_refresh_time = now
def generate_metrics_slots(self, now):
for alert_config in self._alert_configs:
for target in alert_config.get("monitor_target", []):
flow_id, node_id = self.get_flow_node_by_target(target)
if (not self.check_alert_config_valid(alert_config)) or (
not self.check_flow_valid(flow_id)
):
self.remove_alert_config_by_flow_id(flow_id)
continue
                # generate the metric slot for this flow
if flow_id not in self._metric_cache:
self._metric_cache[flow_id] = {
"alert_configs": {},
"nodes": {},
}
if node_id is None:
self.add_alert_config(
self._metric_cache[flow_id]["alert_configs"],
target,
alert_config,
)
else:
if node_id not in self._metric_cache[flow_id]["nodes"]:
self._metric_cache[flow_id]["nodes"][node_id] = {
"alert_configs": {},
}
self.add_alert_config(
self._metric_cache[flow_id]["nodes"][node_id]["alert_configs"],
target,
alert_config,
)
def clear_metrics_slots(self, recent_updated):
disabled_alert_configs = self.fetch_disabled_alert_configs(
recent_updated=recent_updated
)
for alert_config in disabled_alert_configs:
for target in alert_config.get("monitor_target", []):
flow_id, node_id = self.get_flow_node_by_target(target)
if not alert_config.get("active"):
self.remove_alert_config_by_flow_id(flow_id)
def check_alert_config_valid(self, alert_config):
if self.ALERT_CODE not in alert_config["monitor_config"]:
return False
if (
alert_config["monitor_config"][self.ALERT_CODE].get("monitor_status", "off")
== "off"
):
return False
return True
def check_flow_valid(self, flow_id):
if not flow_id or str(flow_id) not in self._flow_infos:
return False
flow_info = self._flow_infos[str(flow_id)]
        # if the flow is not running, its metric cache should be dropped
if (
flow_info.get("flow_type") == "dataflow"
and flow_info.get("status") != "running"
):
return False
return True
def remove_alert_config_by_flow_id(self, flow_id):
if flow_id in self._metric_cache:
del self._metric_cache[flow_id]
def add_alert_config(self, alert_configs, target, alert_config):
alert_config_id = alert_config.get("id")
if alert_config_id in alert_configs:
if not self.same_alert_config(
alert_configs[alert_config_id]["config"], alert_config
):
alert_configs[alert_config_id] = {
"metrics": {},
"target": target,
"config": alert_config,
}
else:
alert_configs[alert_config_id] = {
"metrics": {},
"target": target,
"config": alert_config,
}
def same_alert_config(self, alert_config, other_alert_config):
process_delay_config = alert_config["monitor_config"].get(self.ALERT_CODE, {})
other_process_delay_config = other_alert_config["monitor_config"].get(
self.ALERT_CODE, {}
)
for key in process_delay_config.keys():
            if process_delay_config[key] != other_process_delay_config.get(key):
return False
return True
def handle_monitor_value(self, message, now):
"""处理各个模块上报的任务埋点
:param message: 延迟原始指标
{
"time": 1542960360.000001,
"database": "monitor_data_metrics",
"data_delay_max": {
"waiting_time": 1542960360,
"data_time": 1542960360,
"delay_time": 1542960360,
"output_time": 1542960360,
"tags": {
"module": "stream",
"component": "flink",
"cluster": null,
"storage": "channel_11",
"logical_tag": "591_test1119str",
"physical_tag": "171_1fe25fadfef54a4899d781fc9d1e55d3|591_test1119str|0"
}
}
}
        :param now: time at which this data is processed
"""
try:
if "data_relative_delay" in message:
metric = DataRelativeDelay.from_message(message)
flow_id = metric.get_tag("flow_id")
node_id = metric.get_tag("node_id")
storage = metric.get_tag("storage")
if not storage or storage == "None":
return
logical_key = self.gen_logical_key(metric.tags)
self.monitor_metric(flow_id, node_id, logical_key, metric, now)
except Exception as e:
logging.error(
"Combine data error: {}, message: {}".format(e, json.dumps(message)),
exc_info=True,
)
def monitor_metric(self, flow_id, node_id, logical_key, metric, now):
if flow_id not in self._metric_cache:
return
for alert_config_item in self._metric_cache[flow_id]["alert_configs"].values():
history_metrics = alert_config_item.get("metrics", {})
if node_id not in history_metrics:
history_metrics[node_id] = {}
self.monitor_by_alert_config(
alert_config_item,
history_metrics[node_id],
metric,
flow_id,
node_id,
logical_key,
now,
)
if node_id not in self._metric_cache[flow_id]["nodes"]:
return
for alert_config_item in self._metric_cache[flow_id]["nodes"][node_id][
"alert_configs"
].values():
history_metrics = alert_config_item.get("metrics", {})
self.monitor_by_alert_config(
alert_config_item,
history_metrics,
metric,
flow_id,
node_id,
logical_key,
now,
)
def monitor_by_alert_config(
self,
alert_config_item,
history_metrics,
metric,
flow_id,
node_id,
logical_key,
now,
):
alert_config = alert_config_item.get("config", {})
target = alert_config_item.get("target", {})
max_delay_time = alert_config["monitor_config"][self.ALERT_CODE].get(
"delay_time", 300
)
max_lasted_time = alert_config["monitor_config"][self.ALERT_CODE].get(
"lasted_time", 600
)
if logical_key not in history_metrics:
history_metrics[logical_key] = metric
if metric.get_metric("relative_delay") > max_delay_time:
lasted_time = now - history_metrics[logical_key].timestamp
if lasted_time > max_lasted_time:
self.generate_alert(
alert_config,
target,
flow_id,
node_id,
max_delay_time,
lasted_time,
metric,
now,
)
else:
del history_metrics[logical_key]
def generate_alert(
self,
alert_config,
target,
flow_id,
node_id,
delay_time,
lasted_time,
metric,
now,
):
flow_info = self._flow_infos.get(str(flow_id), {})
max_delay_time_display, max_delay_time_display_en = self.convert_display_time(
delay_time
)
lasted_time_display, lasted_time_display_en = self.convert_display_time(
lasted_time
)
cur_delay_time_display, cur_delay_time_display_en = self.convert_display_time(
metric.get_metric("relative_delay"), precision="second"
)
logical_tag = str(metric.get_tag("logical_tag"))
entity_display, entity_display_en = self.get_logical_tag_display(
logical_tag, metric.tags, flow_info
)
message = self.ALERT_MESSAGE.format(
entity_display=entity_display,
max_delay_time_display=max_delay_time_display,
lasted_time_display=lasted_time_display,
cur_delay_time_display=cur_delay_time_display,
)
message_en = self.ALERT_MESSAGE_EN.format(
entity_display_en=entity_display_en,
max_delay_time_display_en=max_delay_time_display_en,
lasted_time_display_en=lasted_time_display_en,
cur_delay_time_display_en=cur_delay_time_display_en,
)
full_message = self.ALERT_FULL_MESSAGE.format(
entity_display=entity_display,
logical_tag=logical_tag,
max_delay_time_display=max_delay_time_display,
lasted_time_display=lasted_time_display,
cur_delay_time_display=cur_delay_time_display,
)
full_message_en = self.ALERT_FULL_MESSAGE_EN.format(
entity_display_en=entity_display_en,
logical_tag=logical_tag,
max_delay_time_display_en=max_delay_time_display_en,
lasted_time_display_en=lasted_time_display_en,
cur_delay_time_display_en=cur_delay_time_display_en,
)
alert_info = {
"time": now,
"database": "monitor_data_metrics",
"dmonitor_alerts": {
"message": message,
"message_en": message_en,
"full_message": full_message,
"full_message_en": full_message_en,
"alert_status": AlertStatus.INIT.value,
"tags": {
"alert_level": AlertLevel.WARNING.value,
"alert_code": AlertCode.PROCESS_TIME_DELAY.value,
"alert_type": AlertType.DATA_MONITOR.value,
"flow_id": flow_id,
"node_id": node_id,
"alert_config_id": alert_config.get("id"),
"data_set_id": logical_tag,
"generate_type": alert_config.get("generate_type"),
},
},
}
if target.get("target_type") == "dataflow":
if flow_info:
alert_info["dmonitor_alerts"]["tags"].update(
{
"project_id": flow_info.get("project_id"),
"bk_app_code": flow_info.get("bk_app_code"),
}
)
alert_info["dmonitor_alerts"]["tags"].update(metric.tags)
elif target.get("target_type") == "rawdata":
if flow_info:
alert_info["dmonitor_alerts"]["tags"].update(
{
"bk_biz_id": flow_info.get("bk_biz_id"),
"bk_app_code": flow_info.get("bk_app_code"),
"raw_data_id": flow_info.get("id"),
}
)
alert_info["dmonitor_alerts"]["tags"].update(metric.tags)
alert_message = json.dumps(alert_info)
self.produce_metric(DMONITOR_TOPICS["dmonitor_alerts"], alert_message)
self.produce_metric(DMONITOR_TOPICS["data_cleaning"], alert_message)
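# Hypothetical usage sketch (not part of the original module): the entry point
# is a plain function, so a scheduler or worker process would simply call
#   from dmonitor.monitor.process_delay_alert import process_delay_alert
#   process_delay_alert()  # blocks: starts the greenlet task and joins it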
|
[
"terrencehan@tencent.com"
] |
terrencehan@tencent.com
|
867fded854c844c11bfafb50702d85bc8070ece4
|
e6e2172f39ecbeca5bdf599887cf3bc95479c029
|
/model.py
|
fcee46a3b27066f38cfcc230726b44c82711911a
|
[] |
no_license
|
oscarsuen/vae
|
b78bb12aa523d84e6e9942027f2fb8c9376453bf
|
4606c3be010324d2b98e454b88e86bfad94af07c
|
refs/heads/master
| 2022-11-09T18:44:33.795240
| 2020-07-01T04:41:37
| 2020-07-01T04:41:37
| 276,277,856
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,862
|
py
|
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
tfk = tf.keras
tfkl = tfk.layers
tfpl = tfp.layers
tfd = tfp.distributions
tf.enable_eager_execution()
mnist, mnist_info = tfds.load(name='mnist', with_info=True, as_supervised=False)
def _preprocess(sample):
image = tf.cast(sample['image'], tf.float32) / 255.
# image = image < tf.random.uniform(tf.shape(image))
image = image < 0.5
return image, image
data_train = mnist['train'] \
.map(_preprocess) \
.batch(256) \
.prefetch(tf.data.experimental.AUTOTUNE) \
.shuffle(int(1e4))
data_eval = mnist['test'] \
.map(_preprocess) \
.batch(256) \
.prefetch(tf.data.experimental.AUTOTUNE)
input_shape = mnist_info.features['image'].shape
encoded_size = 16
base_depth = 32
prior = tfd.Independent(tfd.Normal(loc=tf.zeros(encoded_size), scale=1), reinterpreted_batch_ndims=1)
encoder = tfk.Sequential([
tfkl.InputLayer(input_shape=input_shape),
tfkl.Lambda(lambda x: tf.cast(x, tf.float32) - 0.5),
tfkl.Conv2D(1*base_depth, 5, strides=1, padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(1*base_depth, 5, strides=2, padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(2*base_depth, 5, strides=1, padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(2*base_depth, 5, strides=2, padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(4*base_depth, 7, strides=1, padding='valid', activation=tf.nn.leaky_relu),
tfkl.Flatten(),
tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(encoded_size), activation=None),
tfpl.MultivariateNormalTriL(encoded_size, activity_regularizer=tfpl.KLDivergenceRegularizer(prior)),
])
decoder = tfk.Sequential([
tfkl.InputLayer(input_shape=[encoded_size]),
tfkl.Reshape([1, 1, encoded_size]),
tfkl.Conv2DTranspose(2*base_depth, 7, strides=1, padding='valid', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(2*base_depth, 5, strides=1, padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(2*base_depth, 5, strides=2, padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(1*base_depth, 5, strides=1, padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(1*base_depth, 5, strides=2, padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(1*base_depth, 5, strides=1, padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(filters=1, kernel_size=5, strides=1, padding='same', activation=None),
tfkl.Flatten(),
tfpl.IndependentBernoulli(input_shape, tfd.Bernoulli.logits),
])
vae = tfk.Model(inputs=encoder.inputs, outputs=decoder(encoder.outputs[0]))
vae.compile(optimizer=tfk.optimizers.Adam(learning_rate=1e-3), loss=lambda x, rv_x: -rv_x.log_prob(x))
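# Hypothetical next step (not in the original file): with the model compiled,
# training and evaluation would look like
#   vae.fit(data_train, epochs=15, validation_data=data_eval)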
|
[
"suen.oscar@gmail.com"
] |
suen.oscar@gmail.com
|
24ac4bd5c923e9550e5ca2d112098516d7300877
|
90ccca2223ce2c17e9257495a3cd1fdc9bf2ddae
|
/blog/itblog/aic/urls.py
|
500123864531af51cedc00711e76325632a32796
|
[] |
no_license
|
iTenis/itennis_webpy
|
9a3688b36030a3922062b85fd4df477c15f17490
|
d17a6bf8ecfe70091d62a0c6f3ae45a6da51bf6a
|
refs/heads/master
| 2020-03-25T21:12:09.739526
| 2018-08-09T16:00:05
| 2018-08-09T16:00:05
| 144,164,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.Index),
]
|
[
"itennishy@qq.com"
] |
itennishy@qq.com
|
2e9af2e99de997feb66abeffd26bd07c3bf4add5
|
0d9e8a57a523e145ccd2deb9413bfd6c3ea22232
|
/compare-skan-fiji1.py
|
600b2672e56c9b4a785c61eb51858a9ef9396178
|
[
"BSD-3-Clause"
] |
permissive
|
jni/useful-histories
|
8e9cb3b6d026ebbe3e88a266c113dbe9bb675ca7
|
6a3e5a4bf831d97ffbb5b067d80f64ff6625306d
|
refs/heads/main
| 2023-04-08T12:34:28.923087
| 2023-04-04T10:21:05
| 2023-04-04T10:21:05
| 19,419,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
# IPython log file
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skimage import io
spacing = [3.033534 * 3, 3.033534, 3.033534]
skel1 = io.imread('OP_1_Rendered_Paths_thinned.tif')
from skan import csr
df = csr.summarise(skel1.astype(bool), spacing=spacing)
df2 = pd.read_excel('OP_1-Branch-information.xlsx')
bins = np.histogram(np.concatenate((df['branch-distance'],
df2['Branch length'])),
bins=35)[1]
plt.hist(df['branch-distance'], bins=bins, label='skan');
plt.hist(df2['Branch length'], bins=bins, label='Fiji', alpha=0.3);
plt.legend()
plt.xlabel('Branch length (µm)')
plt.ylabel('Count')
plt.title('OP1 Branch lengths')
plt.savefig('OP1 Branch lengths using skan and Fiji.png')
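# Side note (assumption, not from the log): csr.summarise reflects the skan
# API current at the time of this session; later skan releases expose the
# equivalent via skan.Skeleton and skan.summarize.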
|
[
"juan.n@unimelb.edu.au"
] |
juan.n@unimelb.edu.au
|
0e30fca07b05c6b52273e61a01c5e28d0c63bcca
|
02a9f866dcea377c25b1894628f203700ea9909f
|
/p02_bulls_cows.py
|
2e048e67e21077af47332936dcc57cc8f01eb175
|
[] |
no_license
|
humpomar/pa_proj2
|
39bc66dfa153fe70526f9a6784b5f1039018df05
|
8b5aaf3c7a85de76e3226d5458d0e98f40f33e37
|
refs/heads/master
| 2022-11-26T15:13:01.647726
| 2020-07-31T19:44:21
| 2020-07-31T19:44:21
| 275,661,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,715
|
py
|
from random import randint
from time import time
line = "*" * 70
def main() -> None:
intro()
number_of_digits = how_many_digits()
total_rounds, total_points, total_time, total_attempts = 0, 0, 0, 0
active = True
while active:
round_points, round_time, round_attempts = game_round(number_of_digits)
total_rounds += 1
total_points += round_points
total_time += round_time
total_attempts += round_attempts
active = another_round()
game_statistics(total_points, total_time, total_attempts, total_rounds)
def intro() -> None:
"""Welcomes user and prints the rules"""
print(line)
print("Welcome to Bulls and Cows!".upper())
print("Try to guess a random number I'm thinking of!")
print(line)
print("RULES:")
print("If you guess the right digit and its position, you get 1 bull.")
print("If you guess only the digit but not its position, you get 1 cow.")
def how_many_digits() -> int:
    """Asks the user how many digits the secret number should have"""
    print(line)
    print("How many digits would you like to guess?")
dig = 0
while dig not in range(1, 10):
try:
dig = int(input("Enter a number of digits (1-9): "))
except ValueError:
print("Invalid input! Please enter only numbers.")
return dig
def game_round(num_of_digits: int):
"""Performs 1 round - user is guessing 1 random number"""
secret_number = generate_number(num_of_digits)
# print(f"Secret number: {secret_number}")
attempts = 0
start_time = time()
while True:
give_up, user_tip = get_user_tip(num_of_digits)
if give_up:
r_time = time() - start_time
points = 0
            print_loser_info(attempts, r_time, secret_number)
break
attempts += 1
if user_tip == secret_number:
r_time = time() - start_time
points = 1
print_winner_info(attempts, r_time)
break
else:
check_bulls_cows(user_tip, secret_number)
return points, r_time, attempts
def generate_number(num_of_digits: int) -> str:
"""Returns a random number with specified number of digits"""
list_of_digits = []
while len(list_of_digits) < num_of_digits:
new_digit = str(randint(0, 9))
if new_digit not in list_of_digits:
list_of_digits.append(new_digit)
number = ''.join(list_of_digits)
print(line)
print(f"I'm thinking of a secret {num_of_digits}-digit number...")
return number
def get_user_tip(num_of_digits: int):
"""Asks user for a tip with a correct number of digits"""
while True:
        tip = input("\tEnter a number (or 'gu' to give up): ")
if tip.lower() == 'gu':
return True, None
elif tip.isdigit() and (len(tip) == num_of_digits):
return False, tip
else:
print(f"\t\tPlease enter a NUMBER with {num_of_digits} digits!")
def check_bulls_cows(tip: str, number: str) -> None:
    """Checks user's tip and prints the number of bulls and cows"""
    bulls = 0
    cows = 0
    # enumerate avoids the str.index pitfall, which only ever finds the first
    # occurrence of a repeated digit in the tip
    for position, digit in enumerate(tip):
        if number[position] == digit:
            bulls += 1
        elif digit in number:
            cows += 1
if bulls == 1:
print(f"\t\tYou have got {bulls} bull ", end='')
else:
print(f"\t\tYou have got {bulls} bulls ", end='')
if cows == 1:
print(f"and {cows} cow. Go on!")
else:
print(f"and {cows} cows. Go on!")
def print_loser_info(at: int, t: float, num: str) -> None:
    """Prints loser info, the correct number and round statistics"""
print(line)
print(f"SORRY! The secret number was {num}!")
print(f"\tAttempts: {at}")
print(f"\tTime: {int(t)} seconds")
def print_winner_info(at: int, t: float) -> None:
"""Prints winner info and round statistics"""
print(line)
print("CORRECT! You have guessed the right number!")
print(f"\tAttempts: {at}")
print(f"\tTime: {int(t)} seconds")
def another_round() -> bool:
    """Asks the user whether to play another round and returns True/False"""
    answer = input("Would you like to continue with another number? (Yes/No): ")
return answer.lower() == 'yes'
def game_statistics(t_points: int, t_time: float, t_attempts: int, t_rounds: int) -> None:
"""Prints statistics for all rounds"""
print(line)
print("FINAL STATISTICS")
print(f"\tYou have guessed {t_points} numbers of {t_rounds}!")
print(f"\tTotal number of attempts: {t_attempts}")
print(f"\tTotal time: {int(t_time/60)} min {int(t_time%60)} s")
print(line)
if __name__ == '__main__':
    main()
|
[
"humpomar@gmail.com"
] |
humpomar@gmail.com
|
0d1bdfd550423442380a1ddcf9fc6a915ee42eb2
|
9b20743ec6cd28d749a4323dcbadb1a0cffb281b
|
/12_Xgboost_with_Python/02/horse_colic_missing_imputer.py
|
96a95a5b6d0d200d1203178ab266141871a71bb4
|
[] |
no_license
|
jggrimesdc-zz/MachineLearningExercises
|
6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178
|
ee265f1c6029c91daff172b3e7c1a96177646bc5
|
refs/heads/master
| 2023-03-07T19:30:26.691659
| 2021-02-19T08:00:49
| 2021-02-19T08:00:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,306
|
py
|
# binary classification, missing data, impute with mean
import numpy
from pandas import read_csv
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBClassifier
# load data
dataframe = read_csv("horse-colic.csv", delim_whitespace=True, header=None)
dataset = dataframe.values
# split data into X and y
X = dataset[:, 0:27]
Y = dataset[:, 27]
# set missing values to NaN
X[X == '?'] = numpy.nan
# convert to numeric
X = X.astype('float32')
# impute missing values as the mean
imputer = SimpleImputer()
imputed_x = imputer.fit_transform(X)
# encode Y class values as integers
label_encoder = LabelEncoder()
label_encoder = label_encoder.fit(Y)
label_encoded_y = label_encoder.transform(Y)
# split data into train and test sets
seed = 7
test_size = 0.33
X_train, X_test, y_train, y_test = train_test_split(imputed_x, label_encoded_y, test_size=test_size, random_state=seed)
# fit model on training data
model = XGBClassifier()
model.fit(X_train, y_train)
print(model)
# make predictions for test data
predictions = model.predict(X_test)
# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
|
[
"jgrimes@jgrimes.tech"
] |
jgrimes@jgrimes.tech
|
8e5fb965f10bfc86c9000fb0abbaa39ad1b9528d
|
b9b83b870b00be690cd69521d1eddc74630301af
|
/mf6/test048_lgr3d_unconfB/postprocess.py
|
f97b3a3f0fb37d0d9a0a0d01474a818c5aaad3ec
|
[] |
no_license
|
dsimonMMA/modflow6-testmodels
|
758d213d1e482a49789bb757623eed65cee66526
|
88ebaa9c0c983e91eb5f327e2f3b7729e20ee425
|
refs/heads/master
| 2023-05-31T20:28:34.079344
| 2021-06-24T21:34:13
| 2021-06-24T21:34:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,926
|
py
|
import os
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['figure.figsize'] = (20.0, 16.0)
import flopy
import flopy.utils.binaryfile as bf
# mf6
pth = os.path.join('.')
# mf6 parent
hnoflo = -999.
headfile = os.path.join(pth, 'parent.output.hds')
headobj = bf.HeadFile(headfile, precision='double')
phead6 = headobj.get_data(kstpkper=(0, 0))
phead6 = np.ma.masked_where(phead6 == hnoflo, phead6)
gmin = phead6.min()
gmax = phead6.max()
nlayp, nrowp, ncolp = phead6.shape
print('Parent nlay, nrow, ncol: ({}, {}, {})'.format(nlayp, nrowp, ncolp))
# mf6 child
hnoflo = -333.
headfile = os.path.join(pth, 'child.output.hds')
headobj = bf.HeadFile(headfile, precision='double')
chead6 = headobj.get_data(kstpkper=(0, 0))
chead6 = np.ma.masked_where(chead6 == hnoflo, chead6)
gmin = min(gmin, chead6.min())
gmax = max(gmax, chead6.max())
nlayc, nrowc, ncolc = chead6.shape
print('Child nlay, nrow, ncol: ({}, {}, {})'.format(nlayc, nrowc, ncolc))
# mflgr
pth = os.path.join('.', 'mflgr')
# mflgr parent
hnoflo = -999.
headfile = os.path.join(pth, 'testLgr_3d_parent.hed')
headobj = bf.HeadFile(headfile, precision='double')
pheadlgr = headobj.get_data(kstpkper=(0, 0))
pheadlgr = np.ma.masked_where(pheadlgr == hnoflo, pheadlgr)
gmin = min(gmin, pheadlgr.min())
gmax = max(gmax, pheadlgr.max())
# mflgr child
hnoflo = -333.
headfile = os.path.join(pth, 'testLgr_3d_child.hed')
headobj = bf.HeadFile(headfile, precision='double')
cheadlgr = headobj.get_data(kstpkper=(0, 0))
cheadlgr = np.ma.masked_where(cheadlgr == hnoflo, cheadlgr)
gmin = min(gmin, cheadlgr.min())
gmax = max(gmax, cheadlgr.max())
print('mf6 parent head min/max:', phead6.min(), phead6.max())
print('mf6 child head min/max:', chead6.min(), chead6.max())
print('mflgr parent head min/max:', pheadlgr.min(), pheadlgr.max())
print('mflgr child head min/max:', cheadlgr.min(), cheadlgr.max())
pextent = (0, ncolp * 1500., 0, nrowp * 1200.)
cextent = (2 * 1500., 4 * 1500., 3 * 1200., 5 * 1200.)
print('pextent', pextent)
print('cextent', cextent)
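# The figure built below is a 3 x 3 grid: one row per layer (1-3), with
# columns for MF6 heads, MFLGR heads, and their difference (MF 6 - MFLGR).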
iplot = 1
ilay = 0
# mf6 heads
plt.subplot(3, 3, iplot, aspect='equal')
plt.imshow(phead6[ilay, :, :], interpolation='nearest', extent=pextent, vmin=gmin, vmax=gmax)
plt.imshow(chead6[ilay, :, :], interpolation='nearest', extent=cextent, vmin=gmin, vmax=gmax)
plt.gca().set_xlim(pextent[0], pextent[1])
plt.gca().set_ylim(pextent[2], pextent[3])
plt.colorbar(shrink=0.5)
plt.title('MF 6 Layer {}'.format(ilay + 1))
iplot += 1
#mflgr heads
plt.subplot(3, 3, iplot, aspect='equal')
plt.imshow(pheadlgr[ilay, :, :], interpolation='nearest', extent=pextent, vmin=gmin, vmax=gmax)
plt.imshow(cheadlgr[ilay, :, :], interpolation='nearest', extent=cextent, vmin=gmin, vmax=gmax)
plt.gca().set_xlim(pextent[0], pextent[1])
plt.gca().set_ylim(pextent[2], pextent[3])
plt.colorbar(shrink=0.5)
plt.title('MFLGR Layer {}'.format(ilay + 1))
iplot += 1
#difference heads
plt.subplot(3, 3, iplot, aspect='equal')
plt.imshow(phead6[ilay, :, :] - pheadlgr[ilay, :, :], interpolation='nearest', extent=pextent)
plt.imshow(chead6[ilay, :, :] - cheadlgr[ilay, :, :], interpolation='nearest', extent=cextent)
plt.gca().set_xlim(pextent[0], pextent[1])
plt.gca().set_ylim(pextent[2], pextent[3])
plt.colorbar(shrink=0.5)
plt.title('MF 6 - MFLGR Layer {}'.format(ilay + 1))
iplot += 1
ilay = 1
# mf6 heads
plt.subplot(3, 3, iplot, aspect='equal')
plt.imshow(phead6[ilay, :, :], interpolation='nearest', extent=pextent, vmin=gmin, vmax=gmax)
plt.imshow(chead6[ilay, :, :], interpolation='nearest', extent=cextent, vmin=gmin, vmax=gmax)
plt.gca().set_xlim(pextent[0], pextent[1])
plt.gca().set_ylim(pextent[2], pextent[3])
plt.colorbar(shrink=0.5)
plt.title('MF 6 Layer {}'.format(ilay + 1))
iplot += 1
#mflgr heads
plt.subplot(3, 3, iplot, aspect='equal')
plt.imshow(pheadlgr[ilay, :, :], interpolation='nearest', extent=pextent, vmin=gmin, vmax=gmax)
plt.imshow(cheadlgr[4, :, :], interpolation='nearest', extent=cextent, vmin=gmin, vmax=gmax)
plt.gca().set_xlim(pextent[0], pextent[1])
plt.gca().set_ylim(pextent[2], pextent[3])
plt.colorbar(shrink=0.5)
plt.title('MFLGR Layer {}'.format(ilay + 1))
iplot += 1
#difference heads
plt.subplot(3, 3, iplot, aspect='equal')
plt.imshow(phead6[ilay, :, :] - pheadlgr[ilay, :, :], interpolation='nearest', extent=pextent)
plt.imshow(chead6[4, :, :] - cheadlgr[4, :, :], interpolation='nearest', extent=cextent)
plt.gca().set_xlim(pextent[0], pextent[1])
plt.gca().set_ylim(pextent[2], pextent[3])
plt.colorbar(shrink=0.5)
plt.title('MF 6 - MFLGR Layer {}'.format(ilay + 1))
iplot += 1
ilay = 2
# mf6 heads
plt.subplot(3, 3, iplot, aspect='equal')
plt.imshow(phead6[ilay, :, :], interpolation='nearest', extent=pextent, vmin=gmin, vmax=gmax)
#plt.imshow(chead6[ilay, :, :], interpolation='nearest', extent=cextent, vmin=gmin, vmax=gmax)
plt.gca().set_xlim(pextent[0], pextent[1])
plt.gca().set_ylim(pextent[2], pextent[3])
plt.colorbar(shrink=0.5)
plt.title('MF 6 Layer {}'.format(ilay + 1))
iplot += 1
#mflgr heads
plt.subplot(3, 3, iplot, aspect='equal')
plt.imshow(pheadlgr[ilay, :, :], interpolation='nearest', extent=pextent, vmin=gmin, vmax=gmax)
#plt.imshow(cheadlgr[ilay, :, :], interpolation='nearest', extent=cextent, vmin=gmin, vmax=gmax)
plt.gca().set_xlim(pextent[0], pextent[1])
plt.gca().set_ylim(pextent[2], pextent[3])
plt.colorbar(shrink=0.5)
plt.title('MFLGR Layer {}'.format(ilay + 1))
iplot += 1
#difference heads
plt.subplot(3, 3, iplot, aspect='equal')
plt.imshow(phead6[ilay, :, :] - pheadlgr[ilay, :, :], interpolation='nearest', extent=pextent)
#plt.imshow(chead6[ilay, :, :] - cheadlgr[ilay, :, :], interpolation='nearest', extent=cextent)
plt.gca().set_xlim(pextent[0], pextent[1])
plt.gca().set_ylim(pextent[2], pextent[3])
plt.colorbar(shrink=0.5)
plt.title('MF 6 - MFLGR Layer {}'.format(ilay + 1))
iplot += 1
fname = 'postprocess.png'
print('saving results to {}'.format(fname))
plt.savefig(fname)
|
[
"jdhughes@usgs.gov"
] |
jdhughes@usgs.gov
|
900d504e9740a80146b6859ae0bc27cfed4f315a
|
388a83be90c835d1d0e1808c94109bcf89a8a384
|
/08_Stability/generate_hale_stability.py
|
9994f0e579638bfc44d694bfae978ad022374af4
|
[] |
no_license
|
ngoiz/hale-mpc
|
1d67e43c1a4222d058a9c4c5f5a21bbd346eb986
|
6aaab3d4c47faffce89de1d5234fdd8cc78df6ff
|
refs/heads/master
| 2023-01-09T13:51:41.618055
| 2020-11-07T16:33:22
| 2020-11-07T16:33:22
| 281,190,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 42,262
|
py
|
#! /usr/bin/env python3
import h5py as h5
import sharpy.sharpy_main
import numpy as np
import os
import sharpy.utils.algebra as algebra
# alpha_deg = 0
for alpha_deg in [4.]:
case_name = 'simple_HALE_uvlm_alpha{:04g}'.format(alpha_deg * 100)
route = os.path.dirname(os.path.realpath(__file__)) + '/'
# EXECUTION
flow = ['BeamLoader',
'AerogridLoader',
# 'NonLinearStatic',
'StaticUvlm',
# 'StaticTrim',
# 'StaticCoupled',
# 'BeamLoads',
'AerogridPlot',
'BeamPlot',
# 'DynamicCoupled',
'Modal',
'LinearAssembler',
'AsymptoticStability',
# 'SaveData',
# 'LinDynamicSim',
]
# if free_flight is False, the motion of the centre of the wing is prescribed.
free_flight = True
if not free_flight:
case_name += '_prescribed'
amplitude = 0*np.pi/180
period = 3
case_name += '_amp_' + str(amplitude).replace('.', '') + '_period_' + str(period)
lumped_mass_factor = 1
case_name += '_lm{:g}'.format(lumped_mass_factor)
## ROM
rom = True
# linear settings
num_modes = 20
case_name += '_rom{:g}_nmodes{:g}'.format(rom, num_modes)
# FLIGHT CONDITIONS
# the simulation is set such that the aircraft flies at a u_inf velocity while
# the air is calm.
u_inf = 10
rho = 1.225
# trim sigma = 1.5
alpha = alpha_deg * np.pi/180
beta = 0
roll = 0
gravity = 'on'
cs_deflection = 0 #-2.0249*np.pi/180
rudder_static_deflection = 0.0
rudder_step = 0.0*np.pi/180
thrust = 5.9443
sigma = 1.5
lambda_dihedral = 20*np.pi/180
# 8 | 4.1515 | -1.1249 | 4.7300 | 0.0088 | -0.0000 | 0.0005 | 0.0000 | -0.0005 | 0.0000
# m = 16
# alpha = 4.1515 * np.pi / 180
# cs_deflection = -1.1249 * np.pi / 180
# thrust = 4.7300
# gust settings
gust_intensity = 0.20
gust_length = 1*u_inf
gust_offset = 0.5*u_inf
# numerics
n_step = 5
structural_relaxation_factor = 0.6
relaxation_factor = 0.35
tolerance = 1e-6
fsi_tolerance = 1e-4
num_cores = 2
# MODEL GEOMETRY
# beam
span_main = 16.0
lambda_main = 0.25
ea_main = 0.3
ea = 1e7
ga = 1e5
gj = 1e4
eiy = 2e4
eiz = 4e6
m_bar_main = 0.75
j_bar_main = 0.075
length_fuselage = 10
offset_fuselage = 0
sigma_fuselage = 10
m_bar_fuselage = 0.2
j_bar_fuselage = 0.08
span_tail = 2.5
ea_tail = 0.5
fin_height = 2.5
ea_fin = 0.5
sigma_tail = 100
m_bar_tail = 0.3
j_bar_tail = 0.08
# lumped masses
n_lumped_mass = 1
lumped_mass_nodes = np.zeros((n_lumped_mass, ), dtype=int)
lumped_mass = np.zeros((n_lumped_mass, ))
lumped_mass[0] = 50 * lumped_mass_factor
lumped_mass_inertia = np.zeros((n_lumped_mass, 3, 3))
lumped_mass_position = np.zeros((n_lumped_mass, 3))
# aero
chord_main = 1.0
chord_tail = 0.5
chord_fin = 0.5
# DISCRETISATION
# spatial discretisation
# chordiwse panels
m = 8
m_star_factor = 10
# spanwise elements
n_elem_multiplier = 4
n_elem_main = int(4*n_elem_multiplier)
n_elem_tail = int(2*n_elem_multiplier)
n_elem_fin = int(2*n_elem_multiplier)
n_elem_fuselage = int(2*n_elem_multiplier)
n_surfaces = 5
# temporal discretisation
physical_time = 30
tstep_factor = 1.
dt = 1.0/m/u_inf*tstep_factor
n_tstep = round(physical_time/dt)
# linear files
elevator = 30 * np.pi / 180
rudder = 25 * np.pi / 180
# END OF INPUT-----------------------------------------------------------------
# case files folder
cases_folder = route + '/cases/' + case_name + '/'
if not os.path.isdir(cases_folder):
os.makedirs(cases_folder, exist_ok=True)
# beam processing
n_node_elem = 3
span_main1 = (1.0 - lambda_main)*span_main
span_main2 = lambda_main*span_main
n_elem_main1 = round(n_elem_main*(1 - lambda_main))
n_elem_main2 = n_elem_main - n_elem_main1
# total number of elements
n_elem = 0
n_elem += n_elem_main1 + n_elem_main1
n_elem += n_elem_main2 + n_elem_main2
n_elem += n_elem_fuselage
n_elem += n_elem_fin
n_elem += n_elem_tail + n_elem_tail
# number of nodes per part
n_node_main1 = n_elem_main1*(n_node_elem - 1) + 1
n_node_main2 = n_elem_main2*(n_node_elem - 1) + 1
n_node_main = n_node_main1 + n_node_main2 - 1
n_node_fuselage = n_elem_fuselage*(n_node_elem - 1) + 1
n_node_fin = n_elem_fin*(n_node_elem - 1) + 1
n_node_tail = n_elem_tail*(n_node_elem - 1) + 1
# total number of nodes
n_node = 0
n_node += n_node_main1 + n_node_main1 - 1
n_node += n_node_main2 - 1 + n_node_main2 - 1
n_node += n_node_fuselage - 1
n_node += n_node_fin - 1
n_node += n_node_tail - 1
n_node += n_node_tail - 1
# stiffness and mass matrices
n_stiffness = 3
base_stiffness_main = sigma*np.diag([ea, ga, ga, gj, eiy, eiz])
base_stiffness_fuselage = base_stiffness_main.copy()*sigma_fuselage
base_stiffness_fuselage[4, 4] = base_stiffness_fuselage[5, 5]
base_stiffness_tail = base_stiffness_main.copy()*sigma_tail
base_stiffness_tail[4, 4] = base_stiffness_tail[5, 5]
n_mass = 3
base_mass_main = np.diag([m_bar_main, m_bar_main, m_bar_main, j_bar_main, 0.5*j_bar_main, 0.5*j_bar_main])
base_mass_fuselage = np.diag([m_bar_fuselage,
m_bar_fuselage,
m_bar_fuselage,
j_bar_fuselage,
j_bar_fuselage*0.5,
j_bar_fuselage*0.5])
base_mass_tail = np.diag([m_bar_tail,
m_bar_tail,
m_bar_tail,
j_bar_tail,
j_bar_tail*0.5,
j_bar_tail*0.5])
# PLACEHOLDERS
# beam
x = np.zeros((n_node, ))
y = np.zeros((n_node, ))
z = np.zeros((n_node, ))
beam_number = np.zeros((n_elem, ), dtype=int)
frame_of_reference_delta = np.zeros((n_elem, n_node_elem, 3))
structural_twist = np.zeros((n_elem, 3))
conn = np.zeros((n_elem, n_node_elem), dtype=int)
stiffness = np.zeros((n_stiffness, 6, 6))
elem_stiffness = np.zeros((n_elem, ), dtype=int)
mass = np.zeros((n_mass, 6, 6))
elem_mass = np.zeros((n_elem, ), dtype=int)
boundary_conditions = np.zeros((n_node, ), dtype=int)
app_forces = np.zeros((n_node, 6))
# aero
airfoil_distribution = np.zeros((n_elem, n_node_elem), dtype=int)
surface_distribution = np.zeros((n_elem,), dtype=int) - 1
surface_m = np.zeros((n_surfaces, ), dtype=int)
m_distribution = 'uniform'
aero_node = np.zeros((n_node,), dtype=bool)
twist = np.zeros((n_elem, n_node_elem))
sweep = np.zeros((n_elem, n_node_elem))
chord = np.zeros((n_elem, n_node_elem,))
elastic_axis = np.zeros((n_elem, n_node_elem,))
# linear time domain vectors
# for m = 16 only
input_vec = np.zeros((10, num_modes * 3 + 4))
x0 = np.zeros(10)
input_vec[5:, 2 * num_modes] = elevator
input_vec[5:, 2 * num_modes + 1] = rudder
# FUNCTIONS-------------------------------------------------------------
def clean_test_files():
fem_file_name = cases_folder + case_name + '.fem.h5'
if os.path.isfile(fem_file_name):
os.remove(fem_file_name)
dyn_file_name = cases_folder + case_name + '.dyn.h5'
if os.path.isfile(dyn_file_name):
os.remove(dyn_file_name)
aero_file_name = cases_folder + case_name + '.aero.h5'
if os.path.isfile(aero_file_name):
os.remove(aero_file_name)
solver_file_name = cases_folder + case_name + '.sharpy'
if os.path.isfile(solver_file_name):
os.remove(solver_file_name)
flightcon_file_name = cases_folder + case_name + '.flightcon.txt'
if os.path.isfile(flightcon_file_name):
os.remove(flightcon_file_name)
linear_files = cases_folder + case_name + '.lininput.h5'
if os.path.isfile(linear_files):
os.remove(linear_files)
def generate_dyn_file():
global dt
global n_tstep
global route
global case_name
global num_elem
global num_node_elem
global num_node
global amplitude
global period
global free_flight
dynamic_forces_time = None
with_dynamic_forces = False
with_forced_vel = False
if not free_flight:
with_forced_vel = True
if with_dynamic_forces:
f1 = 100
dynamic_forces = np.zeros((num_node, 6))
app_node = [int(num_node_main - 1), int(num_node_main)]
dynamic_forces[app_node, 2] = f1
force_time = np.zeros((n_tstep, ))
limit = round(0.05/dt)
force_time[50:61] = 1
dynamic_forces_time = np.zeros((n_tstep, num_node, 6))
for it in range(n_tstep):
dynamic_forces_time[it, :, :] = force_time[it]*dynamic_forces
forced_for_vel = None
if with_forced_vel:
forced_for_vel = np.zeros((n_tstep, 6))
forced_for_acc = np.zeros((n_tstep, 6))
for it in range(n_tstep):
# if dt*it < period:
# forced_for_vel[it, 2] = 2*np.pi/period*amplitude*np.sin(2*np.pi*dt*it/period)
# forced_for_acc[it, 2] = (2*np.pi/period)**2*amplitude*np.cos(2*np.pi*dt*it/period)
forced_for_vel[it, 3] = 2*np.pi/period*amplitude*np.sin(2*np.pi*dt*it/period)
forced_for_acc[it, 3] = (2*np.pi/period)**2*amplitude*np.cos(2*np.pi*dt*it/period)
if with_dynamic_forces or with_forced_vel:
with h5.File(cases_folder + case_name + '.dyn.h5', 'a') as h5file:
if with_dynamic_forces:
h5file.create_dataset(
'dynamic_forces', data=dynamic_forces_time)
if with_forced_vel:
h5file.create_dataset(
'for_vel', data=forced_for_vel)
h5file.create_dataset(
'for_acc', data=forced_for_acc)
h5file.create_dataset(
'num_steps', data=n_tstep)
def generate_fem():
stiffness[0, ...] = base_stiffness_main
stiffness[1, ...] = base_stiffness_fuselage
stiffness[2, ...] = base_stiffness_tail
mass[0, ...] = base_mass_main
mass[1, ...] = base_mass_fuselage
mass[2, ...] = base_mass_tail
we = 0
wn = 0
# inner right wing
beam_number[we:we + n_elem_main1] = 0
y[wn:wn + n_node_main1] = np.linspace(0.0, span_main1, n_node_main1)
for ielem in range(n_elem_main1):
conn[we + ielem, :] = ((np.ones((3, ))*(we + ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [-1.0, 0.0, 0.0]
elem_stiffness[we:we + n_elem_main1] = 0
elem_mass[we:we + n_elem_main1] = 0
boundary_conditions[0] = 1
# remember this is in B FoR
app_forces[0] = [0, thrust, 0, 0, 0, 0]
we += n_elem_main1
wn += n_node_main1
# outer right wing
beam_number[we:we + n_elem_main1] = 0
y[wn:wn + n_node_main2 - 1] = y[wn - 1] + np.linspace(0.0, np.cos(lambda_dihedral)*span_main2, n_node_main2)[1:]
z[wn:wn + n_node_main2 - 1] = z[wn - 1] + np.linspace(0.0, np.sin(lambda_dihedral)*span_main2, n_node_main2)[1:]
for ielem in range(n_elem_main2):
conn[we + ielem, :] = ((np.ones((3, ))*(we + ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [-1.0, 0.0, 0.0]
elem_stiffness[we:we + n_elem_main2] = 0
elem_mass[we:we + n_elem_main2] = 0
boundary_conditions[wn + n_node_main2 - 2] = -1
we += n_elem_main2
wn += n_node_main2 - 1
# inner left wing
beam_number[we:we + n_elem_main1 - 1] = 1
y[wn:wn + n_node_main1 - 1] = np.linspace(0.0, -span_main1, n_node_main1)[1:]
for ielem in range(n_elem_main1):
conn[we + ielem, :] = ((np.ones((3, ))*(we+ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [1.0, 0.0, 0.0]
conn[we, 0] = 0
elem_stiffness[we:we + n_elem_main1] = 0
elem_mass[we:we + n_elem_main1] = 0
we += n_elem_main1
wn += n_node_main1 - 1
# outer left wing
beam_number[we:we + n_elem_main2] = 1
y[wn:wn + n_node_main2 - 1] = y[wn - 1] + np.linspace(0.0, -np.cos(lambda_dihedral)*span_main2, n_node_main2)[1:]
z[wn:wn + n_node_main2 - 1] = z[wn - 1] + np.linspace(0.0, np.sin(lambda_dihedral)*span_main2, n_node_main2)[1:]
for ielem in range(n_elem_main2):
conn[we + ielem, :] = ((np.ones((3, ))*(we+ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [1.0, 0.0, 0.0]
elem_stiffness[we:we + n_elem_main2] = 0
elem_mass[we:we + n_elem_main2] = 0
boundary_conditions[wn + n_node_main2 - 2] = -1
we += n_elem_main2
wn += n_node_main2 - 1
# fuselage
beam_number[we:we + n_elem_fuselage] = 2
x[wn:wn + n_node_fuselage - 1] = np.linspace(0.0, length_fuselage, n_node_fuselage)[1:]
z[wn:wn + n_node_fuselage - 1] = np.linspace(0.0, offset_fuselage, n_node_fuselage)[1:]
for ielem in range(n_elem_fuselage):
conn[we + ielem, :] = ((np.ones((3,))*(we + ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [0.0, 1.0, 0.0]
conn[we, 0] = 0
elem_stiffness[we:we + n_elem_fuselage] = 1
elem_mass[we:we + n_elem_fuselage] = 1
we += n_elem_fuselage
wn += n_node_fuselage - 1
global end_of_fuselage_node
end_of_fuselage_node = wn - 1
# fin
beam_number[we:we + n_elem_fin] = 3
x[wn:wn + n_node_fin - 1] = x[end_of_fuselage_node]
z[wn:wn + n_node_fin - 1] = z[end_of_fuselage_node] + np.linspace(0.0, fin_height, n_node_fin)[1:]
for ielem in range(n_elem_fin):
conn[we + ielem, :] = ((np.ones((3,))*(we + ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [-1.0, 0.0, 0.0]
conn[we, 0] = end_of_fuselage_node
elem_stiffness[we:we + n_elem_fin] = 2
elem_mass[we:we + n_elem_fin] = 2
we += n_elem_fin
wn += n_node_fin - 1
end_of_fin_node = wn - 1
# right tail
beam_number[we:we + n_elem_tail] = 4
x[wn:wn + n_node_tail - 1] = x[end_of_fin_node]
y[wn:wn + n_node_tail - 1] = np.linspace(0.0, span_tail, n_node_tail)[1:]
z[wn:wn + n_node_tail - 1] = z[end_of_fin_node]
for ielem in range(n_elem_tail):
conn[we + ielem, :] = ((np.ones((3, ))*(we + ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [-1.0, 0.0, 0.0]
conn[we, 0] = end_of_fin_node
elem_stiffness[we:we + n_elem_tail] = 2
elem_mass[we:we + n_elem_tail] = 2
boundary_conditions[wn + n_node_tail - 2] = -1
we += n_elem_tail
wn += n_node_tail - 1
# left tail
beam_number[we:we + n_elem_tail] = 5
x[wn:wn + n_node_tail - 1] = x[end_of_fin_node]
y[wn:wn + n_node_tail - 1] = np.linspace(0.0, -span_tail, n_node_tail)[1:]
z[wn:wn + n_node_tail - 1] = z[end_of_fin_node]
for ielem in range(n_elem_tail):
conn[we + ielem, :] = ((np.ones((3, ))*(we + ielem)*(n_node_elem - 1)) +
[0, 2, 1])
for inode in range(n_node_elem):
frame_of_reference_delta[we + ielem, inode, :] = [1.0, 0.0, 0.0]
conn[we, 0] = end_of_fin_node
elem_stiffness[we:we + n_elem_tail] = 2
elem_mass[we:we + n_elem_tail] = 2
boundary_conditions[wn + n_node_tail - 2] = -1
we += n_elem_tail
wn += n_node_tail - 1
with h5.File(cases_folder + case_name + '.fem.h5', 'a') as h5file:
coordinates = h5file.create_dataset('coordinates', data=np.column_stack((x, y, z)))
conectivities = h5file.create_dataset('connectivities', data=conn)
num_nodes_elem_handle = h5file.create_dataset(
'num_node_elem', data=n_node_elem)
num_nodes_handle = h5file.create_dataset(
'num_node', data=n_node)
num_elem_handle = h5file.create_dataset(
'num_elem', data=n_elem)
stiffness_db_handle = h5file.create_dataset(
'stiffness_db', data=stiffness)
stiffness_handle = h5file.create_dataset(
'elem_stiffness', data=elem_stiffness)
mass_db_handle = h5file.create_dataset(
'mass_db', data=mass)
mass_handle = h5file.create_dataset(
'elem_mass', data=elem_mass)
frame_of_reference_delta_handle = h5file.create_dataset(
'frame_of_reference_delta', data=frame_of_reference_delta)
structural_twist_handle = h5file.create_dataset(
'structural_twist', data=structural_twist)
bocos_handle = h5file.create_dataset(
'boundary_conditions', data=boundary_conditions)
beam_handle = h5file.create_dataset(
'beam_number', data=beam_number)
app_forces_handle = h5file.create_dataset(
'app_forces', data=app_forces)
lumped_mass_nodes_handle = h5file.create_dataset(
'lumped_mass_nodes', data=lumped_mass_nodes)
lumped_mass_handle = h5file.create_dataset(
'lumped_mass', data=lumped_mass)
lumped_mass_inertia_handle = h5file.create_dataset(
'lumped_mass_inertia', data=lumped_mass_inertia)
lumped_mass_position_handle = h5file.create_dataset(
'lumped_mass_position', data=lumped_mass_position)
def generate_aero_file():
global x, y, z
# control surfaces
n_control_surfaces = 2
control_surface = np.zeros((n_elem, n_node_elem), dtype=int) - 1
control_surface_type = np.zeros((n_control_surfaces, ), dtype=int)
control_surface_deflection = np.zeros((n_control_surfaces, ))
control_surface_chord = np.zeros((n_control_surfaces, ), dtype=int)
control_surface_hinge_coord = np.zeros((n_control_surfaces, ), dtype=float)
# control surface type 0 = static
# control surface type 1 = dynamic
control_surface_type[0] = 0
control_surface_deflection[0] = cs_deflection
control_surface_chord[0] = m # m
control_surface_hinge_coord[0] = -0.25 * 1 # nondimensional wrt elastic axis (+ towards the trailing edge)
control_surface_type[1] = 0
control_surface_deflection[1] = rudder_static_deflection
control_surface_chord[1] = m // 2 #1
control_surface_hinge_coord[1] = -0. # nondimensional wrt elastic axis (+ towards the trailing edge)
we = 0
wn = 0
# right wing (surface 0, beam 0)
i_surf = 0
airfoil_distribution[we:we + n_elem_main, :] = 0
surface_distribution[we:we + n_elem_main] = i_surf
surface_m[i_surf] = m
aero_node[wn:wn + n_node_main] = True
temp_chord = np.linspace(chord_main, chord_main, n_node_main)
temp_sweep = np.linspace(0.0, 0*np.pi/180, n_node_main)
node_counter = 0
for i_elem in range(we, we + n_elem_main):
for i_local_node in range(n_node_elem):
if not i_local_node == 0:
node_counter += 1
chord[i_elem, i_local_node] = temp_chord[node_counter]
elastic_axis[i_elem, i_local_node] = ea_main
sweep[i_elem, i_local_node] = temp_sweep[node_counter]
we += n_elem_main
wn += n_node_main
# left wing (surface 1, beam 1)
i_surf = 1
airfoil_distribution[we:we + n_elem_main, :] = 0
# airfoil_distribution[wn:wn + n_node_main - 1] = 0
surface_distribution[we:we + n_elem_main] = i_surf
surface_m[i_surf] = m
aero_node[wn:wn + n_node_main - 1] = True
# chord[wn:wn + num_node_main - 1] = np.linspace(main_chord, main_tip_chord, num_node_main)[1:]
# chord[wn:wn + num_node_main - 1] = main_chord
# elastic_axis[wn:wn + num_node_main - 1] = main_ea
temp_chord = np.linspace(chord_main, chord_main, n_node_main)
node_counter = 0
for i_elem in range(we, we + n_elem_main):
for i_local_node in range(n_node_elem):
if not i_local_node == 0:
node_counter += 1
chord[i_elem, i_local_node] = temp_chord[node_counter]
elastic_axis[i_elem, i_local_node] = ea_main
sweep[i_elem, i_local_node] = -temp_sweep[node_counter]
we += n_elem_main
wn += n_node_main - 1
we += n_elem_fuselage
wn += n_node_fuselage - 1 - 1
#
# # fin (surface 2, beam 3)
i_surf = 2
airfoil_distribution[we:we + n_elem_fin, :] = 1
# airfoil_distribution[wn:wn + n_node_fin] = 0
surface_distribution[we:we + n_elem_fin] = i_surf
surface_m[i_surf] = m
aero_node[wn:wn + n_node_fin] = True
# chord[wn:wn + num_node_fin] = fin_chord
for i_elem in range(we, we + n_elem_fin):
for i_local_node in range(n_node_elem):
chord[i_elem, i_local_node] = chord_fin
elastic_axis[i_elem, i_local_node] = ea_fin
control_surface[i_elem, i_local_node] = 1
# twist[end_of_fuselage_node] = 0
# twist[wn:] = 0
# elastic_axis[wn:wn + num_node_main] = fin_ea
#remove last elem of the control surface
control_surface[i_elem, :] = -1
we += n_elem_fin
wn += n_node_fin - 1
#
# # # right tail (surface 3, beam 4)
i_surf = 3
airfoil_distribution[we:we + n_elem_tail, :] = 2
# airfoil_distribution[wn:wn + n_node_tail] = 0
surface_distribution[we:we + n_elem_tail] = i_surf
surface_m[i_surf] = m
# XXX not very elegant
aero_node[wn:] = True
# chord[wn:wn + num_node_tail] = tail_chord
# elastic_axis[wn:wn + num_node_main] = tail_ea
for i_elem in range(we, we + n_elem_tail):
for i_local_node in range(n_node_elem):
twist[i_elem, i_local_node] = -0
for i_elem in range(we, we + n_elem_tail):
for i_local_node in range(n_node_elem):
chord[i_elem, i_local_node] = chord_tail
elastic_axis[i_elem, i_local_node] = ea_tail
control_surface[i_elem, i_local_node] = 0
we += n_elem_tail
wn += n_node_tail
#
# # left tail (surface 4, beam 5)
i_surf = 4
airfoil_distribution[we:we + n_elem_tail, :] = 2
# airfoil_distribution[wn:wn + n_node_tail - 1] = 0
surface_distribution[we:we + n_elem_tail] = i_surf
surface_m[i_surf] = m
aero_node[wn:wn + n_node_tail - 1] = True
# chord[wn:wn + num_node_tail] = tail_chord
# elastic_axis[wn:wn + num_node_main] = tail_ea
# twist[we:we + num_elem_tail] = -tail_twist
for i_elem in range(we, we + n_elem_tail):
for i_local_node in range(n_node_elem):
twist[i_elem, i_local_node] = -0
for i_elem in range(we, we + n_elem_tail):
for i_local_node in range(n_node_elem):
chord[i_elem, i_local_node] = chord_tail
elastic_axis[i_elem, i_local_node] = ea_tail
control_surface[i_elem, i_local_node] = 0
we += n_elem_tail
wn += n_node_tail
with h5.File(cases_folder + case_name + '.aero.h5', 'a') as h5file:
airfoils_group = h5file.create_group('airfoils')
# add one airfoil
naca_airfoil_main = airfoils_group.create_dataset('0', data=np.column_stack(
generate_naca_camber(P=0, M=0)))
naca_airfoil_tail = airfoils_group.create_dataset('1', data=np.column_stack(
generate_naca_camber(P=0, M=0)))
naca_airfoil_fin = airfoils_group.create_dataset('2', data=np.column_stack(
generate_naca_camber(P=0, M=0)))
# chord
chord_input = h5file.create_dataset('chord', data=chord)
dim_attr = chord_input .attrs['units'] = 'm'
# twist
twist_input = h5file.create_dataset('twist', data=twist)
dim_attr = twist_input.attrs['units'] = 'rad'
# sweep
sweep_input = h5file.create_dataset('sweep', data=sweep)
dim_attr = sweep_input.attrs['units'] = 'rad'
# airfoil distribution
airfoil_distribution_input = h5file.create_dataset('airfoil_distribution', data=airfoil_distribution)
surface_distribution_input = h5file.create_dataset('surface_distribution', data=surface_distribution)
surface_m_input = h5file.create_dataset('surface_m', data=surface_m)
m_distribution_input = h5file.create_dataset('m_distribution', data=m_distribution.encode('ascii', 'ignore'))
aero_node_input = h5file.create_dataset('aero_node', data=aero_node)
elastic_axis_input = h5file.create_dataset('elastic_axis', data=elastic_axis)
control_surface_input = h5file.create_dataset('control_surface', data=control_surface)
control_surface_deflection_input = h5file.create_dataset('control_surface_deflection', data=control_surface_deflection)
control_surface_chord_input = h5file.create_dataset('control_surface_chord', data=control_surface_chord)
control_surface_hinge_coord_input = h5file.create_dataset('control_surface_hinge_coord', data=control_surface_hinge_coord)
control_surface_types_input = h5file.create_dataset('control_surface_type', data=control_surface_type)
def generate_naca_camber(M=0, P=0):
mm = M*1e-2
p = P*1e-1
def naca(x, mm, p):
if x < 1e-6:
return 0.0
elif x < p:
return mm/(p*p)*(2*p*x - x*x)
elif x > p and x < 1+1e-6:
return mm/((1-p)*(1-p))*(1 - 2*p + 2*p*x - x*x)
x_vec = np.linspace(0, 1, 1000)
y_vec = np.array([naca(x, mm, p) for x in x_vec])
return x_vec, y_vec
def generate_linear_sim_files(x0, input_vec):
with h5.File(cases_folder + '/' + case_name + '.lininput.h5', 'a') as h5file:
x0 = h5file.create_dataset(
'x0', data=x0)
u = h5file.create_dataset(
'u', data=input_vec)
def generate_solver_file():
file_name = cases_folder + case_name + '.sharpy'
settings = dict()
settings['SHARPy'] = {'case': case_name,
'route': cases_folder,
'flow': flow,
'write_screen': 'on',
'write_log': 'on',
'log_folder': route + '/output/' + case_name,
'log_file': case_name + '.log'}
settings['BeamLoader'] = {'unsteady': 'on',
'orientation': algebra.euler2quat(np.array([roll,
alpha,
beta]))}
settings['AerogridLoader'] = {'unsteady': 'on',
'aligned_grid': 'on',
'mstar': m_star_factor * m,
'freestream_dir': ['1', '0', '0']}
settings['NonLinearStatic'] = {'print_info': 'off',
'max_iterations': 150,
'num_load_steps': 1,
'delta_curved': 1e-1,
'min_delta': tolerance,
'gravity_on': gravity,
'gravity': 9.81}
settings['StaticUvlm'] = {'print_info': 'on',
'horseshoe': 'off',
'num_cores': num_cores,
'n_rollup': 0,
'rollup_dt': dt,
'rollup_aic_refresh': 1,
'vortex_radius': 1e-8,
'rollup_tolerance': 1e-4,
'velocity_field_generator': 'SteadyVelocityField',
'velocity_field_input': {'u_inf': u_inf,
'u_inf_direction': [1., 0, 0]},
'rho': rho}
settings['StaticCoupled'] = {'print_info': 'off',
'structural_solver': 'NonLinearStatic',
'structural_solver_settings': settings['NonLinearStatic'],
'aero_solver': 'StaticUvlm',
'aero_solver_settings': settings['StaticUvlm'],
'max_iter': 100,
'n_load_steps': n_step,
'tolerance': fsi_tolerance,
'relaxation_factor': structural_relaxation_factor}
settings['StaticTrim'] = {'solver': 'StaticCoupled',
'solver_settings': settings['StaticCoupled'],
'initial_alpha': alpha,
'initial_deflection': cs_deflection,
'initial_thrust': thrust}
settings['NonLinearDynamicCoupledStep'] = {'print_info': 'off',
'max_iterations': 950,
'delta_curved': 1e-1,
'min_delta': tolerance,
'newmark_damp': 5e-3,
'gravity_on': gravity,
'gravity': 9.81,
'num_steps': n_tstep,
'dt': dt,
'initial_velocity': u_inf}
settings['NonLinearDynamicPrescribedStep'] = {'print_info': 'off',
'max_iterations': 950,
'delta_curved': 1e-1,
'min_delta': tolerance,
'newmark_damp': 5e-3,
'gravity_on': gravity,
'gravity': 9.81,
'num_steps': n_tstep,
'dt': dt,
'initial_velocity': u_inf*int(free_flight)}
relative_motion = 'off'
if not free_flight:
relative_motion = 'on'
settings['StepUvlm'] = {'print_info': 'off',
'horseshoe': 'off',
'num_cores': num_cores,
'n_rollup': 0,
'convection_scheme': 2,
'rollup_dt': dt,
'rollup_aic_refresh': 1,
'rollup_tolerance': 1e-4,
'gamma_dot_filtering': 6,
'vortex_radius': 1e-8,
'velocity_field_generator': 'GustVelocityField',
'velocity_field_input': {'u_inf': int(not free_flight)*u_inf,
'u_inf_direction': [1., 0, 0],
'gust_shape': '1-cos',
'gust_length': gust_length,
'gust_intensity': gust_intensity*u_inf,
'offset': gust_offset,
'span': span_main,
'relative_motion': relative_motion},
'rho': rho,
'n_time_steps': n_tstep,
'dt': dt}
if free_flight:
solver = 'NonLinearDynamicCoupledStep'
else:
solver = 'NonLinearDynamicPrescribedStep'
settings['DynamicCoupled'] = {'structural_solver': solver,
'structural_solver_settings': settings[solver],
'aero_solver': 'StepUvlm',
'aero_solver_settings': settings['StepUvlm'],
'fsi_substeps': 200,
'fsi_tolerance': fsi_tolerance,
'relaxation_factor': relaxation_factor,
'minimum_steps': 1,
'relaxation_steps': 150,
'final_relaxation_factor': 0.5,
'n_time_steps': 1,
'dt': dt,
'include_unsteady_force_contribution': 'off',
'postprocessors': ['BeamLoads', 'BeamPlot', 'AerogridPlot'],
'postprocessors_settings': {'BeamLoads': {'folder': route + '/output/',
'csv_output': 'off'},
'BeamPlot': {'folder': route + '/output/',
'include_rbm': 'on',
'include_applied_forces': 'on'},
'AerogridPlot': {
'folder': route + '/output/',
'include_rbm': 'on',
'include_applied_forces': 'on',
'minus_m_star': 0},
}}
settings['BeamLoads'] = {'folder': route + '/output/',
'csv_output': 'off'}
settings['BeamPlot'] = {'folder': route + '/output/',
'include_rbm': 'on',
'include_applied_forces': 'on',
'include_forward_motion': 'on'}
settings['AerogridPlot'] = {'folder': route + '/output/',
'include_rbm': 'on',
'include_forward_motion': 'off',
'include_applied_forces': 'on',
'minus_m_star': 0,
'u_inf': u_inf,
'dt': dt}
settings['Modal'] = {'print_info': 'on',
'use_undamped_modes': 'on',
'NumLambda': num_modes,
'rigid_body_modes': free_flight,
'write_modes_vtk': 'on',
'print_matrices': 'off',
'write_data': 'on',
'rigid_modes_cg': 'on'}
settings['LinearAssembler'] = {'linear_system': 'LinearAeroelastic',
'linear_system_settings': {
'beam_settings': {'modal_projection': 'on',
'inout_coords': 'modes',
'discrete_time': 'on',
'newmark_damp': 5e-4,
'discr_method': 'newmark',
'dt': dt,
'proj_modes': 'undamped',
'use_euler': 'on',
'num_modes': num_modes,
'print_info': 'on',
'gravity': 'on',
'remove_dofs': []},
'aero_settings': {'dt': dt,
# 'ScalingDict': {'density': rho,
# 'length': chord_main * 0.5,
# 'speed': u_inf},
'integr_order': 2,
'density': rho,
'remove_predictor': 'off',
'use_sparse': 'on',
'vortex_radius': 1e-8,
'remove_inputs': ['u_gust']},
'rigid_body_motion': free_flight,
'use_euler': 'on',
}
}
if rom:
settings['LinearAssembler']['linear_system_settings']['aero_settings']['rom_method'] = ['Krylov']
settings['LinearAssembler']['linear_system_settings']['aero_settings']['rom_method_settings'] = {
'Krylov': {'algorithm': 'mimo_rational_arnoldi',
'frequency': [0.],
'r': 4,
'single_side': 'observability'}}
settings['AsymptoticStability'] = {'sys_id': 'LinearAeroelastic',
'print_info': 'on',
'modes_to_plot': [],
'display_root_locus': 'off',
'frequency_cutoff': 0,
'export_eigenvalues': 'off',
'num_evals': 40,
'folder': route + '/output/'}
settings['SaveData'] = {'folder': route + '/output/' + case_name + '/',
'save_aero': 'off',
'save_struct': 'off',
'save_linear': 'on',
'save_linear_uvlm': 'on',
'format': 'mat'
}
settings['LinDynamicSim'] = {'folder': route + '/output/',
'n_tsteps': 10,
'dt': dt,
'postprocessors': ['AerogridPlot'],
'postprocessors_settings':
{'AerogridPlot': {'folder': route + '/output/',
'include_rbm': 'on',
'include_applied_forces': 'on',
'minus_m_star': 0}, }
}
import configobj
config = configobj.ConfigObj()
config.filename = file_name
for k, v in settings.items():
config[k] = v
config.write()
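    # configobj writes the settings in INI style; an illustrative excerpt of the
    # resulting .sharpy file (actual values come from the variables above):
    # [SHARPy]
    # case = <case_name>
    # [BeamLoader]
    # unsteady = on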
clean_test_files()
generate_fem()
generate_aero_file()
generate_solver_file()
generate_dyn_file()
generate_linear_sim_files(x0, input_vec)
sharpy.sharpy_main.main(['', cases_folder + '/' + case_name + '.sharpy'])
|
[
"ng213@ic.ac.uk"
] |
ng213@ic.ac.uk
|
d6bc1b786c3c3972df4b12cc22656baf7af2b670
|
b5b665097ef54459d85d4cc4bf0748f885a1ccdc
|
/ml-for-trading/04-incomplete-data/01-fillnan.py
|
7623adc1227d9cbc1d8c200f4efe3762f064f6ca
|
[] |
no_license
|
dylanjorgensen/financial-engineering
|
76d769723fcc4eb26601e2abab9bcb8f60a49ef3
|
f66b47ee8fcc15b599eab3af5040975b1ec7add2
|
refs/heads/master
| 2021-01-13T07:28:43.790685
| 2016-10-19T05:18:34
| 2016-10-19T05:18:34
| 71,321,116
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,931
|
py
|
"""Fill missing values"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
def fill_missing_values(df_data):
"""Fill missing values in data frame, in place."""
##########################################################
    # Standard approach for this exercise: fill forward first so gaps take the
    # last known price (no peeking into the future), then fill backward to
    # handle any leading NaNs at the start of the series.
    df_data.fillna(method="ffill", inplace=True)
    df_data.fillna(method="bfill", inplace=True)
##########################################################
def symbol_to_path(symbol, base_dir="data"):
"""Return CSV file path given ticker symbol."""
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data(symbols, dates):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df_final = pd.DataFrame(index=dates)
if "SPY" not in symbols: # add SPY for reference, if absent
symbols.insert(0, "SPY")
for symbol in symbols:
file_path = symbol_to_path(symbol)
df_temp = pd.read_csv(file_path, parse_dates=True, index_col="Date",
usecols=["Date", "Adj Close"], na_values=["nan"])
df_temp = df_temp.rename(columns={"Adj Close": symbol})
df_final = df_final.join(df_temp)
if symbol == "SPY": # drop dates SPY did not trade
df_final = df_final.dropna(subset=["SPY"])
return df_final
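# Assumed on-disk layout (implied by symbol_to_path/get_data above): one CSV per
# symbol under data/, e.g. data/SPY.csv, with at least 'Date' and 'Adj Close'
# columns.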
def plot_data(df_data):
"""Plot stock data with appropriate axis labels."""
ax = df_data.plot(title="Stock Data", fontsize=2)
ax.set_xlabel("Date")
ax.set_ylabel("Price")
plt.show()
def test_run():
"""Function called by Test Run."""
# Read data
symbol_list = ["JAVA", "FAKE1", "FAKE2"] # list of symbols
start_date = "2005-12-31"
end_date = "2014-12-07"
dates = pd.date_range(start_date, end_date) # date range as index
df_data = get_data(symbol_list, dates) # get data for each symbol
# Fill missing values
fill_missing_values(df_data)
# Plot
plot_data(df_data)
if __name__ == "__main__":
test_run()
|
[
"dylan@dylanjorgensen.com"
] |
dylan@dylanjorgensen.com
|
b87ba1691e9a6848f0ea54cfff155612e89f2ccf
|
66fa2059c1aaddf2141318a867919da404085d9a
|
/ch8/9-4.py
|
5567e3acd75bbc46bb85d0f89945ed81d9e88bde
|
[] |
no_license
|
DX-ZZ/218341
|
c39733c21bd5f9234196100eaa776b3ddb71fe2a
|
7c47544ebe9841b6dd16e06d678d29e0c39edda0
|
refs/heads/master
| 2022-11-09T20:05:21.562849
| 2020-06-24T07:16:56
| 2020-06-24T07:16:56
| 274,601,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,730
|
py
|
# 9-4 Number of diners served
class Restaurant:
def __init__(self,restaurant_name, cuisine_type):
self.restaurant_name = restaurant_name
self.cuisine_type = cuisine_type
self.number_served = 0
def describe_restaurant(self):
print("Restaurant Name:" + self.restaurant_name.title())
print("Cuisine Type:" + self.cuisine_type.title())
def read_number_served(self):
print("我们服务了" + str(self.number_served) + "人.\n")
def set_number_served(self,number):
self.number_served = number
def increment_number_served(self,increment_number):
self.number_served = self.number_served + increment_number
restaurant = Restaurant("全聚德", "China")
restaurant.describe_restaurant()
restaurant.read_number_served()
restaurant.number_served = 10
restaurant.read_number_served()
restaurant.set_number_served(20)
restaurant.read_number_served()
restaurant.increment_number_served(30)
restaurant.read_number_served()
# 9-5 Login attempts
class User:
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
self.login_attempts = 0
def greet_user(self):
        name = self.first_name.title() + " " + self.last_name.title()
        print("Hello, " + name + ".")
    def increment_login_attempts(self):
        self.login_attempts = self.login_attempts + 1
def reset_login_attempts(self):
self.login_attempts = 0
user = User('bruce', 'li')
user.greet_user()
for you in range(2):
    user.increment_login_attempts()
print("You are already login " + str(user.login_attempts) + " times.")
user.reset_login_attempts()
print("Have login " + str(user.login_attempts) + " time.")
|
[
"648873230@qq.com"
] |
648873230@qq.com
|
d98f23952fc745b69292d4f2ba0b6e6226c561f9
|
d98268be398fdaa8de4cdafc5051deb690fd250b
|
/data_processors/team_hash.py
|
1071bc1ef214f36db62e39354afde34504c35a68
|
[] |
no_license
|
3dankateers/league
|
991181f5d7ff1fd453c89d851f525e7afe854830
|
93f5bd4a89472c974d0b51ac8a10162913c732eb
|
refs/heads/master
| 2021-05-07T18:07:51.858240
| 2018-08-09T07:09:28
| 2018-08-09T07:09:28
| 108,783,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
## returns hash value from a team
class TeamHash:
## returns hash key from list of summoner ids
@staticmethod
def calc_hash_key(summoners):
product = 1
for s in summoners:
            product *= int(s)  # Python 3: int has arbitrary precision (replaces the removed Py2 long)
return product
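## minimal usage sketch (made-up summoner ids):
## TeamHash.calc_hash_key([101, 7, 13]) -> 9191
## multiplication is commutative, so the key ignores player order; note the
## product is not collision-free (e.g. [2, 6] and [3, 4] both map to 12)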
|
[
"andr1357@gmail.com"
] |
andr1357@gmail.com
|
ca12a9af59ce7ffd8ee1dfc65f7a3c6fd730579d
|
5793ca31f8e52ccf0985c060017f65465da2392e
|
/network.py
|
a7d84ffcb620a988b6ab4a9f0df5c8b61f0cde65
|
[
"MIT"
] |
permissive
|
aliyildiz/networkScan
|
16170978c48905a7f5d269aea0bc351215710995
|
a66e32c2db246f7d04dc20b3d391f0170e59dbea
|
refs/heads/master
| 2021-09-04T15:33:25.582916
| 2018-01-19T22:56:46
| 2018-01-19T22:56:46
| 112,882,785
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,492
|
py
|
from scapy.all import *
import sys
import configparser
config=configparser.ConfigParser()
config.read('config.conf')
mac=[]
ip=[]
def printConf():
    for i in range(len(mac)):
        config['ARP'][ip[i]] = mac[i]
    with open('config.conf', 'w') as configfile:
        config.write(configfile)
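# After printConf runs, config.conf holds an [ARP] section mapping each
# discovered IP to its MAC address, roughly (illustrative entry):
# [ARP]
# 192.168.1.10 = aa:bb:cc:dd:ee:ff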
def findKey():
keyList=[]
for key in config['ARP']:
keyList.append(key)
return keyList
def findValue():
valueList=[]
for value in config['ARP'].values():
valueList.append(value)
return valueList
if(config['FIRSTTIME']['firsttime']=='0'):
config['FIRSTTIME']['firsttime']='1'
with open('config.conf','w') as configfile:
config.write(configfile)
sbx=config['SETTINGS']['subnet']
ans,unans=srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=sbx),timeout=2)
for s,r in ans:
mac.append(r.sprintf("%Ether.src%"))
ip.append(r.sprintf("%ARP.psrc%"))
printConf()
else:
key=findKey()
value=findValue()
subnet=config['SETTINGS']['subnet']
ans,unans=srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=subnet),timeout=2)
for s,r in ans:
mac.append(r.sprintf("%Ether.src%"))
ip.append(r.sprintf("%ARP.psrc%"))
    for i in range(len(mac)):
        if mac[i] in value:
            for j in range(len(value)):
                if mac[i] == value[j]:
                    if ip[i] != key[j]:
                        # Python 3: input() replaces raw_input(); prompts translated to English (y/n)
                        answer = input("A device's IP address (" + key[j] + ") has changed, do you want to update it?\n-New IP address=(" + ip[i] + ") (y/n):\n")
                        if answer == 'y' or answer == 'Y':
                            with open('config.conf', 'w') as configfile:
                                config.remove_option('ARP', key[j])
                                config.write(configfile)
                            config['ARP'][ip[i]] = mac[i]
                            with open('config.conf', 'w') as configfile:
                                config.write(configfile)
                        elif answer == 'n' or answer == 'N':
                            continue
        else:
            answer2 = input("A new device has joined the network, do you want to add it?\n-MAC=(" + mac[i] + ")-IP=(" + ip[i] + ") (y/n):\n")
            if answer2 == 'y' or answer2 == 'Y':
                config['ARP'][ip[i]] = mac[i]
                with open('config.conf', 'w') as configfile:
                    config.write(configfile)
            elif answer2 == 'n' or answer2 == 'N':
                continue
|
[
"aliyildiz@outlook.com.tr"
] |
aliyildiz@outlook.com.tr
|
9e688f5ff91456dc4ca53fe45fab7b9f51f2ada9
|
09059f9b1a52482257e3acf3fa21e3daca45bf89
|
/src/agents.py
|
e3ee7a762cfb8bec1004c1b3e13ae830c844e2fd
|
[] |
no_license
|
HuangHam/CogSci2021-Epistemic-Reasoning
|
71faec912dacfdbeffcf97198b5d14618b2c26e0
|
ebf3d707a53be153968ea074f11d74cc0f592b40
|
refs/heads/main
| 2023-04-30T11:44:02.863469
| 2021-05-17T18:47:39
| 2021-05-17T18:47:39
| 368,110,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 60,695
|
py
|
import numpy as np
import networkx as nx
import itertools as it
from scipy.special import logsumexp
from src.epistemic_structures import *
import src.utils as f
'''
All computational models (CompMods) are written as Python classes;
each specific CompMod inherits from a more general model category,
and all models must inherit task_parameters.
'''
class task_parameters:
'''
Design models tailored for the task
'''
def __init__(self, data):
# task parameters
self.cards = 'AAAA8888'
self.possible_hand = ['AA', 'A8', '88']
self.possible_order = ['Amy,Ben,You', 'Ben,You,Amy', 'You,Amy,Ben']
self.num_cards = len(self.cards)
self.num_agents = 3
self.num_cards_each = 2
self.num_phase = 10
self.colnamesIn = ['phase', 'cards', 'order', 'outcomeArray', 'response', 'answer']
self.colnamesOut = self.colnamesIn + ['AmyResponse', 'BenResponse', 'corAnswer', 'round',
'subj', 'outcome', 'cards_iso'] # add parameter names when simulating
self.Sub = np.unique(data['subj']) # list of subject numbers
self.num_subj = len(self.Sub)
self.data = [f.pd2np(data[data.subj == n][self.colnamesOut]) for n in self.Sub] # turn data into list of subject data
def initialize(self):
'''
Initialize the parameters according to appropriate distributions
output (1d np array): initial parameters to put into the optimizer
'''
count, param0 = 0, np.empty(len(self.parameter_space), dtype=np.float64)
for k, v in self.continuous_parameter.items():
if self.initialize_method[k] == 'uniform':
interval = self.continuous_parameter[k]
param0[count] = np.random.uniform(low=interval[0], high=interval[1])
count += 1
return param0
# stochastic intake models (NoisyDEL can be considered as intake_prob == 1)
class NoisyDEL(task_parameters, modal_model):
'''
    Model that assumes subjects hold the full model in mind and eliminate nodes as the game progresses
Parameter:
noise (float): chance of random guessing IF the model says she shouldn't know
    Note: parameter_names, colnamesIn, colnamesOut are crucial in making sure the right values are retrieved for the right variables
Use them to guide indexing in the functions
'''
def __init__(self, data):
task_parameters.__init__(self, data)
modal_model.__init__(self)
self.name = 'NoisyDEL'
# parameters and their bounds
self.discrete_parameter = {}
self.continuous_parameter = {'noise':[0,1]}
self.parameter_combination = list(it.product(*[self.discrete_parameter.values()]))
# all possible combinations of the discrete parameters
self.parameter_space = list(self.continuous_parameter.values())
# all ranges of continuous parameters
self.parameter_names = list(self.discrete_parameter.keys()) + list(
self.continuous_parameter.keys()) # note the convention that discrete precedes continuous params
# how to initialize continuous parameters
self.initialize_method = {'noise': 'uniform'}
def agent_by_round(self, state, players, announcements, graph):
'''
state (string): 'AA88A8' (your cards, Amy's, Ben's)
players: (list of strings) ['Amy', 'Ben', 'You'] (note: not the game order. must match announcement order)
announcements: (1D np array) [] if subject goes first, [False, True] if subject goes third etc
graph: (nx object) epistemic model
return: response of the current turn (bool), updated model (graph)
'''
G = graph
for i in range(len(announcements)):
player, announcement = players[i], announcements[i]
g = self.update_model(G, announcement, self.player_number[player])
state_in = len([i for i, d in g.nodes(data=True) if d['state'] == state]) != 0
if state_in:
G = g
response = self.compute_correct_response(G, state, self.player_number['You'])
return response, G
def agent_by_game(self, param, state, order, outcomeArray):
'''
param (list): [noise]
state (str), order (list of str): cards and order in the game
outcomeArray: (list of bool) [[FALSE, FALSE, FALSE], [FALSE, TRUE]] represents the correct announcements players should produce
return: list of response (bool) for the entire game, the selected hand (str), correct response or not (bool)
'''
noise = param[self.parameter_names.index('noise')]
G, player_idx = self.generate_full_model(), order.index('You')
responses, answer = [], ''
players_after, announcement_after_player = [], []
outcome = True
for rnd in outcomeArray:
announcement_before_player, announcement_after_player = np.append(announcement_after_player, rnd[:player_idx]), rnd[player_idx:]
players_before, players_after = np.append(players_after, order[:player_idx]), order[player_idx:]
response, G = self.agent_by_round(state, players_before, announcement_before_player, G)
possible_answers = self.compute_possible_states(G, state, self.player_number['You'])
if np.random.random_sample() < noise: # guess
if np.random.random_sample() < 0.5: # guess don't know
responses.append(False)
outcome = outcome and not rnd[order.index('You')]
else: # guess know and randomly choose one from candidates
responses.append(True)
answer = np.random.choice(self.possible_hand) # randomly choose one of three
outcome = outcome and rnd[order.index('You')] and answer == state[:2]
break
else: # not guess
if response:
assert len(possible_answers) == 1
responses.append(response)
answer = possible_answers[0][:2]
outcome = outcome and rnd[order.index('You')] and answer == state[:2]
break # if know, the game ends
else:
responses.append(response)
outcome = outcome and not rnd[order.index('You')]
return responses, answer, outcome
def agent(self, subNum, param):
'''
param: same as above
subNum (int): which subject data we are simulating
return: numpy data matrix
'''
colnamesOut = self.colnamesOut + self.parameter_names
data = np.ones(len(colnamesOut)) # initialize output data. note for small dataset (<50000 rows) numpy is more efficient
# get coloumn index for different task variables
phase_idx = self.colnamesIn.index('phase')
cards_idx = self.colnamesIn.index('cards')
order_idx = self.colnamesIn.index('order')
outcomeArray_idx = self.colnamesIn.index('outcomeArray')
        # get column index for output variables
phase_out = colnamesOut.index('phase')
cards_out = colnamesOut.index('cards')
order_out = colnamesOut.index('order')
outcomeArray_out = colnamesOut.index('outcomeArray')
AmyResponse_out = colnamesOut.index('AmyResponse')
BenResponse_out = colnamesOut.index('BenResponse')
corAnswer_out = colnamesOut.index('corAnswer')
round_out = colnamesOut.index('round')
response_out = colnamesOut.index('response')
answer_out = colnamesOut.index('answer')
subj_out = colnamesOut.index('subj')
outcome_out = colnamesOut.index('outcome')
cards_iso_out = colnamesOut.index('cards_iso')
noise_out = colnamesOut.index('noise')
# remove redundant rows due to rnd info
real_data = f.unique([tuple(row) for row in self.data[subNum - 1]])
# get task parameters
phases, all_cards, all_order, all_outcomeArray = real_data[:, phase_idx], real_data[:, cards_idx], real_data[:, order_idx], real_data[:, outcomeArray_idx]
assert len(all_cards) == len(all_order) == len(phases)
for i in range(len(phases)): # for each game
phase, order, cards, outcomeArray = phases[i], all_order[i], all_cards[i], all_outcomeArray[i]
outcomeArray_evaled, cards_iso = eval(outcomeArray), self.iso_map[cards]
responses, answer, outcome = self.agent_by_game(param, cards, order.split(","), outcomeArray_evaled)
for rnd in range(len(responses)):
outcomeSubarray, response, log = outcomeArray_evaled[rnd], responses[rnd], [None]*3
noise = param[self.parameter_names.index('noise')]
for o in range(len(outcomeSubarray)):
log[o] = outcomeSubarray[o]
AmyResponse, BenResponse, corAnswer = log[order.split(",").index('Amy')], log[order.split(",").index('Ben')], log[order.split(",").index('You')]
# prepare output row to append to data
outputs = np.empty(len(colnamesOut), dtype=object)
outputs[phase_out], outputs[cards_out], outputs[order_out], outputs[outcomeArray_out] = phase, cards, order, outcomeArray
outputs[AmyResponse_out], outputs[BenResponse_out], outputs[corAnswer_out], outputs[
round_out] = AmyResponse, BenResponse, corAnswer, rnd+1
outputs[response_out], outputs[answer_out], outputs[subj_out], outputs[outcome_out] = response, answer, subNum, outcome
outputs[cards_iso_out], outputs[noise_out] = cards_iso, noise
data = np.vstack((data, outputs))
return data[1:]
def LLH_by_round(self, state, players, announcements, graph, update=1):
'''
state (string): 'AA88A8' (your cards, Amy's, Ben's)
players: (list of strings) ['Amy', 'Ben', 'You'] (note: not the game order. must match announcement order)
announcements: (1D np Array) [] if subject goes first, [False, True] if subject goes third etc
graph: (nx object) epistemic model
        return: list of all possible models (graph) generated, list of respective log likelihoods (float)
'''
num_announcements = len(announcements)
models, LLHs = [], []
if num_announcements == 0:
models.append(graph)
LLHs.append(np.log(1))
else:
for branch in it.product(*[[0,1]]*num_announcements):
G, LLH = graph, 0
for i in range(len(branch)):
player, announcement, need_update = players[i], announcements[i], branch[i]
if need_update:
LLH += np.log(update)
else:
LLH += np.log(1 - update)
if need_update: # if 1 means update accordingly
g = self.update_model(G, announcement, self.player_number[player])
state_in = len([i for i, d in g.nodes(data=True) if d['state'] == state]) != 0
if state_in:
G = g
models.append(G)
LLHs.append(LLH)
return models, LLHs
def LLH_bayes_net(self, param, state, order, outcomeArray):
'''
param (list): [noise]
state (str), order (list): cards and order in the game
outcomeArray (list of bool): [[FALSE, FALSE, FALSE], [FALSE, TRUE]] represents the correct announcements players should produce
return: a bayes_net (nx object) encoding the log likelihood of each possible situation generated by this model
'''
noise = param[self.parameter_names.index('noise')]
G, player_idx,bayes_net, max_round = self.generate_full_model(), order.index('You'), nx.DiGraph(), len(outcomeArray)
bayes_net.add_node((0,), model=G) # initialize start node.
players_after, announcement_after_player, respond_false_prob = [], [], 1
for idx in range(max_round): # for every round
rnd, child_id = outcomeArray[idx], 0 # prepare for the input to the LLH_round
announcement_before_player, announcement_after_player = \
np.append(announcement_after_player, rnd[:player_idx]), rnd[player_idx:]
players_before, players_after = np.append(players_after, order[:player_idx]), order[player_idx:]
current_layer = [node_pair for node_pair in bayes_net.nodes(data=True) if node_pair[0][0] == idx and node_pair[1]['model']]
for node_pair in current_layer: # for all possible output model from the previous rnd except the special hand node
node, prev_model = node_pair[0], node_pair[1]['model']
if node[0] != 0:
parent_response = self.compute_correct_response(prev_model, state, self.player_number['You'])
respond_false_prob = (1-parent_response)*(1-noise) + noise / 2
models, LLHs = self.LLH_by_round(state, players_before, announcement_before_player, prev_model)
assert len(models) == len(LLHs)
for hand in self.possible_hand:
bayes_net.add_node((idx + 1, hand), model=None) # add the answer node
for i in range(len(models)): # for all updated models
child_id += 1
model = models[i]
model_answer = self.compute_correct_response(model, state, self.player_number['You'])
respond_true_prob = model_answer*(1-noise) + (noise/2) # chance of answer true
bayes_net.add_node((idx+1, child_id), model=model)
bayes_net.add_edge(node, (idx+1, child_id), weight=np.log(respond_false_prob) + LLHs[i])
for hand in self.possible_hand: # all hand nodes
bayes_net.add_edge((idx + 1, child_id), (idx + 1, hand),
weight=np.log(respond_true_prob/len(self.possible_hand)))
return bayes_net
def LLH_by_game(self, param, state, order, outcomeArray):
'''
        return: a dictionary whose values are dictionaries (one per round) or a float (for the '' key)
If the key is int, it represents the rnd (1,2,or 3) where agent answers I know
Then its value is a dictionary
key (str): possible hand ('AA', 'A8', '88')
value (float): their log likelihood
If the key is '', it represents the agent answered I don't know till the end
value (float): log likelihood
'''
bayes_net = self.LLH_bayes_net(param, state, order, outcomeArray)
response_llh = {}
for rnd in range(len(outcomeArray)):
answer_chance = {}
for hand in self.possible_hand:
hand_llh = []
for path in nx.all_simple_paths(bayes_net, source=(0,), target=(rnd+1, hand)):
path_llh = 0 # initialize branch conditional probability
for parent in range(len(path)-1):
path_llh += bayes_net.get_edge_data(path[parent], path[parent+1])['weight']
hand_llh.append(path_llh)
answer_chance[hand] = logsumexp(hand_llh) # marginalize
response_llh[rnd+1] = answer_chance
LLH = []
for dic in response_llh.values():
for llh in dic.values():
LLH.append(llh)
response_llh[''] = np.log(1-np.exp(logsumexp(LLH)))
return response_llh
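        # Illustrative return value for a two-round game (made-up numbers):
        # {1: {'AA': -2.3, 'A8': -2.3, '88': -2.3},
        #  2: {'AA': -1.9, 'A8': -1.9, '88': -1.9},
        #  '': -0.45}
        # where '' carries the log likelihood of never answering "I know".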
def nLLH(self, param, Data):
'''
param: (list) Model parameters
Data: (numpy matrix) The actual subject data
return: (float) The negative log likelihood
'''
        # get column index for different task variables
phase_out = self.colnamesOut.index('phase')
round_out = self.colnamesOut.index('round')
answer_out = self.colnamesOut.index('answer')
cards_out = self.colnamesOut.index('cards')
order_out = self.colnamesOut.index('order')
# prep work
llh, games =0, np.unique(Data[:, phase_out]) # list of block numbers
for g in games: # loop through games
current_game = Data[Data[:, phase_out] == g]
cardss, orders = np.unique(current_game[:, cards_out]), np.unique(current_game[:, order_out])
rounds, answers = current_game[:, round_out], np.unique(current_game[:, answer_out])
assert len(cardss) == 1 and len(orders) == 1
num_round, answer, cards, order = len(rounds), answers[0], cardss[0], orders[0].split(",")
            if np.isnan(sum(param)):  # a weird bug: scipy minimize sometimes samples [nan, nan] as parameters
continue
LLH_look_up = self.LLH_by_game(param, cards, order, self.compute_game_response(list(cards), order))
if answer in self.possible_hand:
llh += LLH_look_up[num_round][answer]
else:
llh += LLH_look_up['']
if np.isnan(llh):
            llh = -np.inf  # the fit sometimes (not always) turns an inf inside a function into nan; looks like an internal bug
return -llh
class SIWEB(task_parameters, bounded_modal_model):
'''
Model that assumes subjects hold a bounded model in mind and stochastically intake announcements to update it
Parameter:
level (int): bounds the initial model
intake_prob (float): the chance of intaking an annoucement to update the model every turn
noise (float): the chance of random guessing IF the model says she shouldn't know
    Note: parameter_names, colnamesIn, colnamesOut are crucial in making sure the right values are retrieved for the right variables
Use them to guide indexing in the functions
'''
def __init__(self, data):
task_parameters.__init__(self, data)
bounded_modal_model.__init__(self)
self.name = 'SIWEB'
# parameters and their bounds
self.discrete_parameter = {'level':range(5)}
self.continuous_parameter = {'intake_prob':[0,1], 'noise':[0,1]}
self.parameter_combination = list(it.product(*self.discrete_parameter.values()))
# all possible combinations of the discrete parameters
self.parameter_space = list(self.continuous_parameter.values())
# all ranges of continuous parameters
self.parameter_names = list(self.discrete_parameter.keys()) + list(
self.continuous_parameter.keys()) # note the convention that discrete precedes continuous params
# how to initialize continuous parameters
self.initialize_method = {'intake_prob':'uniform', 'noise': 'uniform'}
def agent_by_round(self, state, players, announcements, graph, intake_prob):
'''
state (string): 'AA88A8' (your cards, Amy's, Ben's)
players: (list of strings) ['Amy', 'Ben', 'You'] (note: not the game order. must match announcement order)
announcements: (1D np array) [] if subject goes first, [False, True] if subject goes third etc
graph: (nx object) epistemic model
intake_prob (float): one of the model parameters
return: response of the current turn (bool), updated model (graph)
'''
G = graph
for i in range(len(announcements)):
response = self.compute_my_response(G, state[2:4], state[4:6])
if response:
return response, G
player, announcement = players[i], announcements[i]
if np.random.random_sample() < intake_prob:
G = self.update_model(G, announcement, self.player_number[player])
response = self.compute_my_response(G, state[2:4], state[4:6])
return response, G
def agent_by_game(self, param, state, Amy_cards, Ben_cards, order, outcomeArray):
'''
param (list): [level, intake_prob, noise]
state (str), order (list of str): cards and order in the game
outcomeArray: (list of bool) [[FALSE, FALSE, FALSE], [FALSE, TRUE]] represents the correct announcements players should produce
return: list of response (bool) for the entire game, the selected hand (str), correct response or not (bool)
'''
assert Amy_cards == state[2:4] and Ben_cards == state[4:6]
level = param[self.parameter_names.index('level')]
noise, intake_prob = param[self.parameter_names.index('noise')], param[self.parameter_names.index('intake_prob')]
G, player_idx = self.generate_partial_model(Amy_cards, Ben_cards, level), order.index('You')
responses, answer = [], ''
players_after, announcement_after_player = [], []
outcome = True
for rnd in outcomeArray:
announcement_before_player, announcement_after_player = np.append(announcement_after_player, rnd[:player_idx]), rnd[player_idx:]
players_before, players_after = np.append(players_after, order[:player_idx]), order[player_idx:]
response, G = self.agent_by_round(state, players_before, announcement_before_player, G, intake_prob)
if response and response != -1:
responses.append(response)
possibilities = []
for i in list(G.nodes):
if G.nodes[i]['state'][2:4] == Amy_cards and G.nodes[i]['state'][4:6] == Ben_cards:
possibilities.append(i)
assert len(possibilities) == 1
answer = G.nodes[possibilities[0]]['state'][:2]
outcome = outcome and rnd[order.index('You')] and answer == state[:2]
break # if know, the game ends
else:
if response == -1: # inconsistent and have to guess
if np.random.random_sample() < 0.5: # guess don't know
responses.append(False)
outcome = outcome and not rnd[order.index('You')]
else: # guess know and randomly choose one from candidates
responses.append(True)
answer = np.random.choice(self.possible_hand) # randomly choose one of three
outcome = outcome and rnd[order.index('You')] and answer == state[:2]
break
else: # don't know
if np.random.random_sample() < noise:
responses.append(True)
                        answer = np.random.choice(self.possible_hand)  # randomly choose one of three
outcome = outcome and rnd[order.index('You')] and answer == state[:2]
break
else:
responses.append(response) # honestly say don't know
outcome = outcome and not rnd[order.index('You')]
return responses, answer, outcome
def agent(self, subNum, param):
'''
param: same as above
subNum (int): which subject data we are simulating
return: numpy data matrix
'''
colnamesOut = self.colnamesOut + self.parameter_names
data = np.ones(len(colnamesOut)) # initialize output data. Note for small dataset (<50000 rows) numpy is more efficient
        # get column index for different task variables
phase_idx = self.colnamesIn.index('phase')
cards_idx = self.colnamesIn.index('cards')
order_idx = self.colnamesIn.index('order')
outcomeArray_idx = self.colnamesIn.index('outcomeArray')
        # get column index for output variables
phase_out = colnamesOut.index('phase')
cards_out = colnamesOut.index('cards')
order_out = colnamesOut.index('order')
outcomeArray_out = colnamesOut.index('outcomeArray')
AmyResponse_out = colnamesOut.index('AmyResponse')
BenResponse_out = colnamesOut.index('BenResponse')
corAnswer_out = colnamesOut.index('corAnswer')
round_out = colnamesOut.index('round')
response_out = colnamesOut.index('response')
answer_out = colnamesOut.index('answer')
subj_out = colnamesOut.index('subj')
outcome_out = colnamesOut.index('outcome')
cards_iso_out = colnamesOut.index('cards_iso')
level_out = colnamesOut.index('level')
intake_prob_out = colnamesOut.index('intake_prob')
noise_out = colnamesOut.index('noise')
# remove redundant rows due to rnd info
real_data = f.unique([tuple(row) for row in self.data[subNum - 1]])
# get task parameters
phases, all_cards, all_order, all_outcomeArray = real_data[:, phase_idx], real_data[:, cards_idx], real_data[:, order_idx], real_data[:, outcomeArray_idx]
assert len(all_cards) == len(all_order) == len(phases)
for i in range(len(phases)): #for each game
phase, order, cards, outcomeArray = phases[i], all_order[i], all_cards[i], all_outcomeArray[i]
outcomeArray_evaled, cards_iso, Amy_cards, Ben_cards = eval(outcomeArray), self.iso_map[cards], cards[2:2+self.num_cards_each], cards[4:4+self.num_cards_each]
responses, answer, outcome = self.agent_by_game(param, cards, Amy_cards, Ben_cards, order.split(","), outcomeArray_evaled)
for rnd in range(len(responses)):
outcomeSubarray, response, log = outcomeArray_evaled[rnd], responses[rnd], [None]*3
level, intake_prob, noise = param
for o in range(len(outcomeSubarray)):
log[o] = outcomeSubarray[o]
AmyResponse, BenResponse, corAnswer = log[order.split(",").index('Amy')], log[order.split(",").index('Ben')], log[order.split(",").index('You')]
# prepare output row to append to data
outputs = np.empty(len(colnamesOut), dtype=object)
outputs[phase_out], outputs[cards_out], outputs[order_out], outputs[outcomeArray_out] = phase, cards, order, outcomeArray
outputs[AmyResponse_out], outputs[BenResponse_out], outputs[corAnswer_out], outputs[
round_out] = AmyResponse, BenResponse, corAnswer, rnd+1
outputs[response_out], outputs[answer_out], outputs[subj_out], outputs[outcome_out] = response, answer, subNum, outcome
outputs[cards_iso_out], outputs[level_out], outputs[noise_out] = cards_iso, level, noise
outputs[intake_prob_out] = intake_prob
data = np.vstack((data, outputs))
return data[1:]
def LLH_by_round(self, intake_prob, state, players, announcements, graph):
'''
intake_prob: (float) between 0 and 1 capture chance of updating the model
state: 'AA88A8' (your cards, Amy's, Ben's)
players: (list of strings) ['Amy', 'Ben', 'You'] (note: not the game order. must match announcement order)
announcements: (1D np Array) [] if subject goes first, [False, True] if subject goes third etc
graph: (nx object) epistemic model
        return: list of all possible models (graph) generated, list of respective log likelihoods (float)
'''
num_announcements = len(announcements)
models, LLHs = [], []
if num_announcements == 0:
models.append(graph)
LLHs.append(np.log(1))
else:
for branch in it.product(*[[0,1]]*num_announcements):
G, LLH = graph, 0
for i in range(len(branch)):
player, announcement, need_update = players[i], announcements[i], branch[i]
if need_update:
LLH += np.log(intake_prob)
else:
LLH += np.log(1 - intake_prob)
response = self.compute_my_response(G, state[2:4], state[4:6])
if response:
continue
if need_update: # if 1 means update accordingly
player, announcement = players[i], announcements[i]
G = self.update_model(G, announcement, self.player_number[player])
models.append(G)
LLHs.append(LLH)
return models, LLHs
def LLH_bayes_net(self, param, state, order, outcomeArray):
'''
        param (list): [level, intake_prob, noise]
state (str), order (list): cards and order in the game
outcomeArray (list of bool): [[FALSE, FALSE, FALSE], [FALSE, TRUE]] represents the correct announcements players should produce
return: a bayes_net (nx object) encoding the log likelihood of each possible situation generated by this model
'''
level, intake_prob, noise = param[self.parameter_names.index('level')], min(param[self.parameter_names.index('intake_prob')], 1), min(param[self.parameter_names.index('noise')],1)
G, player_idx,bayes_net, max_round = self.generate_partial_model(state[2:4], state[4:6], level), order.index('You'), nx.DiGraph(), len(outcomeArray)
bayes_net.add_node((0,), model=G) # initialize start node
players_after, announcement_after_player, respond_false_prob = [], [], 1
for idx in range(max_round): # for every round
rnd, child_id = outcomeArray[idx], 0 # prepare for the input to the LLH_round
announcement_before_player, announcement_after_player = \
np.append(announcement_after_player, rnd[:player_idx]), rnd[player_idx:]
players_before, players_after = np.append(players_after, order[:player_idx]), order[player_idx:]
current_layer = [node_pair for node_pair in bayes_net.nodes(data=True) if node_pair[0][0] == idx and node_pair[1]['model']]
for node_pair in current_layer: # for all possible output model from the previous rnd except the special hand node
node, prev_model = node_pair[0], node_pair[1]['model']
prev_response = self.compute_my_response(prev_model, state[2:4], state[4:6])
if prev_response != 1 or node[0] == 0:
# if the parent is a model still with uncertainty unless it's the start node
# generate all possible updated the models from it
models, LLHs = self.LLH_by_round(intake_prob, state, players_before, announcement_before_player, prev_model)
assert len(models) == len(LLHs)
for hand in self.possible_hand:
bayes_net.add_node((idx + 1, hand), model=None) # add the answer node
for i in range(len(models)): # for all updated models
child_id += 1
model = models[i]
response = self.compute_my_response(model, state[2:4], state[4:6])
if response != -1:
respond_true_prob = response*1 + (1-response)*noise # chance of answer true
else:
respond_true_prob = 0.5
bayes_net.add_node((idx+1, child_id), model=model)
if prev_response != -1:
bayes_net.add_edge(node, (idx+1, child_id), weight=np.log(respond_false_prob) + LLHs[i])
else:
bayes_net.add_edge(node, (idx+1, child_id), weight=np.log(0.5) + LLHs[i])
for hand in self.possible_hand: # all hand nodes
bayes_net.add_edge((idx + 1, child_id), (idx + 1, hand),
weight=np.log(respond_true_prob/len(self.possible_hand)))
respond_false_prob = 1 - noise
return bayes_net
def LLH_by_game(self, param, state, order, outcomeArray):
'''
        return: a dictionary whose values are dictionaries (one per round) or a float (for the '' key)
If the key is int, it represents the rnd (1,2,or 3) where agent answers I know
Then its value is a dictionary
key (str): possible hand ('AA', 'A8', '88')
value (float): their log likelihood
If the key is '', it represents the agent answered I don't know till the end
value (float): log likelihood
'''
bayes_net = self.LLH_bayes_net(param, state, order, outcomeArray)
response_llh = {}
for rnd in range(len(outcomeArray)):
answer_chance = {}
for hand in self.possible_hand:
hand_llh = []
for path in nx.all_simple_paths(bayes_net, source=(0,), target=(rnd+1, hand)):
path_llh = 0 # initialize branch conditional probability
for parent in range(len(path)-1):
path_llh += bayes_net.get_edge_data(path[parent], path[parent+1])['weight']
hand_llh.append(path_llh)
answer_chance[hand] = logsumexp(hand_llh) # marginalize
response_llh[rnd+1] = answer_chance
LLH = []
for dic in response_llh.values():
for llh in dic.values():
LLH.append(llh)
response_llh[''] = np.log(1-min(1,np.exp(logsumexp(LLH)))) # sometimes get 1.000000002 overflow
return response_llh
def nLLH(self, continuous_param, Data, discrete_param):
'''
param: (list) Model parameters
Data: (numpy matrix) The actual subject data
return: (float) The negative log likelihood
'''
        # get column index for different task variables
phase_out = self.colnamesOut.index('phase')
round_out = self.colnamesOut.index('round')
answer_out = self.colnamesOut.index('answer')
cards_out = self.colnamesOut.index('cards')
order_out = self.colnamesOut.index('order')
outcomeArray_out = self.colnamesOut.index('outcomeArray')
# prep work
        if np.isnan(sum(continuous_param)) or max(continuous_param) > 1:  # a weird bug: scipy minimize sometimes samples [nan, nan] or values like 1.00001 that exceed the bounds
return np.inf
llh, games =0, np.unique(Data[:, phase_out]) # list of block numbers
for g in games: # loop through games
current_game = Data[Data[:, phase_out] == g]
cardss, orders = np.unique(current_game[:, cards_out]), np.unique(current_game[:, order_out])
rounds, answers = current_game[:, round_out], np.unique(current_game[:, answer_out])
outcomeArray = eval(current_game[:, outcomeArray_out][0])
assert len(cardss) == 1 and len(orders) == 1
num_round, answer, cards, order = len(rounds), answers[0], cardss[0], orders[0].split(",")
# continue
Amy_cards, Ben_cards = cards[2:4], cards[4:6]
LLH_look_up = self.LLH_by_game(list(discrete_param)+list(continuous_param), cards, order, outcomeArray)
if answer in self.possible_hand:
llh += LLH_look_up[num_round][answer]
else:
llh += LLH_look_up['']
return -llh
# stochastic update models
nSample = 200 # number of samples used to estimate likelihood
class SUWEB(task_parameters, imperfect_update_model):
'''
Model that assumes subjects hold a bounded model in mind and stochastically eliminate nodes when updating it
Parameter:
level (int): bounds the initial model
update_prob (float): the chance of successfully eliminating a node when the update requires an elimination
noise (float): chance of random guessing IF the model says she shouldn't know
    Note: parameter_names, colnamesIn, colnamesOut are crucial in making sure the right values are retrieved for the right variables
Use them to guide indexing in the functions
'''
def __init__(self, data, nSample = nSample):
task_parameters.__init__(self, data)
imperfect_update_model.__init__(self)
self.name = 'SUWEB'
self.nSample = nSample
# parameters and their bounds
self.discrete_parameter = {'level':range(5)}
self.continuous_parameter = {'update_prob':[0,1], 'noise':[0,1]}
self.parameter_combination = list(it.product(*self.discrete_parameter.values()))
# all possible combinations of the discrete parameters
self.parameter_space = list(self.continuous_parameter.values())
# all ranges of continuous parameters
self.parameter_names = list(self.discrete_parameter.keys()) + list(
self.continuous_parameter.keys()) # note the convention that discrete precedes continuous params
# how to initialize continuous parameters
self.initialize_method = {'update_prob':'uniform', 'noise': 'uniform'}
def agent_by_round(self, state, players, announcements, graph, update_prob):
'''
state (string): 'AA88A8' (your cards, Amy's, Ben's)
players: (list of strings) ['Amy', 'Ben', 'You'] (note: not the game order. must match announcement order)
announcements: (1D np array) [] if subject goes first, [False, True] if subject goes third etc
graph: (nx object) epistemic model
        update_prob (float): one of the model parameters
return: response of the current turn (bool), updated model (graph)
'''
G = graph
for i in range(len(announcements)):
response = self.compute_my_response(G, state[2:4], state[4:6])
if response:
return response, G
player, announcement = players[i], announcements[i]
G = self.update_model(G, update_prob, announcement, self.player_number[player])
response = self.compute_my_response(G, state[2:4], state[4:6])
return response, G
def agent_by_game(self, param, state, Amy_cards, Ben_cards, order, outcomeArray):
'''
param (list): [level, update_prob, noise]
state (str), order (list of str): cards and order in the game
outcomeArray: (list of bool) [[FALSE, FALSE, FALSE], [FALSE, TRUE]] represents the correct announcements players should produce
return: list of response (bool) for the entire game, the selected hand (str), correct response or not (bool)
'''
assert Amy_cards == state[2:4] and Ben_cards == state[4:6]
level = param[self.parameter_names.index('level')]
noise, update_prob = param[self.parameter_names.index('noise')], param[self.parameter_names.index('update_prob')]
G, player_idx = self.generate_partial_model(Amy_cards, Ben_cards, level), order.index('You')
responses, answer = [], ''
players_after, announcement_after_player = [], []
outcome = True
for rnd in outcomeArray:
announcement_before_player, announcement_after_player = np.append(announcement_after_player, rnd[:player_idx]), rnd[player_idx:]
players_before, players_after = np.append(players_after, order[:player_idx]), order[player_idx:]
response, G = self.agent_by_round(state, players_before, announcement_before_player, G, update_prob)
if response and response != -1:
responses.append(response)
possibilities = []
for i in list(G.nodes):
if G.nodes[i]['state'][2:4] == Amy_cards and G.nodes[i]['state'][4:6] == Ben_cards:
possibilities.append(i)
assert len(possibilities) == 1
answer = G.nodes[possibilities[0]]['state'][:2]
outcome = outcome and rnd[order.index('You')] and answer == state[:2]
break # if know, the game ends
else:
if response == -1: # inconsistent and have to guess
if np.random.random_sample() < 0.5: # guess don't know
responses.append(False)
outcome = outcome and not rnd[order.index('You')]
else: # guess know and randomly choose one from s
responses.append(True)
answer = np.random.choice(self.possible_hand) # randomly choose one of three
outcome = outcome and rnd[order.index('You')] and answer == state[:2]
break
else: # don't know
if np.random.random_sample() < noise:
responses.append(True)
                        answer = np.random.choice(self.possible_hand)  # randomly choose one of three
outcome = outcome and rnd[order.index('You')] and answer == state[:2]
break
else:
responses.append(response) # honestly say don't know
outcome = outcome and not rnd[order.index('You')]
return responses, answer, outcome
def agent(self, subNum, param):
'''
param: same as above
subNum (int): which subject data we are simulating
return: numpy data matrix
'''
colnamesOut = self.colnamesOut + self.parameter_names
data = np.ones(len(colnamesOut)) # initialize output data. note for small dataset (<50000 rows) numpy is more efficient
        # get column index for different task variables
phase_idx = self.colnamesIn.index('phase')
cards_idx = self.colnamesIn.index('cards')
order_idx = self.colnamesIn.index('order')
outcomeArray_idx = self.colnamesIn.index('outcomeArray')
        # get column index for output variables
phase_out = colnamesOut.index('phase')
cards_out = colnamesOut.index('cards')
order_out = colnamesOut.index('order')
outcomeArray_out = colnamesOut.index('outcomeArray')
AmyResponse_out = colnamesOut.index('AmyResponse')
BenResponse_out = colnamesOut.index('BenResponse')
corAnswer_out = colnamesOut.index('corAnswer')
round_out = colnamesOut.index('round')
response_out = colnamesOut.index('response')
answer_out = colnamesOut.index('answer')
subj_out = colnamesOut.index('subj')
outcome_out = colnamesOut.index('outcome')
cards_iso_out = colnamesOut.index('cards_iso')
level_out = colnamesOut.index('level')
update_prob_out = colnamesOut.index('update_prob')
noise_out = colnamesOut.index('noise')
# remove redundant rows due to rnd info
real_data = f.unique([tuple(row) for row in self.data[subNum - 1]])
# get task parameters
phases, all_cards, all_order, all_outcomeArray = real_data[:, phase_idx], real_data[:, cards_idx], real_data[:, order_idx], real_data[:, outcomeArray_idx]
assert len(all_cards) == len(all_order) == len(phases)
for i in range(len(phases)): # for each game
phase, order, cards, outcomeArray = phases[i], all_order[i], all_cards[i], all_outcomeArray[i]
outcomeArray_evaled, cards_iso, Amy_cards, Ben_cards = eval(outcomeArray), self.iso_map[cards], cards[2:2+self.num_cards_each], cards[4:4+self.num_cards_each]
responses, answer, outcome = self.agent_by_game(param, cards, Amy_cards, Ben_cards, order.split(","), outcomeArray_evaled)
for rnd in range(len(responses)):
outcomeSubarray, response, log = outcomeArray_evaled[rnd], responses[rnd], [None]*3
level, update_prob, noise = param
for o in range(len(outcomeSubarray)):
log[o] = outcomeSubarray[o]
AmyResponse, BenResponse, corAnswer = log[order.split(",").index('Amy')], log[order.split(",").index('Ben')], log[order.split(",").index('You')]
# prepare output row to append to data
outputs = np.empty(len(colnamesOut), dtype=object)
outputs[phase_out], outputs[cards_out], outputs[order_out], outputs[outcomeArray_out] = phase, cards, order, outcomeArray
outputs[AmyResponse_out], outputs[BenResponse_out], outputs[corAnswer_out], outputs[
round_out] = AmyResponse, BenResponse, corAnswer, rnd+1
outputs[response_out], outputs[answer_out], outputs[subj_out], outputs[outcome_out] = response, answer, subNum, outcome
outputs[cards_iso_out], outputs[level_out], outputs[noise_out] = cards_iso, level, noise
outputs[update_prob_out] = update_prob
data = np.vstack((data, outputs))
return data[1:]
def approx_game_likelihood(self, param, state, Amy_cards, Ben_cards, order, outcomeArray):
'''
        return: a dictionary whose values are dictionaries (one per round) or a float (for the '' key)
If the key is int, it represents the rnd (1,2,or 3) where agent answers I know
Then its value is a dictionary
key (str): possible hand ('AA', 'A8', '88')
value (float): their log likelihood
If the key is '', it represents the agent answered I don't know till the end
value (float): log likelihood
'''
results = []
for i in range(self.nSample):
res, ans, _ = self.agent_by_game(param, state, Amy_cards, Ben_cards, order, outcomeArray)
results.append((tuple(res), ans))
probs = {}
for r in results:
probs[r] = 0
for r in results:
probs[r] += 1
for k in probs.keys():
probs[k] = np.log(probs[k] / self.nSample)
s = logsumexp(list(probs.values()))
assert s < 0.01 and s > -0.01, s
max_round = len(outcomeArray) # if the game ends in two rounds, then impossible to get to the third round
look_up = {}
cards = self.possible_hand
convert = {1: (True,), 2: (False, True), 3: (False, False, True)}
for i in range(1, max_round+1):
look_up[i] = {}
for i in range(1, max_round+1):
for c in cards:
if (convert[i], c) in probs.keys():
look_up[i][c] = probs[(convert[i], c)]
else:
look_up[i][c] = np.log(0)
if ((False,) * max_round, '') in probs.keys():
look_up[''] = probs[((False,) * max_round, '')]
else:
look_up[''] = np.log(0)
return look_up
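        # Unlike the exact Bayes-net likelihoods computed for NoisyDEL and
        # SIWEB above, these entries are Monte Carlo estimates from
        # self.nSample simulated games, so any (response, answer) pair never
        # sampled gets np.log(0) = -inf.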
def nLLH(self, continuous_param, Data, discrete_param):
'''
param: (list) Model parameters
Data: (numpy matrix) The actual subject data
return: (float) The negative log likelihood
'''
        # get column index for different task variables
phase_out = self.colnamesOut.index('phase')
round_out = self.colnamesOut.index('round')
answer_out = self.colnamesOut.index('answer')
cards_out = self.colnamesOut.index('cards')
order_out = self.colnamesOut.index('order')
outcomeArray_out = self.colnamesOut.index('outcomeArray')
llh, games =0, np.unique(Data[:, phase_out]) # list of block numbers
for g in games: # loop through games
current_game = Data[Data[:, phase_out] == g]
cardss, orders = np.unique(current_game[:, cards_out]), np.unique(current_game[:, order_out])
rounds, answers = current_game[:, round_out], np.unique(current_game[:, answer_out])
outcomeArray = eval(current_game[:, outcomeArray_out][0])
assert len(cardss) == 1 and len(orders) == 1
num_round, answer, cards, order = len(rounds), answers[0], cardss[0], orders[0].split(",")
            if np.isnan(sum(continuous_param)):  # a weird bug: scipy minimize sometimes samples [nan, nan] as parameters
continue
Amy_cards, Ben_cards = cards[2:4], cards[4:6]
LLH_look_up = self.approx_game_likelihood(list(discrete_param)+list(continuous_param), cards, Amy_cards, Ben_cards, order, outcomeArray)
if answer in self.possible_hand:
llh += LLH_look_up[num_round][answer]
else:
llh += LLH_look_up['']
return -llh
class SUWNB(task_parameters, imperfect_update_model):
'''
Model that assumes subjects hold the full model in mind and stochastically eliminate nodes when updating it
Parameter:
update_prob (float): the chance of successfully eliminating a node when the update requires an elimination
noise (float): chance of random guessing IF the model says she shouldn't know
    Note: parameter_names, colnamesIn, colnamesOut are crucial in making sure the right values are retrieved for the right variables
Use them to guide indexing in the functions
'''
def __init__(self, data, nSample = nSample):
task_parameters.__init__(self, data)
imperfect_update_model.__init__(self)
self.name = 'SUWNB'
self.nSample = nSample
# parameters and their bounds
self.discrete_parameter = {}
self.continuous_parameter = {'update_prob':[0,1], 'noise':[0,1]}
self.parameter_combination = list(it.product(*[self.discrete_parameter.values()]))
# all possible combinations of the discrete parameters
self.parameter_space = list(self.continuous_parameter.values())
# all ranges of continuous parameters
self.parameter_names = list(self.discrete_parameter.keys()) + list(
self.continuous_parameter.keys()) # note the convention that discrete precedes continuous params
# how to initialize continuous parameters
self.initialize_method = {'update_prob':'uniform', 'noise': 'uniform'}
def agent_by_round(self, state, players, announcements, graph, update_prob):
'''
state (string): 'AA88A8' (your cards, Amy's, Ben's)
players: (list of strings) ['Amy', 'Ben', 'You'] (note: not the game order. must match announcement order)
announcements: (1D np array) [] if subject goes first, [False, True] if subject goes third etc
graph: (nx object) epistemic model
        update_prob (float): one of the model parameters
return: response of the current turn (bool), updated model (graph)
'''
G = graph
for i in range(len(announcements)):
response = self.compute_my_response(G, state[2:4], state[4:6])
if response:
return response, G
player, announcement = players[i], announcements[i]
G = self.update_model(G, update_prob, announcement, self.player_number[player])
response = self.compute_my_response(G, state[2:4], state[4:6])
return response, G
def agent_by_game(self, param, state, Amy_cards, Ben_cards, order, outcomeArray):
'''
param (list): [update_prob, noise]
state (str), order (list of str): cards and order in the game
outcomeArray: (list of bool) [[FALSE, FALSE, FALSE], [FALSE, TRUE]] represents the correct announcements players should produce
return: list of response (bool) for the entire game, the selected hand (str), correct response or not (bool)
'''
assert Amy_cards == state[2:4] and Ben_cards == state[4:6]
noise, update_prob = param[self.parameter_names.index('noise')], param[self.parameter_names.index('update_prob')]
G, player_idx = self.generate_partial_model(Amy_cards, Ben_cards), order.index('You')
responses, answer = [], ''
players_after, announcement_after_player = [], []
outcome = True
for rnd in outcomeArray:
announcement_before_player, announcement_after_player = np.append(announcement_after_player, rnd[:player_idx]), rnd[player_idx:]
players_before, players_after = np.append(players_after, order[:player_idx]), order[player_idx:]
response, G = self.agent_by_round(state, players_before, announcement_before_player, G, update_prob)
if response and response != -1:
responses.append(response)
possibilities = []
for i in list(G.nodes):
if G.nodes[i]['state'][2:4] == Amy_cards and G.nodes[i]['state'][4:6] == Ben_cards:
possibilities.append(i)
assert len(possibilities) == 1
answer = G.nodes[possibilities[0]]['state'][:2]
outcome = outcome and rnd[order.index('You')] and answer == state[:2]
break # if know, the game ends
else:
if response == -1: # inconsistent and have to guess
if np.random.random_sample() < 0.5: # guess don't know
responses.append(False)
outcome = outcome and not rnd[order.index('You')]
else: # guess know and randomly choose one from candidates
responses.append(True)
answer = np.random.choice(self.possible_hand) # randomly choose one of three
outcome = outcome and rnd[order.index('You')] and answer == state[:2]
break
else: # don't know
if np.random.random_sample() < noise:
responses.append(True)
                        answer = np.random.choice(self.possible_hand)  # randomly choose one of three
outcome = outcome and rnd[order.index('You')] and answer == state[:2]
break
else:
responses.append(response) # honestly say don't know
outcome = outcome and not rnd[order.index('You')]
return responses, answer, outcome
def agent(self, subNum, param):
'''
param: same as above
subNum (int): which subject data we are simulating
return: numpy data matrix
'''
colnamesOut = self.colnamesOut + self.parameter_names
data = np.ones(len(colnamesOut)) # initialize output data. note for small dataset (<50000 rows) numpy is more efficient
        # get column index for different task variables
phase_idx = self.colnamesIn.index('phase')
cards_idx = self.colnamesIn.index('cards')
order_idx = self.colnamesIn.index('order')
outcomeArray_idx = self.colnamesIn.index('outcomeArray')
        # get column index for output variables
phase_out = colnamesOut.index('phase')
cards_out = colnamesOut.index('cards')
order_out = colnamesOut.index('order')
outcomeArray_out = colnamesOut.index('outcomeArray')
AmyResponse_out = colnamesOut.index('AmyResponse')
BenResponse_out = colnamesOut.index('BenResponse')
corAnswer_out = colnamesOut.index('corAnswer')
round_out = colnamesOut.index('round')
response_out = colnamesOut.index('response')
answer_out = colnamesOut.index('answer')
subj_out = colnamesOut.index('subj')
outcome_out = colnamesOut.index('outcome')
cards_iso_out = colnamesOut.index('cards_iso')
update_prob_out = colnamesOut.index('update_prob')
noise_out = colnamesOut.index('noise')
# remove redundant rows due to rnd info
real_data = f.unique([tuple(row) for row in self.data[subNum - 1]])
# get task parameters
phases, all_cards, all_order, all_outcomeArray = real_data[:, phase_idx], real_data[:, cards_idx], real_data[:, order_idx], real_data[:, outcomeArray_idx]
assert len(all_cards) == len(all_order) == len(phases)
for i in range(len(phases)): #for each game
phase, order, cards, outcomeArray = phases[i], all_order[i], all_cards[i], all_outcomeArray[i]
outcomeArray_evaled, cards_iso, Amy_cards, Ben_cards = eval(outcomeArray), self.iso_map[cards], cards[2:2+self.num_cards_each], cards[4:4+self.num_cards_each]
responses, answer, outcome = self.agent_by_game(param, cards, Amy_cards, Ben_cards, order.split(","), outcomeArray_evaled)
for rnd in range(len(responses)):
outcomeSubarray, response, log = outcomeArray_evaled[rnd], responses[rnd], [None]*3
update_prob, noise = param
for o in range(len(outcomeSubarray)):
log[o] = outcomeSubarray[o]
AmyResponse, BenResponse, corAnswer = log[order.split(",").index('Amy')], log[order.split(",").index('Ben')], log[order.split(",").index('You')]
# prepare output row to append to data
outputs = np.empty(len(colnamesOut), dtype=object)
outputs[phase_out], outputs[cards_out], outputs[order_out], outputs[outcomeArray_out] = phase, cards, order, outcomeArray
                outputs[AmyResponse_out], outputs[BenResponse_out] = AmyResponse, BenResponse
                outputs[corAnswer_out], outputs[round_out] = corAnswer, rnd + 1
outputs[response_out], outputs[answer_out], outputs[subj_out], outputs[outcome_out] = response, answer, subNum, outcome
outputs[cards_iso_out], outputs[noise_out] = cards_iso, noise
outputs[update_prob_out] = update_prob
data = np.vstack((data, outputs))
return data[1:]
def approx_game_likelihood(self, param, state, Amy_cards, Ben_cards, order, outcomeArray):
'''
return: a dictionary of dictionary or float
If the key is int, it represents the rnd (1,2,or 3) where agent answers I know
Then its value is a dictionary
key (str): possible hand ('AA', 'A8', '88')
value (float): their log likelihood
If the key is '', it represents the agent answered I don't know till the end
value (float): log likelihood
'''
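        # Illustrative shape of the returned look_up for a 3-round game
        # (hypothetical values, for orientation only):
        #   {1: {'AA': -1.2, 'A8': -inf, '88': -2.3},
        #    2: {...}, 3: {...},
        #    '': -0.7}
        # look_up[r][hand] is the log likelihood of answering "I know, <hand>"
        # in round r; look_up[''] covers never claiming to know.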
results = []
for i in range(self.nSample):
res, ans, _ = self.agent_by_game(param, state, Amy_cards, Ben_cards, order, outcomeArray)
results.append((tuple(res), ans))
        probs = {}
        for r in results:
            probs[r] = probs.get(r, 0) + 1
        for k in probs.keys():
            probs[k] = np.log(probs[k] / self.nSample)
        s = logsumexp(list(probs.values()))
        assert -0.01 < s < 0.01, s  # log-probabilities should sum to ~log(1) = 0
max_round = len(outcomeArray) # if the game ends in two rounds, then impossible to get to the third round
look_up = {}
cards = self.possible_hand
convert = {1: (True,), 2: (False, True), 3: (False, False, True)}
for i in range(1, max_round+1):
look_up[i] = {}
for i in range(1, max_round+1):
for c in cards:
if (convert[i], c) in probs.keys():
look_up[i][c] = probs[(convert[i], c)]
else:
look_up[i][c] = np.log(0)
if ((False,) * max_round, '') in probs.keys():
look_up[''] = probs[((False,) * max_round, '')]
else:
look_up[''] = np.log(0)
return look_up
def nLLH(self, continuous_param, Data):
'''
param: (list) Model parameters
Data: (numpy matrix) The actual subject data
return: (float) The negative log likelihood
'''
        # get column index for different task variables
phase_out = self.colnamesOut.index('phase')
round_out = self.colnamesOut.index('round')
answer_out = self.colnamesOut.index('answer')
cards_out = self.colnamesOut.index('cards')
order_out = self.colnamesOut.index('order')
outcomeArray_out = self.colnamesOut.index('outcomeArray')
        llh, games = 0, np.unique(Data[:, phase_out]) # list of block numbers
for g in games: # loop through games
current_game = Data[Data[:, phase_out] == g]
cardss, orders = np.unique(current_game[:, cards_out]), np.unique(current_game[:, order_out])
rounds, answers = current_game[:, round_out], np.unique(current_game[:, answer_out])
outcomeArray = eval(current_game[:, outcomeArray_out][0])
assert len(cardss) == 1 and len(orders) == 1
num_round, answer, cards, order = len(rounds), answers[0], cardss[0], orders[0].split(",")
            if np.isnan(sum(continuous_param)): # a weird bug: scipy minimize sometimes samples [nan, nan] as parameters
continue
Amy_cards, Ben_cards = cards[2:4], cards[4:6]
LLH_look_up = self.approx_game_likelihood(list(continuous_param), cards, Amy_cards, Ben_cards, order, outcomeArray)
if answer in self.possible_hand:
llh += LLH_look_up[num_round][answer]
else:
llh += LLH_look_up['']
return -llh
|
[
"hamhuang@sas.upenn.edu"
] |
hamhuang@sas.upenn.edu
|
f37d3bef9f5d2d1c9b7ed49e95a0cd213511193a
|
93a4023cc405b40cfc0be2e1b025ac7ed5b390fb
|
/benchmark/spark_benchmark.py
|
b310cec04a1f2563fc77a4367035b65c718375bd
|
[
"Apache-2.0"
] |
permissive
|
autumnli11/data_model-1
|
e4a84778ab8c6fa27231405011d839ff96948ffb
|
c71b8272cf71f19504e4fcfa04b382ce9e68853a
|
refs/heads/main
| 2023-05-30T20:47:01.989647
| 2021-06-06T21:40:19
| 2021-06-06T21:40:19
| 337,819,677
| 1
| 0
|
Apache-2.0
| 2021-02-10T18:43:12
| 2021-02-10T18:43:11
| null |
UTF-8
|
Python
| false
| false
| 7,059
|
py
|
import os
import sys
from resource import getrusage as resource_usage, RUSAGE_SELF
from statistics import mean
from time import time as timestamp
import pyspark
from pyspark import SparkConf
from pyspark.context import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.session import SparkSession
merged_df_path = "../merged"
parquet_path = "../parquet"
#conf = SparkConf().set("spark.executor.memory","2g").set("spark.driver.memory","2g").setMaster("local")
#sc = SparkContext("local")
#sc = SparkContext(conf=conf)
#sqlContext = SQLContext(sc)
#spark = SparkSession.builder.master("local[1]").config("spark.driver.memory","2g").config("spark.executor.memory","2g").getOrCreate()
spark = SparkSession.builder.master("local[1]").config("spark.ui.port","4050").getOrCreate()
def unix_time(function, *args, **kwargs):
'''Return `real`, `sys` and `user` elapsed time, like UNIX's command `time`
You can calculate the amount of used CPU-time used by your
function/callable by summing `user` and `sys`. `real` is just like the wall
clock.
Note that `sys` and `user`'s resolutions are limited by the resolution on
the operating system's software clock (check `man 7 time` for more
details).
'''
start_time, start_resources = timestamp(), resource_usage(RUSAGE_SELF)
r = function(*args, **kwargs)
end_resources, end_time = resource_usage(RUSAGE_SELF), timestamp()
return {'return': r,
'real': end_time - start_time,
'sys': end_resources.ru_stime - start_resources.ru_stime,
'user': end_resources.ru_utime - start_resources.ru_utime}
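# Illustrative use of unix_time (a sketch; `busy` is a hypothetical workload,
# not part of this benchmark):
#   def busy(n):
#       return sum(i * i for i in range(n))
#   t = unix_time(busy, 2_000_000)
#   print(t["real"], t["user"] + t["sys"])  # wall clock vs. total CPU time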
def has_column(df, col):
try:
df[col]
return True
except pyspark.sql.utils.AnalysisException:
return False
#load the merged parquet into a df
def load():
df = spark.read.parquet(merged_df_path)
return df
def analytics(df, iter_num):
sys.stdout = open('/dev/null', 'w')
df.groupBy("`id.orig_h`").count().show(df.count(), False)
#over each individual parquet instead of the merged file
#for root, dirs, files in os.walk(parquet_path, topdown = False):
# for name in files:
# df = spark.read.parquet(os.path.join(root, name))
# if has_column(df, "`id.orig_h`"):
# df.select("`id.orig_h`").count()
def search(df, iter_num):
df2 = spark.sql("""SELECT *
FROM MERGED
WHERE `id.orig_h` = '10.128.0.19'
ORDER BY ts
LIMIT 5
""")
sys.stdout = open('/dev/null', 'w')
df2.show()
def discovery(df, iter_num):
for root, dirs, files in os.walk(parquet_path, topdown = False):
for name in files:
# print(name)
df = spark.read.parquet(os.path.join(root, name))
df.createOrReplaceTempView("Schema")
df2 = spark.sql("""SELECT COUNT(*) FROM Schema""")
sys.stdout = open('/dev/null', 'w')
df2.show()
def path(df, iter_num):
df2 = spark.sql("""SELECT * FROM MERGED
WHERE _path = 'smb*' or _path = 'dce_rpc' """)
sys.stdout = open('/dev/null', 'w')
df2.show()
def post(df, iter_num):
df2 = spark.sql("""SELECT ts, uid, id, method, uri, status_code
from MERGED
WHERE method = 'POST'
""")
sys.stdout = open('/dev/null', 'w')
df2.show()
def file_not_null(df, iter_num):
df2 = spark.sql("""SELECT _path, tx_hosts, rx_hosts, conn_uids, mime_type, filename, md5, sha1
from MERGED
WHERE filename IS NOT NULL
""")
sys.stdout = open('/dev/null', 'w')
df2.show()
def count_path(df, iter_num):
df2 = spark.sql("""SELECT _path, count(*)
from MERGED
GROUP BY _path
ORDER BY COUNT(*) DESC""")
sys.stdout = open('/dev/null', 'w')
df2.show()
def path_dns(df, iter_num):
df5 = spark.sql("""SELECT query, count(*)
from MERGED
WHERE _path = 'dns'
GROUP BY query
ORDER BY COUNT(*) DESC""")
sys.stdout = open('/dev/null', 'w')
df5.show()
def http_reqs(df, iter_num):
df2 = spark.sql("""SELECT DISTINCT `id.orig_h`, `id.resp_h`, `id.resp_p`, method, host, uri
FROM MERGED
WHERE _path = 'http'
""")
sys.stdout = open('/dev/null', 'w')
df2.show()
def path_conn(df, iter_num):
df2 = spark.sql("""SELECT DISTINCT `id.orig_h`, `id.resp_h`, `id.resp_p`
FROM MERGED
WHERE _path = 'conn'
ORDER BY `id.orig_h`, `id.resp_h`, `id.resp_p`
""")
sys.stdout = open('/dev/null', 'w')
df2.show()
def total_bytes(df, iter_num):
df2 = spark.sql("""SELECT orig_bytes + resp_bytes AS total_bytes, uid, orig_bytes, resp_bytes
FROM MERGED
WHERE _path = 'conn'
ORDER BY total_bytes DESC""")
sys.stdout = open('/dev/null', 'w')
df2.show()
def benchmark(fn, num_iter=100):
_real = list()
for i in range(num_iter):
df = load()
        if fn != analytics:
            # createOrReplaceTempView returns None, so register the view without rebinding df
            df.createOrReplaceTempView("MERGED")
origin = sys.stdout
t = unix_time(fn, df=df, iter_num=i)
sys.stdout = origin
        if i >= 10:  # discard the first 10 iterations as warm-up
_real.append(t["real"])
#if fn == search and i == 0:
#df = t["return"]
if i == num_iter - 1:
print("{}".format(fn.__name__))
print("{}".format(_real))
print("{}".format(mean(_real)))
def main():
    # TBD: use sys.argv[1] for num_iter or dataset (argv[0] is the script name)
print("loading..")
#df = unix_time(load)["return"]
#df = load()
print("name,real")
print("------------------")
print("Analytics query")
print("count total number of records with each distinct source IP")
benchmark(analytics, num_iter=100)
print("Data discovery query")
print("count the number of records with each different schema")
#benchmark(discovery, num_iter=100)
print("Windows Networking Activity")
#benchmark(path, num_iter=100)
print("HTTP Post Requests")
#benchmark(post, num_iter=100)
print("Activity overview")
#benchmark(count_path, num_iter=100)
print("File Activity")
#benchmark(file_not_null, num_iter=100)
print("Unique DNS queries")
#benchmark(path_dns, num_iter=100)
print("HTTP Requests")
#benchmark(http_reqs, num_iter=100)
print("Unique Network Connections")
#benchmark(path_conn, num_iter=100)
print("Connection Received Data")
#benchmark(total_bytes, num_iter=100)
print("Search query")
print("find all records with IP 10.128.19, sort by timestamp and return top 5")
#df.createOrReplaceTempView("MERGED")
#benchmark(search, num_iter=100)
if __name__ == '__main__':
main()
|
[
"qiushiautumnli@gmail.com"
] |
qiushiautumnli@gmail.com
|
ea22e83cfc6232863a3aae1c2e2bd11e9e5be067
|
450afb9a0f57a24ce757f4c516b7966d4348d76d
|
/simple_port_scan/send_mail.py
|
50b346332dde715e9781ba13207d062f709591fe
|
[] |
no_license
|
guytet/python
|
ea23566d869f8a12c235e2eba798428f2454e63e
|
46e07812b72ff263af63d28c413b24fef82d7281
|
refs/heads/master
| 2023-01-21T06:26:38.690185
| 2020-12-02T22:36:47
| 2020-12-02T22:36:47
| 300,088,190
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
#!/usr/bin/python3
import smtplib
sender = 'result@example.com'
receivers = ['user@example.com']
message = """From: result_message <result@example.com>
To: <admin@example.com>
Subject: SMTP e-mail test
This is a test e-mail message.
"""
try:
smtpObj = smtplib.SMTP('mail.example.com', 25)
smtpObj.sendmail(sender, receivers, message)
print("Successfully sent email")
except smtplib.SMTPException:
print("Error: unable to send email")
|
[
"none@none.com"
] |
none@none.com
|
87e3f472aaa03d95be771c8bcd0f93978095e714
|
eb9d19d210c29109bced619fd662eebd185c5047
|
/server.py
|
05a98dc2a833a63dd4286097d6c9d852a0f0a558
|
[] |
no_license
|
CTMObservatory/socketprog
|
4f9caae5d12834476cf113dae2de5a0a0246e494
|
1877e599359b7df6de761f3844643ab2e5df1e78
|
refs/heads/master
| 2021-06-22T14:30:47.951599
| 2020-11-28T00:00:10
| 2020-11-28T00:00:10
| 142,206,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
# Echo server program
import socket
HOST = '' # Symbolic name meaning all available interfaces
PORT = 50007 # Arbitrary non-privileged port
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
with conn:
print('Connected by', addr)
while True:
data = conn.recv(1024)
if not data: break
conn.sendall(data)
|
[
"martinberoiz@gmail.com"
] |
martinberoiz@gmail.com
|
62e304b4c5c2a8c545f88a22d1219fb47bb47029
|
be3bc396b580975970a7f323b91229ed5d4aad1c
|
/dft_workflow/job_analysis/prepare_oer_sets/write_oer_sets.py
|
33dd033658982cd7b408c1f1a2f7e2cfa1241277
|
[
"MIT"
] |
permissive
|
raulf2012/PROJ_IrOx_OER
|
813ee91139b45f47acb980d1ebfacdf87c364996
|
b79fc490f598a48e405819bd6a788ca6d4af440e
|
refs/heads/master
| 2023-06-23T22:48:25.695679
| 2023-06-09T22:34:41
| 2023-06-09T22:34:41
| 269,264,743
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,274
|
py
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python [conda env:PROJ_irox_oer] *
# language: python
# name: conda-env-PROJ_irox_oer-py
# ---
# # Writing OER sets to file
# ---
# ### Import Modules
# +
import os
print(os.getcwd())
import sys
import time; ti = time.time()
import json
import pandas as pd
import numpy as np
# #########################################################
from methods import (
get_df_features_targets,
get_df_jobs,
get_df_jobs_paths,
get_df_atoms_sorted_ind,
)
from methods import create_name_str_from_tup
from methods import get_df_jobs_paths, get_df_jobs_data
# #########################################################
from local_methods import write_other_jobs_in_set
# -
from methods import isnotebook
isnotebook_i = isnotebook()
if isnotebook_i:
from tqdm.notebook import tqdm
verbose = True
else:
from tqdm import tqdm
verbose = False
# ### Read Data
# +
df_jobs = get_df_jobs()
df_jobs_paths = get_df_jobs_paths()
df_features_targets = get_df_features_targets()
df_atoms = get_df_atoms_sorted_ind()
df_jobs_paths = get_df_jobs_paths()
df_jobs_data = get_df_jobs_data()
# -
df_atoms = df_atoms.set_index("job_id")
# + active=""
#
#
#
# -
# ### Main loop | writing OER sets
# +
# # TEMP
# name_i = ('slac', 'wufulafe_03', 58.0)
# df_features_targets = df_features_targets.loc[[name_i]]
# +
# # TEMP
# print(111 * "TEMP | ")
# indices = [
# # ('slac', 'relovalu_12', 24.0),
# ('sherlock', 'sifebelo_94', 61.0),
# # ('sherlock', 'sifebelo_94', 62.0),
# ]
# df_features_targets = df_features_targets.loc[indices]
# +
# for name_i, row_i in df_features_targets.iterrows():
iterator = tqdm(df_features_targets.index, desc="1st loop")
for i_cnt, index_i in enumerate(iterator):
row_i = df_features_targets.loc[index_i]
# if verbose:
# print(name_i)
# #####################################################
job_id_o_i = row_i.data.job_id_o.iloc[0]
job_id_bare_i = row_i.data.job_id_bare.iloc[0]
job_id_oh_i = row_i.data.job_id_oh.iloc[0]
# #####################################################
if job_id_bare_i is None:
continue
oh_exists = False
if job_id_oh_i is not None:
oh_exists = True
# #####################################################
df_atoms__o = df_atoms.loc[job_id_o_i]
df_atoms__bare = df_atoms.loc[job_id_bare_i]
# #####################################################
atoms__o = df_atoms__o.atoms_sorted_good
atoms__bare = df_atoms__bare.atoms_sorted_good
if oh_exists:
df_atoms__oh = df_atoms.loc[job_id_oh_i]
atoms__oh = df_atoms__oh.atoms_sorted_good
# #########################################################
# #########################################################
# dir_name = create_name_str_from_tup(name_i)
dir_name = create_name_str_from_tup(index_i)
dir_path = os.path.join(
os.environ["PROJ_irox_oer"],
"dft_workflow/job_analysis/prepare_oer_sets",
"out_data/oer_group_files",
dir_name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# #####################################################
atoms__o.write(
os.path.join(dir_path, "atoms__o.traj"))
atoms__o.write(
os.path.join(dir_path, "atoms__o.cif"))
atoms__bare.write(
os.path.join(dir_path, "atoms__bare.traj"))
atoms__bare.write(
os.path.join(dir_path, "atoms__bare.cif"))
if oh_exists:
atoms__oh.write(
os.path.join(dir_path, "atoms__oh.traj"))
atoms__oh.write(
os.path.join(dir_path, "atoms__oh.cif"))
# #####################################################
data_dict_to_write = dict(
job_id_o=job_id_o_i,
job_id_bare=job_id_bare_i,
job_id_oh=job_id_oh_i,
)
data_path = os.path.join(dir_path, "data.json")
with open(data_path, "w") as outfile:
json.dump(data_dict_to_write, outfile, indent=2)
# #####################################################
# Write other jobs in OER set
write_other_jobs_in_set(
job_id_bare_i,
dir_path=dir_path,
df_jobs=df_jobs, df_atoms=df_atoms,
df_jobs_paths=df_jobs_paths,
df_jobs_data=df_jobs_data,
)
# -
import ase
ase.__version__
atoms__o
# deliberate stop for "Run All": everything below is rough/temporary scratch work
assert False
# + active=""
#
#
# -
# # Writing top systems to file ROUGH TEMP
# +
# TOP SYSTEMS
if False:
# if True:
df_features_targets = df_features_targets.loc[
[
("slac", "tefovuto_94", 16.0),
# slac__nifupidu_92__032
# sherlock__bihetofu_24__036
('slac', 'hobukuno_29', 16.0),
('sherlock', 'ramufalu_44', 56.0),
('slac', 'nifupidu_92', 32.0),
('sherlock', 'bihetofu_24', 36.0),
('slac', 'dotivela_46', 32.0),
('slac', 'vovumota_03', 33.0),
('slac', 'ralutiwa_59', 32.0),
('sherlock', 'bebodira_65', 16.0),
('sherlock', 'soregawu_05', 62.0),
('slac', 'hivovaru_77', 26.0),
('sherlock', 'vegarebo_06', 50.0),
('slac', 'ralutiwa_59', 30.0),
('sherlock', 'kamevuse_75', 49.0),
('nersc', 'hesegula_40', 94.0),
('slac', 'fewirefe_11', 39.0),
('sherlock', 'vipikema_98', 60.0),
('slac', 'gulipita_22', 48.0),
('sherlock', 'rofetaso_24', 48.0),
('slac', 'runopeno_56', 32.0),
('slac', 'magiwuni_58', 26.0),
]
]
for name_i, row_i in df_features_targets.iterrows():
# #####################################################
job_id_o_i = row_i.data.job_id_o.iloc[0]
job_id_bare_i = row_i.data.job_id_bare.iloc[0]
job_id_oh_i = row_i.data.job_id_oh.iloc[0]
# #####################################################
oh_exists = False
if job_id_oh_i is not None:
oh_exists = True
# #####################################################
df_atoms__o = df_atoms.loc[job_id_o_i]
df_atoms__bare = df_atoms.loc[job_id_bare_i]
# #####################################################
atoms__o = df_atoms__o.atoms_sorted_good
atoms__bare = df_atoms__bare.atoms_sorted_good
if oh_exists:
df_atoms__oh = df_atoms.loc[job_id_oh_i]
atoms__oh = df_atoms__oh.atoms_sorted_good
# #########################################################
# #########################################################
dir_name = create_name_str_from_tup(name_i)
dir_path = os.path.join(
os.environ["PROJ_irox_oer"],
"dft_workflow/job_analysis/prepare_oer_sets",
"out_data/top_overpot_sys")
# dir_name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# atoms__o.write(
# os.path.join(dir_path, dir_name + "_o.cif"))
# atoms__bare.write(
# os.path.join(dir_path, dir_name + "_bare.cif"))
if oh_exists:
atoms__oh.write(
os.path.join(dir_path, dir_name + "_oh.cif"))
# -
# # MISC | Writing random cifs to file to open in VESTA
# +
df_subset = df_features_targets.sample(n=6)
if False:
for name_i, row_i in df_subset.iterrows():
tmp = 42
job_id_oh_i = row_i[("data", "job_id_oh", "", )]
# # #####################################################
# job_id_o_i = row_i.data.job_id_o.iloc[0]
# job_id_bare_i = row_i.data.job_id_bare.iloc[0]
# job_id_oh_i = row_i.data.job_id_oh.iloc[0]
# # #####################################################
# if job_id_bare_i is None:
# continue
oh_exists = False
if job_id_oh_i is not None:
oh_exists = True
# # #####################################################
# df_atoms__o = df_atoms.loc[job_id_o_i]
# df_atoms__bare = df_atoms.loc[job_id_bare_i]
# # #####################################################
# atoms__o = df_atoms__o.atoms_sorted_good
# atoms__bare = df_atoms__bare.atoms_sorted_good
if oh_exists:
df_atoms__oh = df_atoms.loc[job_id_oh_i]
atoms__oh = df_atoms__oh.atoms_sorted_good
# #########################################################
# #########################################################
file_name_i = create_name_str_from_tup(name_i)
print(file_name_i)
dir_path = os.path.join(
os.environ["PROJ_irox_oer"],
"dft_workflow/job_analysis/prepare_oer_sets",
"out_data/misc_cif_files_oh")
# dir_name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# #####################################################
# atoms__o.write(
# os.path.join(dir_path, "atoms__o.traj"))
# atoms__o.write(
# os.path.join(dir_path, "atoms__o.cif"))
# atoms__bare.write(
# os.path.join(dir_path, "atoms__bare.traj"))
# atoms__bare.write(
# os.path.join(dir_path, "atoms__bare.cif"))
if oh_exists:
atoms__oh.write(
os.path.join(dir_path, file_name_i + ".cif"))
# os.path.join(dir_path, "atoms__oh.traj"))
# atoms__oh.write(
# os.path.join(dir_path, "atoms__oh.cif"))
# -
# #########################################################
print(20 * "# # ")
print("All done!")
print("Run time:", np.round((time.time() - ti) / 60, 3), "min")
print("write_oer_sets.ipynb")
print(20 * "# # ")
# #########################################################
# + active=""
#
#
#
# + jupyter={"source_hidden": true}
# import os
# print(os.getcwd())
# import sys
# import pickle
# pd.set_option('display.max_columns', None)
# # pd.set_option('display.max_rows', None)
|
[
"raulf2012@gmail.com"
] |
raulf2012@gmail.com
|
81652a7659ac61dfcb918c5f892b8dffd49bb4ba
|
313e77e2cad3e2f50f4fa625893bd6c5ff1145a1
|
/test_network.py
|
228c54581264c53562f9eedff4175b1c47d6463a
|
[] |
no_license
|
saima-h18/Communication-Network-Designer
|
09de79bd692e5ecea76baaad7a6f49a0e1ab3c07
|
7f998581870e11dc0750a198983d162f0bf2e39d
|
refs/heads/master
| 2022-08-03T17:52:14.720256
| 2020-05-28T17:55:14
| 2020-05-28T17:55:14
| 255,208,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
def prodofList(arr):
# multiplies values in an array - useful for reliability
prod = 1
for x in arr:
prod *= x
return prod
rel_ring = [1-pow(1-0.97,2), 1-pow(1-0.96,3),1-pow(1-0.94,3), 1-pow(1-0.91,3), 1-pow(1-0.93,3), 1-pow(1-0.91,3)]
# rel_ring = [0.99,0.93,0.96,0.96,0.94]
RelofLoop =prodofList(rel_ring)
# add probabilities where one and only one edge fails
for indx, r in enumerate(rel_ring):
copy = rel_ring.copy()
failure = 1 - r
copy.pop(indx)
tmp = prodofList(copy)
product = failure * tmp
RelofLoop += product
# print(RelofLoop*0.9984)
print(RelofLoop)
# 0.9999965812408375
#0.9983958167067905
#0.945275118336
#0.9982508265745879
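# Generalization (a sketch, assuming independent link failures): the same
# "all links up, or exactly one link down" computation for any reliability list.
def ring_reliability(rels):
    all_up = prodofList(rels)
    one_down = sum((1 - r) * prodofList(rels[:i] + rels[i + 1:])
                   for i, r in enumerate(rels))
    return all_up + one_down
# e.g. ring_reliability(rel_ring) reproduces RelofLoop above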
|
[
"yubei.xiong@mail.mcgill.ca"
] |
yubei.xiong@mail.mcgill.ca
|
f99f8b5423d85d36619ec720ba43ddadd424f923
|
b1185625f5e3124fafd6c59858a1913811de2f98
|
/communication_proj/communication_proj/wsgi.py
|
fa82aa9c19adf046f397c318322578917f1d03b0
|
[] |
no_license
|
afeefebrahim/mentor_question_api
|
1715cf46b7f7db4c9d1e2afa61eba0db5eec40fb
|
9e53db7f92186a81dfe020716bdf714a1c4f15cc
|
refs/heads/main
| 2023-03-02T05:48:18.341945
| 2021-02-07T10:46:27
| 2021-02-07T10:46:27
| 334,694,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
"""
WSGI config for communication_proj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'communication_proj.settings')
application = get_wsgi_application()
|
[
"afeefebrahim@gmail.com"
] |
afeefebrahim@gmail.com
|
8a1fdc176ceeabc8bbcda6039924735abbe05278
|
881041fab1b4d05f1c5371efed2f9276037eb609
|
/tasks/nyc-clean-heat-dataset/depositor.py
|
9ccda5f3ab4625576905b380703235e9d1582d78
|
[] |
no_license
|
ResidentMario/urban-physiology-nyc-catalog
|
b568f3b6ee1a887a50c4df23c488f50c92e30625
|
cefbc799f898f6cdf24d0a0ef6c9cd13c76fb05c
|
refs/heads/master
| 2021-01-02T22:43:09.073952
| 2017-08-06T18:27:22
| 2017-08-06T18:27:22
| 99,377,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
import requests
r = requests.get("https://data.cityofnewyork.us/api/views/8isn-pgv3/rows.csv?accessType=DOWNLOAD")
with open("/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/nyc-clean-heat-dataset/data.csv", "wb") as f:
f.write(r.content)
outputs = ["/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/nyc-clean-heat-dataset/data.csv"]
|
[
"aleksey.bilogur@gmail.com"
] |
aleksey.bilogur@gmail.com
|
2f378ba27c80a7f7edbd143cb0147c840416c445
|
03595a5d109e0f7d986c295a9b8041070566eec7
|
/ch6/invest.py
|
3712053accde8a8bf4bba424cce7cf6fbf6905df
|
[
"MIT"
] |
permissive
|
cjadeveloper/real-python-course-bundle
|
d46e0736d8af20872c1feadabe2e9b2abc424d57
|
ce73f48855884f1b9dfc12fee4c9ae89e3f8152f
|
refs/heads/main
| 2023-01-11T12:29:50.070390
| 2020-11-15T18:33:30
| 2020-11-15T18:33:30
| 311,988,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 628
|
py
|
# 6.5 Challenge: Track Your Investments
def invest(amount: float, rate: float, years: int) -> None:
"""
Tracks the growing amount of an investment over time.
An initial deposit, called the principal amount, is made. Each year,
the amount increases by a fixed percentage, called the annual rate of return.
>>> invest(100, .05, 4)
year 1: $105.00
year 2: $110.25
year 3: $115.76
year 4: $121.55
"""
for year in range(1, years + 1):
amount += amount * rate
print(f"year {year}: ${amount:.2f}")
if __name__ == "__main__":
invest(100, 0.05, 4)
|
[
"cjadeveloper@gmail.com"
] |
cjadeveloper@gmail.com
|
015875c929cb49a5aa91a776ede8e35d232db0b7
|
0b532a85b4c31de38ae5e371619653c75a762e3e
|
/src/homepage/migrations/0075_auto_20181207_2109.py
|
70ccf40c80087f42cb13b0240abd32c490ea561f
|
[] |
no_license
|
pirate765/howdyfinal
|
4fd13044d62d05de70ceecb3c6dacbff537ad96f
|
0860bdb05fe10e04d77bcf477b022c4ecb92d1d8
|
refs/heads/master
| 2020-04-21T07:03:24.952788
| 2019-02-23T18:51:20
| 2019-02-23T18:51:20
| 169,382,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,628
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-12-07 21:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('homepage', '0074_auto_20181207_1715'),
]
operations = [
migrations.CreateModel(
name='UpcomingTripItinerary',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('day_detail', tinymce.models.HTMLField()),
('upcoming_trip_package', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='upcoming_trip_itinerary', to='homepage.UpcomingTrip')),
],
),
migrations.AlterField(
model_name='addon',
name='destination_package',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='addons', to='homepage.Destinationpackage'),
),
migrations.AlterField(
model_name='destinationimage',
name='destination',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='destinationimages', to='homepage.Destinationpackage'),
),
migrations.AlterField(
model_name='grouppackageitenerary',
name='destination_package',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='destinationitinerary', to='homepage.Destinationpackage'),
),
]
|
[
"tusharblogger12@gmail.com"
] |
tusharblogger12@gmail.com
|
5b8987f9171654c2c99e935901b1480eb604ff3c
|
0384edeb54c45a4542de8787a4761c7f89d91d9e
|
/step-two.py
|
13820ee14fe4d1a931bb5c6a0234f7030263f8cc
|
[] |
no_license
|
seantstacey/SODA
|
3d1a3dcfb3810b520b85b6cc6ea1200b4d44abec
|
fff14e9680414f0fd346a49aea06ad6a65ef289c
|
refs/heads/main
| 2023-01-09T09:51:07.292704
| 2020-11-05T14:32:00
| 2020-11-05T14:32:00
| 303,754,096
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
import cx_Oracle
import myConnectInfo
connection = cx_Oracle.connect(myConnectInfo.usrnm, myConnectInfo.psswd, myConnectInfo.dsn)
print("Database version:", connection.version)
results = connection.cursor()
results.execute('select * from dept')
print('\nDEPT table contents:')
for row in results:
print (row)
results.close()
connection.close()
|
[
"noreply@github.com"
] |
seantstacey.noreply@github.com
|
4e8dcedcb2657e3ee8afd64d9d8926a0c5640eb4
|
f00331853234b558dd007c5b1e047fe727a810d6
|
/forms/optical_bench_form1.py
|
22682d080671b0ded3b3e027a1e04e6feb833e86
|
[] |
no_license
|
dl495/ss2165.github.io
|
6154a8590a782936c64e0e598def852baa59a827
|
5f0de710921a0108583cbd0f84e268b1c26ab63a
|
refs/heads/master
| 2020-12-24T09:53:11.740095
| 2015-08-25T13:20:44
| 2015-08-25T13:20:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,872
|
py
|
from anvil import *
import physics
import draw
import math
from slits import slits
from single import single
from grating import grating
class Form1(Form1Template):
R = 6
W = 0.3
x_stp = 0.0005
line_width = 0.001
def canvas_mouse_move (self, x, y, **event_args):
# This method is called when the mouse cursor moves over this component
#record mouse pos
# self.mouse.x = x/(self.xu*1.0)
# self.mouse.y = (self.ch-y)/(self.xu*1.0)
pass
def canvas_mouse_up (self, x, y, button, **event_args):
# This method is called when a mouse button is released on this component
# self.mouse.x = x/(self.xu*1.0)
# self.mouse.y = (self.ch-y)/(self.xu*1.0)
pass
def canvas_mouse_down (self, x, y, button, **event_args):
# This method is called when a mouse button is pressed on this component
# self.mouse.x = x/(self.xu*1.0)
# self.mouse.y = (self.ch-y)/(self.xu*1.0)
pass
def btn_run_click (self, **event_args):
# This method is called when the button is clicked
#if not self.running:
# self.running = True
# self.reset = False
# self.btn_run.text = "Pause"
#else:
# self.running = False
# self.btn_run.text = "Run"
pass
def btn_reset_click (self, **event_args):
# This method is called when the button is clicked
#self.running = False
self.reset = True
def change(self, **event_args):
self.wav = self.wav_slider.value*1e-9
self.wav_slider.draw()
self.draw_all()
def draw_all(self):
draw.reset2(self.canvas, self.xu)
draw.clear_canvas(self.canvas, "#fff")
N = int(self.slits.N_slider.value)
d = float(self.slits.d_slider.value)
a = float(self.single.a_slider.value)
n = float(self.grating.n_slider.value)
if self.aperture == "slits":
if self.rad_int.selected:
self.draw_slit_int(N,d,a, self.wav)
elif self.rad_pat.selected:
self.draw_slit_pat(N,d,a, self.wav)
elif self.aperture == "single":
if self.rad_int.selected:
self.draw_slit_int(N,d,a, self.wav, "single")
elif self.rad_pat.selected:
self.draw_slit_pat(N,d,a, self.wav, "single")
elif self.aperture == "grating":
self.draw_grating(n, self.wav)
def timer_tick (self, **event_args):
canvas = self.canvas
self.cw = canvas.get_width()
self.ch = canvas.get_height()
cw = self.cw
ch = self.ch
dt = self.dt
if self.first:
self.xu = float(self.cw)/self.W
self.wav_slider = draw.slider(self.can_slid, mini= 400, maxi = 700, stepsize = 1, start=510)
self.wav_slider.maxmin = True
self.wav = self.wav_slider.value*1e-9
self.wav_slider.draw()
self.draw_all()
self.first = False
self.change()
def draw_grating(self, n, wav):
canvas = self.canvas
draw.reset2(canvas, 1)
canvas.translate(float(self.cw)/2, 0)
col = draw.wavelength_to_rgb(self.wav*1e9)
canvas.line_width = 5
step = self.wav*n*self.R
x = 0
canvas.begin_path()
while x*self.xu<float(self.cw)/2:
xt = x*self.xu
canvas.move_to(xt,0)
canvas.line_to(xt, float(self.ch))
canvas.move_to(-xt,0)
canvas.line_to(-xt, float(self.ch))
x+=step
canvas.stroke_style = "rgb({0}, {1}, {2})".format(col[0],col[1],col[2])
canvas.stroke()
draw.reset2(canvas, self.xu)
def draw_slit_pat(self, N,d,a, wav, choice = "slits"):
canvas = self.canvas
x_stp = self.x_stp
canvas.scale(1/self.xu, 1/self.xu)
col = draw.wavelength_to_rgb(self.wav*1e9)
canvas.line_width = 2
for i in range(0,int(self.cw),2):
canvas.begin_path()
x = i/self.xu - float(self.W)/2
ang = math.asin(x/self.R)
if choice == "slits":
I = self.slit_int(N,d,wav,ang)
ma = self.slit_int(N,d,wav,0)
else:
I = self.single_int(a,wav,ang)
ma = 1
x +=float(self.W)/2
canvas.move_to(x*self.xu+1,0)
canvas.line_to(x*self.xu+1, float(self.ch))
canvas.stroke_style = "rgba({0}, {1}, {2}, {3})".format(col[0],col[1],col[2],math.sqrt(float(I/ma)))
canvas.stroke()
canvas.scale(self.xu, self.xu)
def draw_slit_int(self, N, d, a, wav, choice = "slits"):
canvas = self.canvas
x_stp = self.x_stp
x = []
fx = []
for i in range(int(float(self.W)/x_stp)):
x.append(i*x_stp- float(self.W)/2)
ang = math.asin((x[i] )/self.R)
if choice =="slits":
fx.append(self.slit_int(N,d,wav,ang))
else:
fx.append(self.single_int(a,wav,ang))
graph = draw.graph_plot(canvas,zip(x,fx))
graph.yrange[0] = 0
graph.xlabel = "x/m"
graph.ylabel = "I"
graph.axes_enabled = False
graph.plot()
draw.reset2(canvas, self.xu)
def slit_int(self, N, d, wav, theta):
dl = 2*math.pi*math.sin(theta)*d/wav
if dl == 0:
dl += self.x_stp
y = math.sin(0.5*N*dl)/math.sin(0.5*dl)
return y**2
def single_int(self,a, wav, theta):
dl = math.pi*math.sin(theta)*a/wav
if dl == 0:
dl += self.x_stp
y = math.sin(dl)/dl
return y**2
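    # Reference formulas implemented above (standard Fraunhofer diffraction):
    #   N slits:     I(theta) ~ [sin(N*delta/2) / sin(delta/2)]^2, delta = 2*pi*d*sin(theta)/wav
    #   single slit: I(theta) ~ [sin(beta) / beta]^2,              beta  = pi*a*sin(theta)/wav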
def btn_single_click(self, **event_args):
self.grid_opt.clear()
self.single = single()
self.grid_opt.add_component(self.single)
self.aperture = "single"
self.rad_int.enabled = True
self.change()
#self.slits.a_slider.draw()
def btn_grating_click(self, **event_args):
self.grid_opt.clear()
self.grating = grating()
self.grid_opt.add_component(self.grating)
self.aperture = "grating"
self.rad_int.enabled = False
self.rad_pat.selected = True
self.change()
#self.slits.n_slider.draw()
def btn_slits_click(self, **event_args):
self.grid_opt.clear()
self.grid_opt.add_component(self.slits)
self.aperture = "slits"
self.rad_int.enabled = True
self.change()
self.slits.N_slider.draw()
self.slits.d_slider.draw()
def __init__(self):
# This sets up a variable for every component on this form.
# For example, if we've drawn a button called "send_button", we can
# refer to it as self.send_button:
self.init_components()
#self.mouse = physics.vector3(0,0)
self.slits = slits()
# self.slits.txt_N.set_event_handler("pressed_enter", self.change)
# self.slits.txt_d.set_event_handler("pressed_enter", self.change)
self.grid_opt.add_component(self.slits)
self.aperture = "slits"
self.grating = grating()
self.single = single()
#self.single.txt_a.set_event_handler("pressed_enter", self.change)
#self.grating.txt_n.set_event_handler("pressed_enter", self.change)
# Any code you write here will run when the form opens.
#Uncomment as required.
#self.running= False
self.reset = True
self.dt = self.timer.interval
self.first = True
#self.t = 0
#SET SCALE (pixels per m, or unit used in code)
self.xu = 1
self.ang_range = 2*math.asin(float(self.W)/(2*self.R))
#APPEND ALL PARAMETER BOXES
#self.param_boxes= []
|
[
"ss2165@cam.ac.uk"
] |
ss2165@cam.ac.uk
|
2abb05fd7b0c94ae2d1719952139711abc569be0
|
d60f90decccc98bf70f8b90ff6d661a8c753ef37
|
/Les2/2_2.py
|
575a3ddb08f54bd4a94498b220033a67bcdf18e1
|
[] |
no_license
|
stephaniepham/programming
|
a9d3c2f1d9a29174732c353e2a597bb6c8e681e8
|
fa94630f035f4ce8e62ec6c92a4df0b4be065e5a
|
refs/heads/master
| 2021-05-05T12:52:25.197190
| 2017-09-25T12:39:52
| 2017-09-25T12:39:52
| 104,776,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
cijferICOR=8
cijferPROG=6
cijferCSN=7
gemiddelde=((cijferICOR+cijferPROG+cijferCSN)/3)
print(gemiddelde)
beloning=(cijferICOR*30)+(cijferPROG*30)+(cijferCSN*30)
print(beloning)
# i.e. "My grades, averaging <gemiddelde>, earn a reward of <beloning> euros!"
overzicht = 'Mijn cijfers gemiddeld een' + ' ' + str(gemiddelde) + ' ' + 'leveren een beloning van' + ' ' + str(beloning) + ' ' + 'euro op!'
print(overzicht)
|
[
"stephanie.pham@student.hu.nl"
] |
stephanie.pham@student.hu.nl
|
8da59d7215b68fa429ef7860353575c6a60196f2
|
a709f24e86e01317ecc8402ca87085e1d3505257
|
/gh-clone-all
|
c1a170eb7fa6a2707eabf04a8b585e3140906278
|
[
"MIT"
] |
permissive
|
dcoles/tools
|
eafeafddeb5138b9f36a5a6698f9cbcc495d251f
|
12b4c0bd0666fb0d7c5c415e1ae266e04eaed873
|
refs/heads/master
| 2022-11-08T11:20:00.805262
| 2022-11-02T04:43:57
| 2022-11-02T04:43:57
| 156,034,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,398
|
#!/usr/bin/env python3
"""Clone all repositories of a GitHub user/organization"""
import argparse
import getpass
import itertools
import os
import subprocess
import sys
from urllib.parse import urlsplit, urlunsplit
import requests
GITHUB_API = os.getenv('GITHUB_API', 'https://api.github.com')
def url_add_auth(url, username, password):
"""Add user/password auth to URL"""
u = urlsplit(url)
return urlunsplit((u.scheme, f'{username}:{password}@{u.netloc}', u.path, u.query, u.fragment))
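# e.g. url_add_auth('https://github.com/u/r.git', 'alice', 'token123')
#      -> 'https://alice:token123@github.com/u/r.git'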
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user', default=getpass.getuser())
parser.add_argument('-F', '--skip-forks', action='store_true')
parser.add_argument('--bare', action='store_true')
parser.add_argument('--mirror', action='store_true')
parser.add_argument('owner')
args = parser.parse_args()
try:
username, password = args.user.split(':', 1)
except ValueError:
username, password = args.user, getpass.getpass()
with requests.Session() as s:
s.auth = (username, password)
s.headers = {'Accept': 'application/vnd.github.v3.object'}
next_page = f'{GITHUB_API}/users/{args.owner}/repos'
while next_page:
r = s.get(next_page)
r.raise_for_status()
repos = r.json()
rel_next = r.links.get('next')
next_page = rel_next['url'] if rel_next else None
for repo in repos:
name = repo['name']
clone_url = repo['clone_url']
if args.skip_forks and repo['fork']:
print(f'Skipping fork {name}', file=sys.stderr)
continue
clone_auth_url = url_add_auth(clone_url, username, password)
print(f'Cloning {name} from {clone_url}', file=sys.stderr)
try:
git_args = []
if args.bare:
git_args.append('--bare')
if args.mirror:
git_args.append('--mirror')
subprocess.run(['git', 'clone', *git_args, clone_auth_url], check=True)
except subprocess.CalledProcessError as e:
print(f'ERROR: Failed to clone {name} from {clone_url}')
sys.exit(e.returncode)
if __name__ == '__main__':
main()
|
[
"coles.david@gmail.com"
] |
coles.david@gmail.com
|
|
ee44db13b8317d57a62576d6699be42745fac18b
|
374d6e45a127184b6a30fe9987e782503f1cd061
|
/app/main/model/DataProcess.py
|
70ee26b72d0daebb08b311e9bbff271085e2d4a9
|
[] |
no_license
|
FR-Team/FinancialRegulation
|
5b2a4ce5cf2bc1faaf1cd7b2ec2017b2129c686a
|
e01720855a9a2320289769cae3bddc94a00761e3
|
refs/heads/master
| 2020-03-17T00:48:17.840210
| 2018-05-14T06:19:08
| 2018-05-14T06:19:08
| 133,130,227
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,648
|
py
|
import os
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
def read_data(filename):
    # path to the data folder
    data_path = os.path.abspath('..') + '\\' + 'data'
    # full path to the file
    current_path = data_path + '\\' + filename
    print("CurrentDataPath: " + current_path)
    # read the csv file into a DataFrame
    # (pd.DataFrame.from_csv was removed from pandas; read_csv replaces it)
    df = pd.read_csv(current_path, index_col=0)
    return df
def get_property():
    df = read_data("ClientCount.csv")
    return df.columns.values.tolist()
def read_assign_data(filename, assign_list):
    # read the csv file into a DataFrame
    df = read_data(filename)
    # return the selected columns as a numpy.ndarray
    # (DataFrame.as_matrix was removed from pandas; to_numpy replaces it)
    return df[assign_list].to_numpy()
def kmeans_process(property_list, filename):
    # fetch and preprocess the data
    data_array = read_assign_data("ClientCount.csv", property_list)
    init_data_len = len(data_array)  # number of original rows, used for slicing
    append_array = read_assign_data(filename, property_list)
    data_array = np.vstack((data_array, append_array))  # appended at the end
    # cluster the combined data
    random_state = 170
    n_clusters = 20
    pred = KMeans(n_clusters=n_clusters,
                  random_state=random_state).fit_predict(data_array).tolist()
    map_list = pred[init_data_len:]
    clusters_num = {}
    for i in range(0, n_clusters):
        clusters_num[i] = pred.count(i)
    # sort the appended rows by the size of the cluster each row fell into
    # (numpy arrays have no .index method, so sort row indices instead)
    order = sorted(range(len(map_list)),
                   key=lambda idx: clusters_num[map_list[idx]],
                   reverse=True)
    result = [append_array[idx] for idx in order]
    return result
def test_main():
    read_assign_data("ClientCount.csv", ["account", "total", "average"])
    get_property()
# test
# test_main()
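# Illustrative call (assumes the csv files exist under ..\data;
# "NewClients.csv" is a hypothetical input file):
#   ranked = kmeans_process(["account", "total", "average"], "NewClients.csv")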
|
[
"30664307+Cauchy-NY@users.noreply.github.com"
] |
30664307+Cauchy-NY@users.noreply.github.com
|
b8241c215f9bb176d55fd4cfd3af879696346392
|
de01cb554c2292b0fbb79b4d5413a2f6414ea472
|
/algorithms/Medium/486.predict-the-winner.py
|
08f9322db63221f8993b5218cb85de500829d02d
|
[] |
no_license
|
h4hany/yeet-the-leet
|
98292017eadd3dde98a079aafcd7648aa98701b4
|
563d779467ef5a7cc85cbe954eeaf3c1f5463313
|
refs/heads/master
| 2022-12-10T08:35:39.830260
| 2020-09-02T23:12:15
| 2020-09-02T23:12:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,818
|
py
|
#
# @lc app=leetcode id=486 lang=python3
#
# [486] Predict the Winner
#
# https://leetcode.com/problems/predict-the-winner/description/
#
# algorithms
# Medium (47.90%)
# Total Accepted: 72K
# Total Submissions: 150.3K
# Testcase Example: '[1,5,2]'
#
# Given an array of scores that are non-negative integers. Player 1 picks one
# of the numbers from either end of the array followed by the player 2 and then
# player 1 and so on. Each time a player picks a number, that number will not
# be available for the next player. This continues until all the scores have
# been chosen. The player with the maximum score wins.
#
# Given an array of scores, predict whether player 1 is the winner. You can
# assume each player plays to maximize his score.
#
# Example 1:
#
#
# Input: [1, 5, 2]
# Output: False
# Explanation: Initially, player 1 can choose between 1 and 2.
# If he chooses 2 (or 1), then player 2 can choose from 1 (or 2) and 5. If
# player 2 chooses 5, then player 1 will be left with 1 (or 2).
# So, final score of player 1 is 1 + 2 = 3, and player 2 is 5.
# Hence, player 1 will never be the winner and you need to return False.
#
#
#
#
# Example 2:
#
#
# Input: [1, 5, 233, 7]
# Output: True
# Explanation: Player 1 first chooses 1. Then player 2 have to choose between 5
# and 7. No matter which number player 2 choose, player 1 can choose 233.
# Finally, player 1 has more score (234) than player 2 (12), so you need to
# return True representing player1 can win.
#
#
#
# Constraints:
#
#
# 1 <= length of the array <= 20.
# Any scores in the given array are non-negative integers and will not exceed
# 10,000,000.
# If the scores of both players are equal, then player 1 is still the winner.
#
#
#
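# Approach (a standard DP sketch, not taken from the original file):
# let dp[i][j] be the best score margin the player to move can secure on
# nums[i..j]. Then dp[i][i] = nums[i] and
#   dp[i][j] = max(nums[i] - dp[i+1][j], nums[j] - dp[i][j-1])
# and player 1 wins iff dp[0][n-1] >= 0, with ties going to player 1.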
from typing import List
class Solution:
    def PredictTheWinner(self, nums: List[int]) -> bool:
        n = len(nums)
        # dp[i][j]: best score margin the player to move can secure on nums[i..j]
        dp = [[0] * n for _ in range(n)]
        for i in range(n):
            dp[i][i] = nums[i]
        for length in range(2, n + 1):
            for i in range(n - length + 1):
                j = i + length - 1
                dp[i][j] = max(nums[i] - dp[i + 1][j], nums[j] - dp[i][j - 1])
        # ties go to player 1 (per the constraints above)
        return dp[0][n - 1] >= 0
|
[
"kevin.wkmiao@gmail.com"
] |
kevin.wkmiao@gmail.com
|
0a47a441b06dec8e2b65dd145888e2b14f2dd6cf
|
18d22cc4e4eb734514fb502832274bd16f82883c
|
/balloon_scan/__init__.py
|
6342c81f1e04b0c0ae9023afd788a5a36ba9cab9
|
[] |
no_license
|
paganol/Balloon-Scan
|
0a6eacf3837449fced175568b1e49f8ceaae3dbb
|
844368126faa7a04d93b2c37976320dbe6b5d943
|
refs/heads/main
| 2023-08-14T14:18:37.658850
| 2021-10-01T09:10:07
| 2021-10-01T09:10:07
| 412,389,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
from .balloon_scan import scan_sky
|
[
"lu.pagano@gmail.com"
] |
lu.pagano@gmail.com
|
f5d6711d080adc6fb676ef082189f474314e59f3
|
abad82a1f487c5ff2fb6a84059a665aa178275cb
|
/Codefights/arcade/python-arcade/level-9/61.Math-Practice/Python/solution1.py
|
3d81845674377f4eff6a2cc884f732d5939eec47
|
[
"MIT"
] |
permissive
|
RevansChen/online-judge
|
8ae55f136739a54f9c9640a967ec931425379507
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
refs/heads/master
| 2021-01-19T23:02:58.273081
| 2019-07-05T09:42:40
| 2019-07-05T09:42:40
| 88,911,035
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
# Python3
import functools
# restricted modification region
def mathPractice(numbers):
return functools.reduce(lambda x, y: x * y[0] + (y[1] if len(y) == 2 else 0), [ numbers[i:i + 2] for i in range(2, len(numbers), 2)], sum(numbers[:2]))
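# Worked trace (illustrative): mathPractice([1, 2, 3, 4, 5, 6])
#   start with sum([1, 2]) = 3; then 3 * 3 + 4 = 13; then 13 * 5 + 6 = 71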
|
[
"d79523@hotmail.com"
] |
d79523@hotmail.com
|
3a1ced5c24e818318c93075a58a918bdfd68a85e
|
c55c914325d047611a721d231a267bbbb6777fbc
|
/recursion_q/string_rev.py
|
0c357ec56eca2bf2f79e761279a42644d53a551d
|
[] |
no_license
|
vkd8756/Python
|
80389b21e056fe5884d765fa45e6bc5057209e45
|
80defd50c6dffec8b7d0530d8d8dbcb711aefc8b
|
refs/heads/main
| 2023-07-24T10:24:18.300489
| 2021-08-26T16:30:53
| 2021-08-26T16:30:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
a = 'racecar'
a = list(a)
b = 'racecaar'
def rever(s, start, stop):
    # reverse s[start:stop] in place by swapping the ends and recursing inward
    if stop - start > 1:
        s[start], s[stop - 1] = s[stop - 1], s[start]
        rever(s, start + 1, stop - 1)
    return "".join(s)
# prints False: 'racecar' reversed is still 'racecar', which differs from b
print(rever(a, 0, len(a)) == b)
|
[
"noreply@github.com"
] |
vkd8756.noreply@github.com
|
d46ce21c82e8f703bf761564097c5ac8ced9260a
|
1e09bc56bf2904b349df1873e11da3d527437880
|
/lecture-12/subseq.py
|
013d014af9d3cf68f028dc875375a3f40b58000a
|
[] |
no_license
|
codeforcauseorg-archive/DSA-Live-Python-Jun-0621
|
f3444f5671cb4985644c7432517477c3585c70fb
|
e4fe544178d7851c24755242390f39675b99fabe
|
refs/heads/main
| 2023-08-09T08:31:41.449120
| 2021-09-14T16:44:39
| 2021-09-14T16:44:39
| 384,725,085
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
def subseq(orig, proc, index):
if index == len(orig):
print(proc)
return
ch = orig[index]
subseq(orig, proc + ch, index+1)
subseq(orig, proc, index+1)
subseq("abc", "", 0)
|
[
"anujgargcse@gmail.com"
] |
anujgargcse@gmail.com
|
77148390d2f8ae4e9f8d74b4bc1900e45ba4f721
|
9e5353ba6e50f77a40a765bd494d8bfb990c8922
|
/stream_backend/api/migrations/0025_auto_20200801_1808.py
|
667c8efccd70ebe719d5f0c399e614972e6fceaf
|
[] |
no_license
|
admiralbolt/stream-stuff
|
d9e24f1d78ac142416525b9b42cc53ef0bc4712a
|
29cfa96f9e8d40c531362aced47ebacadccbe759
|
refs/heads/master
| 2023-08-05T00:02:17.812991
| 2021-09-23T05:47:16
| 2021-09-23T05:47:16
| 261,022,447
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
# Generated by Django 3.0.6 on 2020-08-01 23:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0024_twitchchatter'),
]
operations = [
migrations.AlterField(
model_name='twitchchatter',
name='latest_join',
field=models.DateField(blank=True),
),
]
|
[
"aviknecht@gmail.com"
] |
aviknecht@gmail.com
|
3d4908c197509ea8d5f9c284778592b418171b1b
|
3e51bdce6730240a243f95e1dfabe507b437af8e
|
/torch/testing/_internal/jit_metaprogramming_utils.py
|
e2485087122146d72d4918616028adf452205050
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
wang-xiaoyu23/pytorch
|
aa859f10ab36a6ea618cf0ac54211ce6e35dc83b
|
b0833533a779d656cd6e9f6d103956ff105e7ef5
|
refs/heads/master
| 2023-04-25T03:53:51.886859
| 2021-05-13T23:55:56
| 2021-05-13T23:57:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,553
|
py
|
# Torch
from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401
from torch.testing._internal.common_methods_invocations import non_differentiable, create_input, \
unpack_variables
import torch.nn.functional as F
import torch
import torch.cuda
import torch.jit
import torch.jit._logging
import torch.jit.frontend
from torch.testing._internal.common_nn import module_tests, new_module_tests
from torch.testing._internal.common_utils import is_iterable_of_tensors
from copy import deepcopy
from typing import List, Union
import math # noqa: F401
# Testing utils
from torch._six import inf
# TODO: include files like this should not set the default dtype
torch.set_default_dtype(torch.double)
L = 20
M = 10
S = 5
# NB: JIT script tests for all nn functional interfaces, script mode does
# not support in_place operations yet, so no inplace operation tests added.
# removed all the deprecated functions
#
# (
# method name,
# input size/constructing fn,
# args (tuple represents shape of a tensor arg),
# test variant name(will be used at test name suffix,
# 'inplace' skips grad tests), // optional
# (True, nonfusible_nodes, fusible_nodes) for autodiff // optional
# fn to determine if test should be skipped, // optional
# fn mapping output to part that should be gradcheck'ed, // optional
# kwargs for function, // optional
# )
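# Annotated example (drawn from the entries below):
#   ('avg_pool2d', (S, S, S, S), (3,), '', (True,))
#   calls F.avg_pool2d on a random (S, S, S, S) input with kernel_size=3,
#   no variant-name suffix, and autodiff expected (True).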
nn_functional_tests = [
('conv1d', (S, S, S), ((S, S, S),)),
('conv2d', (S, S, S, S), ((S, S, S, S),)),
('conv3d', (S, S, S, S, S), ((S, S, S, S, S),)),
('conv_transpose1d', (S, S, S), ((S, S, S),)),
('conv_transpose2d', (S, S, S, S), ((S, S, S, S),)),
('conv_transpose3d', (S, S, S, S, S), ((S, S, S, S, S),)),
('conv_tbc', (S, S, S), ((S, S, S), (S,), 2)),
('avg_pool1d', (S, S, S), (3,)),
('avg_pool2d', (S, S, S, S), (3,), '', (True,)),
('avg_pool3d', (S, S, S, S, S), (3,)),
('fractional_max_pool2d', (S, S, S, S), (3, [2, 3],)),
('max_pool1d', (S, S, S), (2, 1)),
('max_pool1d', (S, S, S), (2, 1, 1, 1, False, True), 'with_indices'),
('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')),
('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')),
('max_pool3d', (S, S, S, S, S), (2, 1)),
('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),
('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),
('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),
('lp_pool1d', (S, S, S), (2., 3, 2,)),
('lp_pool2d', (S, S, S, S), (2., 3, 2,)),
('adaptive_max_pool1d', (S, S, S), (5,)),
('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)),
('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)),
('adaptive_avg_pool1d', (S, S, S), (5,), '', (True,)),
('adaptive_avg_pool2d', (S, S, S, S), ([5, 7],), '', (True,)),
('adaptive_avg_pool3d', (S, S, S, S, S), ([3, 2, 2],), '', (True,)),
('dropout', (S, S, S), (0.5,), '', (True,
['aten::bernoulli_',
'aten::empty_like', 'aten::mul', 'aten::div'])),
('alpha_dropout', (S, S, S), (0.5,)),
('dropout2d', (S, S, S), (0.5,)),
('dropout3d', (S, S, S), (0.5,)),
('feature_alpha_dropout', (S, S, S), (0.5,)),
('threshold', (S, S, S), (0.1, 2.), '', (True,)),
('threshold', (S, S, S), (0.1, 2., True), 'inplace'),
('relu', (S, S, S), (), '', (True,)),
('relu', (S, S, S), (), 'inplace'),
('glu', (S - 1, S - 1, S - 1), (),),
('hardtanh', (S, S, S), (-0.5, 0.5),),
('hardtanh', (S, S, S), (-0.5, 0.5, True), 'inplace'),
('relu6', (S, S, S), (),),
('relu6', (S, S, S), (True), 'inplace'),
('elu', (S, S, S), (0.9,),),
('elu', (S, S, S), (0.9, True), 'inplace'),
('selu', (S, S, S), (),),
('selu', (S, S, S), (True), 'inplace'),
('celu', (S, S, S), (0.9,),),
('celu', (S, S, S), (0.9, True), 'inplace'),
('leaky_relu', (S, S, S), (0.02,),),
('leaky_relu', (S, S, S), (0.02,), 'inplace'),
('rrelu', (S, S), (0.1, 0.3, False),),
('rrelu', (S, S), (0.1, 0.3, False, True), 'inplace'),
('hardshrink', (S, S, S), (0.4,),),
('tanhshrink', (S, S, S), (),),
('softsign', (S, S, S), (),),
('softplus', (S, S, S), (),),
('softmin', (S, S, S), (0,),),
('softmax', (S, S, S), (0,), '', (True,)),
('softmax', (S, S, S), (0, 3, torch.double), 'with_all_args', (True,)),
('tanh', (S, S, S), (), '', (True,)),
('sigmoid', (S, S, S), (), '', (True,)),
('log_softmax', (S, S, S), (0,), '', (True,)),
('linear', (S, S), ((M, S),), '', (True, ['aten::linear'])),
('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::linear'])),
('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),),
('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)),
('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), ),
'', (False, 'aten::_batch_norm_impl_index')),
('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),),
('layer_norm', (S, S, S, S), ([5],), '',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),), 'with_only_weight',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
('layer_norm', (S, S, S, S), ([5], None, non_differentiable(torch.rand(S)),), 'with_only_bias',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),
non_differentiable(torch.rand(S))), 'with_weight_and_bias',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index', 'aten::addcmul'])),
('group_norm', (S, S, S), (1, torch.rand(5),),),
('local_response_norm', (S, S, S), (2, ),),
('nll_loss', F.log_softmax(torch.randn(3, 5), dim=0), (torch.tensor([1, 0, 4]),), '',),
('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2),),),
('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2), True, True), 'full'),
('kl_div', F.log_softmax(torch.randn(S, 10), 1), (F.softmax(torch.randn(S, 10), 1),),),
('cross_entropy', (3, S), (torch.randint(S, (3,), dtype=torch.int64),),),
('binary_cross_entropy_with_logits', (3,), (torch.empty(3).random_(2), ),),
('smooth_l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('huber_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('mse_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('smooth_l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('huber_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('mse_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('margin_ranking_loss', (3, S), ((3, S), (S,)),),
('hinge_embedding_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('multilabel_soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('cosine_embedding_loss', (S, S), ((S, S), non_differentiable(torch.rand(S,))),),
('pixel_shuffle', (1, 9, 4, 4), (3,),),
('pixel_unshuffle', (1, 1, 12, 12), (3,),),
('affine_grid', (S, 2, 3), (torch.Size([S, 1, 7, 7]),),),
('pad', (3, 3, 4, 2), ([1, 1],),),
('pairwise_distance', (S, S), ((S, S),),),
('pdist', (S, S), (),),
('cosine_similarity', (S, S), ((S, S),),),
('triplet_margin_loss', (S, S), ((S, S), (S, S)),),
('normalize', (S, S, S), (),),
('unfold', (S, S, S, S), ([2, 3]),),
('fold', (1, 3 * 2 * 2, 12), ([4, 5], [2, 2]),),
('grid_sample', (S, S, S, S), (non_differentiable(torch.rand(S, S, S, 2)),),),
('gumbel_softmax', (S, S), (2.,), '', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),
('gumbel_softmax', (S, S), (2., True,), 'hard', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),
('multilabel_margin_loss', torch.tensor([[0.2, -0.2, 0.07]]), (torch.tensor([[0, 0, 1]]),),),
('multi_margin_loss', (S, S), (non_differentiable(torch.randint(S, (S, ), dtype=torch.int64)),
1, 1., non_differentiable(torch.randn(S))),),
('binary_cross_entropy', torch.randn(3, 2).sigmoid(), (non_differentiable(torch.rand(3, 2)),
non_differentiable(torch.randn(3, 2))),),
('binary_cross_entropy', torch.randn(3, 2).sigmoid(),
(non_differentiable(torch.rand(3, 2)),
non_differentiable(torch.randn(3, 2)), None, None, 'mean'), 'size_average'),
('ctc_loss', torch.rand(S, S, S).log_softmax(2).detach().requires_grad_(),
(torch.randint(1, S, (S, S), dtype=torch.long), torch.full((S,), S, dtype=torch.long),
torch.randint(1, S, (S,), dtype=torch.long))),
('upsample', torch.randn(S, S, M, M), (None, 2.), 'with_scale'),
('upsample', torch.randn(S, S, M, M), (4,), 'with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'nearest_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'nearest_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'nearest_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'area_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'area_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'area_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bilinear_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bilinear_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'bilinear_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bicubic_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bicubic_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'bicubic_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'nearest_3d'),
('interpolate', torch.randn(S, M, M), (None, 2.), 'nearest_3d_with_scale'),
('interpolate', torch.randn(S, M, M), (4,), 'nearest_3d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'area_3d'),
('interpolate', torch.randn(S, M, M), (None, 2.), 'area_3d_with_scale'),
('interpolate', torch.randn(S, M, M), (4,), 'area_3d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'linear_3d'),
('interpolate', torch.randn(S, M, M), (None, 2.), 'linear_3d_with_scale'),
('interpolate', torch.randn(S, M, M), (4,), 'linear_3d_with_size'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'nearest_5d_with_scale'),
('interpolate', torch.randn(S, M, M, M, M), (4,), 'nearest_5d_with_size'),
('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'area_5d'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'area_5d_with_scale'),
('interpolate', torch.randn(S, M, M, M, M), (4,), 'area_5d_with_size'),
('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'trilinear_5d'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'trilinear_5d_with_scale'),
('interpolate', torch.randn(S, M, M, M, M), (4,), 'trilinear_5d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2, None, 'nearest', None, False),
'nearest_4d_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (4, None, 'nearest', None, False),
'nearest_4d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (None, 2., 'bilinear', None, False),
'bilinear_4d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (4, None, 'bilinear', None, False),
'bilinear_4d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (None, 2., 'bicubic', None, False),
'bicubic_4d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (4, None, 'bicubic', None, False),
'bicubic_4d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (None, 2., 'nearest', None, False),
'nearest_3d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (4, None, 'nearest', None, False),
'nearest_3d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (None, 2., 'linear', None, False),
'linear_3d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (4, None, 'linear', None, False),
'linear_3d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'nearest', None, False),
'nearest_5d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (4, None, 'nearest', None, False),
'nearest_5d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'trilinear', None, False),
'trilinear_5d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (4, None, 'trilinear', None, False),
'trilinear_5d_with_size_not_recompute_scale_factor'),
]
script_template = '''
def the_method({}):
return {}
'''
def value_to_literal(value):
if isinstance(value, str):
# Quotes string and escapes special characters
return ascii(value)
else:
return str(value)
def get_call(method_name, func_type, args, kwargs):
kwargs_str = ', '.join([k + '=' + value_to_literal(v) for k, v in kwargs.items()])
self_arg = args[0]
    if func_type == 'method':
args = args[1:]
argument_str = ', '.join(args)
argument_str += ', ' if len(args) and len(kwargs) else ''
argument_str += kwargs_str
if func_type == 'functional' or func_type == 'function':
call = 'torch.{}({})'.format(method_name, argument_str)
elif func_type == 'method':
call = '{}.{}({})'.format(self_arg, method_name, argument_str)
elif func_type == 'nn_functional':
call = 'torch.nn.functional.{}({})'.format(method_name, argument_str)
else:
raise TypeError('Unsupported function type')
return call
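# Illustrative sanity check (values are hypothetical, not from the test suite):
#   get_call('add', 'method', ['self_t', 'other_t'], {'alpha': 2})
#   -> "self_t.add(other_t, alpha=2)"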
def get_constant(x):
if x == inf:
return 'math.inf'
if x == -inf:
return '-math.inf'
return x
def get_script_args(args):
formals: List[str] = []
tensors: List[Union[torch.Tensor, List[torch.Tensor]]] = []
actuals: List[str] = []
for arg in args:
if isinstance(arg, torch.Tensor):
name = 'i{}'.format(len(formals))
formals.append(name)
actuals.append(name)
tensors.append(arg)
elif is_iterable_of_tensors(arg):
name = 'i{}'.format(len(formals))
formals.append(name + ': List[torch.Tensor]')
actuals.append(name)
tensors.append(list(arg))
elif isinstance(arg, str):
actuals.append("'{}'".format(arg))
else:
actuals.append(str(get_constant(arg)))
return (formals, tensors, actuals)
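# Worked example (hypothetical inputs): for args = (torch.ones(2), 'mean', 3)
# this returns formals == ['i0'], tensors == [torch.ones(2)] and
# actuals == ['i0', "'mean'", '3']; tensors become formal parameters of the
# generated function while strings and constants are inlined as literals.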
# create a script function from (name, func_type, output_process_fn),
# and returns the compiled function and example inputs
def gen_script_fn_and_args(method_name, func_type, *args, **kwargs):
formals, tensors, actuals = get_script_args(args)
call = get_call(method_name, func_type, actuals, kwargs)
script = script_template.format(', '.join(formals), call)
CU = torch.jit.CompilationUnit(script)
return CU.the_method, tensors
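# For example (illustrative), gen_script_fn_and_args('relu', 'nn_functional',
# torch.randn(3)) compiles a unit whose generated source is:
#   def the_method(i0):
#       return torch.nn.functional.relu(i0)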
# create a script function from (name, func_type),
# returns a function takes in (args, kwargs) and runs the compiled function
def create_script_fn(self, method_name, func_type):
# function returns tuple containing original output and
# filtered output to be used in checking gradients
def script_fn(*args, **kwargs):
fn, tensors = gen_script_fn_and_args(method_name, func_type, *args, **kwargs)
self.assertExportImport(fn.graph, tensors)
output = fn(*tensors)
# skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
script_fn.last_graph = fn.graph_for(*tensors) # type: ignore[attr-defined]
return output
return script_fn
# make a new function where all non-tensor arguments in 'args' have been partially
# applied, and all tensor arguments remain.
# used to trace functions when some arguments are not tensors
def partial_apply_nontensors(fn, args, **kwargs):
source = ['t' if (isinstance(arg, torch.Tensor) or is_iterable_of_tensors(arg)) else 's' for arg in args]
def new_fn(*tensors_):
tensors = iter(tensors_)
return fn(*(args[i] if s == 's' else next(tensors) for i, s in enumerate(source)), **kwargs)
return new_fn, [arg for arg in args if isinstance(arg, torch.Tensor) or is_iterable_of_tensors(arg)]
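# Sketch of the behavior (hypothetical fn and tensors): for args = (t, 2) with
# tensor t, source == ['t', 's'], so the returned new_fn(t2) invokes fn(t2, 2);
# only tensor slots are re-bound, giving torch.jit.trace a tensors-only signature.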
# create a trace function from input fn
def create_traced_fn(self, fn):
def traced_fn(*inputs, **kwargs):
fn_tensors, inputs_tensors = partial_apply_nontensors(fn, inputs, **kwargs)
# `check_trace` is set to False because check_trace is run with @no_grad
# Also, `check_against_reference` already does all the checks
# against python function
traced = torch.jit.trace(fn_tensors, inputs_tensors, check_trace=False)
self.assertExportImport(traced.graph, inputs_tensors)
output = traced(*inputs_tensors)
# skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
traced_fn.last_graph = traced.graph_for(*inputs_tensors) # type: ignore[attr-defined]
return output
return traced_fn
# known to be failing in script
EXCLUDE_SCRIPT = {
'test_norm_fro_default',
'test_norm_fro_cpu',
'test_norm_nuc',
'test_norm_fro',
'test_norm_nuc_batched',
# aten op has additional cudnn argument
'test_nn_unfold',
# flaky test - TODO fix
'test_nn_ctc_loss',
# unknown builtin op
'test_nn_fold',
# jit doesn't support sparse tensors.
'test_to_sparse'
}
# generates a script function and set of example inputs
# from a specified test in the format of nn_functional_tests
def get_nn_functional_compiled_fn_and_inputs(name, self_size, args, variant_name='', *extra_args):
test_name = 'test_nn_' + name
if variant_name != '':
test_name = test_name + '_' + variant_name
no_grad = variant_name == 'inplace'
self_variable = create_input((self_size,))[0][0]
kwargs = None
# need to record this because methods can change the size (e.g. unsqueeze)
args_variable, kwargs_variable = create_input(args)
self_tensor = deepcopy(self_variable.data)
args_tensor = deepcopy(unpack_variables(args_variable))
f_args_variable = (self_variable,) + args_variable
f_args_tensor = (self_tensor,) + args_tensor
with torch._jit_internal._disable_emit_hooks():
script_fn, inputs = gen_script_fn_and_args(name, "nn_functional", *f_args_variable)
return script_fn, inputs
# additional modules test
# TODO: delete this list once we make all nn_tests work
additional_module_tests = [
{
'module_name': 'Bilinear',
'constructor_args': (S, S, M),
'input_size': (S, S),
'extra_args': ((S, S),)
},
{
'module_name': 'RNNCell',
'constructor_args': (S, S),
'input_size': (S, S),
},
{
'module_name': 'LSTMCell',
'constructor_args': (S, S),
'input_size': (S, S),
},
{
'module_name': 'GRUCell',
'constructor_args': (S, S),
'input_size': (S, S),
},
{
'module_name': 'MultiheadAttention',
'constructor_args': (128, 8),
'input_size': (10, 8, 128),
'extra_args': (torch.randn(10, 8, 128), torch.randn(10, 8, 128)),
'slowTest': True
},
{
'module_name': 'Transformer',
'constructor_args': (1, 1, 1, 1, 2),
'input_size': (3, 1, 1),
'extra_args': (torch.randn(1, 1, 1),),
'slowTest': True
}
]
EXCLUDE_SCRIPT_MODULES = {
'test_nn_AdaptiveAvgPool2d_tuple_none',
'test_nn_AdaptiveAvgPool3d_tuple_none',
'test_nn_AdaptiveMaxPool2d_tuple_none',
'test_nn_AdaptiveMaxPool3d_tuple_none',
# Doesn't use future division, so this is not supported
'test_nn_CrossMapLRN2d',
}
script_method_template = '''
def forward({}):
return {}
'''
def create_script_module(self, nn_module, constructor_args, *args, **kwargs):
def script_module(*args, **kwargs):
formals, tensors, actuals = get_script_args(args)
method_args = ', '.join(['self'] + actuals)
call_args_str = ', '.join(actuals)
call = "self.submodule({})".format(call_args_str)
script = script_method_template.format(method_args, call)
submodule_constants = []
if kwargs.get('is_constant'):
submodule_constants = ['submodule']
# Create module to use the script method
class TheModule(torch.jit.ScriptModule):
__constants__ = submodule_constants
def __init__(self):
super(TheModule, self).__init__()
self.submodule = nn_module(*constructor_args)
def make_module(script):
module = TheModule()
# check __repr__
str(module)
module.define(script)
return module
module = make_module(script)
if self:
self.assertExportImportModule(module, tensors)
module(*args)
# skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
create_script_module.last_graph = module.graph # type: ignore[attr-defined]
return module
return script_module
def check_alias_annotation(method_name, args, kwargs, *, aten_name, func_type='method'):
formals, tensors, actuals = get_script_args(args)
call = get_call(method_name, func_type, actuals, kwargs)
script = script_template.format(', '.join(formals), call)
CU = torch.jit.CompilationUnit(script)
# to clean up IR
torch._C._jit_pass_inline(CU.the_method.graph)
torch._C._jit_pass_constant_propagation(CU.the_method.graph)
torch._C._jit_check_alias_annotation(CU.the_method.graph, tuple(tensors), aten_name)
def get_nn_module_name_from_kwargs(**kwargs):
if 'module_name' in kwargs:
return kwargs['module_name']
elif 'fullname' in kwargs:
return kwargs['fullname']
elif 'constructor' in kwargs:
return kwargs['constructor'].__name__
def get_nn_mod_test_name(**kwargs):
name = get_nn_module_name_from_kwargs(**kwargs)
test_name = name
if 'desc' in kwargs:
test_name = "{}_{}".format(test_name, kwargs['desc'])
return 'test_nn_{}'.format(test_name)
def get_nn_module_class_from_kwargs(**kwargs):
name = get_nn_module_name_from_kwargs(**kwargs)
index = name.find("_")
if index == -1:
return name
    else:
        return name[:index]
def try_get_nn_module_compiled_mod_and_inputs(*args, **kwargs):
name = get_nn_module_name_from_kwargs(**kwargs)
if 'desc' in kwargs and 'eval' in kwargs['desc']:
# eval() is not supported, so skip these tests
return
    test_name = get_nn_mod_test_name(**kwargs)
if test_name in EXCLUDE_SCRIPT_MODULES:
return
if 'constructor' in kwargs:
nn_module = kwargs['constructor']
else:
nn_module = getattr(torch.nn, name)
if "FunctionalModule" in str(nn_module):
return
if 'constructor_args_fn' in kwargs:
constructor_args = kwargs['constructor_args_fn']()
else:
constructor_args = kwargs.get('constructor_args', ())
# Set up inputs from tuple of sizes or constructor fn
input_dtype = torch.double
if 'input_fn' in kwargs:
input = kwargs['input_fn']()
if isinstance(input, torch.Tensor):
input = (input,)
if all(tensor.is_complex() for tensor in input):
input_dtype = torch.cdouble
else:
input = (kwargs['input_size'],)
# Extra parameters to forward()
if 'extra_args' in kwargs:
input = input + kwargs['extra_args']
if 'target_size' in kwargs:
input = input + (kwargs['target_size'],)
elif 'target_fn' in kwargs:
if torch.is_tensor(input):
input = (input,)
input = input + (kwargs['target_fn'](),)
args_variable, kwargs_variable = create_input(input, dtype=input_dtype)
f_args_variable = deepcopy(unpack_variables(args_variable))
out_var = deepcopy(f_args_variable)
args, mod = f_args_variable, create_script_module(None, nn_module, constructor_args, *f_args_variable)(*f_args_variable)
return mod, out_var
def get_all_nn_module_tests():
return module_tests + new_module_tests + additional_module_tests
# ==== /lte_enb/src/acceptance/egtpu/cm_xta_checkfile.py (repo: cmjeong/rashmi_oai_epc, no license) ====
#
#/********************************************************************20**
#
# Name: SIP
#
# Type: Python File
#
# Desc:
#
# File: cm_xta_checkfile.py
#
# Sid: cm_xta_checkfile.py@@/main/tenb_main_ccb/1 - Wed Jul 22 18:59:08 2015
#
# Prg:
#
#*********************************************************************21*/
#!/usr/bin/env python
import sys
import glob
import getopt
import cm_xta_py
from cm_xta_py import *
def listSort (a, b) :
# Split on dot
aList = a.split('.')
bList = b.split('.')
val1 = int (aList[1])
val2 = int (bList[1])
if val1 < val2 :
return -1
elif val1 > val2 :
return 1
else :
return 0
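# Illustrative (hypothetical test-case names): listSort('tc.2.a', 'tc.10.b')
# returns -1 because 2 < 10 numerically, whereas a plain string sort would
# order '10' before '2'.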
def showUsage () :
    print 'Usage: cm_xta_checkfile.py [-ihv] [file1] [fileN]'
print '\t[-i]: continue processing on error'
print '\t[-v]: verbose mode'
print '\t[-h]: show help'
def printDbgErr (*txt) :
if verboseMode == False :
txt = txt[2:]
for elm in txt :
if verboseMode == False :
print elm.lstrip(),
else :
print elm,
print
def printDbgInfo (*txt) :
if verboseMode == True :
for elm in txt :
print elm,
print
dbgMask = True
ignoreErr = False
verboseMode = False
if __name__ == '__main__' :
optLst, argLst = getopt.getopt(sys.argv[1:], 'ihv')
if len(optLst) != 0 :
for opt in optLst :
if opt[0] == '-i' :
ignoreErr = True
elif opt[0] == '-h' :
showUsage()
sys.exit()
elif opt[0] == '-v' :
verboseMode = True
cm_xta_py.dbgMask = True
argc = len(argLst)
if argc == 0 :
fileLst = glob.glob('*.xml')
else :
fileLst = argLst
cnt = 1
failTG = []
failTC = []
failCmd = []
for elm in fileLst :
# Two step process, load the file, parse the TC
step = str(cnt)
if (cnt < 10) :
printDbgInfo('Step', step, "\t\tInfo: Processing all the test cases in the file " + elm)
else :
printDbgInfo('Step', step, "\tInfo: Processing all the test cases in the file " + elm)
printDbgInfo()
try :
dictList = loadTestCases(elm, False)
except :
printDbgErr('Step', step, "\tError: loading file " + elm)
if ignoreErr == False :
raise
else :
printDbgInfo('Step', step, "\tInfo: skipping error, continue with the next file")
continue
#Sort the test cases list
tcList = dictList.keys()
tcList.sort(listSort)
subCnt = 0
# Parse the testcases
for tst in tcList :
subCnt = subCnt + 1
step = str(cnt) + '.' + str(subCnt)
try :
printDbgInfo('Step', step, "\tInfo: Parsing test case " + tst + " in the file " + elm)
parseTestCase(dictList[tst][3])
printDbgInfo()
except :
printDbgErr('Step', step, "\tError: parsing test case " + tst + " in the file " + elm)
open (tst + ".log", 'w').write(dictList[tst][3])
printDbgInfo('Step', step, "\tInfo: Test case written in log file " + tst + ".log")
if ignoreErr == False :
raise
else :
printDbgInfo('Step', step, "\tInfo: skipping testcase, continue with the next testcase")
printDbgInfo()
failTC.append(tst)
continue
cnt = cnt + 1
for elm in fileLst :
# Two step process, load the file, parse the TC/TG/Command
step = str(cnt)
printDbgInfo('Step', step, "\tInfo: Processing all the test groups in the file " + elm)
printDbgInfo()
try :
dictList = loadTestGroups(elm, False)
except :
printDbgErr('Step', step, "\tError: loading file " + elm)
if ignoreErr == False :
raise
else :
printDbgInfo('Step', step, "\tInfo: skipping error, continue with the next file")
printDbgInfo()
continue
subCnt = 0
# Parse the testgroups
for tst in dictList.keys() :
subCnt = subCnt + 1
step = str(cnt) + '.' + str(subCnt)
try :
printDbgInfo('Step', step, "\tInfo: Parsing test group " + tst + " in the file " + elm)
parseTestGroup(dictList[tst][2])
printDbgInfo()
except :
printDbgErr('Step', step, "\tError: parsing test group " + tst + " in the file " + elm)
open (tst + ".log", 'w').write(dictList[tst][2])
printDbgInfo('Step', step, "\tInfo: Test group written in log file " + tst + ".log")
if ignoreErr == False :
raise
else :
printDbgInfo('Step', step, "\tInfo: skipping test group, continue with the next test group")
printDbgInfo()
failTG.append(tst)
continue
cnt = cnt + 1
for elm in fileLst :
# Two step process, load the file, parse the Command
step = str(cnt)
printDbgInfo('Step', step, "\tInfo: Processing all the commands in the file " + elm)
printDbgInfo()
try :
dictList = loadCommands(elm, False)
except :
printDbgErr('Step', step, "\tError: loading file " + elm)
if ignoreErr == False :
raise
else :
printDbgInfo('Step', step, "\tInfo: skipping error, continue with the next file")
printDbgInfo()
continue
subCnt = 0
# Parse the commands
for tst in dictList.keys() :
subCnt = subCnt + 1
step = str(cnt) + '.' + str(subCnt)
try :
printDbgInfo('Step', step, "\tInfo: Parsing command " + tst + " in the file " + elm)
parseCommand(dictList[tst])
printDbgInfo()
except :
printDbgErr('Step', step, "\tError: parsing command " + tst + " in the file " + elm)
open (tst + ".log", 'w').write(dictList[tst])
printDbgInfo('Step', step, "\tInfo: Command written in log file " + tst + ".log")
if ignoreErr == False :
raise
else :
printDbgInfo('Step', step, "\tInfo: skipping command, continue with the next command")
printDbgInfo()
failCmd.append(tst)
continue
cnt = cnt + 1
if ignoreErr == True :
print
if len(failCmd) != 0 :
print 'Failed Commands :'
print '-----------------'
failCmd.sort()
for tst in failCmd :
print '\t', tst
if len(failTG) != 0 :
print 'Failed Test Groups :'
print '--------------------'
failTG.sort()
for tst in failTG :
print '\t', tst
if len(failTC) != 0 :
print 'Failed Test Cases :'
print '-------------------'
failTC.sort()
for tst in failTC :
print '\t', tst
#/********************************************************************30**
#
# End of file: cm_xta_checkfile.py@@/main/tenb_main_ccb/1 - Wed Jul 22 18:59:08 2015
#
#*********************************************************************31*/
#
#
#/********************************************************************40**
#
# Notes:
#
#*********************************************************************41*/
#
#/********************************************************************50**
#
#*********************************************************************51*/
#
#
#/********************************************************************60**
#
# Revision history:
#
#*********************************************************************61*/
#
#/********************************************************************90**
#
# ver pat init description
#------------ -------- ---- ----------------------------------------------
#/main/4 --- sy 1. Update of sid field
#/main/5 --- sk 1. Updated for PSF-SIP 1.1 Release
#*********************************************************************91*/
# ==== /datasets/common_vision_dataset.py (repo: zilongzheng/PaddleEBM, Apache-2.0) ====
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
from .builder import DATASETS
from .base_dataset import BaseDataset
from .transforms.builder import build_transforms
@DATASETS.register()
class CommonVisionDataset(paddle.io.Dataset):
"""
Dataset for using paddle vision default datasets, such as mnist, flowers.
"""
def __init__(self,
dataset_name,
dataroot=None,
transforms=None,
return_label=True,
mode=None,
params=None):
"""Initialize this dataset class.
Args:
dataset_name (str): return a dataset from paddle.vision.datasets by this option.
transforms (list[dict]): A sequence of data transforms config.
            return_label (bool): whether to return a label of a sample.
            params (dict): parameters of paddle.vision.datasets.
"""
super(CommonVisionDataset, self).__init__()
dataset_cls = getattr(paddle.vision.datasets, dataset_name)
transform = build_transforms(transforms)
self.return_label = return_label
param_dict = {}
param_names = list(dataset_cls.__init__.__code__.co_varnames)
if 'transform' in param_names:
param_dict['transform'] = transform
if 'mode' in param_names:
param_dict['mode'] = mode
if 'image_path' in param_names:
param_dict['image_path'] = dataroot
if params is not None:
for name in param_names:
if name in params:
param_dict[name] = params[name]
self.dataset = dataset_cls(**param_dict)
def __getitem__(self, index):
return_dict = {}
return_list = self.dataset[index]
if isinstance(return_list, (tuple, list)):
if len(return_list) == 2:
return_dict['img'] = return_list[0]
if self.return_label:
return_dict['class_id'] = np.asarray(return_list[1])
else:
return_dict['img'] = return_list[0]
else:
return_dict['img'] = return_list
return return_dict
def __len__(self):
return len(self.dataset)
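# Minimal usage sketch (illustrative; the argument values below are assumptions,
# not taken from this repo's configs):
#   dataset = CommonVisionDataset(dataset_name='MNIST', mode='train')
#   sample = dataset[0]   # -> {'img': ..., 'class_id': ...}
#   len(dataset)          # number of samples in the underlying paddle dataset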
# ==== /Binary_Search_Tree.py (repo: kd1726/Data-Structures, no license) ====
class Node:
def __init__(self,data=None):
self.data=data
self.right_child = None
self.left_child = None
self.parent = None
class BinarySearchTree:
def __init__(self):
self.root = None
def append(self,data):
if self.root==None:
self.root = Node(data)
else:
return self._append(self.root,data)
def _append(self,cur_node,data):
if data>cur_node.data:
if cur_node.right_child==None:
cur_node.right_child = Node(data)
cur_node.right_child.parent = cur_node
else:
return self._append(cur_node.right_child,data)
        elif data < cur_node.data:
if cur_node.left_child==None:
cur_node.left_child=Node(data)
cur_node.left_child.parent = cur_node
else:
return self._append(cur_node.left_child,data)
else:
return "No duplicates in this tree!"
def height(self):
height=0
cur_node = self.root
if cur_node==None:
return height
else:
return self._height(self.root,height)
def _height(self,cur_node,height):
if cur_node!=None:
left_height = self._height(cur_node.left_child,height+1)
right_height = self._height(cur_node.right_child,height+1)
return max(left_height,right_height)
return height
def print_tree(self):
if self.root==None:
return "Cannot print a tree that doesn't exist"
else:
ask = input("In order (ino), Pre order (pro) or post order (pto)").lower()
if ask=="ino":
return self._print_in_order_tree(self.root)
elif ask=="pro":
return self._print_pre_order_tree(self.root)
elif ask=="pto":
return self._print_post_order_tree(self.root)
else:
return self._print_in_order_tree(self.root)
def _print_in_order_tree(self,cur_node):
if cur_node!=None:
self._print_in_order_tree(cur_node.left_child)
print(cur_node.data)
self._print_in_order_tree(cur_node.right_child)
return
def _print_pre_order_tree(self,cur_node):
if cur_node!=None:
print(cur_node.data)
self._print_pre_order_tree(cur_node.left_child)
self._print_pre_order_tree(cur_node.right_child)
return
def _print_post_order_tree(self,cur_node):
if cur_node!=None:
self._print_post_order_tree(cur_node.left_child)
self._print_post_order_tree(cur_node.right_child)
print(cur_node.data)
return
def search(self,data):
cur_node = self.root
if cur_node==None:
return "No results if there is no tree!"
else:
return self._search(self.root,data)
def _search(self,cur_node,data):
if data>cur_node.data:
if cur_node.right_child!=None:
return self._search(cur_node.right_child,data)
else:
return "No result found in tree."
elif data<cur_node.data:
if cur_node.left_child!=None:
return self._search(cur_node.left_child,data)
else:
return "No result found in tree."
elif data==cur_node.data:
return f"{cur_node.data} has been found in the tree"
else:
return "No result found in tree."
def find(self,data):
cur_node = self.root
if cur_node==None:
return "No results if there is no tree!"
else:
return self._find(self.root,data)
def _find(self,cur_node,data):
if data>cur_node.data:
if cur_node.right_child!=None:
return self._find(cur_node.right_child,data)
else:
return None
elif data<cur_node.data:
if cur_node.left_child!=None:
return self._find(cur_node.left_child,data)
else:
return None
elif data==cur_node.data:
return cur_node
else:
return None
def delete_value(self,data):
if self.find(data)==None:
return "You cannot delete something that doesn't exist"
else:
ask = input(f"You are deleting {self.find(data).data} from your tree. Continue? (y/n)").lower()
if ask=="y":
return self._delete_node(self.find(data))
else:
return "Operation aborted"
def _delete_node(self,node):
def min_value_node(node):
cur = node
while cur.left_child!=None:
cur = cur.left_child
return cur
def getChildren(node):
c =0
if node.left_child!=None:
c+=1
if node.right_child!=None:
c+=1
return c
parent = node.parent
children = getChildren(node)
        if children==0:
            if parent is None:
                # deleting the root of a single-node tree
                self.root = None
            elif parent.left_child==node:
                parent.left_child=None
            else:
                parent.right_child=None
if children==1:
if node.left_child!=None:
child = node.left_child
else:
child = node.right_child
            if parent is None:
                # the node being deleted is the root; promote its only child
                self.root = child
            elif parent.left_child==node:
                parent.left_child = child
                node.left_child = None
            else:
                parent.right_child=child
                node.right_child=None
child.parent = parent
if children==2:
successor = min_value_node(node.right_child)
            node.data = successor.data  # copy the successor's value, then delete the successor node
self._delete_node(successor)
return
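# Quick usage sketch (illustrative, not part of the original file):
#   tree = BinarySearchTree()
#   for v in (8, 3, 10, 1, 6):
#       tree.append(v)
#   tree.search(6)    # -> '6 has been found in the tree'
#   tree.height()     # -> 3, counting nodes on the longest root-to-leaf path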
# ==== /Easy/605.py (repo: hoang-ng/LeetCode, no license) ====
# 605. Can Place Flowers
# Suppose you have a long flowerbed in which some of the plots are planted and some are not. However, flowers cannot be planted in adjacent plots - they would compete for water and both would die.
# Given a flowerbed (represented as an array containing 0 and 1, where 0 means empty and 1 means not empty), and a number n, return if n new flowers can be planted in it without violating the no-adjacent-flowers rule.
# Example 1:
# Input: flowerbed = [1,0,0,0,1], n = 1
# Output: True
# Example 2:
# Input: flowerbed = [1,0,0,0,1], n = 2
# Output: False
# Note:
# The input array won't violate no-adjacent-flowers rule.
# The input array size is in the range of [1, 20000].
# n is a non-negative integer which won't exceed the input array size.
class Solution(object):
def canPlaceFlowers(self, flowerbed, n):
for i, x in enumerate(flowerbed):
if x == 0 and (i == 0 or flowerbed[i - 1] == 0) and (i == len(flowerbed) - 1 or flowerbed[i + 1] == 0):
n -= 1
flowerbed[i] = 1
return n <= 0
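# Quick sanity checks mirroring the examples above (illustrative):
#   Solution().canPlaceFlowers([1, 0, 0, 0, 1], 1)  # -> True
#   Solution().canPlaceFlowers([1, 0, 0, 0, 1], 2)  # -> False
# Note: the method mutates its flowerbed argument in place, so pass a copy if
# the original list is still needed.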
# ==== /src/parser.py (repo: prabhuanish/cs170_project_gargsack, no license) ====
import numpy as np
import pickle
import gzip
import os
path = "../inputs/project_instances/"
# Load in all the inputs (NOTE: Maybe turn into nparray?)
def load_inputs(start_num, end_num):
print("Loading the inputs...")
inputs = []
for i in range(start_num, end_num + 1):
print("Loading Input " + str(i) + ":")
data_path = path + "problem" + str(i) + ".in"
data = parse_input(data_path)
inputs += [data]
#print(inputs)
return inputs
# Parse each individual input file
def parse_input(path):
# Load the data
with open(path) as f:
#data = f.readlines()
P = float(f.readline())
M = float(f.readline())
N = int(f.readline())
C = int(f.readline())
items = []
constraints = []
for i in range(N):
name, cls, weight, cost, val = f.readline().split(";")
items.append([name, int(cls), float(weight), float(cost), float(val)])
for i in range(C):
constraint = set(eval(f.readline()))
constraints.append(constraint)
# # Clean white space
# data = [x.strip() for x in data]
# P = float(data[0])
# M = float(data[1])
# N = int(data[2])
# C = int(data[3])
# print("P: " + str(P) + " M: " + str(M) + " N: " + str(N) + " C: " + str(C) + "\n")
# items = data[4:4+N]
# constraints = data[4+N:5+N+C]
return [P, M, items, constraints]
def read_input(filename):
"""
P: float
M: float
N: integer
C: integer
items: list of tuples
constraints: list of sets
"""
with open(filename) as f:
P = float(f.readline())
M = float(f.readline())
N = int(f.readline())
C = int(f.readline())
items = []
constraints = []
for i in range(N):
name, cls, weight, cost, val = f.readline().split(";")
items.append((name, int(cls), float(weight), float(cost), float(val)))
for i in range(C):
constraint = set(eval(f.readline()))
constraints.append(constraint)
return P, M, N, C, items, constraints
def parse_item(item):
return item.split(";")
def parse_constraint(constraint):
assert(constraint != "")
constraints = constraint.split(",")
for i in range(len(constraints)):
constraints[i] = int(constraints[i])
return constraints
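# e.g. parse_constraint("1, 4, 7") -> [1, 4, 7] (hypothetical constraint line;
# int() tolerates the surrounding whitespace).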
def write_output(filename, items_chosen, new_best, p_num):
    best_path = "../best/output_" + p_num + "_best.out"
    # Read the previous best score, if a best file already exists
    old_best = ""
    if os.path.isfile(best_path):
        with open(best_path, "r") as b:
            old_best = b.readline()
    if old_best == "" or new_best > float(old_best):
        print(filename)
        print("OLD BEST: " + old_best)
        print("NEW BEST: " + str(new_best))
        # 'w' truncates, so the old files do not need to be removed first
        with open(best_path, "w") as b:
            b.write(str(new_best))
        with open(filename, "w") as f:
            for i in items_chosen:
                f.write("{0}\n".format(i))
# ==== /twitter_api_client/utils.py (repo: owensengoku/twitter-api, Apache-2.0) ====
# -*- coding: utf-8 -*-
from .variables import *
from datetime import datetime
# URL format of the API
# {SCHEME}://{subdomain}.{ROOT_DOMAIN}{version_path}/{path}?{parameters}
def generate_url(endpoint):
v = endpoint.get('version')
    # because some API URLs come without a version segment
version_path = '' if v == NOVERSION else '/%s' % v
return '%s://%s.%s%s/%s' % (SCHEME,
endpoint.get('subdomain',''),
ROOT_DOMAIN,
version_path,
endpoint.get('path',''))
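# Illustrative expansion, assuming SCHEME == 'https' and ROOT_DOMAIN ==
# 'twitter.com' in .variables (both live in that module, not shown here):
#   generate_url({'subdomain': 'api', 'version': '1.1',
#                 'path': 'statuses/user_timeline.json'})
#   -> 'https://api.twitter.com/1.1/statuses/user_timeline.json'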
# Ref: https://developer.twitter.com/en/docs/basics/rate-limiting.html
# x-rate-limit-limit: the rate limit ceiling for that given endpoint
# x-rate-limit-remaining: the number of requests left for the 15 minute window
# x-rate-limit-reset: the remaining window before the rate limit resets, in UTC epoch seconds
def get_rate_limit_info(headers):
""" Get Rate Limit Information from response headers (A Dictionary)
:returns: Dictionary of 'remaining' (int), 'limit' (int), 'reset' (time)
"""
ret = {}
ret['remaining'] = int(headers.get('x-rate-limit-remaining'))
ret['limit'] = int(headers.get('x-rate-limit-limit'))
ret['reset'] = datetime.fromtimestamp(int(headers.get('x-rate-limit-reset')))
return ret
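# Illustrative call (header values are made up):
#   get_rate_limit_info({'x-rate-limit-remaining': '899',
#                        'x-rate-limit-limit': '900',
#                        'x-rate-limit-reset': '1541031600'})
#   -> {'remaining': 899, 'limit': 900, 'reset': <datetime for that epoch>}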
def get_result(response, return_args):
data = response.json()
from_field = return_args.get('from')
if from_field == RETURN_ALL:
v = data
else:
v = data.get(from_field)
return {
'rate_limit_info': get_rate_limit_info(response.headers),
return_args.get('to'): v
}
# ==== /salt_main/salt/auth/yubico.py (repo: pombredanne/usystem, no license) ====
# -*- coding: utf-8 -*-
'''
Provide authentication using YubiKey.
.. versionadded:: 2015.5.0
:depends: yubico-client Python module
To get your YubiKey API key you will need to visit the website below.
https://upgrade.yubico.com/getapikey/
The resulting page will show the generated Client ID (aka AuthID or API ID)
and the generated API key (Secret Key). Make a note of both and use these
two values in your /etc/usystem/master configuration.
/etc/usystem/master
.. code-block:: yaml
yubico_users:
damian:
id: 12345
key: ABCDEFGHIJKLMNOPQRSTUVWXYZ
.. code-block:: yaml
external_auth:
yubico:
damian:
- test.*
Please wait five to ten minutes after generating the key before testing so that
the API key will be updated on all the YubiCloud servers.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
try:
from yubico_client import Yubico, yubico_exceptions
HAS_YUBICO = True
except ImportError:
HAS_YUBICO = False
def __get_yubico_users(username):
'''
Grab the YubiKey Client ID & Secret Key
'''
user = {}
try:
if __opts__['yubico_users'].get(username, None):
(user['id'], user['key']) = list(__opts__['yubico_users'][username].values())
else:
return None
except KeyError:
return None
return user
def auth(username, password):
'''
Authenticate against yubico server
'''
    _cred = __get_yubico_users(username)
    if _cred is None:
        # No yubico_users entry for this user; refuse authentication
        log.info('No yubico_users entry found for user %s', username)
        return False
    client = Yubico(_cred['id'], _cred['key'])
try:
return client.verify(password)
except yubico_exceptions.StatusCodeError as e:
log.info('Unable to verify YubiKey `%s`', e)
return False
def groups(username, *args, **kwargs):
return False
if __name__ == '__main__':
__opts__ = {'yubico_users': {'damian': {'id': '12345', 'key': 'ABC123'}}}
if auth('damian', 'OPT'):
print("Authenticated")
else:
print("Failed to authenticate")
# ==== /plugins/module_utils/docker/common.py (repo: goneri/ansible.community, no license) ====
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import platform
import re
import sys
from datetime import timedelta
from distutils.version import LooseVersion
from ansible_collections.ansible.community.plugins.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
from ansible_collections.ansible.community.plugins.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible_collections.ansible.community.plugins.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
HAS_DOCKER_PY = True
HAS_DOCKER_PY_2 = False
HAS_DOCKER_PY_3 = False
HAS_DOCKER_ERROR = None
try:
from requests.exceptions import SSLError
from docker import __version__ as docker_version
from docker.errors import APIError, NotFound, TLSParameterError
from docker.tls import TLSConfig
from docker import auth
if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
HAS_DOCKER_PY_3 = True
from docker import APIClient as Client
elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
HAS_DOCKER_PY_2 = True
from docker import APIClient as Client
else:
from docker import Client
except ImportError as exc:
HAS_DOCKER_ERROR = str(exc)
HAS_DOCKER_PY = False
# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
# to ensure the user does not have both ``docker`` and ``docker-py`` modules
# installed, as they utilize the same namespace and are incompatible
try:
# docker (Docker SDK for Python >= 2.0.0)
import docker.models # noqa: F401
HAS_DOCKER_MODELS = True
except ImportError:
HAS_DOCKER_MODELS = False
try:
# docker-py (Docker SDK for Python < 2.0.0)
import docker.ssladapter # noqa: F401
HAS_DOCKER_SSLADAPTER = True
except ImportError:
HAS_DOCKER_SSLADAPTER = False
try:
from requests.exceptions import RequestException
except ImportError:
# Either docker-py is no longer using requests, or docker-py isn't around either,
# or docker-py's dependency requests is missing. In any case, define an exception
# class RequestException so that our code doesn't break.
class RequestException(Exception):
pass
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_TLS_HOSTNAME = 'localhost'
MIN_DOCKER_VERSION = "1.8.0"
DEFAULT_TIMEOUT_SECONDS = 60
DOCKER_COMMON_ARGS = dict(
docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
debug=dict(type='bool', default=False)
)
DOCKER_MUTUALLY_EXCLUSIVE = []
DOCKER_REQUIRED_TOGETHER = [
['client_cert', 'client_key']
]
DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if not HAS_DOCKER_PY:
docker_version = None
    # No Docker SDK for Python. Create a placeholder client to allow
    # instantiation of AnsibleModule and proper error handling
class Client(object): # noqa: F811
def __init__(self, **kwargs):
pass
class APIError(Exception): # noqa: F811
pass
class NotFound(Exception): # noqa: F811
pass
def is_image_name_id(name):
"""Check whether the given image name is in fact an image ID (hash)."""
if re.match('^sha256:[0-9a-fA-F]{64}$', name):
return True
return False
def is_valid_tag(tag, allow_empty=False):
"""Check whether the given string is a valid docker tag name."""
if not tag:
return allow_empty
# See here ("Extended description") for a definition what tags can be:
# https://docs.docker.com/engine/reference/commandline/tag/
return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
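# e.g. (illustrative) is_valid_tag('v1.2-rc') -> True, is_valid_tag('-bad') -> False,
# and is_valid_tag('', allow_empty=True) -> True.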
def sanitize_result(data):
"""Sanitize data object for return to Ansible.
When the data object contains types such as docker.types.containers.HostConfig,
Ansible will fail when these are returned via exit_json or fail_json.
HostConfig is derived from dict, but its constructor requires additional
arguments. This function sanitizes data structures by recursively converting
everything derived from dict to dict and everything derived from list (and tuple)
to a list.
"""
if isinstance(data, dict):
return dict((k, sanitize_result(v)) for k, v in data.items())
elif isinstance(data, (list, tuple)):
return [sanitize_result(v) for v in data]
else:
return data
class DockerBaseClass(object):
def __init__(self):
self.debug = False
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
def update_tls_hostname(result):
if result['tls_hostname'] is None:
# get default machine name from the url
parsed_url = urlparse(result['docker_host'])
if ':' in parsed_url.netloc:
result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
else:
            result['tls_hostname'] = parsed_url.netloc
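# For example (illustrative), when tls_hostname is unset and docker_host is
# 'tcp://myhost:2376', the derived tls_hostname is 'myhost'.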
def _get_tls_config(fail_function, **kwargs):
try:
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError as exc:
fail_function("TLS config error: %s" % exc)
def get_connect_params(auth, fail_function):
if auth['tls'] or auth['tls_verify']:
auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and host verification
if auth['cacert_path']:
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
ca_cert=auth['cacert_path'],
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'],
fail_function=fail_function)
else:
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify'] and auth['cacert_path']:
# TLS with cacert only
tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
assert_hostname=auth['tls_hostname'],
verify=True,
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify']:
# TLS with verify and no certs
tls_config = _get_tls_config(verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and no host verification
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=False,
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls']:
        # TLS with no certs and no host verification
tls_config = _get_tls_config(verify=False,
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
# No TLS
return dict(base_url=auth['docker_host'],
version=auth['api_version'],
timeout=auth['timeout'])
DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. "
"Hint: if you do not need Python 2.6 support, try "
"`pip uninstall docker-py` instead, followed by `pip install docker`.")
class AnsibleDockerClient(Client):
def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
min_docker_api_version=None, option_minimal_versions=None,
option_minimal_versions_ignore_params=None, fail_results=None):
# Modules can put information in here which will always be returned
# in case client.fail() is called.
self.fail_results = fail_results or {}
merged_arg_spec = dict()
merged_arg_spec.update(DOCKER_COMMON_ARGS)
if argument_spec:
merged_arg_spec.update(argument_spec)
self.arg_spec = merged_arg_spec
mutually_exclusive_params = []
mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
if mutually_exclusive:
mutually_exclusive_params += mutually_exclusive
required_together_params = []
required_together_params += DOCKER_REQUIRED_TOGETHER
if required_together:
required_together_params += required_together
self.module = AnsibleModule(
argument_spec=merged_arg_spec,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive_params,
required_together=required_together_params,
required_if=required_if)
NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
self.docker_py_version = LooseVersion(docker_version)
if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
"SDK for Python) installed together as they use the same namespace and cause a corrupt "
"installation. Please uninstall both packages, and re-install only the docker-py or docker "
"python module (for %s's Python %s). It is recommended to install the docker module if no "
"support for Python 2.6 is required. Please note that simply uninstalling one of the modules "
"can leave the other module in a broken state." % (platform.node(), sys.executable))
if not HAS_DOCKER_PY:
if NEEDS_DOCKER_PY2:
msg = missing_required_lib("Docker SDK for Python: docker")
msg = msg + ", for example via `pip install docker`. The error was: %s"
else:
msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)")
msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s"
self.fail(msg % HAS_DOCKER_ERROR)
if self.docker_py_version < LooseVersion(min_docker_version):
msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
if not NEEDS_DOCKER_PY2:
# The minimal required version is < 2.0 (and the current version as well).
# Advertise docker (instead of docker-py) for non-Python-2.6 users.
msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
elif docker_version < LooseVersion('2.0'):
msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
else:
msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))
self.debug = self.module.params.get('debug')
self.check_mode = self.module.check_mode
self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
try:
super(AnsibleDockerClient, self).__init__(**self._connect_params)
self.docker_api_version_str = self.version()['ApiVersion']
except APIError as exc:
self.fail("Docker API error: %s" % exc)
except Exception as exc:
self.fail("Error connecting: %s" % exc)
self.docker_api_version = LooseVersion(self.docker_api_version_str)
if min_docker_api_version is not None:
if self.docker_api_version < LooseVersion(min_docker_api_version):
self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
if option_minimal_versions is not None:
self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
def fail(self, msg, **kwargs):
self.fail_results.update(kwargs)
self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
@staticmethod
def _get_value(param_name, param_value, env_variable, default_value):
if param_value is not None:
# take module parameter value
if param_value in BOOLEANS_TRUE:
return True
if param_value in BOOLEANS_FALSE:
return False
return param_value
if env_variable is not None:
env_value = os.environ.get(env_variable)
if env_value is not None:
# take the env variable value
if param_name == 'cert_path':
return os.path.join(env_value, 'cert.pem')
if param_name == 'cacert_path':
return os.path.join(env_value, 'ca.pem')
if param_name == 'key_path':
return os.path.join(env_value, 'key.pem')
if env_value in BOOLEANS_TRUE:
return True
if env_value in BOOLEANS_FALSE:
return False
return env_value
# take the default
return default_value
@property
def auth_params(self):
# Get authentication credentials.
# Precedence: module parameters-> environment variables-> defaults.
self.log('Getting credentials')
params = dict()
for key in DOCKER_COMMON_ARGS:
params[key] = self.module.params.get(key)
if self.module.params.get('use_tls'):
# support use_tls option in docker_image.py. This will be deprecated.
use_tls = self.module.params.get('use_tls')
if use_tls == 'encrypt':
params['tls'] = True
if use_tls == 'verify':
params['validate_certs'] = True
result = dict(
docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
DEFAULT_DOCKER_HOST),
tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
'auto'),
cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
            tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
DEFAULT_TLS_VERIFY),
timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
DEFAULT_TIMEOUT_SECONDS),
)
update_tls_hostname(result)
return result
def _handle_ssl_error(self, error):
match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
if match:
self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
"The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
"or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
"setting the `tls` parameter to true."
% (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
self.fail("SSL Exception: %s" % (error))
def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
self.option_minimal_versions = dict()
for option in self.module.argument_spec:
if ignore_params is not None:
if option in ignore_params:
continue
self.option_minimal_versions[option] = dict()
self.option_minimal_versions.update(option_minimal_versions)
for option, data in self.option_minimal_versions.items():
# Test whether option is supported, and store result
support_docker_py = True
support_docker_api = True
if 'docker_py_version' in data:
support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
if 'docker_api_version' in data:
support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
data['supported'] = support_docker_py and support_docker_api
# Fail if option is not supported but used
if not data['supported']:
# Test whether option is specified
if 'detect_usage' in data:
used = data['detect_usage'](self)
else:
used = self.module.params.get(option) is not None
if used and 'default' in self.module.argument_spec[option]:
used = self.module.params[option] != self.module.argument_spec[option]['default']
if used:
# If the option is used, compose error message.
if 'usage_msg' in data:
usg = data['usage_msg']
else:
usg = 'set %s option' % (option, )
if not support_docker_api:
msg = 'Docker API version is %s. Minimum version required is %s to %s.'
msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
elif not support_docker_py:
msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
elif self.docker_py_version < LooseVersion('2.0.0'):
msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
else:
msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
else:
# should not happen
msg = 'Cannot %s with your configuration.' % (usg, )
self.fail(msg)
def get_container_by_id(self, container_id):
try:
self.log("Inspecting container Id %s" % container_id)
result = self.inspect_container(container=container_id)
self.log("Completed container inspection")
return result
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting container: %s" % exc)
def get_container(self, name=None):
'''
Lookup a container and return the inspection results.
'''
if name is None:
return None
search_name = name
if not name.startswith('/'):
search_name = '/' + name
result = None
try:
for container in self.containers(all=True):
self.log("testing container: %s" % (container['Names']))
if isinstance(container['Names'], list) and search_name in container['Names']:
result = container
break
if container['Id'].startswith(name):
result = container
break
if container['Id'] == name:
result = container
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving container list: %s" % exc)
if result is None:
return None
return self.get_container_by_id(result['Id'])
def get_network(self, name=None, network_id=None):
'''
Lookup a network and return the inspection results.
'''
if name is None and network_id is None:
return None
result = None
if network_id is None:
try:
for network in self.networks():
self.log("testing network: %s" % (network['Name']))
if name == network['Name']:
result = network
break
if network['Id'].startswith(name):
result = network
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving network list: %s" % exc)
if result is not None:
network_id = result['Id']
if network_id is not None:
try:
self.log("Inspecting network Id %s" % network_id)
result = self.inspect_network(network_id)
self.log("Completed network inspection")
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting network: %s" % exc)
return result
def find_image(self, name, tag):
'''
Lookup an image (by name and tag) and return the inspection results.
'''
if not name:
return None
self.log("Find image %s:%s" % (name, tag))
images = self._image_lookup(name, tag)
if not images:
# In API <= 1.20, images pulled from Docker Hub can be reported with 'docker.io/<name>' as the image name
registry, repo_name = auth.resolve_repository_name(name)
if registry == 'docker.io':
# If docker.io is explicitly there in name, the image
# isn't found in some cases (#41509)
self.log("Check for docker.io image: %s" % repo_name)
images = self._image_lookup(repo_name, tag)
if not images and repo_name.startswith('library/'):
# Sometimes library/xxx images are not found
lookup = repo_name[len('library/'):]
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if not images:
# Last case: if docker.io wasn't there, it can be that
# the image wasn't found either (#15586)
lookup = "%s/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if len(images) > 1:
self.fail("Registry returned more than one result for %s:%s" % (name, tag))
if len(images) == 1:
try:
inspection = self.inspect_image(images[0]['Id'])
except Exception as exc:
self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
return inspection
self.log("Image %s:%s not found." % (name, tag))
return None
def find_image_by_id(self, image_id):
'''
Lookup an image (by ID) and return the inspection results.
'''
if not image_id:
return None
self.log("Find image %s (by ID)" % image_id)
try:
inspection = self.inspect_image(image_id)
except Exception as exc:
self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
return inspection
def _image_lookup(self, name, tag):
'''
Including a tag in the name parameter sent to the Docker SDK for Python images method
does not work consistently. Instead, get the result set for name and manually check
if the tag exists.
'''
try:
response = self.images(name=name)
except Exception as exc:
self.fail("Error searching for image %s - %s" % (name, str(exc)))
images = response
if tag:
lookup = "%s:%s" % (name, tag)
lookup_digest = "%s@%s" % (name, tag)
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if (tags and lookup in tags) or (digests and lookup_digest in digests):
images = [image]
break
return images
def pull_image(self, name, tag="latest"):
'''
Pull an image
'''
self.log("Pulling image %s:%s" % (name, tag))
old_tag = self.find_image(name, tag)
try:
for line in self.pull(name, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('error'):
if line.get('errorDetail'):
error_detail = line.get('errorDetail')
self.fail("Error pulling %s - code: %s message: %s" % (name,
error_detail.get('code'),
error_detail.get('message')))
else:
self.fail("Error pulling %s - %s" % (name, line.get('error')))
except Exception as exc:
self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
new_tag = self.find_image(name, tag)
return new_tag, old_tag == new_tag
def report_warnings(self, result, warnings_key=None):
'''
Checks result of client operation for warnings, and if present, outputs them.
warnings_key should be a list of keys used to crawl the result dictionary.
For example, if warnings_key == ['a', 'b'], the function will consider
result['a']['b'] if these keys exist. If the result is a non-empty string, it
will be reported as a warning. If the result is a list, every entry will be
reported as a warning.
In most cases (if warnings are returned at all), warnings_key should be
['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
'''
if warnings_key is None:
warnings_key = ['Warnings']
for key in warnings_key:
if not isinstance(result, Mapping):
return
result = result.get(key)
if isinstance(result, Sequence):
for warning in result:
self.module.warn('Docker warning: {0}'.format(warning))
elif isinstance(result, string_types) and result:
self.module.warn('Docker warning: {0}'.format(result))
def inspect_distribution(self, image, **kwargs):
'''
Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
since prior versions did not support accessing private repositories.
'''
if self.docker_py_version < LooseVersion('4.0.0'):
registry = auth.resolve_repository_name(image)[0]
header = auth.get_config_header(self, registry)
if header:
return self._result(self._get(
self._url('/distribution/{0}/json', image),
headers={'X-Registry-Auth': header}
), json=True)
return super(AnsibleDockerClient, self).inspect_distribution(image, **kwargs)
def compare_dict_allow_more_present(av, bv):
'''
Compare two dictionaries for whether every entry of the first is in the second.
'''
for key, value in av.items():
if key not in bv:
return False
if bv[key] != value:
return False
return True
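# Illustrative examples (not part of the module): every entry of the first dict
# must be present with the same value in the second.
#   compare_dict_allow_more_present({'a': 1}, {'a': 1, 'b': 2})  # -> True
#   compare_dict_allow_more_present({'a': 1}, {'a': 2, 'b': 2})  # -> False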
def compare_generic(a, b, method, datatype):
'''
Compare values a and b as described by method and datatype.
Returns ``True`` if the values compare equal, and ``False`` if not.
``a`` is usually the module's parameter, while ``b`` is a property
of the current object. ``a`` must not be ``None`` (except for
``datatype == 'value'``).
Valid values for ``method`` are:
- ``ignore`` (always compare as equal);
- ``strict`` (only compare if really equal)
- ``allow_more_present`` (allow b to have elements which a does not have).
Valid values for ``datatype`` are:
- ``value``: for simple values (strings, numbers, ...);
- ``list``: for ``list``s or ``tuple``s where order matters;
- ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
matter;
- ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does
not matter and which contain ``dict``s; ``allow_more_present`` is used
for the ``dict``s, and these are assumed to be dictionaries of values;
- ``dict``: for dictionaries of values.
'''
if method == 'ignore':
return True
# If a or b is None:
if a is None or b is None:
# If both are None: equality
if a == b:
return True
# Otherwise, not equal for values, and equal
# if the other is empty for set/list/dict
if datatype == 'value':
return False
# For allow_more_present, allow a to be None
if method == 'allow_more_present' and a is None:
return True
# Otherwise, the iterable object which is not None must have length 0
return len(b if a is None else a) == 0
# Do proper comparison (both objects not None)
if datatype == 'value':
return a == b
elif datatype == 'list':
if method == 'strict':
return a == b
else:
i = 0
for v in a:
while i < len(b) and b[i] != v:
i += 1
if i == len(b):
return False
i += 1
return True
elif datatype == 'dict':
if method == 'strict':
return a == b
else:
return compare_dict_allow_more_present(a, b)
elif datatype == 'set':
set_a = set(a)
set_b = set(b)
if method == 'strict':
return set_a == set_b
else:
return set_b >= set_a
elif datatype == 'set(dict)':
for av in a:
found = False
for bv in b:
if compare_dict_allow_more_present(av, bv):
found = True
break
if not found:
return False
if method == 'strict':
# If we would know that both a and b do not contain duplicates,
# we could simply compare len(a) to len(b) to finish this test.
# We can assume that b has no duplicates (as it is returned by
# docker), but we don't know for a.
for bv in b:
found = False
for av in a:
if compare_dict_allow_more_present(av, bv):
found = True
break
if not found:
return False
return True
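# Illustrative examples (not part of the module), following the docstring above:
#   compare_generic([1, 2], [0, 1, 3, 2], 'allow_more_present', 'list')  # -> True (order preserved)
#   compare_generic([1, 2], [2, 1], 'allow_more_present', 'list')        # -> False
#   compare_generic([1, 2], [2, 1], 'strict', 'set')                     # -> True (order ignored)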
class DifferenceTracker(object):
def __init__(self):
self._diff = []
def add(self, name, parameter=None, active=None):
self._diff.append(dict(
name=name,
parameter=parameter,
active=active,
))
def merge(self, other_tracker):
self._diff.extend(other_tracker._diff)
@property
def empty(self):
return len(self._diff) == 0
def get_before_after(self):
'''
Return texts ``before`` and ``after``.
'''
before = dict()
after = dict()
for item in self._diff:
before[item['name']] = item['active']
after[item['name']] = item['parameter']
return before, after
def has_difference_for(self, name):
'''
Returns a boolean if a difference exists for name
'''
return any(diff for diff in self._diff if diff['name'] == name)
def get_legacy_docker_container_diffs(self):
'''
Return differences in the docker_container legacy format.
'''
result = []
for entry in self._diff:
item = dict()
item[entry['name']] = dict(
parameter=entry['parameter'],
container=entry['active'],
)
result.append(item)
return result
def get_legacy_docker_diffs(self):
'''
Return differences in the docker_container legacy format.
'''
result = [entry['name'] for entry in self._diff]
return result
def clean_dict_booleans_for_docker_api(data):
'''
Go doesn't like Python booleans 'True' or 'False', while Ansible is just
fine with them in YAML. As such, they need to be converted in cases where
we pass dictionaries to the Docker API (e.g. docker_network's
driver_options and docker_prune's filters).
'''
result = dict()
if data is not None:
for k, v in data.items():
if v is True:
v = 'true'
elif v is False:
v = 'false'
else:
v = str(v)
result[str(k)] = v
return result
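# Illustrative example (not part of the module):
#   clean_dict_booleans_for_docker_api({'enable_icc': True, 'mtu': 1500})
#   # -> {'enable_icc': 'true', 'mtu': '1500'}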
def convert_duration_to_nanosecond(time_str):
"""
Return time duration in nanosecond.
"""
if not isinstance(time_str, str):
raise ValueError('Missing unit in duration - %s' % time_str)
regex = re.compile(
r'^(((?P<hours>\d+)h)?'
r'((?P<minutes>\d+)m(?!s))?'
r'((?P<seconds>\d+)s)?'
r'((?P<milliseconds>\d+)ms)?'
r'((?P<microseconds>\d+)us)?)$'
)
parts = regex.match(time_str)
if not parts:
raise ValueError('Invalid time duration - %s' % time_str)
parts = parts.groupdict()
time_params = {}
for (name, value) in parts.items():
if value:
time_params[name] = int(value)
delta = timedelta(**time_params)
time_in_nanoseconds = (
delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
) * 10 ** 3
return time_in_nanoseconds
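# Illustrative example (not part of the module): '1h30m' is 5400 seconds,
# so convert_duration_to_nanosecond('1h30m') returns 5400000000000.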
def parse_healthcheck(healthcheck):
"""
Return dictionary of healthcheck parameters and boolean if
healthcheck defined in image was requested to be disabled.
"""
if (not healthcheck) or (not healthcheck.get('test')):
return None, None
result = dict()
# All supported healthcheck parameters
options = dict(
test='test',
interval='interval',
timeout='timeout',
start_period='start_period',
retries='retries'
)
duration_options = ['interval', 'timeout', 'start_period']
for (key, value) in options.items():
if value in healthcheck:
if healthcheck.get(value) is None:
# due to recursive argument_spec, all keys are always present
# (but have default value None if not specified)
continue
if value in duration_options:
time = convert_duration_to_nanosecond(healthcheck.get(value))
if time:
result[key] = time
elif healthcheck.get(value):
result[key] = healthcheck.get(value)
if key == 'test':
if isinstance(result[key], (tuple, list)):
result[key] = [str(e) for e in result[key]]
else:
result[key] = ['CMD-SHELL', str(result[key])]
elif key == 'retries':
try:
result[key] = int(result[key])
except ValueError:
raise ValueError(
'Cannot parse number of retries for healthcheck. '
'Expected an integer, got "{0}".'.format(result[key])
)
if result['test'] == ['NONE']:
# If the user explicitly disables the healthcheck, return None
# as the healthcheck object, and set disable_healthcheck to True
return None, True
return result, False
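# Illustrative examples (not part of the module):
#   parse_healthcheck({'test': 'curl -f http://localhost/', 'interval': '30s'})
#   # -> ({'test': ['CMD-SHELL', 'curl -f http://localhost/'], 'interval': 30000000000}, False)
#   parse_healthcheck({'test': ['NONE']})
#   # -> (None, True)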
def omit_none_from_dict(d):
"""
Return a copy of the dictionary with all keys with value None omitted.
"""
return dict((k, v) for (k, v) in d.items() if v is not None)
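# Illustrative example (not part of the module):
#   omit_none_from_dict({'a': 1, 'b': None})  # -> {'a': 1}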
# ===== File: /data_processing.py (repo: Celinarabe/sentiment-analysis) =====
# original data source: http://jmcauley.ucsd.edu/data/amazon/
import json
import random
import string
data = []
file_name = 'Beauty'
# select only reviews from 2012 or later
with open(f'reviews_{file_name}_5.json', 'r') as f:
for line in f:
review = json.loads(line)
year = int(review['reviewTime'].split(' ')[-1])
if year >= 2012:
data.append(review)
# select 2000 random reviews
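# note: random.sample raises ValueError if fewer than 2000 reviews remain after
# filtering; call random.seed(...) beforehand if a reproducible sample is needed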
data_2000 = random.sample(data, 2000)
print(len(data_2000))
print(data_2000[0:3])
# convert to all lowercase and strip punctuation
for x in data_2000:
no_punc = x['reviewText'].translate(str.maketrans('', '', string.punctuation))
lower_case = no_punc.lower()
x['reviewText'] = lower_case
# export cleaned to file
with open(f'{file_name}_review_data.json', 'w') as f:
for review in data_2000:
f.write(json.dumps(review)+'\n')
# ===== File: /benchmarks/framework_overhead_benchmark/C2Module.py (repo: linziyi96/pytorch) =====
from __future__ import absolute_import, division, print_function, unicode_literals
from caffe2.python import workspace, core
import numpy as np
from utils import NUM_LOOP_ITERS
workspace.GlobalInit(['caffe2'])
def add_blob(ws, blob_name, tensor_size):
blob_tensor = np.random.randn(*tensor_size).astype(np.float32)
ws.FeedBlob(blob_name, blob_tensor)
class C2SimpleNet(object):
"""
This module constructs a net with 'op_name' operator. The net consist
a series of such operator.
It initializes the workspace with input blob equal to the number of parameters
needed for the op.
Provides forward method to run the net niter times.
"""
def __init__(self, op_name, num_inputs=1, debug=False):
self.input_names = []
self.net = core.Net("framework_benchmark_net")
self.input_names = ["in_{}".format(i) for i in range(num_inputs)]
for i in range(num_inputs):
add_blob(workspace, self.input_names[i], [1])
self.net.AddExternalInputs(self.input_names)
op_constructor = getattr(self.net, op_name)
op_constructor(self.input_names)
self.output_name = self.net._net.op[-1].output
print("Benchmarking op {}:".format(op_name))
for _ in range(NUM_LOOP_ITERS):
output_name = self.net._net.op[-1].output
self.input_names[-1] = output_name[0]
assert len(self.input_names) == num_inputs
op_constructor(self.input_names)
workspace.CreateNet(self.net)
if debug:
print(self.net._net)
def forward(self, niters):
workspace.RunNet(self.net, niters, False)
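# Illustrative usage (assumption: 'Sigmoid' is a valid single-input Caffe2 op):
#   net = C2SimpleNet('Sigmoid', num_inputs=1)
#   net.forward(100)  # runs the benchmark net 100 times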
# ===== File: /nu_trans_detect_stable.py (repo: Konstancja/cta-neutrino) =====
import gammalib
import ctools
import cscripts
import numpy as np
from ebltable.tau_from_model import OptDepth
from random import randint, uniform
import xml_generator as xml
from astropy.io import fits
import argparse
tau = OptDepth.readmodel(model = 'dominguez')
parser = argparse.ArgumentParser()
parser.add_argument('-alert', action='store', dest='alertfile',
default='3e-9_all.out.alert', help='File with alerts')
parser.add_argument('--nu_min', action='store', dest='imin',
type=int, default=0,
help='First alert to process (min. index, default 0)')
parser.add_argument('--nu_max', action='store', dest='imax',
type=int, default=10,
help='Last alert to process (max. index, default 10)')
parser.add_argument('--irf', action='store', dest='irf',
default='North_z20_average_30m', help='IRF')
parser.add_argument('--obs', action='store', dest='tobs',
type=float, default=600.,
help='Observation duration time in [s]')
parser.add_argument('--inter', action='store', dest='interaction',
default='no',
help='Interaction type: pp (proton-proton), pph (proton-photon), txs (TXS-like sources), no (no scaling)')
parser.add_argument('--offdec', action='store', dest='offdec',
default='0',
help='DEC offset')
parser.add_argument('--offra', action='store', dest='offra',
default='0',
help='RA offset')
options = parser.parse_args()
input_model= options.alertfile
gam = 2.19
ep = 100.
tobscta = options.tobs
debug = True
edisp = True
caldb = 'prod3b-v1'
irf = options.irf
declination,redshift,A = np.loadtxt(input_model, unpack=True)
#print (declination,redshift,A)
offsetdec = float(options.offdec)
offsetra = float(options.offra)
# flux scaling according to interaction type: pp, p-gamma, or no scaling
if options.interaction == 'no':
A_prefix = 1.0
if options.interaction == 'pp':
A_prefix = np.power(2.,-gam-1)
if options.interaction == 'pph':
A_prefix = np.power(2.,-gam)
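# illustrative values only: for gam = 2.19, np.power(2., -gam-1) is about 0.11 (pp)
# and np.power(2., -gam) is about 0.22 (pph)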
imin = options.imax-1 # NOTE: overrides options.imin, so only the single alert at index imax-1 is processed
imax = options.imax #len(redshift)
nusrcts=open('nu_src_ts_'+irf+'_'+str(int(tobscta))+'s_'+str(imin+1)+'-'+str(imax)+'.dat', 'w')
for i in range(imin, imax):
z = redshift[i]
dec0 = declination[i]
if z < 4.:
lib,doc = xml.CreateLib()
ra0 = uniform(0.,360.)
if offsetra == 0:
dra = 0.
else:
dra = uniform(-1.*offsetra,offsetra)
ra = ra0 + dra
if offsetdec == 0:
ddec = 0.
else:
ddec = uniform(-1.*offsetdec,offsetdec)
dec = dec0 + ddec
ETeV = np.logspace(-2,2.5,45)
EMeV = ETeV * 1e6
if z < 0.01:
atten = 1.
else:
atten = np.exp(-1. * tau.opt_depth(z,ETeV))
if options.interaction == 'txs': # reference: https://arxiv.org/abs/1811.07439
prefac = A[i] * 1e-13
spec = prefac * (ETeV / ep) ** (-2) * np.exp(-0.1*(z+1)/ETeV - ETeV/(20.*(z+1))) # np.exp: a bare exp was never imported
else:
prefac = A[i] * A_prefix * 1e-13
spec = prefac * (ETeV / ep) ** (-gam)
specebl = spec * atten
sourcename = 'nu_'+str(i+1)
Filefunction = 'spec_'+str(i+1)+'.dat'
np.savetxt(Filefunction, np.column_stack([EMeV,specebl + 1.e-300]))
speci = xml.addFileFunction(lib, sourcename, type = "PointSource", filefun=Filefunction, flux_free=1, flux_value=1., flux_scale=1., flux_max=100000000.0, flux_min=0.0)
spatial = xml.AddPointLike(doc,ra0,dec0)
speci.appendChild(spatial)
lib.appendChild(speci)
bkg = xml.addCTAIrfBackground(lib)
lib.appendChild(bkg)
open('nu_sources_'+str(i+1)+'.xml', 'w').write(doc.toprettyxml(' '))
nuseed = randint(1, 1000000000)
sim = ctools.ctobssim()
sim['inmodel'] = 'nu_sources_'+str(i+1)+'.xml'
sim['caldb'] = caldb
sim['irf'] = irf
sim['ra'] = ra
sim['dec'] = dec
sim['rad'] = 5.0
sim['tmin'] = '2020-05-31T12:00:00'
sim['tmax'] = '2020-05-31T12:10:00'
sim['emin'] = 0.02
sim['emax'] = 199.0
sim['maxrate'] = 1.0e9
sim['seed'] = nuseed
sim['debug'] = debug
sim['edisp'] = edisp
sim['logfile'] = 'nu_sources_'+str(i+1)+'.log'
sim.run()
like = ctools.ctlike(sim.obs())
like['debug'] = debug
like['edisp'] = edisp
like.run()
nuts = like.obs().models()[sourcename].ts()
nunormsp = like.obs().models()[sourcename].spectral()['Normalization'].value()
nunormsp_error = like.obs().models()[sourcename].spectral()['Normalization'].error()
real_nu = str(i+1)+' '+str(z)+' '+str(nuts)+' '+str(nunormsp)+' '+str(nunormsp_error)+' '+str(ra)+' '+str(dec)+' '+str(nuseed)+'\n'
else:
nuts = -1
nunormsp = -1
nunormsp_error = -1
ra = -1
nuseed = -1
real_nu = str(i+1)+' '+str(z)+' '+str(nuts)+' '+str(nunormsp)+' '+str(nunormsp_error)+' '+str(ra)+' '+str(dec0)+' '+str(nuseed)+'\n'
nusrcts.write(real_nu)
nusrcts.close()
# ===== File: /Networking/tcp_client.py (repo: christophviehoff/LearnKivy) =====
import socket
HOST = 'localhost'
PORT = 12345
BUFSIZ = 256
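# note: raw_input below means this script targets Python 2;
# under Python 3, replace raw_input with input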
if __name__ == '__main__':
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = raw_input("Enter hostname [%s]: " % HOST) or HOST
port = raw_input("Enter port [%s]: " % PORT) or PORT
sock_addr = (host, int(port))
client_sock.connect(sock_addr)
payload = 'GET TIME'
try:
while True:
client_sock.send(payload.encode('utf-8'))
data = client_sock.recv(BUFSIZ)
print(repr(data))
more = raw_input("Want to send more data to server[y/n] :")
if more.lower() == 'y':
payload = raw_input("Enter payload: ")
else:
break
except KeyboardInterrupt:
print("Exited by user")
client_sock.close()
# ===== File: /log_FB_seq.py (repo: tylii/group_project_545) =====
### Log computation of the forward and backward probabilities
import numpy as np
from hmm import cal_b_matrix
#%% define the extended helper functions
def eexp(x):
""" Extended exponential"""
if np.isnan(x):
out = 0
else:
out = np.exp(x)
return out
def eln(x):
""" Extended natural log"""
if x == 0:
out = np.nan
elif x > 0:
out = np.log(x)
else:
raise ValueError("negative input in eln") # the original printed and then returned an unbound variable
return out
def elnsum(eln_x,eln_y):
"""Extended logarithm sum function"""
if np.isnan(eln_x) or np.isnan(eln_y):
if np.isnan(eln_x):
out = eln_y
else:
out = eln_x
else:
if eln_x > eln_y:
out = eln_x + eln(1+np.exp(eln_y-eln_x))
else:
out = eln_y + eln(1+np.exp(eln_x-eln_y))
return out
def elnproduct(eln_x,eln_y):
"""Extended logarithm product"""
if np.isnan(eln_x) or np.isnan(eln_y):
out = np.nan
else:
out = eln_x + eln_y
return out
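# Illustrative identities (not part of the module):
#   elnsum(eln(0.5), eln(0.25)) == eln(0.75)     # addition in the log domain
#   elnproduct(eln(0.5), eln(0.5)) == eln(0.25)  # multiplication in the log domain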
#%% Computing the probability of observing a sequence
# forward algorithm in log space
def forward_step(A, B, pi, H, K):
""" Forward step in the log domain."""
# A is H x H transition matrix
# B should be a K x H matrix of emission pdf values (output of cal_b_matrix);
# eln() is applied to B below, so B must not already be in the log domain
# pi is 1 x H vector of prior probabilities
alpha = np.zeros((H,K))
for i in range(0,H): # loop through all states at time t = 1
alpha[i,0] = elnproduct(eln(pi[i]), eln(B[0,i]))
for t in range(1,K):
for j in range(H):
logalpha = np.nan
for i in range(H):
tmp = elnproduct(alpha[i,t-1], eln(A[i,j]))
logalpha = elnsum(logalpha, tmp)
alpha[j,t] = elnproduct(logalpha, eln(B[t,j]))
return alpha
def backward_step(A, B, pi, H, K):
""" Backward step in the log domain"""
beta = np.zeros((H,K))
for i in range(H):
beta[i,K-1] = 0
for t in range(K-2,-1,-1):
for i in range(H):
logbeta = np.nan
for j in range(H):
tmp1 = elnproduct(eln(B[t+1,j]),beta[j,t+1]) # eln(B) applied for consistency with forward_step and calc_xi
tmp2 = elnproduct(eln(A[i,j]),tmp1)
logbeta = elnsum(logbeta, tmp2)
beta[i,t] = logbeta
return beta
def calc_gamma(alpha, beta, H, K):
""" Calculate the gamma probabilities"""
gamma = np.zeros((H,K))
for t in range(K):
normalizer = np.nan
for i in range(H):
gamma[i,t] = elnproduct(alpha[i,t],beta[i,t])
normalizer = elnsum(normalizer,gamma[i,t])
for i in range(H):
gamma[i,t] = elnproduct(gamma[i,t], -normalizer)
return gamma
def calc_xi(alpha, beta, A, B, H, K):
"""Compute probability of being in state i at time t, and state j at
time t+1 in log space"""
xi = np.zeros((K,H,H))
for t in range(K-1):
normalizer = np.nan
for i in range(H):
for j in range(H):
tmp1 = elnproduct(eln(B[t+1,j]),beta[j,t+1])
tmp2 = elnproduct(eln(A[i,j]),tmp1)
xi[t,i,j] = elnproduct(alpha[i,t],tmp2)
normalizer = elnsum(normalizer,xi[t,i,j])
for i in range(H):
for j in range(H):
xi[t,i,j] = elnproduct(xi[t,i,j],-normalizer)
return xi
def update_pi(gamma, H):
pi = []
for i in range(H):
pi.append(eexp(gamma[i,0]))
return pi
def update_A(gamma, xi, H, K):
A = np.zeros((H,H))
for i in range(H):
for j in range(H):
numerator = np.nan
denominator = np.nan
for t in range(K-1):
numerator = elnsum(numerator, xi[t,i,j])
denominator = elnsum(denominator, gamma[i,t])
A[i,j] = eexp(elnproduct(numerator,-denominator))
return A
def update_miu(gamma, x, H, K):
""" Update the means of the Gaussians using
one sequence of the training data.
Returns the means, one row per state"""
miu = np.zeros((H,x.shape[1]))
for i in range(H):
# reset the accumulators for each state, otherwise sums leak across states
num = 0
den = 0
for t in range(0,K):
num += eexp(gamma[i,t])*x[t,:]
den += eexp(gamma[i,t])
miu[i,:] = np.divide(num,den)
# miu[i,:] = elnproduct(np.log(num),-den)
return miu
def update_var(gamma, x, H, K, miu):
""" Update the (diagonal) covariance matrix using
one sequence"""
var = np.zeros((H, x.shape[1]))
for i in range(H):
# reset the accumulators for each state, otherwise sums leak across states
num = 0
den = 0
for t in range(0,K):
num += eexp(gamma[i,t])*np.outer(x[t,:]-miu[i,:], x[t,:]-miu[i,:])
den += eexp(gamma[i,t])
var[i,:] = np.diag(np.divide(num,den))
# var[i,:] = elnproduct(np.lognum,-den).diag()
# set lower bound on variances
for j in range(0,x.shape[1]):
if var[i,j] < 1e-3:
var[i,j] = 1e-3
return var
def forward_backward_algorithm(x, A, B_mean, B_var, pi, H, K, d):
""" Performs a full pass through the Baum-Welch algorithm
and updates A and pi, miu and var. Need to loop through all the
sequences, computing alpha, beta, gamma, and xi for each."""
# input x should be a combination of all the 7 x 561 segments
# d is a list of the features to include
# initialize alpha, beta, gamma, and xi matrices
E = len(x) # number of sequences
alpha_mat = []
beta_mat = []
gamma_mat = []
xi_mat = []
for e in range(E):
x_train = x[e][:,d]
B = cal_b_matrix(x_train, B_mean, B_var, H, K)
alpha = forward_step(A, B, pi, H, K)
beta = backward_step(A, B, pi, H, K)
gamma = calc_gamma(alpha, beta, H, K)
xi = calc_xi(alpha, beta, A, B, H, K)
alpha_mat.append(alpha)
beta_mat.append(beta)
gamma_mat.append(gamma)
xi_mat.append(xi)
# update pi
pi_tmp = np.zeros((H,))
for e in range(E):
pi_tmp += np.asarray(update_pi(gamma_mat[e], H))
pi = pi_tmp/E
# update A
A = np.zeros((H,H))
for i in range(H):
for j in range(H):
super_num = 0
super_den = 0
for e in range(E):
numerator = np.nan
denominator = np.nan
for t in range(K-1):
numerator = elnsum(numerator, xi_mat[e][t,i,j])
denominator = elnsum(denominator, gamma_mat[e][i,t])
super_num += eexp(numerator)
super_den += eexp(denominator)
A[i,j] = super_num/super_den
# update mean of B
miu = np.zeros((H,len(d)))
for i in range(H):
super_num = 0
super_den = 0
for e in range(E):
num = 0
den = 0
for t in range(0,K):
num += eexp(gamma_mat[e][i,t])*x[e][t,d]
den += eexp(gamma_mat[e][i,t])
super_num += num
super_den += den
miu[i,:] = np.divide(super_num,super_den)
# update variance of B
var = np.zeros((H, len(d)))
for i in range(H):
super_num = 0
super_den = 0
for e in range(E):
num = 0
den = 0
for t in range(0,K):
num += eexp(gamma_mat[e][i,t])*np.outer(x[e][t,d]-B_mean[i,:], x[e][t,d]-B_mean[i,:])
den += eexp(gamma_mat[e][i,t])
super_num += num
super_den += den
var[i,:] = np.diag(np.divide(super_num,super_den))
# set lower bound on variances
for j in range(0,len(d)):
if var[i,j] < 1e-6:
var[i,j] = 1e-6
return A, miu, var, pi
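# Illustrative call (assumption: x is a list of K x D observation arrays, d is the
# list of feature indices, and A, B_mean, B_var, pi are already initialized):
#   A, B_mean, B_var, pi = forward_backward_algorithm(x, A, B_mean, B_var, pi, H, K, d)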
# ===== File: /grouper.py (repo: brat002/ExpertBilling) =====
# -*- coding: utf-8 -*-
import signal
import asyncore
import isdlogger
import threading
import ConfigParser
import psycopg2, psycopg2.extras
import time, datetime, os, sys, gc, traceback
from IPy import intToIp
from marshal import dumps, loads
from daemonize import daemonize
from threading import Thread, Lock
from copy import copy, deepcopy
from DBUtils.PooledDB import PooledDB
from DBUtils.PersistentDB import PersistentDB
from collections import deque, defaultdict
class groupDequeThread(Thread):
def __init__ (self):
Thread.__init__(self)
def run(self):
connection = persist.connection()
connection._con.set_client_encoding('UTF8')
cur = connection.cursor()
global groupAggrDict, groupAggrTime
global groupDeque, groupLock
#direction type->operations
gops = {1: lambda xdct: xdct['INPUT'], 2: lambda xdct: xdct['OUTPUT'] , 3: lambda xdct: xdct['INPUT'] + xdct['OUTPUT'], 4: lambda xdct: max(xdct['INPUT'], xdct['OUTPUT'])}
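# e.g. gops[3]({'INPUT': 10, 'OUTPUT': 5}) == 15 and gops[4]({'INPUT': 10, 'OUTPUT': 5}) == 10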
global writeProf
icount = 0
timecount = 0
while True:
#gdata[1] - group_id, group_dir, group_type
#gkey[0] - account_id, gkey[2] - date
#ftm = open('routtmp', 'ab+')
try:
groupLock.acquire()
#check whether double aggregation time passed - updates are rather costly
if groupDeque[0][1] + 30 < time.time():
gkey = groupDeque.popleft()[0]
groupLock.release()
else:
groupLock.release()
time.sleep(10)
continue
#get data
groupData = groupAggrDict.pop(gkey)
groupInfo = groupData[1]
#get needed method
gop = gops[groupInfo[1]]
octlist = []
classes = []
max_class = None
octets = 0
gdate = datetime.datetime.fromtimestamp(gkey[2])
account_id = gkey[0]
#second type groups
if groupInfo[2] == 2:
max_oct = 0
#get class octets, calculate with direction method, find maxes
for class_, gdict in groupData[0].iteritems():
octs = gop(gdict)
classes.append(class_)
octlist.append(octs)
if octs > max_oct:
max_oct = octs
max_class = class_
octets = max_oct
if not max_class: continue
cur.execute("""SELECT group_type2_fn(%s, %s, %s, %s, %s, %s, %s);""" , (groupInfo[0], account_id, octets, gdate, classes, octlist, max_class))
connection.commit()
#first type groups
elif groupInfo[2] == 1:
#get class octets, calculate sum with direction method
for class_, gdict in groupData[0].iteritems():
#classes.append(class_)
octs = gop(gdict)
octets += octs
cur.execute("""SELECT group_type1_fn(%s, %s, %s, %s, %s, %s, %s);""" , (groupInfo[0], account_id, octets, gdate, classes, octlist, max_class))
connection.commit()
else:
continue
except IndexError, ierr:
groupLock.release()
time.sleep(10)
continue
except Exception, ex:
print "%s : exception: %s" % (self.getName(), repr(ex))
class statDequeThread(Thread):
'''Thread picks out and sends to the DB global statistics'''
def __init__ (self):
Thread.__init__(self)
def run(self):
connection = persist.connection()
connection._con.set_client_encoding('UTF8')
cur = connection.cursor()
global statAggrDict, statAggrTime
global statDeque, statLock
global writeProf
icount = 0
timecount = 0
while True:
try:
#check whether double aggregation time passed - updates are rather costly
statLock.acquire()
#if statDeque[0][1]:
if statDeque[0][1] + 50 < time.time():
#get a key
skey = statDeque.popleft()[0]
statLock.release()
else:
statLock.release()
time.sleep(10)
continue
#get data
statData = statAggrDict.pop(skey)
statInfo = statData[1]
nas_id = statInfo[0]
#total octets
sum_bytes = statInfo[1]
octlist = []
classes = []
sdate = datetime.datetime.fromtimestamp(skey[1])
account_id = skey[0]
#get octets for every class
for class_, sdict in statData[0].iteritems():
classes.append(class_)
octlist.append([sdict['INPUT'], sdict['OUTPUT']])
octets_in = sum_bytes['INPUT']
octets_out = sum_bytes['OUTPUT']
cur.execute("""SELECT global_stat_fn(%s, %s, %s, %s, %s, %s, %s);""" , (account_id, octets_in, octets_out, sdate, nas_id, classes, octlist))
connection.commit()
except IndexError, ierr:
statLock.release()
time.sleep(10)
continue
except Exception, ex:
print "%s : exception: %s" % (self.getName(), repr(ex))
class NetFlowRoutine(Thread):
'''Thread that handles NetFlow statistic packets and bills according to them'''
def __init__ (self):
Thread.__init__(self)
def isect_classes(self, groups_rec, class_list):
'''Calculates intersection of group classes and flow classes.
Returns a tuple.
'''
groups_rec[1].intersection_update(class_list)
groups_rec[1] = tuple(groups_rec[1])
return tuple(groups_rec)
def run(self):
connection = persist.connection()
#connection._con._con.set_client_encoding('UTF8')
connection._con.set_client_encoding('UTF8')
#connection._con._con.set_isolation_level(0)
global groupAggrDict, statAggrDict
global groupAggrTime, statAggrTime
global groupDeque, statDeque
global groupLast, statLast
global lastOneFname
cur = connection.cursor()
curDay = dateStart
while curDay <= dateEnd:
if curDay == dateEnd:
fname = lastOneFname
else:
fname = tmpFolder + curDay.strftime('%Y%m%d')
f = open(fname, 'w')
try:
cur.copy_to(f, 'nfs' + curDay.strftime('%Y%m%d'), sep='|', columns=['account_id', 'date_start', 'traffic_class_id', 'octets', 'direction', 'nas_id', 'tarif_id'])
except Exception, ex:
print repr(ex)
f.close()
continue
f.close()
connection.commit()
print curDay.strftime('%Y%m%d')
fr = open(fname, 'r')
for dbline in fr:
dblst = dbline.split('|')
account_id = int(dblst[0])
ftime = time.mktime(time.strptime(dblst[1], '%Y-%m-%d %H:%M:%S'))
flow_classes = eval('['+dblst[2][1:-1]+']')
octets = int(dblst[3])
flow_dir = dblst[4]
nas_id = int(dblst[5])
tarif_id = int(dblst[6])
if dblst[1] == '2009-01-13 07:52:20':
pass # leftover debugging breakpoint hook; has no effect
has_groups = False
tarifGroups = tarif_groupsCache.get(tarif_id)
if tarifGroups:
has_groups = True
if has_groups:
dr = 0
if flow_dir == 'INPUT':
dr = 2
elif flow_dir == 'OUTPUT':
dr = 1
groupLst = []
fcset = set(flow_classes)
for tgrp in tarifGroups:
if (tgrp[2] == dr) or (tgrp[0] == 0):
continue
group_cls = fcset.intersection(tgrp[1])
if group_cls:
group_add = tgrp[:]
group_add[1] = tuple(group_cls)
groupLst.append(tuple(group_add))
groups = groupLst
'''for tcl in flow_classes:
groupLst.update(tarifGroups.intersection(class_groupsCache.get((tcl, flow_dir), set())))
groups = tuple([self.isect_classes(groupsCache[group_][:], flow_classes) for group_ in groupLst])'''
gtime = ftime - (ftime % groupAggrTime)
for group in groups:
try:
group_id, group_classes, group_dir, group_type = group
#calculate a key and check the dictionary
gkey = (account_id, group_id, gtime)
grec = groupAggrDict.get(gkey)
if not grec:
#add new record to the queue and the dictionary
groupDeque.append((gkey, time.time()))
grec = [defaultdict(lambda: {'INPUT':0, 'OUTPUT':0}), (group_id, group_dir, group_type)]
groupAggrDict[gkey] = grec
#aggregate bytes for every class/direction
for class_ in group_classes:
grec[0][class_][flow_dir] += octets
except Exception, ex:
print '%s groupstat exception: %s' % (self.getName(), repr(ex))
traceback.print_exc(file=sys.stderr)
#global statistics calculation
stime = ftime - (ftime % statAggrTime)
skey = (account_id, stime)
try:
srec = statAggrDict.get(skey)
if not srec:
statDeque.append((skey, time.time()))
srec = [defaultdict(lambda: {'INPUT':0, 'OUTPUT':0}), [nas_id, {'INPUT':0, 'OUTPUT':0}]]
statAggrDict[skey] = srec
#calculation for every class
for class_ in flow_classes:
srec[0][class_][flow_dir] += octets
#global data
srec[1][1][flow_dir] += octets
except Exception, ex:
print '%s globalstat exception: %s' % (self.getName(), repr(ex))
traceback.print_exc(file=sys.stderr)
fr.close()
os.unlink(fname)
curDay += day_
class AccountServiceThread(Thread):
'''Handles simultaneously updated READ-ONLY caches connected to account-tarif tables'''
def __init__ (self):
Thread.__init__(self)
def run(self):
connection = persist.connection()
connection._con.set_client_encoding('UTF8')
global groupsCache, class_groupsCache, tarif_groupsCache
cur = connection.cursor()
cur.execute("SELECT id, ARRAY(SELECT trafficclass_id from billservice_group_trafficclass as bgtc WHERE bgtc.group_id = bsg.id) AS trafficclass, direction, type FROM billservice_group AS bsg;")
groups = cur.fetchall()
cur.execute("SELECT tarif_id, int_array_aggregate(group_id) AS group_ids FROM (SELECT tarif_id, group_id FROM billservice_trafficlimit UNION SELECT bt.id, btn.group_id FROM billservice_tariff AS bt JOIN billservice_traffictransmitnodes AS btn ON bt.traffic_transmit_service_id=btn.traffic_transmit_service_id WHERE btn.group_id IS NOT NULL) AS tarif_group GROUP BY tarif_id;")
tarif_groups = cur.fetchall()
connection.commit()
#id, trafficclass, in_direction, out_direction, type
gpcTmp = defaultdict(set)
groups_ = {}
for group in groups:
if not group[1]: continue
#direction = group[2]
#g_id = group[0]
#g_type = group[3]
#classes_ = group[1]
lgroup = list(group)
#lgroup[1] = set(lgroup[1])
groups_[group[0]] = lgroup
'''for tclass in group[1]:
if direction == 1:
gpcTmp[(tclass, 'INPUT')].add(g_id)
elif direction == 2:
gpcTmp[(tclass, 'OUTPUT')].add(g_id)
elif direction in (3,4):
gpcTmp[(tclass, 'INPUT')].add(g_id)
gpcTmp[(tclass, 'OUTPUT')].add(g_id)'''
groupsCache = groups_
class_groupsCache = gpcTmp
del gpcTmp
tg_ = defaultdict(list)
for tarif_id, groups__ in tarif_groups:
for grp in set(groups__):
tg_[tarif_id].append(groupsCache.get(grp, [0,[]]))
tarif_groupsCache = tg_
global lastOneFname
lastOneFname = tmpFolder + dateEnd.strftime('%Y%m%d')
lastone = open(lastOneFname, 'w')
cur.copy_to(lastone, 'nfs' + dateEnd.strftime('%Y%m%d'), sep='|', columns=['account_id', 'date_start', 'traffic_class_id', 'octets', 'direction', 'nas_id', 'tarif_id'])
lastone.close()
connection.commit()
cur.close()
global cachesRead
cachesRead = True
def main():
global cachesRead
threads=[]
for i in xrange(1):
newNfr = NetFlowRoutine()
newNfr.setName('grouper NetFlowRoutine #%s ' % i)
threads.append(newNfr)
for i in xrange(1):
grdqTh = groupDequeThread()
grdqTh.setName('grouper groupDequeThread #%i' % i)
threads.append(grdqTh)
for i in xrange(1):
stdqTh = statDequeThread()
stdqTh.setName('grouper statDequeThread #%i' % i)
threads.append(stdqTh)
cacheThr = AccountServiceThread()
cacheThr.setName('NFR AccountServiceThread')
cacheThr.start()
while not cachesRead:
time.sleep(0.2)
if not cacheThr.isAlive(): # isAlive is a method; the bare attribute is always truthy
sys.exit()
#i= range(len(threads))
for th in threads:
th.start()
print "%s start" % th.getName()
time.sleep(0.1)
if __name__ == "__main__":
dateStart = datetime.date(int(sys.argv[1][0:4]), int(sys.argv[1][4:6]), int(sys.argv[1][6:8]))
if sys.argv[2] == 'now':
dateEnd = datetime.date.today()
else:
dateEnd = datetime.date(int(sys.argv[2][0:4]), int(sys.argv[2][4:6]), int(sys.argv[2][6:8]))
day_ = datetime.timedelta(days=1)
tmpFolder = ''
if len(sys.argv) > 3:
tmpFolder = sys.argv[3] + '/'
lastOneFname = ''
config = ConfigParser.ConfigParser()
config.read("ebs_config.ini")
persist = PersistentDB(
setsession=["SET synchronous_commit TO OFF;", 'SET DATESTYLE TO ISO;'],
creator=psycopg2,
dsn="dbname='%s' user='%s' host='%s' password='%s'" % (config.get("db", "name"), config.get("db", "username"),
config.get("db", "host"), config.get("db", "password")))
#--------------------------------------------------------
cachesRead = False
#group statistics and global statistics objects
#key = account, group id , time
#[(1,2,3)][0][4]['INPUT']
#[(1,2, )][1] = group info
#lambda: [defaultdict(lambda: {'INPUT':0, 'OUTPUT':0}), None])
groupAggrDict = {}
#key - account_id, time
#[(1,2,3)][0][4]['INPUT']
#[(1,2, )][1] = nas etc
statAggrDict = {}
groupAggrTime = 300
statAggrTime = 1800
groupDeque = deque()
statDeque = deque()
groupLock = Lock()
statLock = Lock()
groupLast = None
statLast = None
main()
# ===== File: /samples/openapi3/client/petstore/python/petstore_api/paths/pet_pet_id_upload_image/post.py (repo: ingenovishealth/openapi-generator) =====
# coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from petstore_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from petstore_api import schemas # noqa: F401
from petstore_api.model.api_response import ApiResponse
from . import path
# Path params
PetIdSchema = schemas.Int64Schema
RequestRequiredPathParams = typing_extensions.TypedDict(
'RequestRequiredPathParams',
{
'petId': typing.Union[PetIdSchema, decimal.Decimal, int, ],
}
)
RequestOptionalPathParams = typing_extensions.TypedDict(
'RequestOptionalPathParams',
{
},
total=False
)
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
pass
request_path_pet_id = api_client.PathParameter(
name="petId",
style=api_client.ParameterStyle.SIMPLE,
schema=PetIdSchema,
required=True,
)
# body param
class SchemaForRequestBodyMultipartFormData(
schemas.DictSchema
):
class MetaOapg:
class properties:
additionalMetadata = schemas.StrSchema
file = schemas.BinarySchema
__annotations__ = {
"additionalMetadata": additionalMetadata,
"file": file,
}
@typing.overload
def __getitem__(self, name: typing_extensions.Literal["additionalMetadata"]) -> MetaOapg.properties.additionalMetadata: ...
@typing.overload
def __getitem__(self, name: typing_extensions.Literal["file"]) -> MetaOapg.properties.file: ...
@typing.overload
def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
def __getitem__(self, name: typing.Union[typing_extensions.Literal["additionalMetadata", "file", ], str]):
# dict_instance[name] accessor
return super().__getitem__(name)
@typing.overload
def get_item_oapg(self, name: typing_extensions.Literal["additionalMetadata"]) -> typing.Union[MetaOapg.properties.additionalMetadata, schemas.Unset]: ...
@typing.overload
def get_item_oapg(self, name: typing_extensions.Literal["file"]) -> typing.Union[MetaOapg.properties.file, schemas.Unset]: ...
@typing.overload
def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["additionalMetadata", "file", ], str]):
return super().get_item_oapg(name)
def __new__(
cls,
*_args: typing.Union[dict, frozendict.frozendict, ],
additionalMetadata: typing.Union[MetaOapg.properties.additionalMetadata, str, schemas.Unset] = schemas.unset,
file: typing.Union[MetaOapg.properties.file, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset,
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
) -> 'SchemaForRequestBodyMultipartFormData':
return super().__new__(
cls,
*_args,
additionalMetadata=additionalMetadata,
file=file,
_configuration=_configuration,
**kwargs,
)
request_body_body = api_client.RequestBody(
content={
'multipart/form-data': api_client.MediaType(
schema=SchemaForRequestBodyMultipartFormData),
},
)
_auth = [
'petstore_auth',
]
SchemaFor200ResponseBodyApplicationJson = ApiResponse
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
_status_code_to_response = {
'200': _response_for_200,
}
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _upload_image_oapg(
self,
content_type: typing_extensions.Literal["multipart/form-data"] = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _upload_image_oapg(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _upload_image_oapg(
self,
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _upload_image_oapg(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _upload_image_oapg(
self,
content_type: str = 'multipart/form-data',
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
uploads an image
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params)
used_path = path.value
_path_params = {}
for parameter in (
request_path_pet_id,
):
parameter_data = path_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_path_params.update(serialized_data)
for k, v in _path_params.items():
used_path = used_path.replace('{%s}' % k, v)
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
_fields = None
_body = None
if body is not schemas.unset:
serialized_data = request_body_body.serialize(body, content_type)
_headers.add('Content-Type', content_type)
if 'fields' in serialized_data:
_fields = serialized_data['fields']
elif 'body' in serialized_data:
_body = serialized_data['body']
response = self.api_client.call_api(
resource_path=used_path,
method='post'.upper(),
headers=_headers,
fields=_fields,
body=_body,
auth_settings=_auth,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class UploadImage(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def upload_image(
self,
content_type: typing_extensions.Literal["multipart/form-data"] = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def upload_image(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def upload_image(
self,
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def upload_image(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def upload_image(
self,
content_type: str = 'multipart/form-data',
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._upload_image_oapg(
body=body,
path_params=path_params,
content_type=content_type,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForpost(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def post(
self,
content_type: typing_extensions.Literal["multipart/form-data"] = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def post(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def post(
self,
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def post(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def post(
self,
content_type: str = 'multipart/form-data',
body: typing.Union[SchemaForRequestBodyMultipartFormData, dict, frozendict.frozendict, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._upload_image_oapg(
body=body,
path_params=path_params,
content_type=content_type,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
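# Illustrative usage (a sketch, assuming a default-configured petstore_api client;
# the class and parameter names are from this module, the wiring is an assumption):
#   from petstore_api import api_client
#   api = UploadImage(api_client.ApiClient())
#   response = api.upload_image(path_params={'petId': 1}, body={'additionalMetadata': 'x'})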
# ===== File: /Blackjack.py (repo: berkesenturk/Blackjack) =====
from enum import auto, Enum # enum: reviewed
# In tuples, keys are defined as integers. Enum works around that to a degree and
# lets us assign symbolic names to the values.
from itertools import product
"""
#itertools -> product
cartesian product, equivalent to a nested for-loop
ex -> product('ABCD', repeat=2)
output -> AA AB AC AD BA BB BC BD CA CB CC CD DA DB DC DD
"""
from random import shuffle
from typing import List, NamedTuple # namedtuple: reviewed
# class Vault:
# def __init__(self,your_money,your_bet,iscond):
# self.your_money = your_money
# self.your_bet = your_bet
# self.iscond = iscond
# def money(self):
# # global your_money
# # global ask_sit
# # global ask_bet
# # global iscond
# self.your_money = 0
# self.your_bet = 0
# ask_sit = int(input("sit with: "))
# self.your_money += ask_sit
# ask_bet = int(input("bet with: "))
# self.your_bet = ask_bet
# self.iscond = 0
class Rank(Enum): # THE RANKS ARE DEFINED HERE
ACE = 1
TWO = 2
THREE = 3
FOUR = 4
FIVE = 5
SIX = 6
SEVEN = 7
EIGHT = 8
NINE = 9
TEN = 10
JACK = 10
QUEEN = 10
KING = 10
class Suit(Enum): # THE SUITS ARE DEFINED HERE
SPADES = auto()
CLUBS = auto()
DIAMONDS = auto()
HEARTS = auto()
class Card(NamedTuple):  # wrapping rank and suit in the Card class gives us a concrete, physical card
rank: Rank
suit: Suit
def __repr__(self) -> str:
"""Pretty-print the name of the card, e.g. 'Queen of Hearts'"""
return f"{self.rank.name.title()} of {self.suit.name.title()}"
deck = [Card(rank, suit) for rank, suit in product(Rank, Suit)]  # Cartesian product of the input iterables: the defined cards become a list here, and with that the deck is complete.
print("Here's our deck fresh from the factory: ", deck)
shuffle(deck)
print("Here it is all shuffled: ", deck)
def best_value(hand: List[Card]) -> int:
"""Get the best blackjack value for a hand of cards (highest without busting)"""
value = sum([card.rank.value for card in hand])
if value < 12 and any([card.rank == Rank.ACE for card in hand]):
# Count one of our aces as 11, adding 10 to the total.
value += 10
return value
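# Worked example of the ace rule above (hypothetical hand): Ace + Nine sums to
# 1 + 9 = 10; that is < 12 and contains an ace, so 10 is added and the hand is
# worth 20:
#   best_value([Card(Rank.ACE, Suit.SPADES), Card(Rank.NINE, Suit.HEARTS)]) == 20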
print("Deal me two!")
hand = [deck.pop(), deck.pop()]
hand_op = [deck.pop(), deck.pop()]
print(f"My hand is {hand}, which is worth {best_value(hand)}")
print(f"Opponent has: {hand_op} = {best_value(hand_op)}")
# PROBLEMS!!!!
# the winning scenario after the opponent draws a card is missing
# an ace was counted as 11 while the hand was still under 11
# 1. I drew a card and it became 19-16; 2. I said stand, the opponent drew and it became 19-18, it printed "you win!!" and then asked me hit or stand again.
# WHEN EVERYTHING IS FINISHED (OR EARLIER) MOVE THIS INTO THE WORKFLOW
def Vault(): # Sit at the table with the money you want and place your bet
    your_money = 0
    your_bet = 0
    ask_sit = int(input("sit with: "))
    your_money += ask_sit
    ask_bet = int(input("bet with: "))
    your_bet = ask_bet
    # main() has to hand the result back: a bare call would only ever set a
    # local iscond inside main(), which this function could never see.
    iscond = main()
    if iscond is True:
        your_money += your_bet
        print(your_money)
    elif iscond is False:  # `is` keeps a draw (0) distinct, since False == 0 in Python
        your_money -= your_bet
        print(your_money)
    else:  # draw / game left undecided
        pass
def main():
    iscond = 0  # 0 = undecided/draw, True = you win, False = opponent wins
    if best_value(hand) == 21 :
print("Won <line 75>!!")
iscond = True
if best_value(hand_op) == 21 :
print("Opponent wins!!")
iscond = False
choice = int(input("Hit or Stand (HIT:1, STAND: 2): "))
if choice == 2: #OPPONENT
        while best_value(hand_op) < 17: # The dealer draws up to 17, then the turn comes back to us; if you ever make this multiplayer, read this as waiting for your turn
print("Opponent says: Hit me!")
card = deck.pop()
hand_op.append(card)
print("Opponent got ", card)
if best_value(hand_op)==best_value(hand):
print("DRAW!")
iscond = 0
"""
if best_value(hand_op) == 21 :
print("Won!! <line 91>")
"""
if best_value(hand)<21 and best_value(hand_op)<best_value(hand):
print("YOU WİN!!!")
print(f"My hand was {hand}, which is worth {best_value(hand)}")
print(f"Opponent had: {hand_op} = {best_value(hand_op)}")
iscond = True
if best_value(hand_op)<21 and (best_value(hand)<best_value(hand_op)):
print("OPPONENT WİNS!!")
print(f"Opponent had: {hand_op} = {best_value(hand_op)}")
iscond = False
if choice == 1: #YOUR NEXT PICK
if best_value(hand) < 21:
print("You Say: Hit me!")
card = deck.pop()
hand.append(card)
print("I got ", card)
print(f"You have: {hand} = {best_value(hand)}")
print(f"Opponent has: {hand_op} = {best_value(hand_op)}")
            if best_value(hand)<21 and best_value(hand) < best_value(hand_op):
                iscond = main()  # capture the recursive result instead of discarding it
            if best_value(hand)<21 and best_value(hand) > best_value(hand_op):
                iscond = main()
"""
if best_value(hand) == 21 :
print("Won!! <line 121>")
main()
if best_value(hand) > 21:
print("YOU LOSE LINE 108 HAND >21")
"""
if best_value(hand_op) > 21:
print("Opponent bust you won")
iscond = True
if best_value(hand) > 21:
print("You Bust Opponent won!")
print(f"{hand} = {best_value(hand)}")
print(f"{hand_op} = {best_value(hand_op)}")
        iscond = False
    return iscond
Vault()
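# A minimal sketch (kept commented out so it does not affect the game above)
# of one way to get a full 52-card deck despite the Enum aliasing noted
# earlier: give every rank a distinct value and read the blackjack score out
# of it. FullRank is an illustrative name, not part of the game above.
#
#   class FullRank(Enum):
#       ACE = (1, "Ace"); TWO = (2, "Two"); THREE = (3, "Three"); FOUR = (4, "Four")
#       FIVE = (5, "Five"); SIX = (6, "Six"); SEVEN = (7, "Seven"); EIGHT = (8, "Eight")
#       NINE = (9, "Nine"); TEN = (10, "Ten"); JACK = (10, "Jack"); QUEEN = (10, "Queen")
#       KING = (10, "King")
#
#   full_deck = [Card(rank, suit) for rank, suit in product(FullRank, Suit)]
#   assert len(full_deck) == 52
#   score = sum(card.rank.value[0] for card in full_deck[:2])  # value[0] is the blackjack score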
|
[
"45045846+berkesenturk@users.noreply.github.com"
] |
45045846+berkesenturk@users.noreply.github.com
|
65f356f05a10845b19da1aaec0dd4ee96c4112f9
|
4d5dedb096e64c4da22b0f044cfa11ac7cfab741
|
/python/skype_auto_answer_call.py
|
1c2715509f68966d2c8acb1e46d5e25fd69f2d71
|
[] |
no_license
|
kitech/triline
|
4a1305265ad41fc7305075ad862bee9a3a27ee3b
|
65619eeab2a3ce516718ecb4eb3fadca1cd494de
|
refs/heads/master
| 2021-08-08T05:31:58.969551
| 2020-12-29T09:55:55
| 2020-12-29T09:55:55
| 133,601,942
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,168
|
py
|
#!/usr/bin/env python2
import Skype4Py
import time
import re
class SkypeBot(object):
def __init__(self):
self.skype = Skype4Py.Skype(Events=self)
self.skype.FriendlyName = "Skype Bot"
self.skype.Attach()
def OnlineStatus(self, user, status):
print(user, status)
return
def AttachmentStatus(self, status):
print('attach:', status)
if status == Skype4Py.apiAttachAvailable:
self.skype.Attach()
def MessageStatus(self, msg, status):
if status == Skype4Py.cmsReceived:
if msg.Chat.Type in (Skype4Py.chatTypeDialog, Skype4Py.chatTypeLegacyDialog):
for regexp, target in self.commands.items():
match = re.match(regexp, msg.Body, re.IGNORECASE)
if match:
msg.MarkAsSeen()
reply = target(self, *match.groups())
if reply:
msg.Chat.SendMessage(reply)
break
def CallStatus(self, call, status):
print(call, status, call._GetPartnerHandle())
allow_peer = 'yat-sen'
if status == 'RINGING':
if call._GetPartnerHandle() == allow_peer:
print('auto answer...')
call.Answer()
else:
print('reject call...', call._GetPartnerHandle())
call.Finish()
elif status == 'INPROGRESS':
if call._GetPartnerHandle() == allow_peer:
call.StartVideoSend()
# self.skype._SetMute('OFF')
return
def cmd_userstatus(self, status):
if status:
try:
self.skype.ChangeUserStatus(status)
except Skype4Py.SkypeError, e:
return str(e)
return 'Current status: %s' % self.skype.CurrentUserStatus
def cmd_credit(self):
return self.skype.CurrentUserProfile.BalanceToText
commands = {
"@userstatus *(.*)": cmd_userstatus,
"@credit$": cmd_credit
}
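    # e.g. a 1:1 chat message "@userstatus AWAY" matches the first pattern and
    # MessageStatus calls cmd_userstatus(self, "AWAY"); the returned text is
    # sent back into the chat as the reply.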
if __name__ == "__main__":
bot = SkypeBot()
while True:
time.sleep(0.1)
|
[
"drswinghead@163.com"
] |
drswinghead@163.com
|
697c9c47ad94dc2c73ae4ef0df6e52bdefba26b3
|
8e69c85b70c1f8d2459eb40b07f0ce974ab2cdac
|
/itd/twitterbootstrap/tests.py
|
6511c5159942a2d1251e30d8bcfe55c8963007e2
|
[] |
no_license
|
itd/itd.twitterbootstrap
|
041ec12f87e2879f79f4da2032b0c3ef9aecfbe8
|
c450f3c89595abdd7c26ffcd5c6e430980c4f2b7
|
refs/heads/master
| 2020-12-30T09:38:24.990752
| 2012-06-27T07:27:41
| 2012-06-27T07:27:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
import unittest
#from zope.testing import doctestunit
#from zope.component import testing
from Testing import ZopeTestCase as ztc
from Products.Five import fiveconfigure
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import PloneSite
ptc.setupPloneSite()
import itd.twitterbootstrap
class TestCase(ptc.PloneTestCase):
class layer(PloneSite):
@classmethod
def setUp(cls):
fiveconfigure.debug_mode = True
ztc.installPackage(itd.twitterbootstrap)
fiveconfigure.debug_mode = False
@classmethod
def tearDown(cls):
pass
def test_suite():
return unittest.TestSuite([
# Unit tests
#doctestunit.DocFileSuite(
# 'README.txt', package='itd.twitterbootstrap',
# setUp=testing.setUp, tearDown=testing.tearDown),
#doctestunit.DocTestSuite(
# module='itd.twitterbootstrap.mymodule',
# setUp=testing.setUp, tearDown=testing.tearDown),
# Integration tests that use PloneTestCase
#ztc.ZopeDocFileSuite(
# 'README.txt', package='itd.twitterbootstrap',
# test_class=TestCase),
#ztc.FunctionalDocFileSuite(
# 'browser.txt', package='itd.twitterbootstrap',
# test_class=TestCase),
])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
[
"kurt@tool.net"
] |
kurt@tool.net
|
4ff40d4f02cbdd4f1aca1f45e661382847291759
|
ac4391f978821206666036f091522b84ba1941a8
|
/reshape.py
|
16bf938bc86d6f414b50cf0c11a84a127e453b13
|
[] |
no_license
|
sunil504/hackerrankpython
|
394c6e7684a546dd18189025b2264d985279ca70
|
4faf32e42e6537a2be88177122928671c87059fc
|
refs/heads/master
| 2021-08-22T04:56:17.441965
| 2017-11-29T10:35:22
| 2017-11-29T10:35:22
| 112,455,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 97
|
py
|
import numpy
x = input().strip().split(' ')
x = numpy.array(x,int)
print(numpy.reshape(x,(3,3)))
|
[
"noreply@github.com"
] |
sunil504.noreply@github.com
|
3e49a25a4de860622d8ba1b81856483455a22485
|
88e3b2782e3f9a7e62cb56ecf77d449da859c7d7
|
/mysite.venv/mysite_project/mysite_project/settings.py
|
1b2fb6538b02fd625d3e85b1e314934c0c3030c0
|
[] |
no_license
|
scooterpie/scooter-blog
|
bf5b0d5db5b1e547dd5f28469504ea89e8ce16d4
|
f0a68dfaeec4e79d6454980ec584eabf45143083
|
refs/heads/master
| 2021-07-16T02:07:20.955921
| 2017-10-23T02:39:29
| 2017-10-23T02:39:29
| 107,924,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,203
|
py
|
"""
Django settings for mysite_project project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o^th5mf&w=(v)y372bzah8(k+5zxq#(53if@_z*a7ewg)if421'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myDjangoAPP',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'djangodb',
'USER': 'djangodbuser',
'PASSWORD': 'password',
'HOST': '',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Australia/Sydney'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"scooter@pie.com"
] |
scooter@pie.com
|
c233e69f58a62cc1d2641bbac17878a9a683d2d3
|
549d7c0456f419c5ce8b90dc42193e61e3f3eb8c
|
/mysite/settings.py
|
e080faf3a09bfe94d31e5f436d73930fc3b59f15
|
[] |
no_license
|
petitantom/my-first-tutorial
|
527a8fbe1eba69fae8d1e311646fc79530561e13
|
93cf8138d6f2be7b5e70b6a7fd6d09c673a1e81f
|
refs/heads/master
| 2020-12-31T07:54:37.342805
| 2017-06-19T13:54:22
| 2017-06-19T13:54:22
| 94,856,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,201
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^8!q2k(x5ts6)z+*dq0_fg4yt_&nu3xis(a-sx4y=0#zk*_6&-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Budapest'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"szerviz.petitan.hu@gmail.com"
] |
szerviz.petitan.hu@gmail.com
|
1c3de27813851250806d1504109c55ac8b152a2a
|
dd76697bc5fccb3637fc6056e395f96075f03d8c
|
/Day-002/exercise-3-bmi-calculator.py
|
af0331c722e714aa6ea93eab8ff40da386f04099
|
[] |
no_license
|
georgggg/python-bootcamp
|
449f759ee339864e8e3bd4d00e06f32292616a9c
|
0edacb45bb05bf5622c457568a350ebb7ad4451b
|
refs/heads/master
| 2023-08-21T12:02:32.546186
| 2021-09-16T02:17:29
| 2021-09-16T02:17:29
| 358,429,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
# BMI Calculator
# Instructions
# Write a program that calculates the Body Mass Index (BMI) from a user's weight and height.
# The BMI is a measure of some's weight taking into account their height. e.g. If a tall person and a short person both weigh the same amount, the short person is usually more overweight.
# The BMI is calculated by dividing a person's weight (in kg) by the square of their height (in m):
# https://cdn.fs.teachablecdn.com/jKHjnLrNQjqzdz3MTMyv
# Warning you should convert the result to a whole number.
# Example Input
# weight = 80
# height = 1.75
# Example Output
# 80 ÷ (1.75 x 1.75) = 26.122448979591837
# 26
# e.g. When you hit run, this is what should happen:
# https://cdn.fs.teachablecdn.com/wmjVjddeSmGj0QVtOUrE
# Hint
# Check the data type of the inputs.
# Try to use the exponent operator in your code.
# Remember PEMDAS.
# Remember to convert your result to a whole number (int).
# Test Your Code
# Before checking the solution, try copy-pasting your code into this repl:
# https://repl.it/@appbrewery/day-2-2-test-your-code
# This repl includes my testing code that will check if your code meets this assignment's objectives.
# Solution
# https://repl.it/@appbrewery/day-2-2-solution
# 🚨 Don't change the code below 👇
height = input("enter your height in m: ")
weight = input("enter your weight in kg: ")
# 🚨 Don't change the code above 👆
#Write your code below this line 👇
weight = float(weight)
height = float(height)
BMI = weight / height**2
print(int(BMI))
|
[
"giorgggg.napone@gmail.com"
] |
giorgggg.napone@gmail.com
|
e202b6393527b1523ae9b1b7311cc6a32affe5e6
|
c56d36c10e52f569f3864790611fe9cf11bcb050
|
/lesson6/cases/test_contact.py
|
dacb0e3d606fd73bbc1dae6113b9a6790ef975f0
|
[] |
no_license
|
yr-rui/LG7
|
c9ba3e062eba4e5444a2e82a5bba8648da3258c9
|
471318a2af4e1d9e8b964d73f12f8f7ab713495f
|
refs/heads/main
| 2023-07-30T15:47:47.702967
| 2021-09-15T12:48:09
| 2021-09-15T12:48:09
| 360,015,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
import pytest
import yaml
from lesson6.pages.app import App
def get_datas():
with open("./contact.yaml") as f:
datas=yaml.safe_load(f)
return datas
class TestContact:
def setup_class(self):
self.app=App()
def setup(self):
self.main=self.app.start().goto_main()
self.contact=self.main.goto_contact_page()
def teardown_class(self):
self.app.stop()
@pytest.mark.parametrize("name,phone",[(i['name'], i['phone']) for i in get_datas()])
def test_add_contact(self,name,phone):
# self.contact=self.main.goto_contact_page()
self.contact.goto_add_contact_page().goto_edit_contact_page().add_contact(name,phone).verify_ok()
# @pytest.mark.parametrize("name",['test1',])
@pytest.mark.parametrize("name",[i['name'] for i in get_datas()])
def test_delete_contact(self,name):
current_contact=self.contact.goto_contact_info_page(name).goto_contact_info_setting_page().goto_contact_info_edit_page().delete_contact()
# assert False==current_contact.swipe_find_contact(name)
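        # expected message text: "<name>不在通讯录中" = "<name> is not in the contacts"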
assert f"{name}不在通讯录中"==current_contact.swipe_find_contact(name)
|
[
"rui.yue@changhong.com"
] |
rui.yue@changhong.com
|
0bd268cc0896e9f4c5d4387a7d8c6049a64b871f
|
5d4b2841619529f586d16ca64aa212c5a5d02a4c
|
/todo/main.py
|
e3069425de642ee9648a17c4d94ddfd5e5223c61
|
[
"MIT"
] |
permissive
|
georgdonner/todo-cli
|
de7b4efa0201073c80c34d4646c97ca339823894
|
d4da43822f0697a2a5a44b31a1d421a616c91fa2
|
refs/heads/master
| 2022-12-12T03:52:52.817253
| 2018-03-26T21:31:44
| 2018-03-26T21:31:44
| 126,611,241
| 0
| 0
|
MIT
| 2022-12-08T00:55:26
| 2018-03-24T15:10:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
import argparse, os, sys
from colorama import init
from todo.commands.add import add_item
from todo.commands.done import item_done
from todo.commands.list import list_items
from todo.helpers.defaultparser import set_default_subparser
init()
argparse.ArgumentParser.set_default_subparser = set_default_subparser
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title='commands')
# list
parser_list = subparsers.add_parser('list')
parser_list.add_argument('-a', '--all', action='store_true')
parser_list.add_argument('-p', '--project', metavar='<project>', type=str, default='Inbox')
parser_list.set_defaults(func=list_items)
# done
parser_done = subparsers.add_parser('done')
parser_done.add_argument('pattern', type=str)
parser_done.set_defaults(func=item_done)
# add
parser_add = subparsers.add_parser('add')
parser_add.add_argument('content', type=str)
parser_add.add_argument('-p', '--project', metavar='<project>', type=str, default='Inbox')
parser_add.add_argument('-d', '--date', metavar='<date_string>', type=str)
parser_add.set_defaults(func=add_item)
parser.set_default_subparser('list')
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
|
[
"georgdonner98@gmail.com"
] |
georgdonner98@gmail.com
|
f90a4c6e4150357b1a2e7523921b8389ac946930
|
1523054f4403e24764dc510fe4f3805a9194d2b0
|
/appdaemon/settings/apps/climate.py
|
37a1df4d228318f71583896a6ff2d08b1b94b5eb
|
[] |
no_license
|
wjl198435/smart-home
|
47da685dec3785fa17888980e52217aac12448ac
|
643177d436669cbb516de87d2a9199e389d43b73
|
refs/heads/master
| 2021-07-18T02:30:40.405155
| 2020-05-29T05:11:02
| 2020-05-29T05:11:02
| 172,010,714
| 0
| 0
| null | 2020-05-29T05:11:04
| 2019-02-22T06:55:09
|
Python
|
UTF-8
|
Python
| false
| false
| 12,721
|
py
|
"""Define automations for climate control."""
from typing import Union
import voluptuous as vol
from const import EVENT_PRESENCE_CHANGE, EVENT_PROXIMITY_CHANGE
from core import APP_SCHEMA, Base
from helpers import config_validation as cv
CONF_AQI_SENSOR = "aqi"
CONF_AQI_THRESHOLD = "aqi_threshold"
CONF_AWAY_MODE = "away_mode"
CONF_BRIGHTNESS_PERCENT_SENSOR = "sensor.outdoor_brightness_percent_sensor"
CONF_BRIGHTNESS_SENSOR = "sensor.outdoor_brightness_sensor"
CONF_ECO_HIGH_THRESHOLD = "eco_high_threshold"
CONF_ECO_LOW_THRESHOLD = "eco_low_threshold"
CONF_HUMIDITY_SENSOR = "humidity_sensor"
CONF_INDOOR_TEMPERATURE_SENSOR = "indoor_temperature_sensor"
CONF_LAST_HVAC_MODE = "last_hvac_mode"
CONF_OUTDOOR_BRIGHTNESS_PERCENT_SENSOR = "outdoor_brightness_percent_sensor"
CONF_OUTDOOR_BRIGHTNESS_SENSOR = "outdoor_brightness_sensor"
CONF_OUTDOOR_HIGH_THRESHOLD = "outdoor_high_threshold"
CONF_OUTDOOR_LOW_THRESHOLD = "outdoor_low_threshold"
CONF_OUTDOOR_TEMPERATURE_SENSOR = "outdoor_temperature_sensor"
CONF_THERMOSTAT = "thermostat"
FAN_MODE_AUTO_LOW = "Auto Low"
FAN_MODE_CIRCULATE = "Circulate"
FAN_MODE_ON_LOW = "On Low"
HVAC_MODE_AUTO = "heat_cool"
HVAC_MODE_COOL = "cool"
HVAC_MODE_HEAT = "heat"
HVAC_MODE_OFF = "off"
HANDLE_ECO_MODE = "eco_mode"
class AdjustOnProximity(Base): # pylint: disable=too-few-public-methods
"""Define a feature to adjust climate based on proximity to home."""
def configure(self) -> None:
"""Configure."""
self.listen_event(
self._on_arrive_home,
EVENT_PRESENCE_CHANGE,
new=self.presence_manager.HomeStates.just_arrived.value,
first=True,
)
self.listen_event(self._on_proximity_change, EVENT_PROXIMITY_CHANGE)
def _on_arrive_home(self, event_name: str, data: dict, kwargs: dict) -> None:
"""Last ditch: turn the thermostat to home when someone arrives."""
if self.climate_manager.away_mode:
self.log('Last ditch: setting thermostat to "Home" (arrived)')
self.climate_manager.set_home()
def _on_proximity_change(self, event_name: str, data: dict, kwargs: dict) -> None:
"""Respond to "PROXIMITY_CHANGE" events."""
if self.climate_manager.outdoor_temperature_extreme:
# Scenario 1: Anything -> Away (Extreme Temps)
if data["new"] == self.presence_manager.ProximityZones.away.value:
self.climate_manager.set_away()
# Scenario 2: Away -> Anything (Extreme Temps)
elif data["old"] == self.presence_manager.ProximityZones.away.value:
self.climate_manager.set_home()
else:
# Scenario 3: Home -> Anything
if data["old"] == self.presence_manager.ProximityZones.home.value:
self.climate_manager.set_away()
# Scenario 4: Anything -> Nearby
elif data["new"] == self.presence_manager.ProximityZones.nearby.value:
self.climate_manager.set_home()
class ClimateManager(Base): # pylint: disable=too-many-public-methods
"""Define an app to represent climate control."""
APP_SCHEMA = APP_SCHEMA.extend(
{
vol.Required(CONF_AWAY_MODE): cv.entity_id,
vol.Required(CONF_ECO_HIGH_THRESHOLD): cv.entity_id,
vol.Required(CONF_ECO_LOW_THRESHOLD): cv.entity_id,
vol.Required(CONF_HUMIDITY_SENSOR): cv.entity_id,
vol.Required(CONF_INDOOR_TEMPERATURE_SENSOR): cv.entity_id,
vol.Required(CONF_OUTDOOR_BRIGHTNESS_PERCENT_SENSOR): cv.entity_id,
vol.Required(CONF_OUTDOOR_BRIGHTNESS_SENSOR): cv.entity_id,
vol.Required(CONF_OUTDOOR_HIGH_THRESHOLD): cv.entity_id,
vol.Required(CONF_OUTDOOR_LOW_THRESHOLD): cv.entity_id,
vol.Required(CONF_OUTDOOR_TEMPERATURE_SENSOR): cv.entity_id,
vol.Required(CONF_THERMOSTAT): cv.entity_id,
}
)
def configure(self) -> None:
"""Configure."""
if self.away_mode:
self._set_away()
self.listen_state(self._on_away_mode_change, self.args[CONF_AWAY_MODE])
@property
def away_mode(self) -> bool:
"""Return the state of away mode."""
return self.get_state(self.args[CONF_AWAY_MODE]) == "on"
@property
def eco_high_temperature(self) -> float:
"""Return the upper limit of eco mode."""
return float(self.get_state(self.args[CONF_ECO_HIGH_THRESHOLD]))
@eco_high_temperature.setter
def eco_high_temperature(self, value: int) -> None:
"""Set the upper limit of eco mode."""
self.set_value(self.args[CONF_ECO_HIGH_THRESHOLD], value)
@property
def eco_low_temperature(self) -> float:
"""Return the lower limit of eco mode."""
return float(self.get_state(self.args[CONF_ECO_LOW_THRESHOLD]))
@eco_low_temperature.setter
def eco_low_temperature(self, value: int) -> None:
"""Set the upper limit of eco mode."""
self.set_value(self.args[CONF_ECO_LOW_THRESHOLD], value)
@property
def fan_mode(self) -> str:
"""Return the current fan mode."""
return self.get_state(self.args[CONF_THERMOSTAT], attribute="fan_mode")
@property
def indoor_humidity(self) -> float:
"""Return the average indoor humidity."""
return float(self.get_state(self.args[CONF_HUMIDITY_SENSOR]))
@property
def indoor_temperature(self) -> float:
"""Return the average indoor temperature."""
return float(self.get_state(self.args[CONF_INDOOR_TEMPERATURE_SENSOR]))
@property
def hvac_mode(self) -> str:
"""Return the current operating mode."""
return self.get_state(self.args[CONF_THERMOSTAT])
@property
def outdoor_brightness(self) -> float:
"""Return the outdoor brightness in lux."""
return float(self.get_state(self.args[CONF_BRIGHTNESS_SENSOR]))
@property
def outdoor_brightness_percentage(self) -> float:
"""Return the human-perception of brightness percentage."""
return float(self.get_state(self.args[CONF_BRIGHTNESS_PERCENT_SENSOR]))
@property
def outdoor_high_temperature(self) -> float:
"""Return the upper limit of "extreme" outdoor temperatures."""
return float(self.get_state(self.args[CONF_OUTDOOR_HIGH_THRESHOLD]))
@property
def outdoor_low_temperature(self) -> float:
"""Return the lower limit of "extreme" outdoor temperatures."""
return float(self.get_state(self.args[CONF_OUTDOOR_LOW_THRESHOLD]))
@property
def outdoor_temperature(self) -> float:
"""Return the outdoor temperature."""
return float(self.get_state(self.args[CONF_OUTDOOR_TEMPERATURE_SENSOR]))
@property
def outdoor_temperature_extreme(self) -> float:
"""Return whether the outside temperature is at extreme limits."""
return (
self.outdoor_temperature < self.outdoor_low_temperature
or self.outdoor_temperature > self.outdoor_high_temperature
)
@property
def target_temperature(self) -> float:
"""Return the temperature the thermostat is currently set to."""
try:
return float(
self.get_state(self.args[CONF_THERMOSTAT], attribute="temperature")
)
except TypeError:
return 0.0
def _on_away_mode_change(
self, entity: Union[str, dict], attribute: str, old: str, new: str, kwargs: dict
) -> None:
"""React when away mode is toggled."""
if new == "on":
self._set_away()
else:
self._set_home()
def _on_eco_temp_change(
self, entity: Union[str, dict], attribute: str, old: str, new: str, kwargs: dict
) -> None:
"""React when the temperature goes above or below its eco thresholds."""
current_temperature = float(new)
if (
current_temperature > self.eco_high_temperature
and self.hvac_mode != HVAC_MODE_COOL
):
self.log('Eco Mode: setting to "Cool" (%s°)', self.eco_high_temperature)
self.set_mode_cool()
self.set_temperature(self.eco_high_temperature)
elif (
current_temperature < self.eco_low_temperature
and self.hvac_mode != HVAC_MODE_HEAT
):
self.log('Eco Mode: setting to "Heat" (%s°)', self.eco_low_temperature)
self.set_mode_heat()
self.set_temperature(self.eco_low_temperature)
elif (
self.eco_low_temperature <= current_temperature <= self.eco_high_temperature
and self.hvac_mode != HVAC_MODE_OFF
):
self.log('Within eco mode limits; turning thermostat to "Off"')
self.set_mode_off()
def _restore_previous_state(self) -> None:
"""Restore the thermostat to its previous state."""
self._set_hvac_mode(self.get_state(self.args[CONF_LAST_HVAC_MODE]))
def _set_away(self) -> None:
"""Put the thermostat in "Away" mode."""
self.log('Setting thermostat to "Away" mode')
self.set_mode_off()
self.data[HANDLE_ECO_MODE] = self.listen_state(
self._on_eco_temp_change, self.args[CONF_INDOOR_TEMPERATURE_SENSOR]
)
def _set_fan_mode(self, fan_mode: str) -> None:
"""Set the themostat's fan mode."""
if fan_mode == self.fan_mode:
return
self.log('Setting fan mode to "%s"', fan_mode.title())
self.call_service(
"climate/set_fan_mode",
entity_id=self.args[CONF_THERMOSTAT],
fan_mode=fan_mode,
)
def _set_home(self) -> None:
"""Put the thermostat in "Home" mode."""
self.log('Setting thermostat to "Home" mode')
handle = self.data.pop(HANDLE_ECO_MODE)
self.cancel_listen_state(handle)
# If the thermostat isn't doing anything, set it to the previous settings
# (before away mode); otherwise, let it keep doing its thing:
if self.hvac_mode == HVAC_MODE_OFF:
self._restore_previous_state()
def _set_hvac_mode(self, hvac_mode: str) -> None:
"""Set the themostat's operation mode."""
if hvac_mode == self.hvac_mode:
return
# Set the previous HVAC mode in case we want to return to it:
if self.hvac_mode != HVAC_MODE_OFF:
self.select_option(self.args[CONF_LAST_HVAC_MODE], self.hvac_mode)
self.log('Setting operation mode to "%s"', hvac_mode.title())
self.call_service(
"climate/set_hvac_mode",
entity_id=self.args[CONF_THERMOSTAT],
hvac_mode=hvac_mode,
)
def bump_temperature(self, value: int) -> None:
"""Bump the current temperature."""
if HVAC_MODE_COOL in (self.hvac_mode, self._last_hvac_mode):
value *= -1
self.set_temperature(self.target_temperature + value)
def set_away(self) -> None:
"""Set the thermostat to away."""
self.turn_on(self.args[CONF_AWAY_MODE])
def set_fan_auto_low(self) -> None:
"""Set the fan mode to auto_low."""
self._set_fan_mode(FAN_MODE_AUTO_LOW)
def set_fan_circulate(self) -> None:
"""Set the fan mode to circulate."""
self._set_fan_mode(FAN_MODE_CIRCULATE)
def set_fan_on_low(self) -> None:
"""Set the fan mode to on_low."""
self._set_fan_mode(FAN_MODE_ON_LOW)
def set_home(self) -> None:
"""Set the thermostat to home."""
self.turn_off(self.args[CONF_AWAY_MODE])
def set_mode_auto(self) -> None:
"""Set the operation mode to auto."""
self._set_hvac_mode(HVAC_MODE_AUTO)
def set_mode_cool(self) -> None:
"""Set the operation mode to cool."""
self._set_hvac_mode(HVAC_MODE_COOL)
def set_mode_heat(self) -> None:
"""Set the operation mode to heat."""
self._set_hvac_mode(HVAC_MODE_HEAT)
def set_mode_off(self) -> None:
"""Set the operation mode to off."""
self._set_hvac_mode(HVAC_MODE_OFF)
def set_temperature(self, temperature: float) -> None:
"""Set the thermostat temperature."""
if temperature == self.target_temperature:
return
self.call_service(
"climate/set_temperature",
entity_id=self.args[CONF_THERMOSTAT],
temperature=str(int(temperature)),
)
def toggle(self) -> None:
"""Toggle the thermostat between off and its previous HVAC state/temp."""
if self.hvac_mode == HVAC_MODE_OFF:
self._restore_previous_state()
else:
self.set_mode_off()
|
[
"bachya1208@gmail.com"
] |
bachya1208@gmail.com
|
1ba65457de73f9c92a3d3951e5639c8b6378f58c
|
c0ed04491bcc9894c33f8ca796b1043d8c29d8e5
|
/pdf_sanitizer/pdf_sanitizer.py
|
a73a2dac95a0968bc2e72dbdf84a75ab586cdda5
|
[
"MIT"
] |
permissive
|
lucasmrdt/pdf-sanitizer
|
64a6dbb819645b93740003286f44877b3d20ac38
|
c09c2232c72ec27660b75727307e992733923248
|
refs/heads/main
| 2023-01-25T02:01:44.679434
| 2020-11-24T10:39:09
| 2020-11-24T10:39:09
| 314,557,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,663
|
py
|
import difflib
import pathlib
import argparse
from .utils import fail_with_message, progress_with_message, success_with_message
try:
import PyPDF2
except ImportError:
fail_with_message(
'Please install required dependencies before using this package.\n\t> pip3 install -r requirements.txt --user')
def parse_file(path: str):
if not pathlib.Path(path).exists():
raise argparse.ArgumentTypeError('invalid file path')
return path
def parse_ratio(x):
try:
x = float(x)
except ValueError:
raise argparse.ArgumentTypeError(
"%r not a floating-point literal" % (x,))
if x < 0.0 or x > 1.0:
raise argparse.ArgumentTypeError("%r not in range [0.0, 1.0]" % (x,))
return x
def diff(content1: str, content2: str):
return difflib.SequenceMatcher(None, content1, content2)
def has_deleted_item(diff):
for operation, *_ in diff.get_opcodes():
if operation == 'delete' or operation == 'replace':
return True
return False
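# Example of what has_deleted_item looks for (illustrative strings):
#   difflib.SequenceMatcher(None, "abcd", "abd").get_opcodes() returns
#   [('equal', 0, 2, 0, 2), ('delete', 2, 3, 2, 2), ('equal', 3, 4, 2, 3)]
# and the 'delete' opcode marks content present only in the first string.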
def get_title(content):
return content.split('\n')[0]
def get_content(content):
return content.replace(get_title(content), '').strip()
def has_content(content):
return len(get_content(content)) != 0
def sanitize(pdf_input: PyPDF2.PdfFileReader, pdf_output: PyPDF2.PdfFileWriter, title_ratio: float, content_ratio: float):
    prev_page = pdf_input.getPage(0)
    nb_pages = pdf_input.getNumPages()
    for i in range(1, nb_pages):
        progress_with_message('Sanitizing pdf ...', i / nb_pages)
        current_page = pdf_input.getPage(i)
current_content = current_page.extractText()
prev_content = prev_page.extractText()
diff_title = diff(get_title(prev_content), get_title(current_content))
diff_content = diff(get_content(prev_content),
get_content(current_content))
title_has_changed = diff_title.ratio() < title_ratio
content_has_changed = (diff_content.ratio() < content_ratio
and (has_deleted_item(diff_content) or len(prev_content) > len(current_content)))
if has_content(prev_content) and (title_has_changed or content_has_changed):
pdf_output.addPage(prev_page)
prev_page = current_page
pdf_output.addPage(prev_page)
parser = argparse.ArgumentParser(
description="Quickly remove useless page from a huge pdf to get a readable pdf")
parser.add_argument('input_file', type=parse_file,
help='pdf file to be sanitized')
parser.add_argument('output_file', type=str,
help='output sanitized pdf file name')
parser.add_argument('--title-ratio', type=parse_ratio,
help='float between [0, 1] which is responsible of detecting similar pages from title. The higher the ratio, the more sensitive the sanitizer will be to any changes. (default: 0.5)', default=.5, dest='title_ratio')
parser.add_argument('--content-ratio', type=parse_ratio,
help='float between [0, 1] which is responsible of detecting similar pages from content. The higher the ratio, the more sensitive the sanitizer will be to any changes. (default: 0.8)',
default=.8, dest='content_ratio')
def main():
args = parser.parse_args()
pdf_input = PyPDF2.PdfFileReader(args.input_file)
pdf_output = PyPDF2.PdfFileWriter()
sanitize(pdf_input, pdf_output, args.title_ratio, args.content_ratio)
with open(args.output_file, 'wb') as f:
pdf_output.write(f)
success_with_message(f'Your file has been sanitized at {args.output_file}')
if __name__ == '__main__':
main()
|
[
"lucas.mrdt@gmail.com"
] |
lucas.mrdt@gmail.com
|
8464880c154574ab8a1935128eab8e73e36e945b
|
4b8ebedccb97cb7d7d23d0f798a0ba290695e8aa
|
/code/37_inspect_currentframe.py
|
35946c5e3446c372ade38d60b795cd378bad729d
|
[] |
no_license
|
danilobellini/wta2017
|
19fce7c20865eae8615b3d6a02fc94cbfb95e5c3
|
5963dd48b46813988def356aa13aca3b6959d050
|
refs/heads/master
| 2021-01-11T14:53:19.983517
| 2017-01-27T20:49:16
| 2017-01-27T20:49:16
| 80,243,964
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
from inspect import currentframe
def caller():
data = [1, 2, 3]
change_data()
return data
def change_data():
currentframe().f_back.f_locals["data"].append(5)
print(caller())
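# Note: this works only because it mutates the list object reached through the
# caller's f_locals snapshot; in CPython, assigning a *new* value into a
# function frame's f_locals (e.g. f_locals["data"] = [...]) is not guaranteed
# to propagate back into the running caller.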
|
[
"danilo.bellini@gmail.com"
] |
danilo.bellini@gmail.com
|
522ff5caf9d61b505a992daf3e536468393e4998
|
afa59644777f5cd8ae59d0b4b29f91c45d1264a9
|
/Arya/action_list.py
|
bf4801cc54e4d6d9e507bd3da9e72df6eb072382
|
[
"Apache-2.0"
] |
permissive
|
yezimai/oldboyProject
|
aa76beca46be29e164f65b45dda35924d8fa5bbb
|
889eebc2e6158b07ac0964b25eb01df743ad0117
|
refs/heads/master
| 2021-05-13T17:52:45.623762
| 2018-01-09T16:03:56
| 2018-01-09T16:03:57
| 116,824,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
# -*- coding:utf-8 -*-
from plugins import cmd,state
actions = {
'cmd':cmd.CMD,
'state':state.State,
}
|
[
"41815224@qq.com"
] |
41815224@qq.com
|
c2fecdba7d84b5999661f7fc80b389f418cc05d8
|
c6f357f7769d2d873d36946c8d35fd520d2ce07e
|
/Apriori/del.py
|
a7ddc17b3d16a56adc912fde9c9bbf31fc0f06e6
|
[] |
no_license
|
mdmuneerhasan/python
|
9cc4c50af6668da849919b09cd31193904e3bf96
|
3dc267499aad23cfd6b966748f87b9f8064e14d4
|
refs/heads/master
| 2023-01-28T07:11:16.785162
| 2020-12-10T13:11:40
| 2020-12-10T13:11:40
| 306,046,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,156
|
py
|
data = [
['T100', ['I1', 'I2', 'I5']],
['T200', ['I2', 'I4']],
['T300', ['I2', 'I3']],
['T400', ['I1', 'I2', 'I4']],
['T500', ['I1', 'I3']],
['T600', ['I2', 'I3']],
['T700', ['I1', 'I3']],
['T800', ['I1', 'I2', 'I3', 'I5']],
['T900', ['I1', 'I2', 'I3']]
]
table = {}
fCount={}
ms=2
cf=.5
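# ms = minimum support count, cf = minimum confidence threshold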
def count(param, param1):
if len(param) == 0:
if frozenset(param1) in fCount.keys():
fCount[frozenset(param1)]+=1
else:
fCount[frozenset(param1)]=1
return
ele = param[0]
param.remove(ele)
count(list(param),list(param1))
param1.append(ele)
count(list(param),list(param1))
pass
for i in data:
count(list(i[1]),list())
for j in i[1]:
k=frozenset({j})
if k in table.keys():
table[k] += 1
else:
table[k] = 1
def printMy(table):
for i in table.keys():
print(set(i),end=" : ")
print(table[i])
def resolve(table):
newTable={}
for x in table:
for y in table:
k=frozenset(set(x).union(set(y)))
if k in fCount.keys() and y != x and fCount[k]>=ms:
newTable[k]=fCount[k]
print()
printMy(newTable)
if len(newTable) >0:
return resolve(newTable)
else:
return table
def allCombination(param):
ans = []
for x in range(1, 2 ** len(param) - 1):
left = set()
right = set()
for i in range(len(param)):
if (x >> i) & 1:
left.add(param[i])
else:
right.add(param[i])
ans.append([left, right])
return ans
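# Example (illustrative): allCombination(['I1', 'I2']) walks the bitmask x over
# the proper, non-empty splits of the itemset:
#   x=1 -> [{'I1'}, {'I2'}]   x=2 -> [{'I2'}, {'I1'}]
# so findRules below tries each candidate rule left => right in both directions.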
def findRules(table):
for x in table:
combination = allCombination(list(x))
for rules in combination:
conf = fCount[frozenset(rules[0].union(rules[1]))] / fCount[frozenset(rules[0])]
if (conf >= cf):
print(list(rules[0]), end=" => ")
print(list(rules[1]), end=" : ")
print(conf*100 , end=" % \n")
print()
pass
printMy(table)
table=resolve(table)
print("Association rules")
findRules(table)
|
[
"md.muneerhasan@gmail.com"
] |
md.muneerhasan@gmail.com
|
30345da423017d960f32d58366f5333f389f08a6
|
9beecfd2062691fb12f45fe59144b9b47c365d12
|
/temp.py
|
49b1673afa66fcadd9a745d7443b1a4600d65ae0
|
[] |
no_license
|
wufanwillan/leet_code
|
6440ea110cf77828005c17c16f5c79787f255f92
|
e8c07c0ad68870c89a2598c85d3d4211ee177d73
|
refs/heads/master
| 2021-09-09T13:47:10.284460
| 2018-03-16T17:30:59
| 2018-03-16T17:30:59
| 103,131,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
class Solution:
def isToeplitzMatrix(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: bool
"""
        if not matrix:
            return False
        # The numpy-based approach in the source (rotate each row, subtract
        # the first row, then scan "front"/"back" slices) was truncated
        # mid-loop and contained a typo (npc1 vs npcl), so the equivalent
        # direct check is used instead: a matrix is Toeplitz exactly when
        # every element equals its lower-right neighbour.
        rows, cols = len(matrix), len(matrix[0])
        for i in range(rows - 1):
            for j in range(cols - 1):
                if matrix[i][j] != matrix[i + 1][j + 1]:
                    return False
        return True
|
[
"noreply@github.com"
] |
wufanwillan.noreply@github.com
|
f62dcd373e600ccfb3f63f242651f99a0a4806a6
|
16f0846af81fb6030423f1812d482447b0c8b9ac
|
/smart/forms.py
|
53c694c0a3b93f412fdbf8f5775ef0e204758110
|
[] |
no_license
|
emma564/smarthealth
|
5c53c5356723fc32085f9331d4e8546b4eac22f5
|
9800139b6c5b3122575cde434db692f7df9328c8
|
refs/heads/master
| 2021-04-15T05:48:13.233650
| 2018-03-23T14:23:57
| 2018-03-23T14:23:57
| 126,494,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
from django import forms
class GuessDiseaseForm(forms.Form):
disease1 = forms.CharField()
|
[
"35333972+emma564@users.noreply.github.com"
] |
35333972+emma564@users.noreply.github.com
|
a0eb5d3d1707c88fdb8d35ae045786088e86ebcb
|
e4f08f52f36dc903858586caea3071710673f018
|
/projet python/EXO1.py
|
de8805a0ef7bfb09382b4f28743fbe26a9d1112c
|
[] |
no_license
|
AEJ-FORMATION-DATA-IA/EXO_python_Konan_Yassoi
|
60d93afc2fa3e2b4d137de79d2c83ec50c4323d5
|
e2cefea06c9da7b3f688ab2e8e5f67191dcd38f8
|
refs/heads/main
| 2023-08-16T03:48:26.284698
| 2021-10-10T22:34:30
| 2021-10-10T22:34:30
| 415,713,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,390
|
py
|
A=15
B=4
C=A+B
D=A*B
E=A**B
F=A/B
G=A//B
H=A%B
#******* DICTIONARIES *******
dico ={
"A":15,
"B":4,
"C":19,
"D":60,
"E":50625,
"F":3.75,
"G":3,
"H":3
}
print(dico)
dico["I"]=154
print(dico)
del dico["I"]
print(dico)
for i in dico.items():
print(i)
# ******** TUPLES ***********
tpl=(15,4,60)
print(type(tpl))
#to add an item, I first have to convert my tuple to a list
l=list(tpl)
print(l)
l.append(3.75)# append to the list
#then convert my list back into a tuple
tpl=tuple(l)
print(tpl)
# to modify, we convert to a list and then back to a tuple
l=list(tpl)
l[0]=16
print(l)
# convert the list with the modified value back into a tuple
tpl=tuple(l)
print(tpl)
# Remove the value of B from the tuple
l=list(tpl)
del l[1]
print(l)
tpl=tuple(l)
print(tpl)
#****** LISTS **********
#create a list liste1 containing the letters A, B, C, D
liste1=["A", "B","C","D"]
#create a second list liste2 containing the values of A, B, C, D
liste2=[15,4,19,60]
#create a third list containing lists 1 and 2
liste3=[liste1,liste2]
print(liste3)
# Add E and F to liste1
liste1.append("E")
liste1.append("F")
print(liste1)
#remove B from liste1
del liste1[1]
print(liste1)
#Replace the letter A with G
liste1[0]="G"
print(liste1)
|
[
"noreply@github.com"
] |
AEJ-FORMATION-DATA-IA.noreply@github.com
|
26d531843326aa9914e7f70c9c24e9a2c300a3c7
|
0604be693be468ffa3bd9c51a6720dbd052f9eac
|
/my_env/bin/django-admin
|
44497584193af444875932eeeb293482ff81c7d0
|
[] |
no_license
|
anoopnagotra/frames_from_video
|
581951b81edcad149914293518b0d3f95574a908
|
8872cb7b9f4d201c014adefae74407a49157ecf8
|
refs/heads/master
| 2022-12-09T11:27:24.635026
| 2020-09-07T15:02:35
| 2020-09-07T15:02:35
| 274,234,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
#!/home/meetu/p_p/file_management/my_env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"dwelinfo@gmail.com"
] |
dwelinfo@gmail.com
|
|
15ff81a21b836d6ba044187ed2c69b94d5c260cb
|
ab4a19a55b7e951a96faaa68a842cb58a123f31b
|
/Codeforces/381a_sereja_and_dima.py
|
2aa6439f1ab6383b026bcdbc641bfb89ea4989d6
|
[] |
no_license
|
vsseixaso/competitive-programming
|
765c299c2f1a3e46b2b4d2f4e041e2b8413f7476
|
8ce81ad69d145c6cb39e49f669af690e2d1c1356
|
refs/heads/master
| 2020-03-14T20:44:50.102327
| 2018-05-02T01:27:43
| 2018-05-02T01:27:43
| 131,781,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
n = int(raw_input())
l = map(int, raw_input().split())
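# Greedy rule: on each turn the current player takes the larger of the two
# cards remaining at the ends of the row.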
sereja = 0
dima = 0
flag = True # True = sereja | False = dima
while len(l) > 1:
if flag:
if l[0] > l[-1]:
sereja += l[0]
l.pop(0)
else:
sereja += l[-1]
l.pop(-1)
else:
if l[0] > l[-1]:
dima += l[0]
l.pop(0)
else:
dima += l[-1]
l.pop(-1)
    flag = not flag  # switch turns
if flag:
sereja += l[0]
else:
dima += l[0]
print sereja, dima
|
[
"seeixas98@gmail.com"
] |
seeixas98@gmail.com
|
ceb66e687310bdba66c6402c4b4f6bdca72fdaae
|
87f9901774bcf12ee6735d4f4053635037e75a0c
|
/qa/rpc-tests/dao-consultation-consensus-cycle-length.py
|
892576c5c84e04adf8c0288e772c45b2dd513d96
|
[
"MIT"
] |
permissive
|
0x2830/navcoin-core
|
92f9b0185cff37acca3068524ef2e11e17b6a620
|
6d4c580efa1e73791a18d8d2d8e9c9e90fd8e780
|
refs/heads/master
| 2022-01-29T06:15:22.672048
| 2020-05-18T20:41:05
| 2020-05-18T20:41:05
| 240,787,904
| 0
| 2
|
MIT
| 2020-07-03T15:41:25
| 2020-02-15T21:07:09
|
C++
|
UTF-8
|
Python
| false
| false
| 4,310
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Navcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import NavCoinTestFramework
from test_framework.cfund_util import *
import time
class ConsensusConsultationsTest(NavCoinTestFramework):
"""Tests the consultations of the DAO"""
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = []
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-debug=dao']])
def run_test(self):
self.nodes[0].staking(False)
activate_softfork(self.nodes[0], "dao_consensus")
proposal = self.nodes[0].proposeconsensuschange(0, 7)['hash']
slow_gen(self.nodes[0] , 1)
assert ( self.nodes[0].listconsultations()[0]['hash'] == proposal)
first_answer = self.nodes[0].getconsultation(proposal)['answers'][0]['hash']
second_answer = self.nodes[0].proposeanswer(proposal, "5")["hash"]
third_answer_not_supported = self.nodes[0].proposeanswer(proposal, "6")["hash"]
slow_gen(self.nodes[0] , 1)
end_cycle(self.nodes[0])
for consultation in self.nodes[0].listconsultations():
answers = []
for answer in consultation["answers"]:
if answer:
answers.append(answer["hash"])
assert(second_answer in answers and third_answer_not_supported in answers)
self.nodes[0].support(first_answer)
self.nodes[0].support(second_answer)
slow_gen(self.nodes[0] , 1)
#cycle 1
assert_equal(self.nodes[0].getconsultation(proposal)["status"], "found support, waiting for end of voting period")
assert_equal(self.nodes[0].getconsultation(proposal)["votingCyclesFromCreation"], 1)
end_cycle(self.nodes[0])
slow_gen(self.nodes[0] , 1)
#cycle 2
assert_equal(self.nodes[0].getconsultation(proposal)["status"], "found support")
assert_equal(self.nodes[0].getconsultation(proposal)["votingCyclesFromCreation"], 2)
end_cycle(self.nodes[0])
slow_gen(self.nodes[0] , 1)
#cycle 3
assert_equal(self.nodes[0].getconsultation(proposal)["status"], "reflection phase")
assert_equal(self.nodes[0].getconsultation(proposal)["votingCyclesFromCreation"], 3)
phash=self.nodes[0].createproposal(self.nodes[0].getnewaddress(), 1, 10000, "test")["hash"]
createdin=self.nodes[0].getblockcount()
end_cycle(self.nodes[0])
slow_gen(self.nodes[0] , 1)
#cycle 4
assert_equal(self.nodes[0].getconsultation(proposal)["status"], "voting started")
assert_equal(self.nodes[0].getconsultation(proposal)["votingCyclesFromCreation"], 4)
try:
self.nodes[0].consultationvote(proposal,"yes")
raise AssertionError('Consultations cannot be directly voted')
except JSONRPCException as e:
assert(e.error['code']==-5)
try:
self.nodes[0].consultationvote(third_answer_not_supported,"yes")
raise AssertionError('Not supported answers can not be voted')
except JSONRPCException as e:
assert(e.error['code']==-5)
assert_equal(self.nodes[0].getproposal(phash)["votingCycle"], 1)
self.nodes[0].consultationvote(second_answer, "yes")
blocks=end_cycle(self.nodes[0])
self.nodes[0].generate(1)
#cycle 4
assert(self.nodes[0].getconsultation(proposal)["status"] == "passed")
assert_equal(self.nodes[0].getconsensusparameters()[0], 5)
assert_equal(self.nodes[0].getproposal(phash)["votingCycle"], 4)
self.nodes[0].invalidateblock(blocks[-1])
assert_equal(self.nodes[0].getconsensusparameters()[0], 10)
assert_equal(self.nodes[0].getproposal(phash)["votingCycle"], 1)
self.nodes[0].generate(2)
assert_equal(self.nodes[0].getconsensusparameters()[0], 5)
assert_equal(self.nodes[0].getproposal(phash)["votingCycle"], 4)
if __name__ == '__main__':
ConsensusConsultationsTest().main()
|
[
"noreply@github.com"
] |
0x2830.noreply@github.com
|
d9f4b747560741a4072aa3c57d2f03cfdf8e67fb
|
f1917c5662231c1dd51d34c739f1c9065b41f424
|
/c3_project_healthcheck.py
|
c880fba9f53c8b8fbaf5ff42bbde5fb86a82248e
|
[] |
no_license
|
jcardenaslie/c3_healthcheck
|
348a9390a702b2dfe97d2edbeeca20fa6bc2410a
|
3befc7f12f26cc90887ef80daf468d38f8bda30e
|
refs/heads/master
| 2020-09-27T02:17:28.023360
| 2019-12-06T20:09:46
| 2019-12-06T20:09:46
| 226,401,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,902
|
py
|
import os, json
import glob
import pandas as pd
pobjects = list()
# Get existing items in project
with open('project.c3proj') as f:
d = json.load(f)
objects = d['objectTypes']
if 'items' in objects:
o = objects['items']
pobjects.extend(o)
if 'subfolders' in objects:
for i in objects['subfolders']:
o = i['items']
pobjects.extend(o)
# print(len(pobjects), pobjects)
dobjects = dict()
for o in pobjects:
dobjects[o] = {}
dobjects[o]['eventSheets'] = []
# print (dobjects)
def GroupTraverse(dict):
pass
def BlockTraverse(dict):
pass
os.chdir("eventSheets")
for file in glob.glob("*.json"):
if 'uistate' not in file:
with open(file) as f:
d = json.load(f)
sheetName = d['name']
events = d['events'][-1]['children']
for e in events:
if e['eventType'] == 'block': # Block Traverse
conditions = e['conditions']
actions = e['actions']
def IsInEventSheet(key, sName):
if key in pobjects:
z = dobjects[key]['eventSheets']
                            if sName not in z:
                                z.append(sName)  # use the sName parameter rather than silently relying on the enclosing sheetName
print('Conditions:')
for c in conditions :
IsInEventSheet(c['objectClass'], sheetName)
print('Actions:')
for a in actions :
                        IsInEventSheet(a['objectClass'], sheetName)  # was c['objectClass'], a copy-paste slip from the conditions loop
elif e['eventType'] == 'group': # Group Traverse
pass
# break # revisa solo el primer archivo
for o, i in dobjects.items():
print(o, i)
|
[
"noreply@github.com"
] |
jcardenaslie.noreply@github.com
|
c329db22ce0ca92600792af0a7e6e9fa278434aa
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/file_20200811093334.py
|
928cfa231a0fcf7070cf6a70554e18fc7494d258
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
# a,b --> 2*3 == 2 + 2 + 2 = 6
# use for loop or a while loop
# 217856 = 2*1*7*8*5*6
import sys
def product(a,b):
# print('number',sys.maxsize)
if a == 0 or b == 0:
return 0
total = 0
btimes = abs(b)
while btimes > 0:
total +=abs(a)
btimes -=1
    if (a < 0) != (b < 0):
        # exactly one factor is negative, so the product must be negative
        return -total
    return total
print(product(-2,3))
print("========")
print(product(2,-3))
print("========")
print(product(-2,-3))
print("========")
print(product(0,2))
print("========")
print(product(0,2))
print("========")
print(product(9223372036854775807,10))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
bc511d1c201616bb528c913c8a82cfed0708d6a7
|
83a426cc5b8c52e95c4a5770119048b973a28521
|
/python/bin/wheel
|
c33835cf8d558016a4326931cf4dd6faa0d455b0
|
[] |
no_license
|
Seanie96/ProjectEuler
|
af6d3432771bcbacb5ce4445fb2097f98aba76f6
|
cf6219573ff95bbe28a51f7098bce338150aafd6
|
refs/heads/master
| 2020-07-05T20:21:49.278297
| 2020-05-11T01:32:16
| 2020-05-11T01:32:16
| 202,762,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
#!/Users/neuro/Documents/ProjectEuler/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"semcdona@tcd.ie"
] |
semcdona@tcd.ie
|
|
b4a328b85dffb3f2069a882adb700865a0968ed7
|
d13ec1afcf80d0805a3c0f9891b078ff762767ea
|
/users/forms.py
|
9f88dcba51dc8f836013c56aaaf28ce4835d0719
|
[
"BSD-3-Clause"
] |
permissive
|
Karina-karina/home
|
16903b849559f93d3a58f442dd50175f00de64c7
|
55ab529be965cf93f3fc39fcdf954be67ff2f885
|
refs/heads/master
| 2022-08-01T07:49:48.775888
| 2020-05-21T01:46:25
| 2020-05-21T01:46:25
| 265,726,999
| 0
| 0
|
BSD-3-Clause
| 2020-05-21T01:46:18
| 2020-05-21T01:39:56
|
Python
|
UTF-8
|
Python
| false
| false
| 319
|
py
|
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import get_user_model
from django import forms
User = get_user_model()
class CreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = User
fields = ('first_name', 'last_name', 'username', 'email')
|
[
"karinax@bk.ru"
] |
karinax@bk.ru
|
55e43dae5c6af26eee520a141df3b47254a87dc0
|
15b73703241d286a87ca127ff3265f379ad8450e
|
/publisher.py
|
aff1607b593cab099270e0409f8f226f55af48bc
|
[] |
no_license
|
aditi-govindu/MQTT-Paho
|
3f0246c76c3ef42868f32e3706e0c8f085e686fb
|
ed9baf571ea8d8cfa7172ffeee4a6b9d9bedbdb5
|
refs/heads/main
| 2023-08-01T07:31:24.745173
| 2021-09-19T11:47:11
| 2021-09-19T11:47:11
| 408,106,937
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
# Import modules
import time
import paho.mqtt.client as paho
import random
broker = 'broker.hivemq.com'
# Function to connect to Mosquitto server, where rc = 0 indicates success
def on_connect(client1, userdata, flags, rc):
print('Publisher connected with result '+ str(rc))
time.sleep(2)
# Create client
client1 = paho.Client('client-001')
print('Connecting to broker:',broker)
# Connect to broker and start publishing data
client1.connect(broker)
client1.on_connect = on_connect
client1.loop_start()
try:
while True:
for i in range(10,50):
# Print temperature as a random number in range 30-100
temp = random.randint(30, 100)
# Publish temp to server
print(f'Publishing {temp}\n')
client1.publish('Test/Temperature', str(temp))
time.sleep(2)
except KeyboardInterrupt:
# End infinite loop using Ctrl+C in command prompt
client1.loop_stop()
client1.disconnect()
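# A minimal matching subscriber sketch (assumed companion script, not part of
# this file; the client id and callback names below are illustrative):
#
# import paho.mqtt.client as paho
#
# def on_message(client, userdata, msg):
#     print(msg.topic, msg.payload.decode())
#
# sub = paho.Client('client-002')
# sub.connect('broker.hivemq.com')
# sub.subscribe('Test/Temperature')
# sub.on_message = on_message
# sub.loop_forever()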
|
[
"noreply@github.com"
] |
aditi-govindu.noreply@github.com
|
cf12187c447dc3631d014c75ea2822a55d590620
|
2d25e842c04ed7352bd1f714080b882c77ea7d21
|
/form/hello.py
|
c2746e3011dd11394a6cdeb647daa61302229e4a
|
[
"MIT"
] |
permissive
|
mikalegall/flask
|
67fbbd8083c2b12a15446980a7282d171456c4e5
|
743faaf5ef6ef165612bbbc1b451613e522a5326
|
refs/heads/main
| 2023-05-31T04:19:24.866396
| 2021-07-03T16:35:24
| 2021-07-03T16:35:24
| 370,290,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,167
|
py
|
from flask import Flask, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms.ext.sqlalchemy.orm import model_form
app = Flask(__name__)
app.secret_key = "mikalegall"
db = SQLAlchemy(app)
class Vieraskirja(db.Model):
id = db.Column(db.Integer, primary_key=True)
viesti = db.Column(db.String, nullable=False)
kirjoittaja = db.Column(db.String, nullable=False)
VieraskirjaLomake = model_form(Vieraskirja, base_class=FlaskForm, db_session=db.session)
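# model_form generates a WTForms form class directly from the SQLAlchemy
# model, so Vieraskirja's columns (viesti, kirjoittaja) become form fields
# without being declared by hand.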
@app.before_first_request
def alustus():
db.create_all()
@app.route('/', methods=["GET", "POST"])
def index():
lomake = VieraskirjaLomake()
if "viesti" in request.form:
viesti = request.form["viesti"]
kirjoittaja = request.form["kirjoittaja"]
merkinta = Vieraskirja(viesti=viesti, kirjoittaja=kirjoittaja)
print("Merkintä = ", merkinta)
db.session.add(merkinta)
db.session.commit()
return redirect("/")
merkinnat = Vieraskirja.query.all()
return render_template('index.html', lomake=lomake, merkinnat=merkinnat)
if __name__ == "__main__":
app.run()
|
[
"mika.legall@live.fi"
] |
mika.legall@live.fi
|
1bb8bc0a218b5f3577ca94c36224116dca1c7d9b
|
28abdd49560018ee70382a6c49d2ee8e674f184e
|
/domains/school/trapthings.py
|
450c0d9b551f8f11a05daf9796be36f82c67d5b5
|
[] |
no_license
|
davedotluebke/old-skool-text-game
|
27a5b7f88f67812fefeb4c8c2815ab188d987dc7
|
6a900b3d93c661a4d01adc8a18e3a69b7c6686a2
|
refs/heads/master
| 2023-01-14T13:12:21.222911
| 2023-01-08T23:52:23
| 2023-01-08T23:52:23
| 66,109,727
| 7
| 5
| null | 2022-12-27T23:25:51
| 2016-08-19T20:17:48
|
Python
|
UTF-8
|
Python
| false
| false
| 577
|
py
|
from thing import Thing
from room import Room
import gametools
class TrapThing(Thing):
def __init__(self, default_name, path, trap_message, trap_location, ID):
super().__init__(default_name, path, ID)
self.trap_message = trap_message
self.trap_location = trap_location
def take(self, p, cons, oDO, oIDO):
if oDO != self and oIDO != self:
return "I don't understand what you're trying to take!"
cons.write(self.trap_message)
cons.user.move_to(gametools.load_room(self.trap_location))
return True
|
[
"gitbot@luebke.us"
] |
gitbot@luebke.us
|
b845b1a61876fd712c50d5ddd345e78b0817e97f
|
d8e4120d3d474beba2f57ae2841a30bc8b87a5d6
|
/app.py
|
e0bfb1d882f99bfbe52ef1a7b5502a016c4d0665
|
[] |
no_license
|
HugoOrtega1/-Heroku-Absenteeism---Predictor
|
87fb7c2e805fe925d466f5d7f2c7f2aa062ea166
|
819b1f86c358f63ecb800b4245a992046b8c3d70
|
refs/heads/main
| 2023-04-13T01:48:42.584357
| 2021-04-19T17:28:15
| 2021-04-19T17:28:15
| 358,901,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 8 16:35:06 2021
@author: Hugo
"""
from flask import Flask, render_template, request
import pickle
import numpy as np
# (removed unused imports: pandas and sklearn.utils.check_array)

app = Flask(__name__)
# Load the pre-trained classifier serialized to the file 'model'
with open('model', 'rb') as f:
    model = pickle.load(f)
@app.route('/')
def home():
return render_template('Home.html')
@app.route('/predictor', methods=['POST','GET'])
def predict():
if request.method == 'POST':
rea = request.form['ReasonCategory']
tran = request.form['Transport Expense']
age = request.form['Age']
edu = request.form['Education']
chi = request.form['Children']
        # Build a single-row feature array; cast to float since form values arrive as strings
        inp = np.array([[rea, tran, age, edu, chi]], dtype=float)
        prediction = model.predict(inp)
        if prediction[0] == 1:
return render_template('afteryes.html')
else:
return render_template('afterno.html')
else:
return render_template('predictor.html')
if __name__ == "__main__":
app.run(debug=True, use_reloader=False)
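# --- Illustrative only: one way the pickled 'model' above might have been
# --- produced; the estimator type and feature layout are assumptions, not
# --- taken from this repository.
# from sklearn.linear_model import LogisticRegression
# clf = LogisticRegression()
# clf.fit(X_train, y_train)  # X_train columns: reason, transport, age, education, children
# with open('model', 'wb') as f:
#     pickle.dump(clf, f)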
|
[
"noreply@github.com"
] |
HugoOrtega1.noreply@github.com
|
cbcb9c1c622fa89f49a8a6150586914a54029877
|
128e2652e2f0a4b4be57894bffab461c90170657
|
/tej_python/demo2.py
|
d7f366dc07e7ee111659d69ec9bf0ad697778280
|
[] |
no_license
|
tejadeep/Python_files
|
ec2695ec844e723d0784a67bd13c9a178c0aa601
|
43fde84dd62543e5ed9896177949cca877fdb858
|
refs/heads/master
| 2020-06-11T04:35:55.850108
| 2019-08-08T07:23:23
| 2019-08-08T07:23:23
| 193,851,082
| 0
| 0
| null | 2019-06-26T07:19:34
| 2019-06-26T07:19:34
| null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
from tkinter import *
from tkinter import messagebox

top = Tk()
Label(top, text="My calculator").grid(row=0, column=1)
Label(top, text="Number 1").grid(row=1, column=0)
Label(top, text="Number 2").grid(row=2, column=0)
Label(top, text="Operator").grid(row=3, column=0)
Label(top, text="Answer").grid(row=4, column=0)
E1 = Entry(top, bd=5)
E1.grid(row=1, column=1)
E2 = Entry(top, bd=5)
E2.grid(row=2, column=1)
E3 = Entry(top, bd=5)
E3.grid(row=3, column=1)
E4 = Entry(top, bd=5)
E4.grid(row=4, column=1)

def calculate():
    # Combine "number1 operator number2", evaluate it, and show the result.
    # NOTE: eval is acceptable for a toy calculator but unsafe for untrusted input.
    try:
        result = eval(E1.get() + E3.get() + E2.get())
        E4.delete(0, END)
        E4.insert(0, str(result))
    except Exception:
        messagebox.showerror("My calculator", "Invalid input")

Button(top, text="Submit", command=calculate).grid(row=5, column=1)
top.mainloop()
|
[
"you@example.com"
] |
you@example.com
|
5a1cc1af7053037f9662afa8d9d6a339cd2117dd
|
c3d60b498fc47ed64b4f8292ce41f061ec61aff9
|
/category/admin.py
|
dd2cffcca72e9f1c8f982ec2c27ad03f9da8c3e6
|
[
"MIT"
] |
permissive
|
VikrantKumar121/kart
|
b92838526fd5c3035be836c163789a54a80c4f04
|
a330349b68c6ecfedcbf3b9ccf5890237009bfbf
|
refs/heads/main
| 2023-08-11T19:42:34.632786
| 2021-09-14T21:02:50
| 2021-09-14T21:02:50
| 398,318,146
| 0
| 0
|
MIT
| 2021-09-07T11:14:58
| 2021-08-20T15:18:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 312
|
py
|
from django.contrib import admin
from .models import Category
# Register your models here.
class CategoryAdmin(admin.ModelAdmin):
prepopulated_fields = {
'slug': ('category_name',)
}
list_display = [
'category_name',
'slug'
]
admin.site.register(Category, CategoryAdmin)
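# --- For context (illustrative, not part of this file): the admin class above
# --- assumes a Category model roughly along these lines in .models; field
# --- options such as max_length are guesses.
# class Category(models.Model):
#     category_name = models.CharField(max_length=50, unique=True)
#     slug = models.SlugField(max_length=100, unique=True)
#
#     def __str__(self):
#         return self.category_name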
|
[
"vikrant.kumar@namasys.co@gmail.com"
] |
vikrant.kumar@namasys.co@gmail.com
|
c829a6eddd44dfeaeec21155f8639212cceb2a8c
|
62d068cf827f909b84de196ad73195ff868568e0
|
/docs/sphinx/source/conf.py
|
ccb762c0ef639b3a0e418f5b0bf8d204e92a0291
|
[] |
no_license
|
Kexin-Zhang-UCAS/alchemistbook
|
e2e71655a4f533c5f63398a4a6e5503050962da0
|
d7882fd2ea21d140139e5b2ed2449df1cf4ddaf5
|
refs/heads/master
| 2023-08-06T00:39:06.495706
| 2021-09-17T14:48:11
| 2021-09-17T14:48:11
| 406,634,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,302
|
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'alchemistbook'
copyright = '2021, Kexin-Zhang-UCAS'
author = 'Kexin-Zhang-UCAS'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['recommonmark']
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'restructuredtext',
'.md': 'markdown',
}
# source_parsers was deprecated in Sphinx 1.8 and removed in 3.0; with the
# recommonmark extension enabled above, the Markdown parser is registered
# automatically, so the explicit mapping is left commented out here.
# source_parsers = {'.md': 'recommonmark.parser.CommonMarkParser'}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
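# --- Illustrative note (not part of the original conf.py): recommonmark is
# --- deprecated; on current Sphinx the equivalent Markdown setup would be
# --- roughly:
# extensions = ['myst_parser']
# source_suffix = {'.rst': 'restructuredtext', '.md': 'markdown'}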
|
[
"1033477342@qq.com"
] |
1033477342@qq.com
|
1590637fb6d44e2dfd174e0fc491b295bdb63ba6
|
1a13a22db3cc21d260bf41e26c14ca7339adeed1
|
/bkit/__init__.py
|
db7790daabea628ea7e313f49518ad76733f870e
|
[
"MIT"
] |
permissive
|
rahmanhpu/bkit
|
26b960ee00b95969a5ebb63db01e26a4ae6a0dd9
|
35b0c5738a8a77d3113701d371b9811888f341c9
|
refs/heads/master
| 2023-03-13T12:43:49.699412
| 2021-02-28T19:28:01
| 2021-02-28T19:28:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46
|
py
|
from . import milestoning
from . import ctmc
|
[
"jpthompson17@gmail.com"
] |
jpthompson17@gmail.com
|
94064235e906eeb34671bd302d9351cdc6579a28
|
94c1aef818f7a44792407f889b960d08a05f9f1d
|
/script/03_plots.py
|
ce7e412e253befdc05b9a1262ac86cc7c4275a39
|
[] |
no_license
|
JenniferJoerissen/python-novice
|
6e25a179db9be03ba093850ae59891fc66ecb736
|
f073475ff2191ecc78c0fc295bd4f9b556e07943
|
refs/heads/master
| 2020-04-02T17:45:41.824831
| 2018-10-25T17:05:30
| 2018-10-25T17:05:30
| 154,670,834
| 0
| 2
| null | 2018-10-25T17:01:14
| 2018-10-25T12:55:25
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,509
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas #import library needed to read in csv files
# In[4]:
anz = pandas.read_csv("../data/gapminder_gdp_oceania.csv", index_col = "country")
europe = pandas.read_csv("../data/gapminder_gdp_europe.csv", index_col = "country")
# In[5]:
get_ipython().run_line_magic('who', '')
# Import plotting library and ensure that plotting occurs in the same chunk
# In[10]:
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
# Add a plot of transformed data
# In[33]:
anz.T.plot()
# In[29]:
europe.T.plot()
# In[13]:
anz.columns
# In[14]:
europe.columns
# Format column names to only the numbers by stripping off the preceding string
# In[59]:
# str.strip removes any of the listed characters from both ends of each label;
# it works here because year digits are not in that set, though
# str.replace('gdpPercap_', '') would state the intent more directly.
years = anz.columns.str.strip('gdpPercap_')
anz.columns = years
europe.columns = years
# In[77]:
anz.T.plot()
plt.ylabel("GDP per Capita")
plt.style.use("seaborn")
# In[34]:
plt.style.available
# In[69]:
plt.style.use('default')
europe.plot(kind = 'box')
# In[72]:
gdp_i = europe.loc[['Italy', 'Ireland', 'Iceland'], :]  # a list, not a tuple, selects multiple rows
# In[75]:
gdp_i
# In[88]:
plt.plot(years, gdp_i.iloc[0], 'g--', label = 'Italy')
plt.plot(years, gdp_i.iloc[1], 'b--', label = 'Ireland')
plt.plot(years, gdp_i.iloc[2], 'r-', label = 'Iceland')
plt.legend(loc = "best")
plt.xlabel('Years')
plt.ylabel('GDP per Capita')
plt.title('GDP per Capita in European countries starting with I')
plt.savefig('../plots/GDP_i_countries.png')
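# --- Illustrative note (not in the original notebook export): outside Jupyter,
# --- figures are not displayed automatically; plt.show() would be needed after
# --- each plot.
# plt.show()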
|
[
"Jenni-Joerissen@hotmail.com"
] |
Jenni-Joerissen@hotmail.com
|
603c02c9caff8ea51ad51d3a55e4821cad4a9f6b
|
d61056dfc89703368656f403913693e20269b954
|
/inventario.py
|
230ccc5c8ee2c4f733342aa7a1265d7ad7881ec4
|
[] |
no_license
|
antocaroca/inventario
|
592eadc6d42458f1fb1c8d1eecbeb60f380c7d44
|
2f351b8a8eb98c9abd006d3b98ef340e38f73e32
|
refs/heads/master
| 2022-04-23T17:38:10.901036
| 2020-04-22T00:12:16
| 2020-04-22T00:12:16
| 255,771,595
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,129
|
py
|
from tabulate import tabulate
class Producto:
def __init__(self, nombre, cantidad, precio, id_producto, categoria, peso):
self.nombre = nombre
self.cantidad = cantidad
self.precio = precio
self.id_producto = id_producto
self.categoria = categoria
self.peso = peso
    def registrar(self):
        # Append one semicolon-separated record per product; the with-block
        # closes the file, so the explicit close() the original had was redundant
        with open("inventario.txt", "a") as inv:
            inv.write("\n" + f"{self.nombre}")
            inv.write(";" + f"{self.cantidad}")
            inv.write(";" + f"{self.precio}")
            inv.write(";" + f"{self.id_producto}")
            inv.write(";" + f"{self.categoria}")
            inv.write(";" + f"{self.peso}")
class Inventario():
    def ingresar_datos():
        nombre = input("Enter product name: ")
        cantidad = input("Enter product quantity: ")
        precio = input("Enter product price: ")
        id_producto = input("Enter numeric product id: ")
        categoria = input("Enter product category: ")
        peso = input("Enter product weight: ")
        producto1 = Producto(nombre, cantidad, precio, id_producto, categoria, peso)
        producto1.registrar()
        print("\n****Product:", producto1.nombre, "was registered successfully****")
    def buscar_nombre_id_precio():
        archivo = open("inventario.txt", "r")
        nombres = {}
        headers = ["Name", "Quantity", "Price", "ID", "Category", "Weight"]
        nombre_id_precio = input("\nEnter product name, id or price: ")
        print()
        # Index every record under its name, id and price so any of the three
        # can be used as the lookup key
        for linea in archivo:
            nombre, cantidad, precio, id_numerico, categoria, peso = linea.strip().split(";")
            if nombre not in nombres:
                nombres[nombre] = []
            nombres[nombre].append((nombre, cantidad, precio, id_numerico, categoria, peso))
            if id_numerico not in nombres:
                nombres[id_numerico] = []
            nombres[id_numerico].append((nombre, cantidad, precio, id_numerico, categoria, peso))
            if precio not in nombres:
                nombres[precio] = []
            nombres[precio].append((nombre, cantidad, precio, id_numerico, categoria, peso))
        # NOTE: raises KeyError if the search term is not present
        print(tabulate(nombres[nombre_id_precio], headers=headers))
        archivo.close()
    def buscar_categoria():
        archivo = open("inventario.txt", "r")
        categorias = {}
        headers2 = ["Category", "Quantity", "Price", "ID", "Name", "Weight"]
        categoria_a_buscar = input("\nEnter the product category: ")
        print()
        for linea in archivo:
            nombre, cantidad, precio, id_numerico, categoria, peso = linea.strip().split(";")
            if categoria not in categorias:
                categorias[categoria] = []
            categorias[categoria].append((categoria, cantidad, precio, id_numerico, nombre, peso))
        print(tabulate(categorias[categoria_a_buscar], headers=headers2))
        archivo.close()
    def buscar_top_9():
        # Despite the name, this lists every product with fewer than 10 units
        archivo = open("inventario.txt", "r")
        cantidades = {}
        headers3 = ["Quantity", "Category", "Price", "ID", "Name", "Weight"]
        for linea in archivo:
            nombre, cantidad, precio, id_numerico, categoria, peso = linea.strip().split(";")
            if cantidad not in cantidades:
                cantidades[cantidad] = []
            cantidades[cantidad].append((cantidad, categoria, precio, id_numerico, nombre, peso))
        lista_de_productos = []
        for value in cantidades.values():
            lista_de_productos.append(list(value))
        lista_prod_final = []
        for i in lista_de_productos:
            for j in i:
                if int(j[0]) < 10:
                    lista_prod_final.append(j)
        print("***** Products with fewer than 10 units in stock *****\n")
        print(tabulate(lista_prod_final, headers=headers3))
        archivo.close()
Inventario.ingresar_datos()
Inventario.buscar_nombre_id_precio()
Inventario.buscar_categoria()
print()
Inventario.buscar_top_9()
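# --- Illustrative only: inventario.txt holds one semicolon-separated record per
# --- line, in the field order written by Producto.registrar, for example:
# --- apple;25;500;1;fruit;0.2   (name;quantity;price;id;category;weight)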
|
[
"antonella_caroca@hotmail.com"
] |
antonella_caroca@hotmail.com
|
4553b6d5bf64bbd8f49554b47d959e627a89ee18
|
421e68516f517902ebe29621ad54924094ffd13b
|
/create_setting_keys.py
|
e935b0ac14b33f7c0d72efd65439fb154354d210
|
[] |
no_license
|
andreaskuepfer/cross-lingual-product-matching
|
a53e7dd5df9b495b444a4a931bdf05c76395794a
|
0bd5d92e4f6d41a5224756a5c4dad3d495e7dcf9
|
refs/heads/main
| 2023-04-28T23:16:50.206376
| 2021-05-20T09:05:02
| 2021-05-20T09:05:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
import json
from argparse import ArgumentParser
from util import create_config_key
def create_keys_from_configs(input_path: str):
"""
:param input_path: Path to settings.json (must be placed in the same directory as the "datasets"-folder)
:return:
"""
    # Read settings file
    with open(input_path) as file:  # no need for an f-string around a plain variable
        settings = json.load(file)
for setting_key, setting_data in settings.items():
print(create_config_key(setting_data))
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-i", "--input", type=str,
help="path to project", metavar="path")
args = parser.parse_args()
input_path = args.input
create_keys_from_configs(input_path)
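# --- Illustrative only: the loop above expects settings.json to map setting
# --- keys to config dicts; the field names below are hypothetical.
# --- {"run_1": {"dataset": "computers", "language": "en"}}
# --- Typical invocation: python create_setting_keys.py -i settings.json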
|
[
"noreply@github.com"
] |
andreaskuepfer.noreply@github.com
|
cad6a5ac1ef1f08fe5dd284357f21c7db199bdf5
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/EightTeV/Qstar/QstarGIToQZ_M_2500Fs05_8TeV_pythia6_cff.py
|
b6d1de12d41f4e2ba6ff9090c8292704ed18df5f
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,499
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(8000.0),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring(
'MSEL = 0 ',
'MSUB(147) = 1 !d* ',
'MSUB(148) = 1 !u* ',
'MSUB(167) = 0 !u* ci ',
'MSUB(168) = 0 !d* ci ',
'PMAS(343,1) = 2500 !mass of d*',
'PMAS(344,1) = 2500 !mass of u*',
'RTCM(41) = 2500 !Lambda = mass ',
'RTCM(43) = 1 !f ',
'RTCM(44) = 1 !fp ',
'RTCM(45) = 0.5 !fs ',
'MDME(174,1) = 0 !Z decay into d dbar',
'MDME(175,1) = 0 !Z decay into u ubar',
'MDME(176,1) = 0 !Z decay into s sbar',
'MDME(177,1) = 0 !Z decay into c cbar',
'MDME(178,1) = 0 !Z decay into b bbar',
'MDME(179,1) = 0 !Z decay into t tbar',
'MDME(182,1) = 1 !Z decay into e- e+',
'MDME(183,1) = 0 !Z decay into nu_e nu_ebar',
'MDME(184,1) = 1 !Z decay into mu- mu+',
'MDME(185,1) = 0 !Z decay into nu_mu nu_mubar',
'MDME(186,1) = 0 !Z decay into tau- tau+',
'MDME(187,1) = 0 !Z decay into nu_tau nu_taubar',
'4000001:ALLOFF !Turn off all u* decays',
'4000001:ONIFMATCH 1 23 !Turn on u*->u Z',
'4000002:ALLOFF !Turn off all d* decays',
'4000002:ONIFMATCH 2 23 !Turn on d*->d Z'
),
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
)
)
#mumugenfilter = cms.EDFilter("MCParticlePairFilter",
# Status = cms.untracked.vint32(1, 1),
# MinPt = cms.untracked.vdouble(2.5, 2.5),
# MaxEta = cms.untracked.vdouble(2.5, 2.5),
# MinEta = cms.untracked.vdouble(-2.5, -2.5),
# ParticleCharge = cms.untracked.int32(-1),
# ParticleID1 = cms.untracked.vint32(13),
# ParticleID2 = cms.untracked.vint32(13)
#)
#ProductionFilterSequence = cms.Sequence(generator*mumugenfilter)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"sha1-481a5a2ac9973b7cab990140e7a395466f50a31e@cern.ch"
] |
sha1-481a5a2ac9973b7cab990140e7a395466f50a31e@cern.ch
|
064a15eca31adf22e6991b9aac56c471247760b2
|
a84f5b92c5c9b7d280362fab75f7ebc7c06f70d3
|
/myTorch/nn/Padding.py
|
1388d2cef9ec8bbe6fdbd716531bdb15d5781e4a
|
[] |
no_license
|
cubayang/DeepLearningForWallShearStressPredictionAndImageSegmentation
|
f3cb416d3ebc1505f64b87364a73c838f797d808
|
dbb5e6a58b0ecfdb4ed3b05e5ca1841a321bd11b
|
refs/heads/master
| 2023-06-30T04:55:18.208681
| 2021-02-26T06:29:47
| 2021-02-26T06:29:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,992
|
py
|
try:
from myTorch.nn import Layer
except ImportError:
from .base import Layer
import torch as _torch
class _periodic_pad_controller:
    """Applies periodic (wrap-around) padding to a 4D tensor of shape (N, C, H, W).

    `padding_tuple` gives per-edge pad widths and `switches` is a 4-tuple of
    0/1 flags, both ordered (left, right, top, bottom).

    Returns:
        The padded tensor when the controller is called on an input tensor.
    """
def __init__(self, padding_tuple, switches):
self.padding_tuple = padding_tuple
self.switches = switches
    def __call__(self, x):
        if sum(self.switches) > 1:
            # left/right pads operate on dim 2 and top/bottom on dim 3, matching
            # pad_left/pad_right and pad_top/pad_bottom below (the two directions
            # were previously swapped here).
            if sum(self.switches[0:2]) == 2:
                x = self.periodic_pad(x, self.padding_tuple[0], direction='r')
            if sum(self.switches[2:]) == 2:
                x = self.periodic_pad(x, self.padding_tuple[-1], direction='c')
else:
all_directions = ['left', 'right', 'top', 'bottom']
for direction, p, switch in zip(all_directions, self.padding_tuple, self.switches):
if switch:
x = getattr(
self, f'pad_{direction}'
)(x, p)
return x
def periodic_pad(self, x, p, direction='r'):
if p == 0:
return x
if direction == 'r':
dim = 2
x = _torch.cat(
[x[:, :, -p:, :], x, x[:, :, :p, :]], dim=dim
)
if direction == 'c':
dim = 3
x = _torch.cat(
[x[:, :, :, -p:], x, x[:, :, :, :p]], dim=dim
)
return x
def pad_left(self, x, p):
return _torch.cat([x[:, :, -p:, :], x], dim=2)
def pad_right(self, x, p):
return _torch.cat([x, x[:, :, :p, :]], dim=2)
def pad_top(self, x, p):
return _torch.cat([x[:, :, :, -p:], x], dim=3)
    def pad_bottom(self, x, p):
        # Wrap-around append of the first p entries, mirroring pad_top;
        # the original sliced x[:, :, :, p:], which duplicated the wrong region.
        return _torch.cat([x, x[:, :, :, :p]], dim=3)
def calculate_padding_1D(
in_size,
out_size,
kernel_size,
stride,
dilation
):
i, o, k = in_size, out_size, kernel_size
s, d = stride, dilation
a = o - 1
b = k - 1
p = (s*a-i+d*b+1)//2
return p
class _DirectionMethod:
def __init__(self, padding_tuple):
self.padding_tuple = padding_tuple
def __call__(self, myClass, *args):
obj = myClass(*args)
obj._padding_switches = self.padding_tuple
return obj
class _SamePadding(Layer):
_padding_switches = (1, 1, 1, 1)
_padding_method = None
_paddingargs = ()
def __init__(
self,
kernel_size,
stride,
dilation
):
super(_SamePadding, self).__init__()
self.padding_tuple = (kernel_size, stride, dilation)
self.constructed = False
def forward(self, x):
self._construct_padder(x)
self.constructed = True
self.out_channels = x.shape[1]
self.in_channels = x.shape[1]
return self.padder(x)
def _construct_padder(self, x):
# if not self.constructed:
row_pad = calculate_padding_1D(
x.shape[2], x.shape[2], *self.padding_tuple)
col_pad = calculate_padding_1D(
x.shape[3], x.shape[3], *self.padding_tuple)
lrtb_padding = [row_pad, row_pad, col_pad, col_pad]
padding_sizes = [
lrtb_padding[0]*self._padding_switches[0],
lrtb_padding[1]*self._padding_switches[1],
lrtb_padding[2]*self._padding_switches[2],
lrtb_padding[3]*self._padding_switches[3]
]
if isinstance(self._padding_method, str):
self.padder = getattr(
_torch.nn, self._padding_method
)(padding_sizes)
else:
self.padder = self._padding_method(
padding_sizes, *self._paddingargs
)
_SamePadding.left = classmethod(_DirectionMethod((1, 0, 0, 0)))
_SamePadding.right = classmethod(_DirectionMethod((0, 1, 0, 0)))
_SamePadding.top = classmethod(_DirectionMethod((0, 0, 1, 0)))
_SamePadding.bottom = classmethod(_DirectionMethod((0, 0, 0, 1)))
_SamePadding.left_right = classmethod(_DirectionMethod((1, 1, 0, 0)))
_SamePadding.top_bottom = classmethod(_DirectionMethod((0, 0, 1, 1)))
class ZeroPad2d(_SamePadding):
_padding_method = 'ZeroPad2d'
class ConstantPad2d(_SamePadding):
    _padding_method = 'ConstantPad2d'
    _paddingargs = (1,)  # must be a one-element tuple; bare (1) is an int and cannot be unpacked
class ReplicationPad2d(_SamePadding):
_padding_method = 'ReplicationPad2d'
class PeriodicPad2d(_SamePadding):
# note does not support padding in only a single direction
def _padding_method(self, padding_sizes):
return _periodic_pad_controller(
padding_sizes, self._padding_switches
)
class PeriodicReplication2d(_SamePadding):
def _padding_method(self, padding_sizes):
row_padder = PeriodicPad2d.top_bottom(*self.padding_tuple)
column_padder = ReplicationPad2d.left_right(*self.padding_tuple)
def output_method(x):
x = row_padder(x)
x = column_padder(x)
return x
return output_method
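# --- Illustrative usage sketch (not part of the original module): 'same'
# --- padding sized for a 3x3 convolution with stride 1 and dilation 1.
# x = _torch.randn(1, 3, 32, 32)
# pad = ZeroPad2d(kernel_size=3, stride=1, dilation=1)
# assert pad(x).shape == (1, 3, 34, 34)  # each spatial dim grows by 2*p with p=1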
|
[
"38807452+hj40@users.noreply.github.com"
] |
38807452+hj40@users.noreply.github.com
|