Dataset schema (one record per source file; each record's metadata is followed by its content and line statistics):

hexsha: string (length 40)
size: int64 (4 to 1.02M)
ext: string (8 classes)
lang: string (1 value)
max_stars_repo_path: string (length 4 to 209)
max_stars_repo_name: string (length 5 to 121)
max_stars_repo_head_hexsha: string (length 40)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k, nullable)
max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 4 to 209)
max_issues_repo_name: string (length 5 to 121)
max_issues_repo_head_hexsha: string (length 40)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 67k, nullable)
max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 4 to 209)
max_forks_repo_name: string (length 5 to 121)
max_forks_repo_head_hexsha: string (length 40)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k, nullable)
max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 4 to 1.02M)
avg_line_length: float64 (1.07 to 66.1k)
max_line_length: int64 (4 to 266k)
alphanum_fraction: float64 (0.01 to 1)
hexsha: 5e980755aede8846395099a7960ebfb4ac369707 | size: 12,677 | ext: py | lang: Python
max_stars/max_issues/max_forks repo_path: mycity/mycity/intents/trash_intent.py | repo_name: EndyPremier/voiceapp311 | head_hexsha: 168cf1665bd9a294823f82373e65c6c1d22b8831 | licenses: ["MIT"] | counts and event datetimes: null
content:
"""
Functions for Alexa responses related to trash day
"""
from mycity.intents.speech_constants.location_speech_constants import \
NOT_IN_BOSTON_SPEECH
from mycity.utilities.location_services_utils import \
request_device_address_permission_response, \
get_address_from_user_device, \
is_address_in_city
from mycity.intents import intent_constants
from mycity.intents.custom_errors import \
InvalidAddressError, BadAPIResponse, MultipleAddressError
from mycity.intents.user_address_intent import \
clear_address_from_mycity_object, request_user_address_response
import mycity.intents.speech_constants.trash_intent as speech_constants
from mycity.mycity_response_data_model import MyCityResponseDataModel
import mycity.utilities.address_utils as address_utils
import usaddress
import re
import requests
import logging
logger = logging.getLogger(__name__)
DAY_CODE_REGEX = r'\d+A? - '
CARD_TITLE = "Trash Day"
def get_trash_day_info(mycity_request):
"""
Generates response object for a trash day inquiry.
:param mycity_request: MyCityRequestDataModel object
:return: MyCityResponseDataModel object
"""
logger.debug('MyCityRequestDataModel received:' +
mycity_request.get_logger_string())
mycity_response = MyCityResponseDataModel()
# Determine if we have required address information. Request if we do not.
if intent_constants.CURRENT_ADDRESS_KEY not in \
mycity_request.session_attributes:
mycity_request, location_permissions = \
get_address_from_user_device(mycity_request)
if not location_permissions:
return request_device_address_permission_response()
elif intent_constants.CURRENT_ADDRESS_KEY not in \
mycity_request.session_attributes:
return request_user_address_response(mycity_request)
current_address = \
mycity_request.session_attributes[intent_constants.CURRENT_ADDRESS_KEY]
# grab relevant information from session address
parsed_address, _ = usaddress.tag(current_address)
    # If we have more specific info than just the street
    # address, make sure we are in Boston
if not is_address_in_city(current_address):
mycity_response.output_speech = NOT_IN_BOSTON_SPEECH
mycity_response.should_end_session = True
mycity_response.card_title = CARD_TITLE
return mycity_response
if not address_utils.is_address_valid(parsed_address):
mycity_response.output_speech = speech_constants.ADDRESS_NOT_UNDERSTOOD
mycity_response.dialog_directive = "ElicitSlotTrash"
mycity_response.reprompt_text = None
mycity_response.session_attributes = mycity_request.session_attributes
mycity_response.card_title = CARD_TITLE
mycity_response.should_end_session = True
return clear_address_from_mycity_object(mycity_response)
# currently assumes that trash day is the same for all units at
# the same street address
address = " ".join([
parsed_address['AddressNumber'],
parsed_address['StreetName'],
parsed_address['StreetNamePostType']])
neighborhood = parsed_address["PlaceName"] \
if "PlaceName" in parsed_address \
and not parsed_address["PlaceName"].isdigit() \
else None
if "Neighborhood" in mycity_request.intent_variables and \
"value" in mycity_request.intent_variables["Neighborhood"]:
neighborhood = \
mycity_request.intent_variables["Neighborhood"]["value"]
try:
trash_days = get_trash_and_recycling_days(address, neighborhood)
trash_days_speech = build_speech_from_list_of_days(trash_days)
mycity_response.output_speech = speech_constants.PICK_UP_DAY.\
format(trash_days_speech)
mycity_response.should_end_session = True
except InvalidAddressError:
address_string = address
mycity_response.output_speech = speech_constants.ADDRESS_NOT_FOUND.\
format(address_string)
mycity_response.dialog_directive = "ElicitSlotTrash"
mycity_response.reprompt_text = None
mycity_response.session_attributes = mycity_request.session_attributes
mycity_response.card_title = CARD_TITLE
mycity_response.should_end_session = False
return clear_address_from_mycity_object(mycity_response)
except BadAPIResponse:
mycity_response.output_speech = speech_constants.BAD_API_RESPONSE
mycity_response.should_end_session = True
except MultipleAddressError as error:
addresses = [re.sub(r' \d{5}', '', address) for address in
error.addresses]
address_list = ', '.join(addresses)
mycity_response.output_speech = speech_constants.\
MULTIPLE_ADDRESS_ERROR.format(address_list)
mycity_response.dialog_directive = "ElicitSlotNeighborhood"
mycity_response.should_end_session = False
# Setting reprompt_text to None signifies that we do not want to reprompt
# the user. If the user does not respond or says something that is not
# understood, the session will end.
mycity_response.reprompt_text = None
mycity_response.session_attributes = mycity_request.session_attributes
mycity_response.card_title = CARD_TITLE
return mycity_response
def get_trash_and_recycling_days(address, neighborhood=None):
"""
Determines the trash and recycling days for the provided address.
These are on the same day, so only one array of days will be returned.
:param neighborhood:
:param address: String of address to find trash day for
:return: array containing next trash and recycling days
:raises: InvalidAddressError, BadAPIResponse
"""
    logger.debug('address: ' + str(address) +
                 ', neighborhood: ' + str(neighborhood))
api_params = get_address_api_info(address, neighborhood)
if not api_params:
raise InvalidAddressError
if not validate_found_address(api_params["name"], address):
logger.debug("InvalidAddressError")
raise InvalidAddressError
trash_data = get_trash_day_data(api_params)
if not trash_data:
raise BadAPIResponse
trash_and_recycling_days = get_trash_days_from_trash_data(trash_data)
return trash_and_recycling_days
def find_unique_addresses(address_request_json):
"""
Finds unique addresses in a provided address request json returned
from the ReCollect service
:param address_request_json: json object returned from ReCollect address
request service
:return: list of unique addresses
"""
logger.debug('address_request_json: ' + str(address_request_json))
# Pre-extract the addresses from the payload and uniquify them
strings_to_compare = sorted(set(address["name"] for address in
address_request_json),
key=len, reverse=True)
return [
compare_a
for i, compare_a in enumerate(strings_to_compare)
if not any(compare_b in compare_a for compare_b in
strings_to_compare[i + 1:])
]
def validate_found_address(found_address, user_provided_address):
"""
Validates that the street name and number found in trash collection
database matches the provided values. We do not treat partial matches
as valid.
:param found_address: Full address found in trash collection database
:param user_provided_address: Street number and name provided by user
:return: boolean: True if addresses are considered a match, else False
"""
    logger.debug('found_address: ' + str(found_address) +
                 ', user_provided_address: ' + str(user_provided_address))
found_address, _ = usaddress.tag(found_address)
user_provided_address, _ = usaddress.tag(user_provided_address)
if found_address["AddressNumber"] != user_provided_address["AddressNumber"]:
return False
# Re-collect replaces South with S and North with N
found_address["StreetName"] = re.sub(r'^S\.? ', "South ",
found_address["StreetName"])
found_address["StreetName"] = re.sub(r'^N\.? ', "North ",
found_address["StreetName"])
if found_address["StreetName"].lower() != \
user_provided_address["StreetName"].lower():
return False
# Allow for mismatched Road street_type between user input and ReCollect API
if "rd" in found_address["StreetNamePostType"].lower() and \
"road" in user_provided_address["StreetNamePostType"].lower():
return True
# Allow fuzzy match on street type to allow "ave" to match "avenue"
if "StreetNamePostType" in found_address and \
"StreetNamePostType" in user_provided_address:
if found_address["StreetNamePostType"].lower() not in \
user_provided_address["StreetNamePostType"].lower() and \
user_provided_address["StreetNamePostType"].lower() not in \
found_address["StreetNamePostType"].lower():
return False
return True
def get_address_api_info(address, neighborhood):
"""
Gets the parameters required for the ReCollect API call
:param address: Address to get parameters for
:return: JSON object containing API parameters with format:
{
'area_name': value,
'parcel_id': value,
'service_id': value,
'place_id': value,
'area_id': value,
'name': value
}
"""
logger.debug('address: ' + address)
base_url = "https://recollect.net/api/areas/" \
"Boston/services/310/address-suggest"
full_address = address if neighborhood is None else ' '.join([address,
neighborhood])
url_params = {'q': full_address, 'locale': 'en-US'}
request_result = requests.get(base_url, url_params)
if request_result.status_code != requests.codes.ok:
logger.debug('Error getting ReCollect API info. Got response: {}'
.format(request_result.status_code))
return {}
result_json = request_result.json()
if not result_json:
return {}
unique_addresses = find_unique_addresses(result_json)
if len(unique_addresses) > 1:
raise MultipleAddressError(unique_addresses)
return result_json[0]
def get_trash_day_data(api_parameters):
"""
Gets the trash day data from ReCollect using the provided API parameters
:param api_parameters: Parameters for ReCollect API
:return: JSON object containing all trash data
"""
logger.debug('api_parameters: ' + str(api_parameters))
# Rename the default API parameter "name" to "formatted_address"
if "name" in api_parameters:
api_parameters["formatted_address"] = api_parameters.pop("name")
base_url = "https://recollect.net/api/places"
request_result = requests.get(base_url, api_parameters)
if request_result.status_code != requests.codes.ok:
logger.debug("Error getting trash info from ReCollect API info. " \
"Got response: {}".format(request_result.status_code))
return {}
return request_result.json()
def get_trash_days_from_trash_data(trash_data):
"""
Parse trash data from ReCollect service and return the trash and recycling
days.
:param trash_data: Trash data provided from ReCollect API
:return: An array containing days trash and recycling are picked up
:raises: BadAPIResponse
"""
logger.debug('trash_data: ' + str(trash_data))
try:
trash_days_string = trash_data["next_event"]["zone"]["title"]
trash_days_string = re.sub(DAY_CODE_REGEX, '', trash_days_string)
trash_days = trash_days_string.replace('&', '').split()
except KeyError:
# ReCollect API returned an unexpected JSON format
raise BadAPIResponse
return trash_days
def build_speech_from_list_of_days(days):
"""
Converts a list of days into proper speech, such as adding the word 'and'
before the last item.
:param days: String array of days
:return: Speech representing the provided days
:raises: BadAPIResponse
"""
logger.debug('days: ' + str(days))
if len(days) == 0:
raise BadAPIResponse
if len(days) == 1:
return days[0]
elif len(days) == 2:
output_speech = " and ".join(days)
else:
output_speech = ", ".join(days[0:-1])
output_speech += ", and {}".format(days[-1])
return output_speech
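# Illustrative checks for build_speech_from_list_of_days (the day names are
# made-up sample inputs; the expected outputs follow directly from the code above):
assert build_speech_from_list_of_days(["Monday"]) == "Monday"
assert build_speech_from_list_of_days(["Monday", "Thursday"]) == "Monday and Thursday"
assert build_speech_from_list_of_days(["Monday", "Wednesday", "Friday"]) == \
    "Monday, Wednesday, and Friday"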
avg_line_length: 37.285294 | max_line_length: 80 | alphanum_fraction: 0.698982

hexsha: 89d96e762fd798966eccddeea5cb056d83133077 | size: 702 | ext: py | lang: Python
max_stars/max_issues/max_forks repo_path: models.py | repo_name: bwasilewski/cardbox | head_hexsha: 2eb47cc40d0230e35878be79793b3646a5dcb876 | licenses: ["MIT"]
max_stars_count: 1 (2015-11-07T04:51:27.000Z to 2015-11-07T04:51:27.000Z) | max_issues/max_forks counts and event datetimes: null
content:
import datetime
from flask import url_for
from cardbox import db
class Card(db.Document):
created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
name = db.StringField()
card_id = db.StringField()
url = db.StringField()
store_url = db.StringField()
types = db.StringField()
subtypes = db.StringField()
colors = db.StringField()
cmc = db.IntField()
cost = db.StringField()
text = db.StringField()
power = db.IntField()
toughness = db.IntField()
formats = db.StringField()
editions = db.StringField()
loyalty = db.StringField()
supertypes = db.StringField()
def display(self):
return '{} {}/{}'.format(self.name, self.power, self.toughness)
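# A usage sketch with hypothetical card values. display() needs no database;
# saving a Card would additionally require a configured MongoDB connection.
def _card_display_example():
    bear = Card(name='Grizzly Bears', power=2, toughness=2, types='Creature')
    return bear.display()  # -> 'Grizzly Bears 2/2'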
avg_line_length: 25.071429 | max_line_length: 77 | alphanum_fraction: 0.69943

hexsha: 5799c79adcbf8b82e43f6fa282eb81841c679819 | size: 2,788 | ext: py | lang: Python
max_stars/max_issues/max_forks repo_path: abjhelp/users/forms.py | repo_name: aleducode/help-abejorral | head_hexsha: cd924bcef45ad45a4ed3d2950fb1002b170309a8 | licenses: ["MIT"]
max_stars_count: 4 (2020-08-18T23:41:34.000Z to 2020-10-08T21:15:54.000Z) | max_issues_count: 3 (2020-08-31T19:38:46.000Z to 2020-08-31T19:39:18.000Z) | max_forks_count: 1 (2020-08-25T18:29:14.000Z to 2020-08-25T18:29:14.000Z)
content:
"""User forms."""
from django import forms
from abjhelp.users.models import HelpRequest, DonorRequest
class HelpRequestForm(forms.Form):
name = forms.CharField(
min_length=2,
max_length=50,
label='Nombre',
error_messages={
'required': 'Este campo es requerido',
},
widget=forms.TextInput(attrs={
'class': 'form-control',
}),
)
description = forms.CharField(
label='Descripción del pedido',
required=False,
error_messages={
'required': 'Este campo es requerido',
},
widget=forms.Textarea(
attrs={
"rows": 5, "cols": 20,
'class': 'form-control',
}),
)
phone_number = forms.CharField(
label='Número del celular de contacto',
widget=forms.TextInput(
attrs={
'class': 'form-control',
}),
error_messages={
'required': 'Este campo es requerido',
},
)
address = forms.CharField(
min_length=2,
max_length=50,
label='¿Dónde estás ubicado?',
required=False,
widget=forms.TextInput(attrs={
'class': 'form-control',
}),
)
def save(self):
"""Create help resquest."""
data = self.cleaned_data
help_request = HelpRequest.objects.get_or_create(**data)
return help_request
class DonorRequestForm(forms.Form):
name = forms.CharField(
min_length=2,
max_length=50,
label='¿Cuál es tu nombre?',
error_messages={
'required': 'Este campo es requerido',
},
widget=forms.TextInput(attrs={
'class': 'form-control',
}),
)
phone_number = forms.CharField(
label='Número de whatsapp',
widget=forms.TextInput(
attrs={
'class': 'form-control',
}),
error_messages={
'required': 'Este campo es requerido',
},
)
email = forms.EmailField(
min_length=2,
max_length=50,
required=False,
label='¿Cuál es tu correo?',
widget=forms.TextInput(attrs={
'class': 'form-control',
}),
error_messages={
'invalid': 'Porfavor ingrese un correo válido',
},
)
address = forms.CharField(
label='¿Dónde estás ubicado?',
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
}),
)
def save(self):
"""Create Donor resquest."""
data = self.cleaned_data
donor_request = DonorRequest.objects.get_or_create(**data)
return donor_request
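# A usage sketch with a hypothetical payload; running it for real requires a
# configured Django project with the abjhelp models migrated.
def _help_request_example():
    form = HelpRequestForm(data={
        'name': 'Ana',
        'description': 'Groceries for the week',
        'phone_number': '3001234567',
        'address': 'Vereda El Retiro',
    })
    if form.is_valid():
        # save() proxies to get_or_create(), which returns (instance, created)
        help_request, created = form.save()
        return help_request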
avg_line_length: 25.345455 | max_line_length: 66 | alphanum_fraction: 0.523314

hexsha: d01c749c1f8a594431ebb8f9cb641e24a7a7343e | size: 3,049 | ext: py | lang: Python
max_stars/max_issues/max_forks repo_path: api/base/authentication/drf.py | repo_name: bdyetton/prettychart | head_hexsha: e8b33a7dfdc8c33d15969586be7f68172795f76d | licenses: ["Apache-2.0"] | counts and event datetimes: null
content:
import itsdangerous
from django.utils.translation import ugettext_lazy as _
from rest_framework import authentication
from rest_framework.authentication import BasicAuthentication
from rest_framework import exceptions
from framework.auth import cas
from framework.sessions.model import Session
from framework.auth.core import User, get_user
from website import settings
def get_session_from_cookie(cookie_val):
"""Given a cookie value, return the `Session` object or `None`."""
session_id = itsdangerous.Signer(settings.SECRET_KEY).unsign(cookie_val)
return Session.load(session_id)
# http://www.django-rest-framework.org/api-guide/authentication/#custom-authentication
class OSFSessionAuthentication(authentication.BaseAuthentication):
"""Custom DRF authentication class which works with the OSF's Session object.
"""
def authenticate(self, request):
cookie_val = request.COOKIES.get(settings.COOKIE_NAME)
if not cookie_val:
return None
session = get_session_from_cookie(cookie_val)
if not session:
return None
user_id = session.data.get('auth_user_id')
user = User.load(user_id)
if user:
return user, None
return None
class OSFBasicAuthentication(BasicAuthentication):
# override BasicAuthentication
def authenticate_credentials(self, userid, password):
"""
Authenticate the userid and password against username and password.
"""
user = get_user(email=userid, password=password)
if userid and user is None:
raise exceptions.AuthenticationFailed(_('Invalid username/password.'))
elif userid is None and password is None:
raise exceptions.NotAuthenticated()
return (user, None)
def authenticate_header(self, request):
return ""
class OSFCASAuthentication(authentication.BaseAuthentication):
"""Check whether the user provides a valid OAuth2 bearer token"""
def authenticate(self, request):
client = cas.get_client() # Returns a CAS server client
try:
auth_header_field = request.META["HTTP_AUTHORIZATION"]
auth_token = cas.parse_auth_header(auth_header_field)
except (cas.CasTokenError, KeyError):
return None # If no token in header, then this method is not applicable
# Found a token; query CAS for the associated user id
try:
resp = client.profile(auth_token)
except cas.CasHTTPError:
raise exceptions.NotAuthenticated('User provided an invalid OAuth2 access token')
if resp.authenticated is False:
raise exceptions.NotAuthenticated('CAS server failed to authenticate this token')
user_id = resp.user
user = User.load(user_id)
if user is None:
raise exceptions.AuthenticationFailed("Could not find the user associated with this token")
return user, auth_token
def authenticate_header(self, request):
return ""
avg_line_length: 35.870588 | max_line_length: 103 | alphanum_fraction: 0.700558

hexsha: 4daa0f7bc70902c3befb0f495203b8f472f1f9d9 | size: 377 | ext: py | lang: Python
max_stars/max_issues/max_forks repo_path: Camera/LineFollower/driverClient.py | repo_name: maroneal/MircoITS | head_hexsha: c83c2a0a4298698edaae181e15514d79ce59e7d0 | licenses: ["BSD-2-Clause"] | counts and event datetimes: null
content:
import socket
import time
import xboxdrv
TCP_IP = "127.0.0.1"
TCP_PORT = 4200
print "TCP target IP:", TCP_IP
print "TCP target port:", TCP_PORT
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while True:
if ord('a') == True:
MESSAGE[0] = "1.0000"
else:
MESSAGE[0] = "0.0000"
sock.sendto(MESSAGE, (TCP_IP, TCP_PORT))
time.sleep(5)
avg_line_length: 17.136364 | max_line_length: 56 | alphanum_fraction: 0.644562

hexsha: 6821602d3d3fc8ceaa615b81bb4b620cf747213b | size: 3,585 | ext: py | lang: Python
max_stars_repo_path: art/estimators/keras.py | max_stars_repo_name: meghana-sesetti/adversarial-robustness-toolbox | head_hexsha: 6a5ce9e4142734ad9004e5c093ef8fa754ea6b39 | licenses: ["MIT"] | max_stars_count: 1 (2021-09-09T13:19:34.000Z to 2021-09-09T13:19:34.000Z)
max_issues_repo_path: art/estimators/keras.py | max_issues_repo_name: Tikquuss/adversarial-robustness-toolbox | head_hexsha: 62ffe7c951d8a60d49a9ea6ac7b04aa4432a3fb7 | licenses: ["MIT"] | max_issues_count: 33 (2021-01-18T08:30:34.000Z to 2022-03-11T07:05:13.000Z)
max_forks_repo_path: art/estimators/keras.py | max_forks_repo_name: Tikquuss/adversarial-robustness-toolbox | head_hexsha: 62ffe7c951d8a60d49a9ea6ac7b04aa4432a3fb7 | licenses: ["MIT"] | max_forks_count: 1 (2021-09-09T13:19:35.000Z to 2021-09-09T13:19:35.000Z)
content:
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the abstract estimator `KerasEstimator` for Keras models.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
from art.estimators.estimator import (
BaseEstimator,
NeuralNetworkMixin,
LossGradientsMixin,
)
logger = logging.getLogger(__name__)
class KerasEstimator(NeuralNetworkMixin, LossGradientsMixin, BaseEstimator):
"""
Estimator class for Keras models.
"""
def __init__(self, **kwargs) -> None:
"""
Estimator class for Keras models.
"""
super().__init__(**kwargs)
def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs):
"""
Perform prediction of the neural network for samples `x`.
:param x: Samples of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,
nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2).
:param batch_size: Batch size.
:return: Predictions.
:rtype: Format as expected by the `model`
"""
        return NeuralNetworkMixin.predict(self, x, batch_size=batch_size, **kwargs)  # forward the caller's batch size instead of hard-coding 128
def fit(self, x: np.ndarray, y, batch_size: int = 128, nb_epochs: int = 20, **kwargs) -> None:
"""
Fit the model of the estimator on the training data `x` and `y`.
:param x: Samples of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,
nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2).
:param y: Target values.
:type y: Format as expected by the `model`
:param batch_size: Batch size.
:param nb_epochs: Number of training epochs.
"""
        NeuralNetworkMixin.fit(self, x, y, batch_size=batch_size, nb_epochs=nb_epochs, **kwargs)  # forward the caller's batch size and epoch count instead of hard-coding them
def loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
"""
Compute the loss of the neural network for samples `x`.
:param x: Samples of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,
nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2).
:param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices
of shape `(nb_samples,)`.
:return: Loss values.
:rtype: Format as expected by the `model`
"""
raise NotImplementedError
avg_line_length: 42.678571 | max_line_length: 120 | alphanum_fraction: 0.691771

hexsha: 1799ca6e02a15d00c49565f6e10fcf659f4d9a70 | size: 6,510 | ext: py | lang: Python
max_stars/max_issues/max_forks repo_path: assists/baremetallinker_xlnx.py | repo_name: hakonfam/lopper | head_hexsha: 8aec5c7fbf5bff874c859f2289253db17160b6e0 | licenses: ["BSD-3-Clause"] | counts and event datetimes: null
content:
#/*
# * Copyright (c) 2020 Xilinx Inc. All rights reserved.
# *
# * Author:
# * Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
# *
# * SPDX-License-Identifier: BSD-3-Clause
# */
import struct
import sys
import types
import unittest
import os
import getopt
import re
import subprocess
import shutil
from pathlib import Path
from pathlib import PurePath
from io import StringIO
import contextlib
import importlib
from lopper import Lopper
from lopper import LopperFmt
import lopper
from lopper_tree import *
from re import *
sys.path.append(os.path.dirname(__file__))
from baremetalconfig_xlnx import scan_reg_size, get_cpu_node
from bmcmake_metadata_xlnx import to_cmakelist
def is_compat( node, compat_string_to_test ):
if re.search( "module,baremetallinker_xlnx", compat_string_to_test):
return xlnx_generate_bm_linker
return ""
# tgt_node: is the baremetal config top level domain node number
# sdt: is the system device-tree
# options: baremetal application source path
def get_memranges(tgt_node, sdt, options):
root_node = sdt.tree[tgt_node]
root_sub_nodes = root_node.subnodes()
mem_nodes = []
    # Maintain a static memory IP list; this is needed in order to capture the proper IP name in the linker script
xlnx_memipname = {"axi_bram": 0, "ps7_ddr": 0, "psu_ddr": 0, "psv_ddr": 0, "mig": 0, "lmb_bram": 0, "axi_noc": 0, "psu_ocm": 0, "psv_ocm": 0, "ddr4": 0}
for node in root_sub_nodes:
try:
device_type = node["device_type"].value
if "memory" in device_type:
mem_nodes.append(node)
except:
pass
# Yocto Machine to CPU compat mapping
match_cpunodes = get_cpu_node(sdt, options)
address_map = match_cpunodes[0].parent["address-map"].value
all_phandles = []
ns = match_cpunodes[0].parent["#ranges-size-cells"].value[0]
na = match_cpunodes[0].parent["#ranges-address-cells"].value[0]
cells = na + ns
tmp = na
while tmp < len(address_map):
all_phandles.append(address_map[tmp])
tmp = tmp + cells + na + 1
mem_ranges = {}
for node in mem_nodes:
# Check whether the memory node is mapped to cpu cluster or not
mem_phandles = [handle for handle in all_phandles if handle == node.phandle]
addr_list = []
if mem_phandles:
            # Remove duplicate phandle references
mem_phandles = list(dict.fromkeys(mem_phandles))
indx_list = [index for index,handle in enumerate(address_map) for val in mem_phandles if handle == val]
for inx in indx_list:
start = [address_map[inx+i+1] for i in range(na)]
if na == 2 and start[0] != 0:
val = str(start[1])
pad = 8 - len(val)
val = val.ljust(pad + len(val), '0')
reg = int((str(hex(start[0])) + val), base=16)
addr_list.append(reg)
elif na == 2:
addr_list.append(start[1])
else:
addr_list.append(start[0])
nac = node.parent["#address-cells"].value[0]
nsc = node.parent["#size-cells"].value[0]
val = node['reg'].value
total_nodes = int(len(val)/(nac+nsc))
name_list = [name.replace("_", "-") for name in list(xlnx_memipname.keys())]
try:
compat = node['compatible'].value[0]
match = [mem for mem in name_list if mem in compat]
for i in range(total_nodes):
reg, size = scan_reg_size(node, val, i)
valid_range = [addr for addr in addr_list if reg == addr or addr > reg]
if valid_range:
key = match[0].replace("-", "_")
linker_secname = key + str("_") + str(xlnx_memipname[key])
mem_ranges.update({linker_secname: [valid_range[0], size]})
xlnx_memipname[key] += 1
except KeyError:
pass
return mem_ranges
# tgt_node: is the baremetal config top level domain node number
# sdt: is the system device-tree
# options: baremetal application source path
def xlnx_generate_bm_linker(tgt_node, sdt, options):
mem_ranges = get_memranges(tgt_node, sdt, options)
default_ddr = None
memtest_config = None
machine = options['args'][0]
try:
memtest_config = options['args'][2]
except IndexError:
pass
with open('memory.ld', 'w') as fd:
fd.write("MEMORY\n")
fd.write("{\n")
if memtest_config:
traverse = False
else:
traverse = True
for key, value in sorted(mem_ranges.items(), key=lambda e: e[1][1], reverse=traverse):
if default_ddr is None:
default_ddr = key
start,size = value[0], value[1]
"""
LMB BRAM initial 80 bytes being used by the linker vectors section
Adjust the size and start address accordingly.
"""
if "lmb_bram" in key:
start = 80
size -= start
"""
PS7 DDR initial 1MB is reserved memory
Adjust the size and start address accordingly.
"""
if "ps7_ddr" in key:
start = 1048576
size -= start
"""
For R5 PSU DDR initial 1MB is reserved for tcm
Adjust the size and start address accordingly.
"""
if "psu_ddr" in key and machine == "cortexr5-zynqmp" and start == 0:
start = 1048576
size -= start
if "axi_noc" in key and machine == "cortexr5-versal" and start == 0:
start = 1048576
size -= start
fd.write("\t%s : ORIGIN = %s, LENGTH = %s\n" % (key, hex(start), hex(size)))
fd.write("}\n")
src_dir = os.path.dirname(options['args'][1])
src_dir = os.path.dirname(src_dir)
appname = src_dir.rsplit('/', 1)[-1]
cmake_file = appname.capitalize() + str("Example.cmake")
with open(cmake_file, 'a') as fd:
fd.write("set(DDR %s)\n" % default_ddr)
memip_list = []
for key, value in sorted(mem_ranges.items(), key=lambda e: e[1][1], reverse=traverse):
memip_list.append(key)
fd.write("set(%s %s)\n" % (key, to_cmakelist([hex(value[0]), hex(value[1])])))
fd.write("set(TOTAL_MEM_CONTROLLERS %s)\n" % to_cmakelist(memip_list))
return True
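# A standalone sketch of the MEMORY-block formatting written to memory.ld above,
# driven by a hypothetical mem_ranges dict (the addresses and sizes are made up):
def _example_memory_ld():
    sample_ranges = {"psu_ddr_0": [0x100000, 0x7FF00000], "psu_ocm_0": [0xFFFC0000, 0x40000]}
    lines = ["MEMORY", "{"]
    for name, (origin, length) in sample_ranges.items():
        lines.append("\t%s : ORIGIN = %s, LENGTH = %s" % (name, hex(origin), hex(length)))
    lines.append("}")
    return "\n".join(lines)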
avg_line_length: 36.779661 | max_line_length: 157 | alphanum_fraction: 0.590783

hexsha: 7bcb5ad832c4d32592a8da0d6abf1106c38d9574 | size: 456 | ext: py | lang: Python
max_stars/max_issues/max_forks repo_path: day1/day1_pt2.py | repo_name: jwhitex/AdventOfCode2018 | head_hexsha: e552185f7d6413ccdad824911c66a6590e8de9bb | licenses: ["MIT"] | counts and event datetimes: null
content:
#!/usr/bin/env python
def runSim(fsum, series, fdict):
for i in range(0,len(series)):
fsum += series[i]
if fsum in fdict:
return (True, fsum)
fdict[fsum] = None
return (False, fsum)
fin=[]
with open("day1.input") as file:
for line in file:
fin+=[int(line.rstrip("\n\r"))]
fdict=dict({ 0:None})
fsum = 0
while True:
found,fsum = runSim(fsum, fin, fdict)
if found:
break
print(fsum)
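# A self-contained check of runSim on a small sample series (independent of the
# day1.input file): with [+1, -2, +3, +1] the running total first repeats at 2
# (totals go 0, 1, -1, 2, 3, then 4, 2).
sample = [1, -2, 3, 1]
seen = dict({0: None})
total = 0
found = False
while not found:
    found, total = runSim(total, sample, seen)
print(total)  # expected: 2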
avg_line_length: 19.826087 | max_line_length: 41 | alphanum_fraction: 0.567982

hexsha: 523f06fc63a0ee44a4d562447c0677170397f47b | size: 4,847 | ext: py | lang: Python
max_stars_repo_path: applications/EigenSolversApplication/tests/test_eigen_direct_solver.py | max_stars_repo_name: HubertBalcerzak/Kratos | head_hexsha: c15689d53f06dabb36dc44c13eeac73d3e183916 | licenses: ["BSD-4-Clause"] | max_stars_count: null
max_issues_repo_path: applications/EigenSolversApplication/tests/test_eigen_direct_solver.py | max_issues_repo_name: HubertBalcerzak/Kratos | head_hexsha: c15689d53f06dabb36dc44c13eeac73d3e183916 | licenses: ["BSD-4-Clause"] | max_issues_count: 1 (2019-10-15T13:11:37.000Z to 2019-10-15T13:11:37.000Z)
max_forks_repo_path: applications/EigenSolversApplication/tests/test_eigen_direct_solver.py | max_forks_repo_name: Gaoliu19910601/Kratos | head_hexsha: 0bac5e132d02061680fc90f1e52d4930b5ed7fa3 | licenses: ["BSD-4-Clause"] | max_forks_count: null
content:
from __future__ import print_function, absolute_import, division
import os
import KratosMultiphysics
import KratosMultiphysics.EigenSolversApplication as EigenSolversApplication
import KratosMultiphysics.KratosUnittest as KratosUnittest
from KratosMultiphysics.python_linear_solver_factory import ConstructSolver
class TestEigenDirectSolver(KratosUnittest.TestCase):
def _execute_eigen_direct_solver_test(self, class_name, solver_type):
# check if solver is available
if (not hasattr(EigenSolversApplication, class_name)):
self.skipTest(class_name + " is not included in the compilation of the EigenSolversApplication")
space = KratosMultiphysics.UblasSparseSpace()
settings = KratosMultiphysics.Parameters('{ "solver_type" : "EigenSolversApplication.' + solver_type + '" }')
solver = ConstructSolver(settings)
a = KratosMultiphysics.CompressedMatrix()
this_file_dir = os.path.dirname(os.path.realpath(__file__))
base_dir = os.path.dirname(os.path.dirname(os.path.dirname(this_file_dir)))
matrix_file_path = os.path.join(base_dir, "kratos", "tests", "auxiliar_files_for_python_unittest", "sparse_matrix_files", "A.mm")
file_read = KratosMultiphysics.ReadMatrixMarketMatrix(matrix_file_path, a) # symmetric test matrix
self.assertTrue(file_read, msg="The MatrixFile could not be read")
dimension = a.Size1()
self.assertEqual(dimension, 900)
b_exp = KratosMultiphysics.Vector(dimension) # [1, 2, ..., dimension-1, dimension]
for i in range(dimension):
b_exp[i] = i + 1
x = KratosMultiphysics.Vector(dimension)
solver.Solve(a, x, b_exp)
b_act = KratosMultiphysics.Vector(dimension)
space.Mult(a, x, b_act)
for i in range(dimension):
self.assertAlmostEqual(b_act[i], b_exp[i], 7)
def _execute_eigen_direct_complex_solver_test(self, class_name, solver_type):
# check if solver is available
if (not hasattr(EigenSolversApplication, class_name)):
self.skipTest(class_name + " is not included in the compilation of the EigenSolversApplication")
space = KratosMultiphysics.UblasComplexSparseSpace()
settings = KratosMultiphysics.Parameters('{ "solver_type" : "EigenSolversApplication.' + solver_type + '" }')
solver = ConstructSolver(settings)
a = KratosMultiphysics.CompressedMatrix()
this_file_dir = os.path.dirname(os.path.realpath(__file__))
base_dir = os.path.dirname(os.path.dirname(os.path.dirname(this_file_dir)))
matrix_file_path = os.path.join(base_dir, "kratos", "tests", "auxiliar_files_for_python_unittest", "sparse_matrix_files", "A.mm")
file_read = KratosMultiphysics.ReadMatrixMarketMatrix(matrix_file_path, a) # symmetric test matrix
self.assertTrue(file_read, msg="The MatrixFile could not be read")
a = KratosMultiphysics.ComplexCompressedMatrix(a)
dimension = a.Size1()
self.assertEqual(dimension, 900)
b_exp = KratosMultiphysics.ComplexVector(dimension)
for i in range(dimension):
b_exp[i] = complex(i+1,i-1)
x = KratosMultiphysics.ComplexVector(dimension)
solver.Solve(a, x, b_exp)
b_act = KratosMultiphysics.ComplexVector(dimension)
space.Mult(a, x, b_act)
for i in range(dimension):
self.assertAlmostEqual(b_act[i], b_exp[i], 7)
def test_eigen_sparse_lu(self):
self._execute_eigen_direct_solver_test('SparseLUSolver', 'sparse_lu')
def test_eigen_sparse_cg(self):
self._execute_eigen_direct_solver_test('SparseCGSolver', 'sparse_cg')
def test_eigen_sparse_qr(self):
self._execute_eigen_direct_solver_test('SparseQRSolver', 'sparse_qr')
def test_eigen_pardiso_lu(self):
self._execute_eigen_direct_solver_test('PardisoLUSolver', 'pardiso_lu')
def test_eigen_pardiso_ldlt(self):
self._execute_eigen_direct_solver_test('PardisoLDLTSolver', 'pardiso_ldlt')
def test_eigen_pardiso_llt(self):
self._execute_eigen_direct_solver_test('PardisoLLTSolver', 'pardiso_llt')
def test_eigen_complex_sparse_lu(self):
self._execute_eigen_direct_complex_solver_test('ComplexSparseLUSolver', 'sparse_lu_complex')
def test_eigen_complex_pardiso_lu(self):
self._execute_eigen_direct_complex_solver_test('ComplexPardisoLUSolver', 'pardiso_lu_complex')
def test_eigen_complex_pardiso_ldlt(self):
self._execute_eigen_direct_complex_solver_test('ComplexPardisoLDLTSolver', 'pardiso_ldlt_complex')
def test_eigen_complex_pardiso_llt(self):
self._execute_eigen_direct_complex_solver_test('ComplexPardisoLLTSolver', 'pardiso_llt_complex')
if __name__ == '__main__':
KratosUnittest.main()
avg_line_length: 40.057851 | max_line_length: 137 | alphanum_fraction: 0.727254

hexsha: e2916551dfc60366cd8e1c49720d1dacfaa96b84 | size: 301 | ext: py | lang: Python
max_stars/max_issues/max_forks repo_path: setup.py | repo_name: sukovanej/mplispstd | head_hexsha: 510fba5f3c95fec370665d736e5db5fb6248b3d7 | licenses: ["MIT"] | counts and event datetimes: null
content:
#!/usr/bin/env python3
from distutils.core import setup
setup(
name='mplispstd',
version='0.1',
packages=[
'mplispstd',
'mplispstd.io',
'mplispstd.math',
'mplispstd.string',
'mplispstd.env',
],
install_requires=[
'mplisp',
]
)
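# Typical usage for a distutils-based setup script like this one (run from the
# project root; the exact invocation depends on the local Python environment):
#   python3 setup.py sdist      # build a source distribution
#   python3 setup.py install    # install the package
# Note: install_requires is a setuptools option; plain distutils will ignore it.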
avg_line_length: 15.842105 | max_line_length: 32 | alphanum_fraction: 0.534884

hexsha: b8adb9ddfde6f4b218b7019e18a354e064b74b38 | size: 14,977 | ext: py | lang: Python
max_stars/max_issues/max_forks repo_path: seqskip_train_seq2eH_in10.py | repo_name: mimbres/SeqSkip | head_hexsha: 031add5ba22cad1665d76b2cb21e8f6c1e1357e5 | licenses: ["Apache-2.0"]
max_stars_count: 7 (2019-02-15T07:46:27.000Z to 2021-09-16T06:31:43.000Z) | max_issues_count: 1 (2021-09-16T03:06:16.000Z to 2021-09-17T02:08:02.000Z) | max_forks_count: 1 (2019-07-07T22:10:30.000Z to 2019-07-07T22:10:30.000Z)
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 11 00:45:08 2018
seq2eH: sequence learning model with separate encoders for support and query, 1 stack each
- non-autoregressive (not feeding predicted labels)
- instance Norm.
- G: GLU version
- H: Highway-net version
- applied more efficient dilated conv over seq1
@author: mimbres
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
from torch.backends import cudnn
import numpy as np
import glob, os
import argparse
from tqdm import trange, tqdm
from spotify_data_loader import SpotifyDataloader
from utils.eval import evaluate
from blocks.highway_glu_dil_conv_v2 import HighwayDCBlock
cudnn.benchmark = True
parser = argparse.ArgumentParser(description="Sequence Skip Prediction")
parser.add_argument("-c","--config",type=str, default="./config_init_dataset.json")
parser.add_argument("-s","--save_path",type=str, default="./save/exp_seq2eH_in10/")
parser.add_argument("-l","--load_continue_latest",type=str, default=None)
parser.add_argument("-spl","--use_suplog_as_feat", type=bool, default=True)
parser.add_argument("-qf","--use_quelog_as_feat", type=bool, default=False)
parser.add_argument("-pl","--use_predicted_label", type=bool, default=False)
parser.add_argument("-glu","--use_glu", type=bool, default=False)
parser.add_argument("-w","--class_num",type=int, default = 2)
parser.add_argument("-e","--epochs",type=int, default= 10)
parser.add_argument("-lr","--learning_rate", type=float, default = 0.001)
parser.add_argument("-b","--train_batch_size", type=int, default = 2048)
parser.add_argument("-tsb","--test_batch_size", type=int, default = 1024)
parser.add_argument("-g","--gpu",type=int, default=0)
args = parser.parse_args()
# Hyper Parameters
USE_SUPLOG = args.use_suplog_as_feat
USE_QUELOG = args.use_quelog_as_feat
USE_PRED_LABEL = args.use_predicted_label
USE_GLU = args.use_glu
INPUT_DIM_S = 71 if USE_SUPLOG else 30 # default: 72
INPUT_DIM_Q = 71 if USE_QUELOG else 29 # default: 31
CLASS_NUM = args.class_num
EPOCHS = args.epochs
LEARNING_RATE = args.learning_rate
TR_BATCH_SZ = args.train_batch_size
TS_BATCH_SZ = args.test_batch_size
GPU = args.gpu
# Model-save directory
MODEL_SAVE_PATH = args.save_path
os.makedirs(os.path.dirname(MODEL_SAVE_PATH), exist_ok=True)
hist_trloss = list()
hist_tracc = list()
hist_vloss = list()
hist_vacc = list()
np.set_printoptions(precision=3)
class SeqEncoder(nn.Module):
def __init__(self, input_ch, e_ch,
h_k_szs=[2,2,3,1,1], #h_k_szs=[2,2,5,1,1],
h_dils=[1,2,4,1,1],
use_glu=False):
super(SeqEncoder, self).__init__()
h_io_chs = [e_ch]*len(h_k_szs)
self.front_1x1 = nn.Conv1d(input_ch, e_ch,1)
self.h_block = HighwayDCBlock(h_io_chs, h_k_szs, h_dils, causality=True, use_glu=use_glu)
self.mid_1x1 = nn.Sequential(nn.Conv1d(e_ch,e_ch,1), nn.ReLU(),
nn.Conv1d(e_ch,e_ch,1), nn.ReLU())
self.last_1x1 = nn.Sequential(nn.Conv1d(e_ch,e_ch,1))
def forward(self, x): # Input:bx(input_dim)*20
x = self.front_1x1(x) # bx128*20
x = self.h_block(x) # bx128*20
x = self.mid_1x1(x) # bx128*20
return self.last_1x1(x) # bx128*20
class SeqModel(nn.Module):
def __init__(self, input_dim_s=INPUT_DIM_S, input_dim_q=INPUT_DIM_Q, e_ch=128, d_ch=256, use_glu=USE_GLU):
super(SeqModel, self).__init__()
self.e_ch = e_ch
self.d_ch = d_ch
self.sup_enc = SeqEncoder(input_ch=input_dim_s, e_ch=d_ch, use_glu=use_glu) # bx256*10
self.que_enc = SeqEncoder(input_ch=input_dim_q, e_ch=e_ch, use_glu=use_glu) # bx128*10
self.classifier = nn.Sequential(nn.Conv1d(d_ch,d_ch,1), nn.ReLU(),
nn.Conv1d(d_ch,d_ch,1), nn.ReLU(),
nn.Conv1d(d_ch,1,1))
def forward(self, x_sup, x_que):
x_sup = self.sup_enc(x_sup) # bx256*10
x_que = self.que_enc(x_que) # bx128*10
# Attention: K,V from x_sup, Q from x_que
x_sup = torch.split(x_sup, self.e_ch, dim=1) # K: x_sup[0], V: x_sup[1]
att = F.softmax(torch.matmul(x_sup[0].transpose(1,2), x_que), dim=1) # K'*Q: bx10*10
x = torch.cat((torch.matmul(x_sup[1], att), x_que), 1) # {V*att, Q}: bx(128+128)*10
x = self.classifier(x).squeeze(1) # bx256*10 --> b*10
return x, att # bx10, bx10x10
#%%
def validate(mval_loader, SM, eval_mode, GPU):
tqdm.write("Validation...")
submit = []
gt = []
total_vloss = 0
total_vcorrects = 0
total_vquery = 0
val_sessions_iter = iter(mval_loader)
for val_session in trange(len(val_sessions_iter), desc='val-sessions', position=2, ascii=True):
SM.eval()
        x, labels, y_mask, num_items, index = val_sessions_iter.next() # FIXED 13.Dec. SEPARATE LOGS. QUERY SHOULD NOT INCLUDE LOGS
# Sample data for 'support' and 'query': ex) 15 items = 7 sup, 8 queries...
num_support = num_items[:,0].detach().numpy().flatten() # If num_items was odd number, query has one more item.
num_query = num_items[:,1].detach().numpy().flatten()
batch_sz = num_items.shape[0]
# x: the first 10 items out of 20 are support items left-padded with zeros. The last 10 are queries right-padded.
x = x.permute(0,2,1) # bx70*20
x_sup = Variable(torch.cat((x[:,:,:10], labels[:,:10].unsqueeze(1)), 1)).cuda(GPU) # bx71(41+29+1)*10
x_que = Variable(x[:,41:,10:].clone()).cuda(GPU) # bx29*10
# y
y_que = labels[:,10:].clone() # bx10
# y_mask
y_mask_que = y_mask[:,10:].clone()
# Forward & update
y_hat_que, att = SM(x_sup, x_que) # y_hat_que: b*10, att: bx10*10
# if USE_PRED_LABEL is True:
# # Predict
# li = 70 if USE_SUPLOG is True else 29 # the label's dimension indice
# _x = x[:,:,:11] # bx72*11
# for q in range(11,20):
# y_hat = SM(Variable(_x, requires_grad=False)) # will be bx11 at the first round
# # Append next features
# _x = torch.cat((_x, x[:,:,q].unsqueeze(2)), 2) # now bx72*12
# _x[:,li,q] = torch.sigmoid(y_hat[:,-1])
# y_hat = SM(Variable(_x, requires_grad=False)) # y_hat(final): bx20
# del _x
# else:
# y_hat = SM(x)
        # Calculate BCE loss
loss = F.binary_cross_entropy_with_logits(input=y_hat_que*y_mask_que.cuda(GPU), target=y_que.cuda(GPU)*y_mask_que.cuda(GPU))
total_vloss += loss.item()
# Decision
y_prob = torch.sigmoid(y_hat_que*y_mask_que.cuda(GPU)).detach().cpu().numpy() # bx20
y_pred = (y_prob>=0.5).astype(np.int) # bx10
y_numpy = labels[:,10:].numpy() # bx10
# Acc
total_vcorrects += np.sum((y_pred==y_numpy)*y_mask_que.numpy())
total_vquery += np.sum(num_query)
# Eval, Submission
        if eval_mode != 0:
for b in np.arange(batch_sz):
submit.append(y_pred[b,:num_query[b]].flatten())
gt.append(y_numpy[b,:num_query[b]].flatten())
if (val_session+1)%400 == 0:
sample_sup = labels[0,(10-num_support[0]):10].long().numpy().flatten()
sample_que = y_numpy[0,:num_query[0]].astype(int)
sample_pred = y_pred[0,:num_query[0]]
sample_prob = y_prob[0,:num_query[0]]
tqdm.write("S:" + np.array2string(sample_sup) +'\n'+
"Q:" + np.array2string(sample_que) + '\n' +
"P:" + np.array2string(sample_pred) + '\n' +
"prob:" + np.array2string(sample_prob))
tqdm.write("val_session:{0:} vloss:{1:.6f} vacc:{2:.4f}".format(val_session,loss.item(), total_vcorrects/total_vquery))
del loss, y_hat_que, x # Restore GPU memory
# Avg.Acc
if eval_mode==1:
aacc = evaluate(submit, gt)
tqdm.write("AACC={0:.6f}, FirstAcc={1:.6f}".format(aacc[0], aacc[1]))
hist_vloss.append(total_vloss/val_session)
hist_vacc.append(total_vcorrects/total_vquery)
return submit
# Main
def main():
# Trainset stats: 2072002577 items from 124950714 sessions
print('Initializing dataloader...')
mtrain_loader = SpotifyDataloader(config_fpath=args.config,
mtrain_mode=True,
                                      data_sel=(0, 99965071), # 80% of the data: training split
batch_size=TR_BATCH_SZ,
shuffle=True,
seq_mode=True) # seq_mode implemented
mval_loader = SpotifyDataloader(config_fpath=args.config,
mtrain_mode=True, # True, because we use part of trainset as testset
                                    data_sel=(99965071, 104965071),#(99965071, 124950714), # 20% of the data: test split
batch_size=TS_BATCH_SZ,
shuffle=False,
seq_mode=True)
# Init neural net
SM = SeqModel().cuda(GPU)
SM_optim = torch.optim.Adam(SM.parameters(), lr=LEARNING_RATE)
SM_scheduler = StepLR(SM_optim, step_size=1, gamma=0.8)
# Load checkpoint
if args.load_continue_latest is None:
START_EPOCH = 0
else:
latest_fpath = max(glob.iglob(MODEL_SAVE_PATH + "check*.pth"),key=os.path.getctime)
checkpoint = torch.load(latest_fpath, map_location='cuda:{}'.format(GPU))
tqdm.write("Loading saved model from '{0:}'... loss: {1:.6f}".format(latest_fpath,checkpoint['loss']))
SM.load_state_dict(checkpoint['SM_state'])
SM_optim.load_state_dict(checkpoint['SM_opt_state'])
SM_scheduler.load_state_dict(checkpoint['SM_sch_state'])
START_EPOCH = checkpoint['ep']
# Train
for epoch in trange(START_EPOCH, EPOCHS, desc='epochs', position=0, ascii=True):
tqdm.write('Train...')
tr_sessions_iter = iter(mtrain_loader)
total_corrects = 0
total_query = 0
total_trloss = 0
for session in trange(len(tr_sessions_iter), desc='sessions', position=1, ascii=True):
SM.train();
            x, labels, y_mask, num_items, index = tr_sessions_iter.next() # FIXED 13.Dec. SEPARATE LOGS. QUERY SHOULD NOT INCLUDE LOGS
# Sample data for 'support' and 'query': ex) 15 items = 7 sup, 8 queries...
num_support = num_items[:,0].detach().numpy().flatten() # If num_items was odd number, query has one more item.
num_query = num_items[:,1].detach().numpy().flatten()
#batch_sz = num_items.shape[0]
# x: the first 10 items out of 20 are support items left-padded with zeros. The last 10 are queries right-padded.
x = x.permute(0,2,1) # bx70*20
x_sup = Variable(torch.cat((x[:,:,:10], labels[:,:10].unsqueeze(1)), 1)).cuda(GPU) # bx71(41+29+1)*10
x_que = Variable(x[:,41:,10:].clone()).cuda(GPU) # bx29*10
# y
y_que = labels[:,10:].clone() # bx10
# y_mask
y_mask_que = y_mask[:,10:].clone()
# Forward & update
y_hat_que, att = SM(x_sup, x_que) # y_hat_que: b*10, att: bx10*10
            # Calculate BCE loss
loss = F.binary_cross_entropy_with_logits(input=y_hat_que*y_mask_que.cuda(GPU), target=y_que.cuda(GPU)*y_mask_que.cuda(GPU))
total_trloss += loss.item()
SM.zero_grad()
loss.backward()
# Gradient Clipping
#torch.nn.utils.clip_grad_norm_(SM.parameters(), 0.5)
SM_optim.step()
# Decision
y_prob = torch.sigmoid(y_hat_que*y_mask_que.cuda(GPU)).detach().cpu().numpy() # bx20
y_pred = (y_prob>=0.5).astype(np.int) # bx10
y_numpy = labels[:,10:].numpy() # bx10
# Acc
total_corrects += np.sum((y_pred==y_numpy)*y_mask_que.numpy())
total_query += np.sum(num_query)
# Restore GPU memory
del loss, y_hat_que
if (session+1)%500 == 0:
hist_trloss.append(total_trloss/900)
hist_tracc.append(total_corrects/total_query)
# Prepare display
sample_att = att[0,(10-num_support[0]):10,:num_query[0]].detach().cpu().numpy()
sample_sup = labels[0,(10-num_support[0]):10].long().numpy().flatten()
sample_que = y_numpy[0,:num_query[0]].astype(int)
sample_pred = y_pred[0,:num_query[0]]
sample_prob = y_prob[0,:num_query[0]]
tqdm.write(np.array2string(sample_att,
formatter={'float_kind':lambda sample_att: "%.2f" % sample_att}).replace('\n ','').replace('][',']\n[').replace('[[','['))
tqdm.write("S:" + np.array2string(sample_sup) +'\n'+
"Q:" + np.array2string(sample_que) + '\n' +
"P:" + np.array2string(sample_pred) + '\n' +
"prob:" + np.array2string(sample_prob))
tqdm.write("tr_session:{0:} tr_loss:{1:.6f} tr_acc:{2:.4f}".format(session, hist_trloss[-1], hist_tracc[-1]))
total_corrects = 0
total_query = 0
total_trloss = 0
if (session+1)%25000 == 0:
# Validation
validate(mval_loader, SM, eval_mode=True, GPU=GPU)
# Save
torch.save({'ep': epoch, 'sess':session, 'SM_state': SM.state_dict(),'loss': hist_trloss[-1], 'hist_vacc': hist_vacc,
'hist_vloss': hist_vloss, 'hist_trloss': hist_trloss, 'SM_opt_state': SM_optim.state_dict(),
'SM_sch_state': SM_scheduler.state_dict()}, MODEL_SAVE_PATH + "check_{0:}_{1:}.pth".format(epoch, session))
# Validation
validate(mval_loader, SM, eval_mode=True, GPU=GPU)
# Save
torch.save({'ep': epoch, 'sess':session, 'SM_state': SM.state_dict(),'loss': hist_trloss[-1], 'hist_vacc': hist_vacc,
'hist_vloss': hist_vloss, 'hist_trloss': hist_trloss, 'SM_opt_state': SM_optim.state_dict(),
'SM_sch_state': SM_scheduler.state_dict()}, MODEL_SAVE_PATH + "check_{0:}_{1:}.pth".format(epoch, session))
SM_scheduler.step()
if __name__ == '__main__':
main()
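# A minimal shape sanity check for SeqModel (not executed automatically; it
# assumes the repo's `blocks` package imports cleanly and runs on CPU):
def _seqmodel_shape_check(batch_size=4):
    sm = SeqModel()                                    # main() above moves the model to GPU; this stays on CPU
    x_sup = torch.randn(batch_size, INPUT_DIM_S, 10)   # support block: b x 71 x 10 with the default flags
    x_que = torch.randn(batch_size, INPUT_DIM_Q, 10)   # query block:   b x 29 x 10 with the default flags
    y_hat, att = sm(x_sup, x_que)
    assert y_hat.shape == (batch_size, 10)
    assert att.shape == (batch_size, 10, 10)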
avg_line_length: 45.522796 | max_line_length: 165 | alphanum_fraction: 0.584964

hexsha: 537f51557f500c63adfc167d393678bf972d3b56 | size: 9,326 | ext: py | lang: Python
max_stars_repo_path: tick/hawkes/inference/hawkes_sumexpkern_fixeddecay.py | max_stars_repo_name: sumau/tick | head_hexsha: 1b56924a35463e12f7775bc0aec182364f26f2c6 | licenses: ["BSD-3-Clause"] | max_stars_count: 411 (2017-03-30T15:22:05.000Z to 2022-03-27T01:58:34.000Z)
max_issues_repo_path: tick/hawkes/inference/hawkes_sumexpkern_fixeddecay.py | max_issues_repo_name: saurabhdash/tick | head_hexsha: bbc561804eb1fdcb4c71b9e3e2d83a66e7b13a48 | licenses: ["BSD-3-Clause"] | max_issues_count: 345 (2017-04-13T14:53:20.000Z to 2022-03-26T00:46:22.000Z)
max_forks_repo_path: tick/hawkes/inference/hawkes_sumexpkern_fixeddecay.py | max_forks_repo_name: saurabhdash/tick | head_hexsha: bbc561804eb1fdcb4c71b9e3e2d83a66e7b13a48 | licenses: ["BSD-3-Clause"] | max_forks_count: 102 (2017-04-25T11:47:53.000Z to 2022-02-15T11:45:49.000Z)
content:
# License: BSD 3 clause
import numpy as np
from tick.base import actual_kwargs
from tick.hawkes import (ModelHawkesSumExpKernLeastSq, SimuHawkesSumExpKernels)
from tick.hawkes.inference.base import LearnerHawkesParametric
from tick.prox import ProxElasticNet, ProxL1, ProxL2Sq, ProxPositive
class HawkesSumExpKern(LearnerHawkesParametric):
"""Hawkes process learner for sum-exponential kernels with fixed and given
decays, with many choices of penalization and solvers.
Hawkes processes are point processes defined by the intensity:
.. math::
\\forall i \\in [1 \\dots D], \\quad
\\lambda_i(t) = \\mu_i(t) + \\sum_{j=1}^D
\\sum_{t_k^j < t} \\phi_{ij}(t - t_k^j)
where
* :math:`D` is the number of nodes
* :math:`\mu_i(t)` are the baseline intensities
* :math:`\phi_{ij}` are the kernels
* :math:`t_k^j` are the timestamps of all events of node :math:`j`
and with an sum-exponential parametrisation of the kernels
.. math::
\phi_{ij}(t) = \sum_{u=1}^{U} \\alpha^u_{ij} \\beta^u
\exp (- \\beta^u t) 1_{t > 0}
In our implementation we denote:
* Integer :math:`D` by the attribute `n_nodes`
* Integer :math:`U` by the attribute `n_decays`
* Vector :math:`\mu \in \mathbb{R}^{D}` by the attribute
`baseline`
* Matrix :math:`A = (\\alpha^u_{ij})_{ij} \in \mathbb{R}^{D \\times D
\\times U}` by the attribute `adjacency`
* Vector :math:`\\beta \in \mathbb{R}^{U}` by the
parameter `decays`. This parameter is given to the model
Parameters
----------
decays : `np.ndarray`, shape=(n_decays, )
The decays used in the exponential kernels.
n_baselines : `int`, default=1
In this hawkes learner baseline is supposed to be either constant or
piecewise constant. If `n_baseline > 1` then piecewise constant
setting is enabled. In this case :math:`\\mu_i(t)` is piecewise
constant on intervals of size `period_length / n_baselines` and
periodic.
period_length : `float`, default=None
In piecewise constant setting this denotes the period of the
piecewise constant baseline function.
C : `float`, default=1e3
Level of penalization
penalty : {'l1', 'l2', 'elasticnet', 'none'} default='l2'
The penalization to use. Default is ridge penalization.
solver : {'gd', 'agd', 'bfgs', 'svrg'}, default='agd'
The name of the solver to use
step : `float`, default=None
Initial step size used for learning. Used in 'gd', 'agd', 'sgd'
and 'svrg' solvers
tol : `float`, default=1e-5
The tolerance of the solver (iterations stop when the stopping
criterion is below it). If not reached the solver does ``max_iter``
iterations
max_iter : `int`, default=100
Maximum number of iterations of the solver
verbose : `bool`, default=False
If `True`, we verbose things, otherwise the solver does not
print anything (but records information in history anyway)
print_every : `int`, default=10
Print history information when ``n_iter`` (iteration number) is
a multiple of ``print_every``
record_every : `int`, default=10
Record history information when ``n_iter`` (iteration number) is
a multiple of ``record_every``
elastic_net_ratio : `float`, default=0.95
Ratio of elastic net mixing parameter with 0 <= ratio <= 1.
* For ratio = 0 this is ridge (L2 squared) regularization.
* For ratio = 1 this is lasso (L1) regularization.
* For 0 < ratio < 1, the regularization is a linear combination
of L1 and L2.
Used in 'elasticnet' penalty
random_state : int seed, or None (default)
The seed that will be used by stochastic solvers. If `None`, a random
seed will be used (based on timestamp and other physical metrics).
Used in 'sgd', and 'svrg' solvers
Attributes
----------
n_nodes : `int`
Number of nodes / components in the Hawkes model
baseline : `np.array`, shape=(n_nodes,)
Inferred baseline of each component's intensity
adjacency : `np.ndarray`, shape=(n_nodes, n_nodes, n_decays)
Inferred adjacency matrix
coeffs : `np.array`, shape=(n_nodes + n_nodes * n_nodes * n_decays, )
Raw coefficients of the model. Row stack of `self.baseline` and
`self.adjacency`
"""
_attrinfos = {
"n_decays": {
"writable": False
},
"decays": {
"writable": False
},
"n_baselines": {
"writable": False
},
"period_length": {
"writable": False
},
}
_penalties = {
"none": ProxPositive,
"l1": ProxL1,
"l2": ProxL2Sq,
"elasticnet": ProxElasticNet,
}
@actual_kwargs
def __init__(self, decays, penalty="l2", C=1e3, n_baselines=1,
period_length=None, solver="agd", step=None, tol=1e-5,
max_iter=100, verbose=False, print_every=10, record_every=10,
elastic_net_ratio=0.95, random_state=None):
self._actual_kwargs = \
HawkesSumExpKern.__init__.actual_kwargs
self.decays = decays
self.n_baselines = n_baselines
self.period_length = period_length
LearnerHawkesParametric.__init__(
self, penalty=penalty, C=C, solver=solver, step=step, tol=tol,
max_iter=max_iter, verbose=verbose, print_every=print_every,
record_every=record_every, elastic_net_ratio=elastic_net_ratio,
random_state=random_state)
def _construct_model_obj(self):
model = ModelHawkesSumExpKernLeastSq(self.decays,
n_baselines=self.n_baselines,
period_length=self.period_length)
return model
@property
def n_decays(self):
return self._model_obj.n_decays
@property
def baseline(self):
if not self._fitted:
raise ValueError('You must fit data before getting estimated '
'baseline')
else:
baseline = self.coeffs[:self.n_nodes * self._model_obj.n_baselines]
if self._model_obj.n_baselines == 1:
return baseline
else:
return baseline.reshape((self.n_nodes,
self._model_obj.n_baselines))
@property
def adjacency(self):
if not self._fitted:
raise ValueError('You must fit data before getting estimated '
'adjacency')
else:
return self.coeffs[self.n_nodes * self._model_obj.n_baselines:] \
.reshape((self.n_nodes, self.n_nodes, self.n_decays))
def _corresponding_simu(self):
return SimuHawkesSumExpKernels(
adjacency=self.adjacency, decays=self.decays,
baseline=self.baseline,
period_length=self._model_obj.period_length)
def get_baseline_values(self, i, abscissa_array):
return self._corresponding_simu().get_baseline_values(
i, abscissa_array)
def score(self, events=None, end_times=None, baseline=None,
adjacency=None):
"""Compute score metric
Score metric is log likelihood (the higher the better)
Parameters
----------
events : `list` of `list` of `np.ndarray`, default = None
List of Hawkes processes realizations used to measure score.
Each realization of the Hawkes process is a list of n_node for
each component of the Hawkes. Namely `events[i][j]` contains a
one-dimensional `numpy.array` of the events' timestamps of
component j of realization i.
If only one realization is given, it will be wrapped into a list
If None, events given while fitting model will be used
end_times : `np.ndarray` or `float`, default = None
List of end time of all hawkes processes used to measure score.
If None, it will be set to each realization's latest time.
If only one realization is provided, then a float can be given.
baseline : `np.ndarray`, shape=(n_nodes, ), default = None
Baseline vector for which the score is measured
If `None` baseline obtained during fitting is used
adjacency : `np.ndarray`, shape=(n_nodes, n_nodes, n_decays), default = None
Adjacency matrix for which the score is measured
If `None` adjacency obtained during fitting is used
Returns
-------
likelihood : `double`
Computed log likelihood value
"""
if baseline is not None or adjacency is not None:
if baseline is None:
baseline = self.baseline
if adjacency is None:
adjacency = self.adjacency
coeffs = np.hstack((baseline, adjacency.ravel()))
else:
coeffs = None
return LearnerHawkesParametric.score(
self, events=events, end_times=end_times, coeffs=coeffs)
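# --- Editor's illustrative sketch (not part of the original module). A minimal
# end-to-end use of the learner above, assuming the `tick` library is installed;
# the decay rates and event timestamps below are made-up values.
if __name__ == "__main__":
    import numpy as np

    # Two exponential decay rates shared by every kernel (hypothetical choice).
    decays = np.array([0.5, 3.0])
    learner = HawkesSumExpKern(decays=decays, penalty="l2", C=1e3)

    # One realization: a list with one 1-d timestamp array per node.
    events = [np.array([1.0, 2.5, 4.2, 5.9]), np.array([0.7, 3.1, 5.0])]
    learner.fit(events)

    print(learner.baseline)    # shape (n_nodes,) since n_baselines == 1
    print(learner.adjacency)   # shape (n_nodes, n_nodes, n_decays)
    print(learner.score())     # log-likelihood on the training events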
| 36.716535
| 84
| 0.611087
|
57dea3b6a9fcbefa44593b129ed848e6bbe893f3
| 406
|
py
|
Python
|
CTAtool/cache.py
|
guan27/NTU_NLP_2021Spring
|
a79451bf1d107a5d75dfa959ecc008b3829becc8
|
[
"MIT"
] | null | null | null |
CTAtool/cache.py
|
guan27/NTU_NLP_2021Spring
|
a79451bf1d107a5d75dfa959ecc008b3829becc8
|
[
"MIT"
] | null | null | null |
CTAtool/cache.py
|
guan27/NTU_NLP_2021Spring
|
a79451bf1d107a5d75dfa959ecc008b3829becc8
|
[
"MIT"
] | 1
|
2021-09-29T10:47:14.000Z
|
2021-09-29T10:47:14.000Z
|
'''
Cache of words (with their lemmas) that have already been looked up.
If a word is in cache_dict, return it with a dictionary lookup;
otherwise look the word up and add the result to cache_dict.
'''
import json
def init():
global cache_dict
cache_dict = {}
def save_synonym_dict():
    with open('./synonym_dict', 'w', encoding='utf-8') as f:
        f.write(json.dumps(cache_dict, ensure_ascii=False, indent=2))
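# --- Editor's sketch (not part of the original module): one way to implement the
# cached lookup described in the module docstring. `find_fn` is a hypothetical
# callable standing in for whatever lemma/synonym lookup the project uses, and
# init() is assumed to have been called first.
def cached_lookup(word, find_fn):
    """Return the cached result for `word`, computing and caching it if absent."""
    if word in cache_dict:
        return cache_dict[word]
    result = find_fn(word)
    cache_dict[word] = result
    return result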
| 29
| 66
| 0.684729
|
a4be4402db5e8948e7572b20983b2d63d21cfd9c
| 11,313
|
py
|
Python
|
update.py
|
Hmaksu/Zoom-Duration-Calculator
|
118dbc17997b54f398914fb399ca2c882b0d0969
|
[
"MIT"
] | null | null | null |
update.py
|
Hmaksu/Zoom-Duration-Calculator
|
118dbc17997b54f398914fb399ca2c882b0d0969
|
[
"MIT"
] | null | null | null |
update.py
|
Hmaksu/Zoom-Duration-Calculator
|
118dbc17997b54f398914fb399ca2c882b0d0969
|
[
"MIT"
] | null | null | null |
from tkinter import *
from tkinter import filedialog
from tkinter.ttk import *
import os
import xlrd
import xlsxwriter
root = Tk()
root.title("CivilCon")
root.iconbitmap("CC.ico")
root.geometry("500x500")
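# Editor's note: the UI strings below are Turkish. Rough translations:
#   "Kaç oturum var?"        -> "How many sessions are there?"
#   "Seç"                    -> "Select"
#   "Excel Dosyası"          -> "Excel file"
#   "Başlangıç" / "Bitiş"    -> "Start" / "End"
#   "Saat" / "Dakika"        -> "Hour" / "Minute"
#   "Oturum"                 -> "Session"
#   "Başlat"                 -> "Start (run)"
#   "Katıldı" / "Katılmadı"  -> "Attended" / "Did not attend"
#   "Toplam Süre"            -> "Total duration"
#   "İsim-Soyisim" / "E-Posta Adresi" -> "Name-Surname" / "E-mail address"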
class CivilCon:
def __init__(self, master): #First Page
self.master = master
Label(self.master, text = "Kaç oturum var?").grid(row = 0, column = 0)
self.clicked = StringVar()
OptionMenu(self.master, self.clicked, "1", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10").grid( row = 0, column = 1)
Button(self.master, text = "Seç", command = self.session).grid(row = 0, column = 4)
Button(self.master, text = "Excel Dosyası", command = self.Excel).grid(row = 0, column = 3)
def Excel(self):
self.attachment_file_directory = filedialog.askopenfilename(initialdir = os.path, title = "Excel")
def session(self):
try:
if self.attachment_file_directory[-3:] == "xls":
for widget in self.master.winfo_children():
widget.destroy()
variables_for_dict = []
key_of_dict = []
for x in range(int(self.clicked.get())):
variables_for_dict.append("self.clicked_version1"+str(x))
variables_for_dict.append("self.clicked_version2"+str(x))
variables_for_dict.append("self.clicked_version3"+str(x))
variables_for_dict.append("self.clicked_version4"+str(x))
key_of_dict.append(StringVar())
key_of_dict.append(StringVar())
key_of_dict.append(StringVar())
key_of_dict.append(StringVar())
self.variable_dictionary = dict(zip(variables_for_dict, key_of_dict))
Label(self.master, text = "Başlangıç").grid(row = 0, column = 1)
Label(self.master, text = "|").grid(row = 0, column = 3)
Label(self.master, text = "Bitiş").grid(row = 0, column = 4)
Label(self.master, text = "Saat").grid(row = 1, column = 1)
Label(self.master, text = "Dakika").grid(row = 1, column = 2)
Label(self.master, text = "|").grid(row = 1, column = 3)
Label(self.master, text = "Saat").grid(row = 1, column = 4)
Label(self.master, text = "Dakika").grid(row = 1, column = 5)
                hours = ["%02d" % h for h in range(1, 25)]
                minutes = ["%02d" % m for m in range(60)]
                for x in range(int(self.clicked.get())):
                    Label(self.master, text = str(x+1) + ". Oturum").grid(row = x+2, column = 0)
                    OptionMenu(self.master, self.variable_dictionary["self.clicked_version1"+str(x)], hours[0], *hours).grid(row = x+2, column = 1)
                    OptionMenu(self.master, self.variable_dictionary["self.clicked_version2"+str(x)], minutes[0], *minutes).grid(row = x+2, column = 2)
                    Label(self.master, text = "|").grid(row = x+2, column = 3)
                    OptionMenu(self.master, self.variable_dictionary["self.clicked_version3"+str(x)], hours[0], *hours).grid(row = x+2, column = 4)
                    OptionMenu(self.master, self.variable_dictionary["self.clicked_version4"+str(x)], minutes[0], *minutes).grid(row = x+2, column = 5)
Button(self.master, text = "Başlat", command = self.start).grid(row = int(self.clicked.get())+10, column = 5)
else:
self.Excel()
except:
self.Excel()
def start(self):
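        # Editor's note: this method reads the Zoom attendance export selected in
        # Excel(), merges consecutive rows belonging to the same attendee (same
        # name or same e-mail), marks each attendee as "Katıldı"/"Katılmadı"
        # (attended / did not attend) for every session interval chosen above,
        # and writes the result plus a total-duration column to Sheet.xlsx.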
sessions = []
for k, v in self.variable_dictionary.items():
sessions.append(v.get())
sessions_vol2 = []
for x in range(len(sessions)):
if x%2 == 0:
try:
sessions_vol2.append(sessions[x]+":"+sessions[x+1])
except:
sessions_vol2.append(sessions[-2]+":"+sessions[-1])
sessions = sessions_vol2
try:
path = self.attachment_file_directory
except:
self.Excel()
for widget in self.master.winfo_children():
widget.destroy()
Label(self.master, text = "Kaç oturum var?").grid(row = 0, column = 0)
self.clicked = StringVar()
OptionMenu(self.master, self.clicked, "1", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10").grid( row = 0, column = 1)
Button(self.master, text = "Seç", command = self.session).grid(row = 0, column = 4)
Button(self.master, text = "Excel Dosyası", command = self.Excel).grid(row = 0, column = 3)
attendees = []
inputWorkbook = xlrd.open_workbook(path)
inputWorksheet = inputWorkbook.sheet_by_index(0)
for x in range(inputWorksheet.nrows-4):
x += 4
attendees.append(inputWorksheet.cell_value(x,0))
attendees.sort()
attendees_list_form = []
for x in attendees:
x = "CC | "+x
attendees_list_form.append(x.split(","))
for x in attendees_list_form:
for k in range(len(x)):
if x[k] == "":
x[k] = "info@hmaksu.com"
attendees_vol2 = []
k = 0
for x in range(len(attendees_list_form)):
attendees_list_form[x].pop()
attendees_list_form[x].pop()
try:
if attendees_list_form[x][0] == attendees_list_form[x+1][0] or attendees_list_form[x][1] == attendees_list_form[x+1][1]:
k += 1
continue
else:
if k == 0:
attendee = attendees_list_form[x]
attendee.sort()
attendees_vol2.append(attendees_list_form[x])
else:
attendee = attendees_list_form[x]
for t in range(k):
if k == t:
continue
else:
t += 1
attendee.append(attendees_list_form[x-t][-1])
attendee.append(attendees_list_form[x-t][-2])
attendee.sort()
attendees_vol2.append(attendee)
k = 0
except:
if k == 0:
attendee = attendees_list_form[x]
attendee.sort()
attendees_vol2.append(attendees_list_form[x])
else:
attendee = attendees_list_form[x]
for t in range(k):
if k == t:
continue
else:
t += 1
attendee.append(attendees_list_form[x-t][-1])
attendee.append(attendees_list_form[x-t][-2])
attendee.sort()
attendees_vol2.append(attendee)
attendee = []
attendees = []
attendee_vol3 = []
attendees_vol3 = []
for x in attendees_vol2:
attendee.append(x[-2])
attendee.append(x[-1])
attendee.append(x[0].split()[1])
attendee.append(x[-3].split()[1])
attendees.append(attendee)
attendee = []
attendee_vol3.append(x[-2])
attendee_vol3.append(x[-1])
for t in x:
if x[-2] == t or x[-1] == t:
continue
else:
attendee_vol3.append(t.split()[1])
attendees_vol3.append(attendee_vol3)
attendee_vol3 = []
outworkbook = xlsxwriter.Workbook("Sheet.xlsx")
outworksheet = outworkbook.add_worksheet()
outworksheet.write(0, 0, "İsim-Soyisim")
outworksheet.write(0, 1, "E-Posta Adresi")
sessions_vol2 = []
for x in range(len(sessions)):
try:
if x%2 == 0:
sessions_vol2.append(sessions[x]+" - "+sessions[x+1])
except:
sessions_vol2.append(sessions[-2]+" - "+sessions[-1])
sessions = sessions_vol2
for x in range(len(sessions)):
outworksheet.write(0, x+2, str(x+1)+". Oturum "+sessions[x])
for x in range(len(attendees)):
for k in range(len(attendees[x])):
if k < 2:
outworksheet.write(x+1, k, attendees[x][k])
for t in range(len(sessions)):
#print("="*30)
#print(attendees[x][3])
#print(attendees[x][2])
#print(sessions[t])
#print("="*30)
if int(attendees[x][3].replace(":","")[:-2]) < int(sessions[t].replace(":","")[:-7]) or int(attendees[x][2].replace(":","")[:-2]) > int(sessions[t].replace(":","")[7:]):
outworksheet.write(x+1, t+2, "Katılmadı")
else:
outworksheet.write(x+1, t+2, "Katıldı")
outworksheet.write(0, len(sessions)+2, "Toplam Süre")
for x in range(len(attendees_vol3)):
total_time = 0
for t in range(len(attendees_vol3[x])):
if t == 0 or t == 1:
continue
elif t%2 != 0:
total_time += int(attendees_vol3[x][t].replace(":","")[:2])*60+int(attendees_vol3[x][t].replace(":","")[2:4])-int(attendees_vol3[x][t-1].replace(":","")[:2])*60-int(attendees_vol3[x][t-1].replace(":","")[2:4])
outworksheet.write(x+1, len(sessions)+2, str(total_time))
outworkbook.close()
e = CivilCon(root)
root.mainloop()
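# --- Editor's sketch (not part of the original script). The attendance check in
# start() compares clock times by slicing digit strings; an equivalent, more
# explicit formulation for "HH:MM" strings would be (names are hypothetical):
def _to_minutes(hhmm):
    hours, minutes = hhmm.split(":")
    return int(hours) * 60 + int(minutes)

def _overlaps(join_time, leave_time, session_start, session_end):
    """True if the [join, leave] interval overlaps the [start, end] interval."""
    return not (_to_minutes(leave_time) < _to_minutes(session_start)
                or _to_minutes(join_time) > _to_minutes(session_end))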
| 45.616935
| 497
| 0.455847
|
1cce0638b2241e788a43951c2ed4fa252db27334
| 3,554
|
py
|
Python
|
wagtail/admin/widgets/datetime.py
|
wgarlock/wagtail
|
1bfc13952f5ffc0e40a4435d15a5aefd70984430
|
[
"BSD-3-Clause"
] | 2
|
2021-03-18T21:41:05.000Z
|
2021-03-18T21:41:08.000Z
|
wagtail/admin/widgets/datetime.py
|
wgarlock/wagtail
|
1bfc13952f5ffc0e40a4435d15a5aefd70984430
|
[
"BSD-3-Clause"
] | 13
|
2015-05-08T12:27:10.000Z
|
2020-01-23T14:45:57.000Z
|
wagtail/admin/widgets/datetime.py
|
wgarlock/wagtail
|
1bfc13952f5ffc0e40a4435d15a5aefd70984430
|
[
"BSD-3-Clause"
] | 1
|
2021-02-15T18:59:53.000Z
|
2021-02-15T18:59:53.000Z
|
import json
from django import forms
from django.conf import settings
from django.forms import widgets
from django.utils.formats import get_format
from wagtail.admin.datetimepicker import to_datetimepicker_format
from wagtail.admin.staticfiles import versioned_static
DEFAULT_DATE_FORMAT = '%Y-%m-%d'
DEFAULT_DATETIME_FORMAT = '%Y-%m-%d %H:%M'
DEFAULT_TIME_FORMAT = '%H:%M'
class AdminDateInput(widgets.DateInput):
template_name = 'wagtailadmin/widgets/date_input.html'
def __init__(self, attrs=None, format=None):
default_attrs = {'autocomplete': 'off'}
fmt = format
if attrs:
default_attrs.update(attrs)
if fmt is None:
fmt = getattr(settings, 'WAGTAIL_DATE_FORMAT', DEFAULT_DATE_FORMAT)
self.js_format = to_datetimepicker_format(fmt)
super().__init__(attrs=default_attrs, format=fmt)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
config = {
'dayOfWeekStart': get_format('FIRST_DAY_OF_WEEK'),
'format': self.js_format,
}
context['widget']['config_json'] = json.dumps(config)
return context
@property
def media(self):
return forms.Media(js=[
versioned_static('wagtailadmin/js/date-time-chooser.js'),
])
class AdminTimeInput(widgets.TimeInput):
template_name = 'wagtailadmin/widgets/time_input.html'
def __init__(self, attrs=None, format=None):
default_attrs = {'autocomplete': 'off'}
if attrs:
default_attrs.update(attrs)
fmt = format
if fmt is None:
fmt = getattr(settings, 'WAGTAIL_TIME_FORMAT', DEFAULT_TIME_FORMAT)
self.js_format = to_datetimepicker_format(fmt)
super().__init__(attrs=default_attrs, format=fmt)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
config = {
'format': self.js_format,
'formatTime': self.js_format
}
context['widget']['config_json'] = json.dumps(config)
return context
@property
def media(self):
return forms.Media(js=[
versioned_static('wagtailadmin/js/date-time-chooser.js'),
])
class AdminDateTimeInput(widgets.DateTimeInput):
template_name = 'wagtailadmin/widgets/datetime_input.html'
def __init__(self, attrs=None, format=None, time_format=None):
default_attrs = {'autocomplete': 'off'}
fmt = format
if attrs:
default_attrs.update(attrs)
if fmt is None:
fmt = getattr(settings, 'WAGTAIL_DATETIME_FORMAT', DEFAULT_DATETIME_FORMAT)
time_fmt = time_format
if time_fmt is None:
time_fmt = getattr(settings, 'WAGTAIL_TIME_FORMAT', DEFAULT_TIME_FORMAT)
self.js_format = to_datetimepicker_format(fmt)
self.js_time_format = to_datetimepicker_format(time_fmt)
super().__init__(attrs=default_attrs, format=fmt)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
config = {
'dayOfWeekStart': get_format('FIRST_DAY_OF_WEEK'),
'format': self.js_format,
'formatTime': self.js_time_format
}
context['widget']['config_json'] = json.dumps(config)
return context
@property
def media(self):
return forms.Media(js=[
versioned_static('wagtailadmin/js/date-time-chooser.js'),
])
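# --- Editor's illustrative sketch (not part of the original module). How one of
# these widgets would typically be wired into a Django form; `EventForm` is a
# hypothetical name and assumes Django settings are configured.
def _example_event_form():
    class EventForm(forms.Form):
        starts_on = forms.DateField(widget=AdminDateInput())
        starts_at = forms.TimeField(widget=AdminTimeInput())
    return EventForm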
| 31.451327
| 87
| 0.647721
|
844754bd1e512eea8ba72baf256447ea1ef68a5f
| 7,863
|
py
|
Python
|
kubernetes/client/models/v1_config_map_node_config_source.py
|
L3T/python
|
b6e4ae81a2afb49f668a142eb7d1c6e2571ef478
|
[
"Apache-2.0"
] | 2
|
2020-06-21T08:03:18.000Z
|
2020-06-21T09:53:29.000Z
|
kubernetes/client/models/v1_config_map_node_config_source.py
|
L3T/python
|
b6e4ae81a2afb49f668a142eb7d1c6e2571ef478
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_config_map_node_config_source.py
|
L3T/python
|
b6e4ae81a2afb49f668a142eb7d1c6e2571ef478
|
[
"Apache-2.0"
] | 1
|
2020-06-21T08:03:17.000Z
|
2020-06-21T08:03:17.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1ConfigMapNodeConfigSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'kubelet_config_key': 'str',
'name': 'str',
'namespace': 'str',
'resource_version': 'str',
'uid': 'str'
}
attribute_map = {
'kubelet_config_key': 'kubeletConfigKey',
'name': 'name',
'namespace': 'namespace',
'resource_version': 'resourceVersion',
'uid': 'uid'
}
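    # openapi_types maps each Python attribute to its type, while attribute_map
    # maps it to the JSON key used in the Kubernetes API definition.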
def __init__(self, kubelet_config_key=None, name=None, namespace=None, resource_version=None, uid=None): # noqa: E501
"""V1ConfigMapNodeConfigSource - a model defined in OpenAPI""" # noqa: E501
self._kubelet_config_key = None
self._name = None
self._namespace = None
self._resource_version = None
self._uid = None
self.discriminator = None
self.kubelet_config_key = kubelet_config_key
self.name = name
self.namespace = namespace
if resource_version is not None:
self.resource_version = resource_version
if uid is not None:
self.uid = uid
@property
def kubelet_config_key(self):
"""Gets the kubelet_config_key of this V1ConfigMapNodeConfigSource. # noqa: E501
KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases. # noqa: E501
:return: The kubelet_config_key of this V1ConfigMapNodeConfigSource. # noqa: E501
:rtype: str
"""
return self._kubelet_config_key
@kubelet_config_key.setter
def kubelet_config_key(self, kubelet_config_key):
"""Sets the kubelet_config_key of this V1ConfigMapNodeConfigSource.
KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases. # noqa: E501
:param kubelet_config_key: The kubelet_config_key of this V1ConfigMapNodeConfigSource. # noqa: E501
:type: str
"""
if kubelet_config_key is None:
raise ValueError("Invalid value for `kubelet_config_key`, must not be `None`") # noqa: E501
self._kubelet_config_key = kubelet_config_key
@property
def name(self):
"""Gets the name of this V1ConfigMapNodeConfigSource. # noqa: E501
Name is the metadata.name of the referenced ConfigMap. This field is required in all cases. # noqa: E501
:return: The name of this V1ConfigMapNodeConfigSource. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1ConfigMapNodeConfigSource.
Name is the metadata.name of the referenced ConfigMap. This field is required in all cases. # noqa: E501
:param name: The name of this V1ConfigMapNodeConfigSource. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def namespace(self):
"""Gets the namespace of this V1ConfigMapNodeConfigSource. # noqa: E501
Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases. # noqa: E501
:return: The namespace of this V1ConfigMapNodeConfigSource. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1ConfigMapNodeConfigSource.
Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases. # noqa: E501
:param namespace: The namespace of this V1ConfigMapNodeConfigSource. # noqa: E501
:type: str
"""
if namespace is None:
raise ValueError("Invalid value for `namespace`, must not be `None`") # noqa: E501
self._namespace = namespace
@property
def resource_version(self):
"""Gets the resource_version of this V1ConfigMapNodeConfigSource. # noqa: E501
ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. # noqa: E501
:return: The resource_version of this V1ConfigMapNodeConfigSource. # noqa: E501
:rtype: str
"""
return self._resource_version
@resource_version.setter
def resource_version(self, resource_version):
"""Sets the resource_version of this V1ConfigMapNodeConfigSource.
ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. # noqa: E501
:param resource_version: The resource_version of this V1ConfigMapNodeConfigSource. # noqa: E501
:type: str
"""
self._resource_version = resource_version
@property
def uid(self):
"""Gets the uid of this V1ConfigMapNodeConfigSource. # noqa: E501
UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. # noqa: E501
:return: The uid of this V1ConfigMapNodeConfigSource. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this V1ConfigMapNodeConfigSource.
UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. # noqa: E501
:param uid: The uid of this V1ConfigMapNodeConfigSource. # noqa: E501
:type: str
"""
self._uid = uid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ConfigMapNodeConfigSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
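# --- Editor's illustrative sketch (not part of the original generated module).
# The field values below are hypothetical.
if __name__ == "__main__":
    source = V1ConfigMapNodeConfigSource(
        kubelet_config_key="kubelet",   # key inside the referenced ConfigMap
        name="my-node-config",          # metadata.name of the ConfigMap
        namespace="kube-system",        # metadata.namespace of the ConfigMap
    )
    print(source.to_dict())  # plain-dict form via the generated helper
    print(source)            # pretty-printed via to_str()/__repr__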
| 34.186957
| 172
| 0.634109
|
a78e736afad421de8fdbbede8643e00ba7727de2
| 36,123
|
py
|
Python
|
saleor/graphql/product/tests/test_category.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 15,337
|
2015-01-12T02:11:52.000Z
|
2021-10-05T19:19:29.000Z
|
saleor/graphql/product/tests/test_category.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 7,486
|
2015-02-11T10:52:13.000Z
|
2021-10-06T09:37:15.000Z
|
saleor/graphql/product/tests/test_category.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 5,864
|
2015-01-16T14:52:54.000Z
|
2021-10-05T23:01:15.000Z
|
import os
from unittest.mock import Mock, patch
import graphene
import pytest
from django.utils.text import slugify
from graphql_relay import to_global_id
from ....product.error_codes import ProductErrorCode
from ....product.models import Category, Product, ProductChannelListing
from ....product.tests.utils import create_image, create_pdf_file_with_image_ext
from ....tests.utils import dummy_editorjs
from ...tests.utils import (
get_graphql_content,
get_graphql_content_from_response,
get_multipart_request_body,
)
QUERY_CATEGORY = """
query ($id: ID, $slug: String, $channel: String){
category(
id: $id,
slug: $slug,
) {
id
name
ancestors(first: 20) {
edges {
node {
name
}
}
}
children(first: 20) {
edges {
node {
name
}
}
}
products(first: 10, channel: $channel) {
edges {
node {
id
}
}
}
}
}
"""
def test_category_query_by_id(user_api_client, product, channel_USD):
category = Category.objects.first()
variables = {
"id": graphene.Node.to_global_id("Category", category.pk),
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables)
content = get_graphql_content(response)
category_data = content["data"]["category"]
assert category_data is not None
assert category_data["name"] == category.name
assert len(category_data["ancestors"]["edges"]) == category.get_ancestors().count()
assert len(category_data["children"]["edges"]) == category.get_children().count()
def test_category_query_invalid_id(user_api_client, product, channel_USD):
category_id = "'"
variables = {
"id": category_id,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(QUERY_CATEGORY, variables)
content = get_graphql_content_from_response(response)
assert len(content["errors"]) == 1
assert content["errors"][0]["message"] == f"Couldn't resolve id: {category_id}."
assert content["data"]["category"] is None
def test_category_query_object_with_given_id_does_not_exist(
user_api_client, product, channel_USD
):
category_id = graphene.Node.to_global_id("Category", -1)
variables = {
"id": category_id,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(QUERY_CATEGORY, variables)
content = get_graphql_content(response)
assert content["data"]["category"] is None
def test_category_query_object_with_invalid_object_type(
user_api_client, product, channel_USD
):
category = Category.objects.first()
category_id = graphene.Node.to_global_id("Product", category.pk)
variables = {
"id": category_id,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(QUERY_CATEGORY, variables)
content = get_graphql_content(response)
assert content["data"]["category"] is None
def test_category_query_description(user_api_client, product, channel_USD):
category = Category.objects.first()
description = dummy_editorjs("Test description.", json_format=True)
category.description = dummy_editorjs("Test description.")
category.save()
variables = {
"id": graphene.Node.to_global_id("Category", category.pk),
"channel": channel_USD.slug,
}
query = """
query ($id: ID, $slug: String){
category(
id: $id,
slug: $slug,
) {
id
name
description
descriptionJson
}
}
"""
response = user_api_client.post_graphql(query, variables=variables)
content = get_graphql_content(response)
category_data = content["data"]["category"]
assert category_data["description"] == description
assert category_data["descriptionJson"] == description
def test_category_query_without_description(user_api_client, product, channel_USD):
category = Category.objects.first()
category.save()
variables = {
"id": graphene.Node.to_global_id("Category", category.pk),
"channel": channel_USD.slug,
}
query = """
query ($id: ID, $slug: String){
category(
id: $id,
slug: $slug,
) {
id
name
description
descriptionJson
}
}
"""
response = user_api_client.post_graphql(query, variables=variables)
content = get_graphql_content(response)
category_data = content["data"]["category"]
assert category_data["description"] is None
assert category_data["descriptionJson"] == "{}"
def test_category_query_by_slug(user_api_client, product, channel_USD):
category = Category.objects.first()
variables = {"slug": category.slug, "channel": channel_USD.slug}
response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables)
content = get_graphql_content(response)
category_data = content["data"]["category"]
assert category_data is not None
assert category_data["name"] == category.name
assert len(category_data["ancestors"]["edges"]) == category.get_ancestors().count()
assert len(category_data["children"]["edges"]) == category.get_children().count()
def test_category_query_error_when_id_and_slug_provided(
user_api_client, product, graphql_log_handler, channel_USD
):
category = Category.objects.first()
variables = {
"id": graphene.Node.to_global_id("Category", category.pk),
"slug": category.slug,
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables)
assert graphql_log_handler.messages == [
"saleor.graphql.errors.handled[INFO].GraphQLError"
]
content = get_graphql_content(response, ignore_errors=True)
assert len(content["errors"]) == 1
def test_category_query_error_when_no_param(
user_api_client, product, graphql_log_handler
):
variables = {}
response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables)
assert graphql_log_handler.messages == [
"saleor.graphql.errors.handled[INFO].GraphQLError"
]
content = get_graphql_content(response, ignore_errors=True)
assert len(content["errors"]) == 1
def test_query_category_product_only_visible_in_listings_as_customer(
user_api_client, product_list, channel_USD
):
# given
category = Category.objects.first()
product_list[0].channel_listings.all().update(visible_in_listings=False)
product_count = Product.objects.count()
variables = {
"id": graphene.Node.to_global_id("Category", category.pk),
"channel": channel_USD.slug,
}
# when
response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables)
# then
content = get_graphql_content(response, ignore_errors=True)
assert len(content["data"]["category"]["products"]["edges"]) == product_count - 1
def test_query_category_product_visible_in_listings_as_staff_without_manage_products(
staff_api_client, product_list, channel_USD
):
# given
category = Category.objects.first()
product_list[0].channel_listings.all().update(visible_in_listings=False)
product_count = Product.objects.count()
variables = {
"id": graphene.Node.to_global_id("Category", category.pk),
"channel": channel_USD.slug,
}
# when
response = staff_api_client.post_graphql(QUERY_CATEGORY, variables=variables)
# then
content = get_graphql_content(response, ignore_errors=True)
assert (
len(content["data"]["category"]["products"]["edges"]) == product_count - 1
) # invisible doesn't count
def test_query_category_product_only_visible_in_listings_as_staff_with_perm(
staff_api_client, product_list, permission_manage_products
):
# given
staff_api_client.user.user_permissions.add(permission_manage_products)
category = Category.objects.first()
product_list[0].channel_listings.all().update(visible_in_listings=False)
product_count = Product.objects.count()
variables = {"id": graphene.Node.to_global_id("Category", category.pk)}
# when
response = staff_api_client.post_graphql(QUERY_CATEGORY, variables=variables)
# then
content = get_graphql_content(response, ignore_errors=True)
assert len(content["data"]["category"]["products"]["edges"]) == product_count
def test_query_category_product_only_visible_in_listings_as_app_without_manage_products(
app_api_client, product_list, channel_USD
):
# given
category = Category.objects.first()
product_list[0].channel_listings.all().update(visible_in_listings=False)
product_count = Product.objects.count()
variables = {
"id": graphene.Node.to_global_id("Category", category.pk),
"channel": channel_USD.slug,
}
# when
response = app_api_client.post_graphql(QUERY_CATEGORY, variables=variables)
# then
content = get_graphql_content(response, ignore_errors=True)
assert (
len(content["data"]["category"]["products"]["edges"]) == product_count - 1
) # invisible doesn't count
def test_query_category_product_only_visible_in_listings_as_app_with_perm(
app_api_client, product_list, permission_manage_products
):
# given
app_api_client.app.permissions.add(permission_manage_products)
category = Category.objects.first()
product_list[0].channel_listings.all().update(visible_in_listings=False)
product_count = Product.objects.count()
variables = {"id": graphene.Node.to_global_id("Category", category.pk)}
# when
response = app_api_client.post_graphql(QUERY_CATEGORY, variables=variables)
# then
content = get_graphql_content(response, ignore_errors=True)
assert len(content["data"]["category"]["products"]["edges"]) == product_count
CATEGORY_CREATE_MUTATION = """
mutation(
$name: String, $slug: String,
$description: JSONString, $backgroundImage: Upload,
$backgroundImageAlt: String, $parentId: ID) {
categoryCreate(
input: {
name: $name
slug: $slug
description: $description
backgroundImage: $backgroundImage
backgroundImageAlt: $backgroundImageAlt
},
parent: $parentId
) {
category {
id
name
slug
description
parent {
name
id
}
backgroundImage{
alt
}
}
errors {
field
code
message
}
}
}
"""
def test_category_create_mutation(
monkeypatch, staff_api_client, permission_manage_products, media_root
):
query = CATEGORY_CREATE_MUTATION
mock_create_thumbnails = Mock(return_value=None)
monkeypatch.setattr(
(
"saleor.product.thumbnails."
"create_category_background_image_thumbnails.delay"
),
mock_create_thumbnails,
)
category_name = "Test category"
description = "description"
category_slug = slugify(category_name)
category_description = dummy_editorjs(description, True)
image_file, image_name = create_image()
image_alt = "Alt text for an image."
# test creating root category
variables = {
"name": category_name,
"description": category_description,
"backgroundImage": image_name,
"backgroundImageAlt": image_alt,
"slug": category_slug,
}
body = get_multipart_request_body(query, variables, image_file, image_name)
response = staff_api_client.post_multipart(
body, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["categoryCreate"]
assert data["errors"] == []
assert data["category"]["name"] == category_name
assert data["category"]["description"] == category_description
assert not data["category"]["parent"]
category = Category.objects.get(name=category_name)
assert category.description_plaintext == description
assert category.background_image.file
img_name, format = os.path.splitext(image_file._name)
file_name = category.background_image.name
assert file_name != image_file._name
assert file_name.startswith(f"category-backgrounds/{img_name}")
assert file_name.endswith(format)
mock_create_thumbnails.assert_called_once_with(category.pk)
assert data["category"]["backgroundImage"]["alt"] == image_alt
# test creating subcategory
parent_id = data["category"]["id"]
variables = {
"name": category_name,
"description": category_description,
"parentId": parent_id,
"slug": f"{category_slug}-2",
}
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]["categoryCreate"]
assert data["errors"] == []
assert data["category"]["parent"]["id"] == parent_id
@pytest.mark.parametrize(
"input_slug, expected_slug",
(
("test-slug", "test-slug"),
(None, "test-category"),
("", "test-category"),
("わたし-わ-にっぽん-です", "わたし-わ-にっぽん-です"),
),
)
def test_create_category_with_given_slug(
staff_api_client, permission_manage_products, input_slug, expected_slug
):
query = CATEGORY_CREATE_MUTATION
name = "Test category"
variables = {"name": name, "slug": input_slug}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["categoryCreate"]
assert not data["errors"]
assert data["category"]["slug"] == expected_slug
def test_create_category_name_with_unicode(
staff_api_client, permission_manage_products
):
query = CATEGORY_CREATE_MUTATION
name = "わたし-わ にっぽん です"
variables = {"name": name}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["categoryCreate"]
assert not data["errors"]
assert data["category"]["name"] == name
assert data["category"]["slug"] == "わたし-わ-にっぽん-です"
def test_category_create_mutation_without_background_image(
monkeypatch, staff_api_client, permission_manage_products
):
query = CATEGORY_CREATE_MUTATION
description = dummy_editorjs("description", True)
mock_create_thumbnails = Mock(return_value=None)
monkeypatch.setattr(
(
"saleor.product.thumbnails."
"create_category_background_image_thumbnails.delay"
),
mock_create_thumbnails,
)
# test creating root category
category_name = "Test category"
variables = {
"name": category_name,
"description": description,
"slug": slugify(category_name),
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["categoryCreate"]
assert data["errors"] == []
assert mock_create_thumbnails.call_count == 0
MUTATION_CATEGORY_UPDATE_MUTATION = """
mutation($id: ID!, $name: String, $slug: String,
$backgroundImage: Upload, $backgroundImageAlt: String,
$description: JSONString) {
categoryUpdate(
id: $id
input: {
name: $name
description: $description
backgroundImage: $backgroundImage
backgroundImageAlt: $backgroundImageAlt
slug: $slug
}
) {
category {
id
name
description
parent {
id
}
backgroundImage{
alt
}
}
errors {
field
message
}
}
}
"""
def test_category_update_mutation(
monkeypatch, staff_api_client, category, permission_manage_products, media_root
):
mock_create_thumbnails = Mock(return_value=None)
monkeypatch.setattr(
(
"saleor.product.thumbnails."
"create_category_background_image_thumbnails.delay"
),
mock_create_thumbnails,
)
    # create a child category and check that the update mutation won't change
    # its parent
child_category = category.children.create(name="child")
category_name = "Updated name"
description = "description"
category_slug = slugify(category_name)
category_description = dummy_editorjs(description, True)
image_file, image_name = create_image()
image_alt = "Alt text for an image."
category_id = graphene.Node.to_global_id("Category", child_category.pk)
variables = {
"name": category_name,
"description": category_description,
"backgroundImage": image_name,
"backgroundImageAlt": image_alt,
"id": category_id,
"slug": category_slug,
}
body = get_multipart_request_body(
MUTATION_CATEGORY_UPDATE_MUTATION, variables, image_file, image_name
)
response = staff_api_client.post_multipart(
body, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["categoryUpdate"]
assert data["errors"] == []
assert data["category"]["id"] == category_id
assert data["category"]["name"] == category_name
assert data["category"]["description"] == category_description
parent_id = graphene.Node.to_global_id("Category", category.pk)
assert data["category"]["parent"]["id"] == parent_id
category = Category.objects.get(name=category_name)
assert category.description_plaintext == description
assert category.background_image.file
mock_create_thumbnails.assert_called_once_with(category.pk)
assert data["category"]["backgroundImage"]["alt"] == image_alt
def test_category_update_mutation_invalid_background_image(
staff_api_client, category, permission_manage_products
):
image_file, image_name = create_pdf_file_with_image_ext()
image_alt = "Alt text for an image."
variables = {
"name": "new-name",
"slug": "new-slug",
"id": to_global_id("Category", category.id),
"backgroundImage": image_name,
"backgroundImageAlt": image_alt,
"isPublished": True,
}
body = get_multipart_request_body(
MUTATION_CATEGORY_UPDATE_MUTATION, variables, image_file, image_name
)
response = staff_api_client.post_multipart(
body, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["categoryUpdate"]
assert data["errors"][0]["field"] == "backgroundImage"
assert data["errors"][0]["message"] == "Invalid file type."
def test_category_update_mutation_without_background_image(
monkeypatch, staff_api_client, category, permission_manage_products
):
query = """
mutation($id: ID!, $name: String, $slug: String, $description: JSONString) {
categoryUpdate(
id: $id
input: {
name: $name
description: $description
slug: $slug
}
) {
errors {
field
message
}
}
}
"""
mock_create_thumbnails = Mock(return_value=None)
monkeypatch.setattr(
(
"saleor.product.thumbnails."
"create_category_background_image_thumbnails.delay"
),
mock_create_thumbnails,
)
category_name = "Updated name"
variables = {
"id": graphene.Node.to_global_id(
"Category", category.children.create(name="child").pk
),
"name": category_name,
"description": dummy_editorjs("description", True),
"slug": slugify(category_name),
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["categoryUpdate"]
assert data["errors"] == []
assert mock_create_thumbnails.call_count == 0
UPDATE_CATEGORY_SLUG_MUTATION = """
mutation($id: ID!, $slug: String) {
categoryUpdate(
id: $id
input: {
slug: $slug
}
) {
category{
name
slug
}
errors {
field
message
code
}
}
}
"""
@pytest.mark.parametrize(
"input_slug, expected_slug, error_message",
[
("test-slug", "test-slug", None),
("", "", "Slug value cannot be blank."),
(None, "", "Slug value cannot be blank."),
],
)
def test_update_category_slug(
staff_api_client,
category,
permission_manage_products,
input_slug,
expected_slug,
error_message,
):
query = UPDATE_CATEGORY_SLUG_MUTATION
old_slug = category.slug
assert old_slug != input_slug
node_id = graphene.Node.to_global_id("Category", category.id)
variables = {"slug": input_slug, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["categoryUpdate"]
errors = data["errors"]
if not error_message:
assert not errors
assert data["category"]["slug"] == expected_slug
else:
assert errors
assert errors[0]["field"] == "slug"
assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
def test_update_category_slug_exists(
staff_api_client, category, permission_manage_products
):
query = UPDATE_CATEGORY_SLUG_MUTATION
input_slug = "test-slug"
second_category = Category.objects.get(pk=category.pk)
second_category.pk = None
second_category.slug = input_slug
second_category.save()
assert input_slug != category.slug
node_id = graphene.Node.to_global_id("Category", category.id)
variables = {"slug": input_slug, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["categoryUpdate"]
errors = data["errors"]
assert errors
assert errors[0]["field"] == "slug"
assert errors[0]["code"] == ProductErrorCode.UNIQUE.name
@pytest.mark.parametrize(
"input_slug, expected_slug, input_name, error_message, error_field",
[
("test-slug", "test-slug", "New name", None, None),
("", "", "New name", "Slug value cannot be blank.", "slug"),
(None, "", "New name", "Slug value cannot be blank.", "slug"),
("test-slug", "", None, "This field cannot be blank.", "name"),
("test-slug", "", "", "This field cannot be blank.", "name"),
(None, None, None, "Slug value cannot be blank.", "slug"),
],
)
def test_update_category_slug_and_name(
staff_api_client,
category,
permission_manage_products,
input_slug,
expected_slug,
input_name,
error_message,
error_field,
):
query = """
mutation($id: ID!, $name: String, $slug: String) {
categoryUpdate(
id: $id
input: {
name: $name
slug: $slug
}
) {
category{
name
slug
}
errors {
field
message
code
}
}
}
"""
old_name = category.name
old_slug = category.slug
assert input_slug != old_slug
assert input_name != old_name
node_id = graphene.Node.to_global_id("Category", category.id)
variables = {"slug": input_slug, "name": input_name, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
category.refresh_from_db()
data = content["data"]["categoryUpdate"]
errors = data["errors"]
if not error_message:
assert data["category"]["name"] == input_name == category.name
assert data["category"]["slug"] == input_slug == category.slug
else:
assert errors
assert errors[0]["field"] == error_field
assert errors[0]["code"] == ProductErrorCode.REQUIRED.name
MUTATION_CATEGORY_DELETE = """
mutation($id: ID!) {
categoryDelete(id: $id) {
category {
name
}
errors {
field
message
}
}
}
"""
@patch("saleor.product.signals.delete_versatile_image")
def test_category_delete_mutation(
delete_versatile_image_mock,
staff_api_client,
category,
permission_manage_products,
):
variables = {"id": graphene.Node.to_global_id("Category", category.id)}
response = staff_api_client.post_graphql(
MUTATION_CATEGORY_DELETE, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["categoryDelete"]
assert data["category"]["name"] == category.name
with pytest.raises(category._meta.model.DoesNotExist):
category.refresh_from_db()
delete_versatile_image_mock.assert_not_called()
@patch("saleor.product.signals.delete_versatile_image")
def test_delete_category_with_background_image(
delete_versatile_image_mock,
staff_api_client,
category_with_image,
permission_manage_products,
media_root,
):
"""Ensure deleting category deletes background image from storage."""
category = category_with_image
variables = {"id": graphene.Node.to_global_id("Category", category.id)}
response = staff_api_client.post_graphql(
MUTATION_CATEGORY_DELETE, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["categoryDelete"]
assert data["category"]["name"] == category.name
with pytest.raises(category._meta.model.DoesNotExist):
category.refresh_from_db()
delete_versatile_image_mock.assert_called_once_with(category.background_image)
@patch("saleor.product.utils.update_products_discounted_prices_task")
def test_category_delete_mutation_for_categories_tree(
mock_update_products_discounted_prices_task,
staff_api_client,
categories_tree_with_published_products,
permission_manage_products,
):
parent = categories_tree_with_published_products
parent_product = parent.products.first()
child_product = parent.children.first().products.first()
product_list = [child_product, parent_product]
variables = {"id": graphene.Node.to_global_id("Category", parent.id)}
response = staff_api_client.post_graphql(
MUTATION_CATEGORY_DELETE, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["categoryDelete"]
assert data["category"]["name"] == parent.name
with pytest.raises(parent._meta.model.DoesNotExist):
parent.refresh_from_db()
mock_update_products_discounted_prices_task.delay.assert_called_once()
(
_call_args,
call_kwargs,
) = mock_update_products_discounted_prices_task.delay.call_args
assert set(call_kwargs["product_ids"]) == set(p.pk for p in product_list)
product_channel_listings = ProductChannelListing.objects.filter(
product__in=product_list
)
for product_channel_listing in product_channel_listings:
assert product_channel_listing.is_published is False
assert not product_channel_listing.publication_date
assert product_channel_listings.count() == 4
@patch("saleor.product.utils.update_products_discounted_prices_task")
def test_category_delete_mutation_for_children_from_categories_tree(
mock_update_products_discounted_prices_task,
staff_api_client,
categories_tree_with_published_products,
permission_manage_products,
):
parent = categories_tree_with_published_products
child = parent.children.first()
parent_product = parent.products.first()
child_product = child.products.first()
variables = {"id": graphene.Node.to_global_id("Category", child.id)}
response = staff_api_client.post_graphql(
MUTATION_CATEGORY_DELETE, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["categoryDelete"]
assert data["category"]["name"] == child.name
with pytest.raises(child._meta.model.DoesNotExist):
child.refresh_from_db()
mock_update_products_discounted_prices_task.delay.assert_called_once_with(
product_ids=[child_product.pk]
)
parent_product.refresh_from_db()
assert parent_product.category
product_channel_listings = ProductChannelListing.objects.filter(
product=parent_product
)
for product_channel_listing in product_channel_listings:
assert product_channel_listing.is_published is True
assert product_channel_listing.publication_date
child_product.refresh_from_db()
assert not child_product.category
product_channel_listings = ProductChannelListing.objects.filter(
product=child_product
)
for product_channel_listing in product_channel_listings:
assert product_channel_listing.is_published is False
assert not product_channel_listing.publication_date
LEVELED_CATEGORIES_QUERY = """
query leveled_categories($level: Int) {
categories(level: $level, first: 20) {
edges {
node {
name
parent {
name
}
}
}
}
}
"""
def test_category_level(user_api_client, category):
query = LEVELED_CATEGORIES_QUERY
child = Category.objects.create(name="child", slug="chi-ld", parent=category)
variables = {"level": 0}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
category_data = content["data"]["categories"]["edges"][0]["node"]
assert category_data["name"] == category.name
assert category_data["parent"] is None
variables = {"level": 1}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
category_data = content["data"]["categories"]["edges"][0]["node"]
assert category_data["name"] == child.name
assert category_data["parent"]["name"] == category.name
NOT_EXISTS_IDS_CATEGORIES_QUERY = """
query ($filter: CategoryFilterInput!) {
categories(first: 5, filter: $filter) {
edges {
node {
id
name
}
}
}
}
"""
def test_categories_query_ids_not_exists(user_api_client, category):
query = NOT_EXISTS_IDS_CATEGORIES_QUERY
variables = {"filter": {"ids": ["W3KATGDn3fq3ZH4=", "zH9pYmz7yWD3Hy8="]}}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response, ignore_errors=True)
message_error = '{"ids": [{"message": "Invalid ID specified.", "code": ""}]}'
assert len(content["errors"]) == 1
assert content["errors"][0]["message"] == message_error
assert content["data"]["categories"] is None
FETCH_CATEGORY_QUERY = """
query fetchCategory($id: ID!){
category(id: $id) {
name
backgroundImage(size: 120) {
url
alt
}
}
}
"""
def test_category_image_query(user_api_client, non_default_category, media_root):
alt_text = "Alt text for an image."
category = non_default_category
image_file, image_name = create_image()
category.background_image = image_file
category.background_image_alt = alt_text
category.save()
category_id = graphene.Node.to_global_id("Category", category.pk)
variables = {"id": category_id}
response = user_api_client.post_graphql(FETCH_CATEGORY_QUERY, variables)
content = get_graphql_content(response)
data = content["data"]["category"]
thumbnail_url = category.background_image.thumbnail["120x120"].url
assert thumbnail_url in data["backgroundImage"]["url"]
assert data["backgroundImage"]["alt"] == alt_text
def test_category_image_query_without_associated_file(
user_api_client, non_default_category
):
category = non_default_category
category_id = graphene.Node.to_global_id("Category", category.pk)
variables = {"id": category_id}
response = user_api_client.post_graphql(FETCH_CATEGORY_QUERY, variables)
content = get_graphql_content(response)
data = content["data"]["category"]
assert data["name"] == category.name
assert data["backgroundImage"] is None
def test_update_category_mutation_remove_background_image(
staff_api_client, category_with_image, permission_manage_products
):
query = """
mutation updateCategory($id: ID!, $backgroundImage: Upload) {
categoryUpdate(
id: $id, input: {
backgroundImage: $backgroundImage
}
) {
category {
backgroundImage{
url
}
}
errors {
field
message
}
}
}
"""
assert category_with_image.background_image
variables = {
"id": to_global_id("Category", category_with_image.id),
"backgroundImage": None,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["categoryUpdate"]["category"]
assert not data["backgroundImage"]
category_with_image.refresh_from_db()
assert not category_with_image.background_image
def test_query_category_for_federation(api_client, non_default_category):
category_id = graphene.Node.to_global_id("Category", non_default_category.pk)
variables = {
"representations": [
{
"__typename": "Category",
"id": category_id,
},
],
}
query = """
query GetCategoryInFederation($representations: [_Any]) {
_entities(representations: $representations) {
__typename
... on Category {
id
name
}
}
}
"""
response = api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert content["data"]["_entities"] == [
{
"__typename": "Category",
"id": category_id,
"name": non_default_category.name,
}
]
| 32.080817
| 88
| 0.644575
|
d41bd3c2551994fa9d6dabe0ff04996d3e737368
| 14,675
|
py
|
Python
|
ddtrace/profiling/exporter/pprof.py
|
RobertTownley/dd-trace-py
|
f5a04c60ed0387268f024ab8cd6d2ac2a1f04eb9
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
ddtrace/profiling/exporter/pprof.py
|
RobertTownley/dd-trace-py
|
f5a04c60ed0387268f024ab8cd6d2ac2a1f04eb9
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
ddtrace/profiling/exporter/pprof.py
|
RobertTownley/dd-trace-py
|
f5a04c60ed0387268f024ab8cd6d2ac2a1f04eb9
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
import collections
import itertools
import operator
import sys
try:
import tracemalloc
except ImportError:
tracemalloc = None
from ddtrace.vendor import six
from ddtrace.profiling import _line2def
from ddtrace.profiling import exporter
from ddtrace.vendor import attr
from ddtrace.profiling.collector import exceptions
from ddtrace.profiling.collector import memory
from ddtrace.profiling.collector import stack
from ddtrace.profiling.collector import threading
from ddtrace.profiling.exporter import pprof_pb2
_ITEMGETTER_ZERO = operator.itemgetter(0)
_ITEMGETTER_ONE = operator.itemgetter(1)
_ATTRGETTER_ID = operator.attrgetter("id")
@attr.s
class _Sequence(object):
start_at = attr.ib(default=1)
next_id = attr.ib(init=False, default=None)
def __attrs_post_init__(self):
self.next_id = self.start_at
def generate(self):
"""Generate a new unique id and return it."""
generated_id = self.next_id
self.next_id += 1
return generated_id
@attr.s
class _StringTable(object):
_strings = attr.ib(init=False, factory=lambda: {"": 0})
_seq_id = attr.ib(init=False, factory=_Sequence)
def to_id(self, string):
try:
return self._strings[string]
except KeyError:
generated_id = self._strings[string] = self._seq_id.generate()
return generated_id
def __iter__(self):
for string, _ in sorted(self._strings.items(), key=_ITEMGETTER_ONE):
yield string
def __len__(self):
return len(self._strings)
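# Editor's note: _StringTable implements pprof's string interning -- index 0 is
# always the empty string and each new string receives the next id, so repeated
# calls to to_id() with the same string return the same index.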
@attr.s
class _PprofConverter(object):
"""Convert stacks generated by a Profiler to pprof format."""
    # These attributes will be serialized into a `pprof_pb2.Profile`
_functions = attr.ib(init=False, factory=dict)
_locations = attr.ib(init=False, factory=dict)
_string_table = attr.ib(init=False, factory=_StringTable)
_last_location_id = attr.ib(init=False, factory=_Sequence)
_last_func_id = attr.ib(init=False, factory=_Sequence)
    # A dict keyed by (Location, [Labels]) tuples; each value is a dict
    # mapping a sample-type name (e.g. "cpu-time") to its numeric value.
_location_values = attr.ib(factory=lambda: collections.defaultdict(dict), init=False, repr=False)
def _to_Function(self, filename, funcname):
try:
return self._functions[(filename, funcname)]
except KeyError:
func = pprof_pb2.Function(
id=self._last_func_id.generate(), name=self._str(funcname), filename=self._str(filename),
)
self._functions[(filename, funcname)] = func
return func
def _to_Location(self, filename, lineno, funcname=None):
try:
return self._locations[(filename, lineno, funcname)]
except KeyError:
if funcname is None:
real_funcname = _line2def.filename_and_lineno_to_def(filename, lineno)
else:
real_funcname = funcname
location = pprof_pb2.Location(
id=self._last_location_id.generate(),
line=[pprof_pb2.Line(function_id=self._to_Function(filename, real_funcname).id, line=lineno,),],
)
self._locations[(filename, lineno, funcname)] = location
return location
def _str(self, string):
"""Convert a string to an id from the string table."""
return self._string_table.to_id(str(string))
def _to_locations(self, frames, nframes):
locations = [self._to_Location(filename, lineno, funcname).id for filename, lineno, funcname in frames]
omitted = nframes - len(frames)
if omitted:
locations.append(
self._to_Location("", 0, "<%d frame%s omitted>" % (omitted, ("s" if omitted > 1 else ""))).id
)
return tuple(locations)
def convert_uncaught_exception_event(self, thread_id, thread_name, frames, nframes, exc_type_name, events):
location_key = (
self._to_locations(frames, nframes),
(("thread id", str(thread_id)), ("thread name", thread_name), ("exception type", exc_type_name)),
)
self._location_values[location_key]["uncaught-exceptions"] = len(events)
def convert_stack_event(self, thread_id, thread_name, frames, nframes, samples):
location_key = (
self._to_locations(frames, nframes),
(("thread id", str(thread_id)), ("thread name", thread_name),),
)
self._location_values[location_key]["cpu-samples"] = len(samples)
self._location_values[location_key]["cpu-time"] = sum(s.cpu_time_ns for s in samples)
self._location_values[location_key]["wall-time"] = sum(s.wall_time_ns for s in samples)
def convert_lock_acquire_event(self, lock_name, thread_id, thread_name, frames, nframes, events, sampling_ratio):
location_key = (
self._to_locations(frames, nframes),
(("thread id", str(thread_id)), ("thread name", thread_name), ("lock name", lock_name)),
)
self._location_values[location_key]["lock-acquire"] = len(events)
self._location_values[location_key]["lock-acquire-wait"] = int(
sum(e.wait_time_ns for e in events) / sampling_ratio
)
def convert_lock_release_event(self, lock_name, thread_id, thread_name, frames, nframes, events, sampling_ratio):
location_key = (
self._to_locations(frames, nframes),
(("thread id", str(thread_id)), ("thread name", thread_name), ("lock name", lock_name)),
)
self._location_values[location_key]["lock-release"] = len(events)
self._location_values[location_key]["lock-release-hold"] = int(
sum(e.locked_for_ns for e in events) / sampling_ratio
)
def convert_stack_exception_event(self, thread_id, thread_name, frames, nframes, exc_type_name, events):
location_key = (
self._to_locations(frames, nframes),
(("thread id", str(thread_id)), ("thread name", thread_name), ("exception type", exc_type_name),),
)
self._location_values[location_key]["exception-samples"] = len(events)
def convert_memory_event(self, stats, sampling_ratio):
location = tuple(self._to_Location(frame.filename, frame.lineno).id for frame in reversed(stats.traceback))
location_key = (location, tuple())
self._location_values[location_key]["alloc-samples"] = int(stats.count / sampling_ratio)
self._location_values[location_key]["alloc-space"] = int(stats.size / sampling_ratio)
def _build_profile(self, start_time_ns, duration_ns, period, sample_types, program_name):
pprof_sample_type = [
pprof_pb2.ValueType(type=self._str(type_), unit=self._str(unit)) for type_, unit in sample_types
]
sample = [
pprof_pb2.Sample(
location_id=locations,
value=[values.get(sample_type_name, 0) for sample_type_name, unit in sample_types],
label=[pprof_pb2.Label(key=self._str(key), str=self._str(s)) for key, s in labels],
)
for (locations, labels), values in sorted(six.iteritems(self._location_values), key=_ITEMGETTER_ZERO)
]
period_type = pprof_pb2.ValueType(type=self._str("time"), unit=self._str("nanoseconds"))
        # WARNING: no code below this point should call _str(): once the _string_table has been
        # serialized into string_table below, later additions to it would not appear in the output
return pprof_pb2.Profile(
sample_type=pprof_sample_type,
sample=sample,
mapping=[pprof_pb2.Mapping(id=1, filename=self._str(program_name),),],
# Sort location and function by id so the output is reproducible
location=sorted(self._locations.values(), key=_ATTRGETTER_ID),
function=sorted(self._functions.values(), key=_ATTRGETTER_ID),
string_table=list(self._string_table),
time_nanos=start_time_ns,
duration_nanos=duration_ns,
period=period,
period_type=period_type,
)
class PprofExporter(exporter.Exporter):
"""Export recorder events to pprof format."""
@staticmethod
def _get_program_name(default="-"):
try:
import __main__
program_name = __main__.__file__
except (ImportError, AttributeError):
try:
program_name = sys.argv[0]
except IndexError:
program_name = None
if program_name is None:
return default
return program_name
@staticmethod
def _stack_event_group_key(event):
return (event.thread_id, str(event.thread_name), tuple(event.frames), event.nframes)
def _group_stack_events(self, events):
return itertools.groupby(sorted(events, key=self._stack_event_group_key), key=self._stack_event_group_key,)
@staticmethod
def _lock_event_group_key(event):
return (event.lock_name, event.thread_id, str(event.thread_name), tuple(event.frames), event.nframes)
def _group_lock_events(self, events):
return itertools.groupby(sorted(events, key=self._lock_event_group_key), key=self._lock_event_group_key,)
@staticmethod
def _exception_group_key(event):
exc_type = event.exc_type
exc_type_name = exc_type.__module__ + "." + exc_type.__name__
return (event.thread_id, str(event.thread_name), tuple(event.frames), event.nframes, exc_type_name)
def _group_exception_events(self, events):
return itertools.groupby(sorted(events, key=self._exception_group_key), key=self._exception_group_key,)
@staticmethod
def min_none(a, b):
"""A min() version that discards None values."""
if a is None:
return b
if b is None:
return a
return min(a, b)
@staticmethod
def max_none(a, b):
"""A max() version that discards None values."""
if a is None:
return b
if b is None:
return a
return max(a, b)
def export(self, events, start_time_ns, end_time_ns):
"""Convert events to pprof format.
:param events: The event dictionary from a `ddtrace.profiling.recorder.Recorder`.
:param start_time_ns: The start time of recording.
:param end_time_ns: The end time of recording.
:return: A protobuf Profile object.
"""
program_name = self._get_program_name()
sum_period = 0
nb_event = 0
converter = _PprofConverter()
# Handle StackSampleEvent
stack_events = []
for event in events.get(stack.StackSampleEvent, []):
stack_events.append(event)
sum_period += event.sampling_period
nb_event += 1
for (thread_id, thread_name, frames, nframes), stack_events in self._group_stack_events(stack_events):
converter.convert_stack_event(thread_id, thread_name, frames, nframes, list(stack_events))
# Handle Lock events
for event_class, convert_fn in (
(threading.LockAcquireEvent, converter.convert_lock_acquire_event),
(threading.LockReleaseEvent, converter.convert_lock_release_event),
):
lock_events = events.get(event_class, [])
sampling_sum_pct = sum(event.sampling_pct for event in lock_events)
if lock_events:
sampling_ratio_avg = sampling_sum_pct / (len(lock_events) * 100.0)
for (lock_name, thread_id, thread_name, frames, nframes), l_events in self._group_lock_events(
lock_events
):
convert_fn(lock_name, thread_id, thread_name, frames, nframes, list(l_events), sampling_ratio_avg)
# Handle UncaughtExceptionEvent
for ((thread_id, thread_name, frames, nframes, exc_type_name), ue_events,) in self._group_exception_events(
events.get(exceptions.UncaughtExceptionEvent, [])
):
converter.convert_uncaught_exception_event(
thread_id, thread_name, frames, nframes, exc_type_name, list(ue_events)
)
sample_types = (
("cpu-samples", "count"),
("cpu-time", "nanoseconds"),
("wall-time", "nanoseconds"),
("uncaught-exceptions", "count"),
("lock-acquire", "count"),
("lock-acquire-wait", "nanoseconds"),
("lock-release", "count"),
("lock-release-hold", "nanoseconds"),
)
# Handle StackExceptionSampleEvent
if stack.FEATURES["stack-exceptions"]:
sample_types += (("exception-samples", "count"),)
for (thread_id, thread_name, frames, nframes, exc_type_name), se_events in self._group_exception_events(
events.get(stack.StackExceptionSampleEvent, [])
):
converter.convert_stack_exception_event(
thread_id, thread_name, frames, nframes, exc_type_name, list(se_events)
)
if tracemalloc:
sample_types += (
("alloc-samples", "count"),
("alloc-space", "bytes"),
)
# Handle MemorySampleEvent
# Merge all the memory snapshots
traces = []
traceback_limit = None
sampling_pct_sum = 0
nb_events = 0
for event in events.get(memory.MemorySampleEvent, []):
sampling_pct_sum += event.sampling_pct
nb_events += 1
traces.extend(event.snapshot.traces._traces)
# Assume they are all the same
traceback_limit = event.snapshot.traceback_limit
            # Ignore the period for memory events, as it's not time-based sampling
if nb_events:
sampling_ratio_avg = sampling_pct_sum / (nb_events * 100.0) # convert percentage to ratio
for stats in tracemalloc.Snapshot(traces, traceback_limit).statistics("traceback"):
converter.convert_memory_event(stats, sampling_ratio_avg)
# Compute some metadata
if nb_event:
period = int(sum_period / nb_event)
else:
period = None
duration_ns = end_time_ns - start_time_ns
return converter._build_profile(
start_time_ns=start_time_ns,
duration_ns=duration_ns,
period=period,
sample_types=sample_types,
program_name=program_name,
)
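# Rough usage sketch (illustrative only, not part of the original module; how the event
# dictionary is obtained from the recorder is an assumption here):
#   exporter = PprofExporter()
#   events = recorder.reset()  # assumed to yield the event dictionary described in export()
#   profile = exporter.export(events, start_time_ns, end_time_ns)
#   with open("profile.pprof", "wb") as f:
#       f.write(profile.SerializeToString())  # protobuf messages provide SerializeToString()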
| 38.925729
| 118
| 0.638228
|
9f6c9d7cb4a629b64e20f2d4d2b437ed82ea8ff7
| 14,179
|
py
|
Python
|
portfolio/Asset.py
|
open-risk/equinox
|
0503e716b566ff7c776f04a611879f88d86e1cc6
|
[
"Apache-2.0"
] | 10
|
2021-03-21T22:05:33.000Z
|
2022-03-15T18:26:58.000Z
|
portfolio/Asset.py
|
open-risk/equinox
|
0503e716b566ff7c776f04a611879f88d86e1cc6
|
[
"Apache-2.0"
] | 2
|
2021-10-30T15:15:41.000Z
|
2021-11-11T12:35:02.000Z
|
portfolio/Asset.py
|
open-risk/equinox
|
0503e716b566ff7c776f04a611879f88d86e1cc6
|
[
"Apache-2.0"
] | 1
|
2022-03-16T18:59:36.000Z
|
2022-03-16T18:59:36.000Z
|
# Copyright (c) 2021 Open Risk (https://www.openriskmanagement.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from django.db import models
from portfolio.model_choices import *
from django.contrib.gis.db.models import PointField, PolygonField
from django.urls import reverse
from datetime import datetime
ASSET_CLASS_CHOICES = [(0, '(a) Residential'),
(1, '(b) CRE'),
(2, '(c) SME/Corporate'),
(3, '(d) Unsecured'),
(4, '(e) Auto'),
(5, '(f) Leasing / ABF'),
(6, '(g) Specialised')]
class Asset(models.Model):
"""
    The Asset model holds asset-specific data for each real asset or facility (plant, infrastructure etc.) that is part of a Portfolio, Inventory or Project - which may or may not be financed. An asset will involve one or more emissions sources.
An Asset participates in only one Project at a time (if linked to a project object)
"""
# IDENTIFICATION & CATEGORIZATION
asset_identifier = models.CharField(max_length=80, blank=True, null=True,
help_text='Unique identifier of the asset for internal purposes')
description = models.TextField(blank=True, null=True,
help_text='Textual Description of the Asset')
asset_class = models.IntegerField(blank=True, null=True, choices=ASSET_CLASS_CHOICES,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
business_description = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
registration_number = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
# LINKS / RELATIONS
project = models.ForeignKey('Project', blank=True, null=True, on_delete=models.CASCADE)
legal_owner = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
#
# GHG Data
#
asset_ghg_emissions = models.FloatField(blank=True, null=True,
                                            help_text='This stores the aggregate current annualized emissions of an asset in CO2 equivalents')
#
# Geographic Information (Geometries stored separately)
#
# asset_basin_of_influence = PolygonField(blank=True, null=True,
# help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
#
# asset_perimeter = PolygonField(blank=True, null=True,
# help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
city_of_registered_location = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
address_of_registered_location = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
country_of_registered_location = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
current_country_of_registration = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
geographic_region_classification = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
geographic_region_of_registered_location = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
original_country_of_registration = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
postcode_of_registered_location = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
#
# Financial Data
#
activation_of_guarantee = models.BooleanField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
asset_purchase_obligation = models.BooleanField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
collateral_insurance = models.BooleanField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
collateral_insurance_coverage_amount = models.FloatField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
collateral_insurance_provider = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
collateral_type = models.IntegerField(blank=True, null=True, choices=COLLATERAL_TYPE_CHOICES,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
configuration = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
currency_of_collateral = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
current_opex_and_overheads = models.FloatField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
date_of_initial_valuation = models.DateField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
date_of_latest_valuation = models.DateField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
date_of_the_latest_residual_valuation = models.DateField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
engine_size = models.FloatField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
estimated_useful_life = models.FloatField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
industry_segment = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
initial_residual_valuation_date = models.DateField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
initial_residual_value = models.FloatField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
initial_valuation_amount = models.FloatField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
latest_residual_value = models.FloatField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
latest_valuation_amount = models.FloatField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
manufacturer_of_collateral = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
name_or_model_of_collateral = models.TextField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
new_or_used = models.IntegerField(blank=True, null=True, choices=NEW_OR_USED_CHOICES,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
option_to_buy_price = models.FloatField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
project_characteristics = models.FloatField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
type_of_initial_valuation = models.IntegerField(blank=True, null=True, choices=TYPE_OF_INITIAL_VALUATION_CHOICES,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
type_of_latest_valuation = models.IntegerField(blank=True, null=True, choices=TYPE_OF_LATEST_VALUATION_CHOICES,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
type_of_legal_owner = models.IntegerField(blank=True, null=True, choices=TYPE_OF_LEGAL_OWNER_CHOICES,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
year_of_manufacture = models.DateField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
year_of_registration = models.DateField(blank=True, null=True,
help_text='Standard Description. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki">Documentation</a>')
#
# BOOKKEEPING FIELDS
#
creation_date = models.DateTimeField(auto_now_add=True)
last_change_date = models.DateTimeField(auto_now=True)
def __str__(self):
return self.asset_identifier
def get_absolute_url(self):
return reverse('portfolio:Asset_edit', kwargs={'pk': self.pk})
class Meta:
verbose_name = "Asset"
verbose_name_plural = "Assets"
| 64.744292
| 243
| 0.640313
|
ac65babe72679aa6912795fc10f5973f7204c013
| 3,208
|
py
|
Python
|
artifacts/infodynamics-dist-1.3/demos/python/example4TeContinuousDataKraskov.py
|
kamir/WikiExplorer.NG
|
7b56e3d1e638d760fe238dfd66d3775404335ff4
|
[
"Apache-2.0"
] | null | null | null |
artifacts/infodynamics-dist-1.3/demos/python/example4TeContinuousDataKraskov.py
|
kamir/WikiExplorer.NG
|
7b56e3d1e638d760fe238dfd66d3775404335ff4
|
[
"Apache-2.0"
] | 7
|
2020-06-30T23:18:30.000Z
|
2022-02-01T01:02:32.000Z
|
artifacts/infodynamics-dist-1.3/demos/python/example4TeContinuousDataKraskov.py
|
kamir/WikiExplorer.NG
|
7b56e3d1e638d760fe238dfd66d3775404335ff4
|
[
"Apache-2.0"
] | null | null | null |
##
## Java Information Dynamics Toolkit (JIDT)
## Copyright (C) 2012, Joseph T. Lizier
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
# = Example 4 - Transfer entropy on continuous data using Kraskov estimators =
# Simple transfer entropy (TE) calculation on continuous-valued data using the Kraskov-estimator TE calculator.
from jpype import *
import random
import math
# Change location of jar to match yours:
jarLocation = "../../infodynamics.jar"
# Start the JVM (add the "-Xmx" option with say 1024M if you get crashes due to not enough memory space)
startJVM(getDefaultJVMPath(), "-ea", "-Djava.class.path=" + jarLocation)
# Generate some random normalised data.
numObservations = 1000
covariance=0.4
# Source array of random normals:
sourceArray = [random.normalvariate(0,1) for r in range(numObservations)]
# Destination array of random normals with partial correlation to previous value of sourceArray
destArray = [0] + [sum(pair) for pair in zip([covariance*y for y in sourceArray[0:numObservations-1]], \
[(1-covariance)*y for y in [random.normalvariate(0,1) for r in range(numObservations-1)]] ) ]
# Uncorrelated source array:
sourceArray2 = [random.normalvariate(0,1) for r in range(numObservations)]
# Create a TE calculator and run it:
teCalcClass = JPackage("infodynamics.measures.continuous.kraskov").TransferEntropyCalculatorKraskov
teCalc = teCalcClass()
teCalc.setProperty("NORMALISE", "true") # Normalise the individual variables
teCalc.initialise(1) # Use history length 1 (Schreiber k=1)
teCalc.setProperty("k", "4") # Use Kraskov parameter K=4 for 4 nearest points
# Perform calculation with correlated source:
teCalc.setObservations(JArray(JDouble, 1)(sourceArray), JArray(JDouble, 1)(destArray))
result = teCalc.computeAverageLocalOfObservations()
# Note that the calculation is a random variable (because the generated
# data is a set of random variables) - the result will be of the order
# of what we expect, but not exactly equal to it; in fact, there will
# be a large variance around it.
print("TE result %.4f nats; expected to be close to %.4f nats for these correlated Gaussians" % \
(result, math.log(1/(1-math.pow(covariance,2)))))
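# Worked value for the expected TE printed above (illustrative): with covariance = 0.4,
# log(1 / (1 - 0.4**2)) = -log(0.84) ~ 0.174 nats, so the estimate should fluctuate
# around roughly 0.17 nats for these correlated Gaussians.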
# Perform calculation with uncorrelated source:
teCalc.initialise() # Initialise leaving the parameters the same
teCalc.setObservations(JArray(JDouble, 1)(sourceArray2), JArray(JDouble, 1)(destArray))
result2 = teCalc.computeAverageLocalOfObservations()
print("TE result %.4f nats; expected to be close to 0 nats for these uncorrelated Gaussians" % result2)
| 50.125
| 138
| 0.747506
|
2919c4d6de8fe4758418f7a8055fbf264f667117
| 307
|
py
|
Python
|
take_snapshot.py
|
prarthaana123/Project102
|
58273208e98e7d15183ed6375ac6682bcaeeab94
|
[
"MIT"
] | null | null | null |
take_snapshot.py
|
prarthaana123/Project102
|
58273208e98e7d15183ed6375ac6682bcaeeab94
|
[
"MIT"
] | null | null | null |
take_snapshot.py
|
prarthaana123/Project102
|
58273208e98e7d15183ed6375ac6682bcaeeab94
|
[
"MIT"
] | null | null | null |
import cv2
def take_snapshot():
videoCaptureObject = cv2.VideoCapture(0)
result = True
while(result):
ret,frame = videoCaptureObject.read()
cv2.imwrite("NewPicture.jpg",frame)
result = False
videoCaptureObject.release()
cv2.destroyAllWindows()
take_snapshot()
| 23.615385
| 46
| 0.644951
|
fa5368b76900762937ce9e6c1a44d4035984c0ed
| 38,619
|
py
|
Python
|
spearmint/acquisition_functions/PPESMOC_gradients.py
|
EduardoGarrido90/spearmint_ppesmoc
|
4c4da9b3858e52c15698f1b401f4a4a5b02517fa
|
[
"RSA-MD"
] | 6
|
2021-06-29T11:26:49.000Z
|
2022-01-20T18:12:47.000Z
|
build/lib.linux-x86_64-2.7/spearmint/acquisition_functions/PPESMOC_gradients.py
|
EduardoGarrido90/spearmint_ppesmoc
|
4c4da9b3858e52c15698f1b401f4a4a5b02517fa
|
[
"RSA-MD"
] | null | null | null |
build/lib.linux-x86_64-2.7/spearmint/acquisition_functions/PPESMOC_gradients.py
|
EduardoGarrido90/spearmint_ppesmoc
|
4c4da9b3858e52c15698f1b401f4a4a5b02517fa
|
[
"RSA-MD"
] | 3
|
2021-06-29T14:48:44.000Z
|
2022-03-28T15:20:04.000Z
|
from collections import defaultdict
import autograd.numpy as np
#import numpy as np
import autograd.misc.flatten as flatten
import autograd.scipy.stats as sps
#import scipy.stats as sps
from autograd.numpy.linalg import solve
#from numpy.linalg import solve
from scipy.spatial.distance import cdist
import numpy.linalg as npla
def my_log(x):
# The computation of the gradient sometimes fails as a consequence of evaluating log(0.0)
# Uncomment the code below if that is the case.
#if np.any(x == 0.0):
# import pdb; pdb.set_trace()
return np.log(x + 1e-10)
SQRT_5 = np.sqrt(5)
def two_by_two_matrix_inverse(a, b, c, d):
det = a * d - c * b
a_new = 1.0 / det * d
b_new = 1.0 / det * -b
c_new = 1.0 / det * - c
d_new = 1.0 / det * a
return a_new, b_new, c_new, d_new
def log_1_minus_exp_x(x):
#if not isinstance(x, np.ndarray) or x.size==1:
if x.shape == () or x.size==1:
return log_1_minus_exp_x_scalar(x)
assert np.all(x <= 0)
case1 = x < my_log(1e-6) # -13.8
case2 = x > -1e-6
case3 = np.logical_and(x >= my_log(1e-6), x <= -1e-6)
assert np.all(case1+case2+case3 == 1)
    #These three operations have to be done using two np.where calls.
    #Test this.
"""
result = np.zeros(x.shape)
result[case1] = -np.exp(x[case1])
with np.errstate(divide='ignore'): # if x is exactly 0, give -inf without complaining
result[case2] = np.log(-x[case2])
result[case3] = np.log(1.0-np.exp(x[case3]))
return result
"""
return np.where(x < my_log(1e-6), -np.exp(x), np.where(x > -1e-6, my_log(-x), my_log(1.0-np.exp(x))))
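# Numerical-stability note (illustrative): log(1 - exp(x)) is split into three regimes
# above. For very negative x (e.g. x = -20, exp(x) ~ 2.1e-9) it uses
# log(1 - exp(x)) ~ -exp(x); for x close to zero (e.g. x = -1e-8) it uses
# 1 - exp(x) ~ -x, i.e. log(-x) ~ -18.4, avoiding the cancellation in 1 - exp(x).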
def logcdf_robust(x):
#if isinstance(x, np.ndarray):
if x.shape != ():
ret = sps.norm.logcdf(x)
#ret[x > 5] = -sps.norm.sf(x[x > 5])
ret = np.where(x <= 5, ret, -sps.norm.sf(x))
elif x > 5:
ret = -sps.norm.sf(x)
else:
ret = sps.norm.logcdf(x)
return ret
def two_by_two_symmetric_matrix_product_vector(a, b, c, v_a, v_b):
return a * v_a + c * v_b, c * v_a + b * v_b
def two_by_two_symmetric_matrix_inverse(a, b, c):
det = a * b - c * c
a_new = 1.0 / det * b
b_new = 1.0 / det * a
c_new = 1.0 / det * - c
return a_new, b_new, c_new
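# Worked example (illustrative) of the closed form above: the symmetric matrix
# [[a, c], [c, b]] has inverse (1 / (a*b - c*c)) * [[b, -c], [-c, a]]. For a=2, b=3, c=1
# the determinant is 5 and the function returns (0.6, 0.4, -0.2), i.e. the inverse
# [[0.6, -0.2], [-0.2, 0.4]].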
def build_unconditioned_predictive_distributions(all_tasks, all_constraints, X):
mPred = dict()
Vpred = dict()
VpredInv = dict()
mPred_cons = dict()
Vpred_cons = dict()
VpredInv_cons = dict()
for t in all_tasks:
mPred[t], Vpred[t] = predict(all_tasks[t], X)
VpredInv[t] = np.linalg.inv(Vpred[t])
for t in all_constraints:
mPred_cons[t], Vpred_cons[t] = predict(all_constraints[t], X)
VpredInv_cons[t] = np.linalg.inv(Vpred_cons[t])
return mPred, Vpred, VpredInv, mPred_cons, Vpred_cons, VpredInv_cons
def build_set_of_points_that_conditions_GPs(obj_models, con_models, pareto_set, Xtest):
#We first include the observations.
X = np.array([np.array([])])
for t in obj_models:
Xtask = obj_models[ t ].observed_inputs
X = np.array([Xtask[ 0, ]])
for i in range(1, Xtask.shape[ 0 ]):
if np.min(cdist(Xtask[ i : (i + 1), : ], X)) > 1e-8:
X = np.vstack((X, Xtask[ i, ]))
for t in con_models:
Xtask = con_models[ t ].observed_inputs
for i in range(Xtask.shape[ 0 ]):
if np.min(cdist(Xtask[ i : (i + 1), : ], X)) > 1e-8:
X = np.vstack((X, Xtask[ i, ]))
#Then, we include the Pareto Set points.
for i in range(pareto_set.shape[ 0 ]):
#if np.min(cdist(pareto_set[ i : (i + 1), : ], X)) > 1e-8:
X = np.vstack((X, pareto_set[ i, ]))
n_obs = X.shape[ 0 ] - pareto_set.shape[ 0 ]
#Finally, we include the candidate points, without comparing with the previous points.
n_test = Xtest.shape[ 0 ]
for i in range(Xtest.shape[ 0 ]):
X = np.vstack((X, Xtest[ i, ]))
n_total = X.shape[ 0 ]
n_pset = pareto_set.shape[ 0 ]
return X, n_obs, n_pset, n_test, n_total
def original_autograd_cdist(xx1, xx2):
txx1 = np.tile(xx1, xx2.shape[0]).reshape((xx1.shape[0], xx2.shape[0], xx1.shape[1]))
txx2 = np.tile(flatten(xx2)[0], xx1.shape[0]).reshape((xx1.shape[0], xx2.shape[0], xx1.shape[1]))
return np.sum(np.power(txx1-txx2, 2), axis=2)
def autograd_cdist(xx1, xx2):
return np.outer(np.sum(xx1**2, axis=1), np.ones(xx2.shape[0])) - 2.0 * np.dot(xx1, xx2.T) + \
np.outer(np.ones(xx1.shape[0]), np.sum(xx2**2, axis=1))
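# Note on the identity used in autograd_cdist (illustrative): for every pair (i, j),
# ||x1_i - x2_j||^2 = ||x1_i||^2 - 2 * x1_i . x2_j + ||x2_j||^2, computed with outer
# products and one matrix product so autograd can differentiate through it. Round-off can
# make entries slightly negative, hence the np.abs() and the 1e-10 floor in cross_cov().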
def original_dist2(ls, x1, x2=None):
if x2 is None:
# Find distance with self for x1.
# Rescale.
xx1 = x1 / ls
xx2 = xx1
else:
# Rescale.
xx1 = x1 / ls
xx2 = x2 / ls
return original_autograd_cdist(xx1, xx2)
def dist2(ls, x1, x2=None):
if x2 is None:
# Find distance with self for x1.
# Rescale.
xx1 = x1 / ls
xx2 = xx1
else:
# Rescale.
xx1 = x1 / ls
xx2 = x2 / ls
return autograd_cdist(xx1, xx2)
def original_cov(ls_values, inputs):
return original_cross_cov(ls_values, inputs, inputs)
def cov(ls_values, inputs):
return cross_cov(ls_values, inputs, inputs, squared=True)
def original_cross_cov(ls_values, inputs_1, inputs_2):
r2 = np.abs(original_dist2(ls_values, inputs_1, inputs_2))
r = np.sqrt(r2)
cov = (1.0 + SQRT_5*r + (5.0/3.0)*r2) * np.exp(-SQRT_5*r)
return cov
def cross_cov(ls_values, inputs_1, inputs_2, squared=False):
r2 = np.abs(dist2(ls_values, inputs_1, inputs_2))
r2 = np.where(r2==0.0, r2 + 1e-10, r2)
r = np.sqrt(r2)
cov = (1.0 + SQRT_5*r + (5.0/3.0)*r2) * np.exp(-SQRT_5*r)
return cov
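# The expression in cross_cov is the Matern 5/2 kernel with unit amplitude,
#   k(r) = (1 + sqrt(5)*r + (5/3)*r^2) * exp(-sqrt(5)*r),
# with r the distance after dividing each input dimension by its length-scale; the
# amplitude ('amp2') and the stability noise are applied in predict() below.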
def predict(gp, xstar):
x = gp.inputs
y = gp.values
mean = gp.mean.value
scale = gp.params['amp2'].value
    #The noise scale and jitter are added to emulate the sum of Spearmint's matern + scale_kernel + noise_kernel.
cov_f_f = cov(gp.params['ls'].value, xstar) * scale + np.eye(len(xstar)) * gp.stability_noise_kernel.noise.value
#cov_f_f_o = original_cov(gp.params['ls'].value, xstar) * scale + np.eye(len(xstar)) * gp.stability_noise_kernel.noise.value
#print(np.abs(cov_f_f-cov_f_f_o))
#assert np.all(np.abs(cov_f_f-cov_f_f_o)) < 1e-10)
#if np.any(np.abs(cov_f_f-cov_f_f_o) > 1e-10):
#import pdb; pdb.set_trace();
cov_y_f = cross_cov(gp.params['ls'].value, x, xstar) * scale
#cov_y_f_o = original_cross_cov(gp.params['ls'].value, x, xstar) * scale
#print(np.abs(cov_y_f-cov_y_f_o))
#assert np.all(np.abs(cov_y_f-cov_y_f_o)) < 1e-10)
#if np.any(np.abs(cov_y_f-cov_y_f_o) > 1e-10):
#import pdb; pdb.set_trace();
cov_y_y = cov(gp.params['ls'].value, x) * scale + np.eye(len(y)) * gp.stability_noise_kernel.noise.value
#cov_y_y_o = original_cov(gp.params['ls'].value, x) * scale + np.eye(len(y)) * gp.stability_noise_kernel.noise.value
#print(np.abs(cov_y_y-cov_y_y_o))
#assert np.all(np.abs(cov_y_y-cov_y_y_o)) < 1e-10)
#if np.any(np.abs(cov_y_y-cov_y_y_o) > 1e-10):
#import pdb; pdb.set_trace();
InvMat = solve(cov_y_y, cov_y_f).T
pred_mean = mean + np.dot(InvMat, y - mean)
#pred_mean = mean + np.matmul(cov_y_f ,np.matmul(np.linalg.inv(cov_y_y), y - mean))
pred_cov = cov_f_f - np.dot(InvMat, cov_y_f)
#pred_cov = cov_f_f - np.matmul(cov_y_f.T, np.matmul(np.linalg.inv(cov_y_y), cov_y_f.T))
return pred_mean, pred_cov
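# predict() follows the standard GP conditioning identities: with prior mean m,
# K_yy = cov(train, train) + noise, K_yf = cov(train, test), K_ff = cov(test, test),
#   pred_mean = m + K_fy K_yy^{-1} (y - m)
#   pred_cov  = K_ff - K_fy K_yy^{-1} K_yf
# where InvMat above equals K_fy K_yy^{-1}, computed with solve() instead of an explicit
# matrix inverse for numerical stability.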
def compute_unconstrained_variances_and_init_acq_fun(obj_models_dict, cand, con_models): #, log, fun_log):
unconstrainedVariances = dict()
constrainedVariances = dict()
acq = dict()
for obj in obj_models_dict:
unconstrainedVariances[ obj ] = predict(obj_models_dict[ obj ], cand)[ 1 ]
#fun_log('predict', log, {'obj_models_dict[ obj ]': obj_models_dict[ obj ], \
# 'cand' : cand, 'unconstrainedVariances[ obj ]': unconstrainedVariances[ obj ]})
unconstrainedVariances[ obj ] = unconstrainedVariances[ obj ] + np.eye(unconstrainedVariances[ obj ].shape[0]) * obj_models_dict[ obj ].noise_value()
for cons in con_models:
unconstrainedVariances[ cons ] = predict(con_models[ cons ], cand)[ 1 ]
#fun_log('predict', log, {'con_models[ cons ]': con_models[ cons ], \
# 'cand' : cand, 'unconstrainedVariances[ cons ]': unconstrainedVariances[ cons ]})
unconstrainedVariances[ cons ] = unconstrainedVariances[ cons ] + np.eye(unconstrainedVariances[ cons ].shape[0]) * con_models[ cons ].noise_value()
for t in unconstrainedVariances:
acq[t] = 0
return acq, unconstrainedVariances, constrainedVariances
def update_full_marginals(a):
n_obs = a['n_obs']
n_total = a['n_total']
n_pset = a['n_pset']
n_test = a['n_test']
objectives = a['objs']
constraints = a['cons']
all_tasks = objectives
all_constraints = constraints
n_objs = len(all_tasks)
n_cons = len(all_constraints)
ntask = 0
# Updating constraint distribution marginals.
#vTilde_back = defaultdict(lambda: np.zeros((n_total, n_total)))
#vTilde_cons_back = defaultdict(lambda: np.zeros((n_total, n_total)))
for cons in all_constraints:
        # Summing all the factors into the diagonal. To be rewritten.
vTilde_cons = np.diag(np.append(np.append(np.sum(a['a_c_hfhat'][ :, : , ntask ], axis = 1), \
np.sum(a['c_c_hfhat'][ :, : , ntask ], axis = 1) + a['ehfhat'][ :, ntask ]), \
np.sum(a['g_c_hfhat'][ :, : , ntask ], axis = 1)))
mTilde_cons = np.append(np.append(np.sum(a['b_c_hfhat'][ :, : , ntask ], axis = 1), \
np.sum(a['d_c_hfhat'][ :, : , ntask ], axis = 1) + a['fhfhat'][ :, ntask ]), \
np.sum(a['h_c_hfhat'][ :, : , ntask ], axis = 1))
# Natural parameter conversion and update of the marginal variance matrices.
a['Vinv_cons'][cons] = a['VpredInv_cons'][cons] + vTilde_cons
a['V_cons'][cons] = np.linalg.inv(a['VpredInv_cons'][cons] + vTilde_cons)
# Natural parameter conversion and update of the marginal mean vector.
a['m_nat_cons'][cons] = np.dot(a['VpredInv_cons'][cons], a['mPred_cons'][cons]) + mTilde_cons
a['m_cons'][cons] = np.dot(a['V_cons'][cons], a['m_nat_cons'][ cons ])
ntask = ntask + 1
ntask = 0
for obj in all_tasks:
        vTilde = np.zeros((n_total,n_total))
diagVtilde = np.identity(n_total) * np.append(np.append(np.sum(a['ahfhat'][ :, : , ntask, 0, 0 ], axis = 1), \
np.sum(a['ahfhat'][ :, : , ntask, 1, 1 ], axis = 0) + \
np.sum(a['chfhat'][ :, : , ntask, 0, 0 ], axis = 1) + \
np.sum(a['chfhat'][ :, : , ntask, 1, 1 ], axis = 0) + \
np.sum(a['ghfhat'][ :, : , ntask, 1, 1 ], axis = 0)), \
np.sum(a['ghfhat'][ :, : , ntask, 0, 0 ], axis = 1))
#Building full matrices from blocks.
block_2 = a['chfhat'][ :, : , ntask, 0, 1 ] + a['chfhat'][ :, : , ntask, 1, 0 ].T
block_2 = np.hstack([np.zeros((n_pset, n_obs)), block_2])
block_2 = np.hstack([block_2, np.zeros((n_pset, n_test))])
block_2 = np.vstack([np.zeros((n_obs, n_total)), block_2])
block_2 = np.vstack([block_2, np.zeros((n_test, n_total))])
block_3 = a['ahfhat'][ :, :, ntask, 0, 1]
block_3 = np.hstack([np.zeros((n_obs, n_obs)), block_3])
block_3 = np.hstack([block_3, np.zeros((n_obs, n_test))])
block_3 = np.vstack([block_3, np.zeros((n_pset + n_test, n_total))])
block_4 = a['ahfhat'][ :, :, ntask, 0, 1].transpose()
block_4 = np.hstack([block_4, np.zeros((n_pset, n_pset+n_test))])
block_4 = np.vstack([np.zeros((n_obs, n_total)), block_4])
block_4 = np.vstack([block_4, np.zeros((n_test, n_total))])
block_5 = a['ghfhat'][ :, :, ntask, 0, 1]
block_5 = np.hstack([np.zeros((n_test, n_obs)), block_5])
block_5 = np.hstack([block_5, np.zeros((n_test, n_test))])
block_5 = np.vstack([np.zeros((n_obs+n_pset, n_total)), block_5])
block_6 = a['ghfhat'][ :, :, ntask, 0, 1].transpose()
block_6 = np.hstack([np.zeros((n_pset, n_obs + n_pset)), block_6])
block_6 = np.vstack([np.zeros((n_obs, n_total)), block_6])
block_6 = np.vstack([block_6, np.zeros((n_test, n_total))])
#Adding to final matrix all the blocks.
vTilde += diagVtilde
vTilde += block_2
vTilde += block_3
vTilde += block_4
vTilde += block_5
vTilde += block_6
a['Vinv'][obj] = a['VpredInv'][obj] + vTilde
a['V'][obj] = np.linalg.inv(a['VpredInv'][obj] + vTilde)
mTilde = np.append(np.append(np.sum(a['bhfhat'][ :, : , ntask, 0 ], axis = 1),
np.sum(a['bhfhat'][ :, : , ntask, 1 ], axis = 0) + np.sum(a['hhfhat'][ :, : , ntask, 1 ], axis = 0) +\
np.sum(a['dhfhat'][ :, : , ntask, 0 ], axis = 1) + np.sum(a['dhfhat'][ :, : , ntask, 1 ], axis = 0)), \
np.sum(a['hhfhat'][ :, : , ntask, 0 ], axis = 1))
a['m_nat'][obj] = np.dot(a['VpredInv'][obj], a['mPred'][obj]) + mTilde
a['m'][obj] = np.dot(a['V'][obj], a['m_nat'][ obj ])
ntask = ntask + 1
return a
def get_test_predictive_distributions(a):
n_obs = a['n_obs']
n_pset = a['n_pset']
n_test = a['n_test']
n_total = a['n_total']
q = len(a['objs'])
c = len(a['cons'])
predictive_distributions = {
'mf' : defaultdict(lambda: np.zeros(n_test)),
'vf' : defaultdict(lambda: np.zeros((n_test, n_test))),
'mc' : defaultdict(lambda: np.zeros(n_test)),
'vc' : defaultdict(lambda: np.zeros((n_test, n_test))),
}
for obj in a['objs'].keys():
predictive_distributions['mf'][ obj ] = a['m'][ obj ][ n_obs + n_pset : n_total ]
predictive_distributions['vf'][ obj ] = a['V'][ obj ][ n_obs + n_pset : n_total , n_obs + n_pset : n_total ]
for cons in a['cons'].keys():
predictive_distributions['mc'][ cons ] = a['m_cons'][ cons ][ n_obs + n_pset : n_total ]
predictive_distributions['vc'][ cons ] = a['V_cons'][ cons ][ n_obs + n_pset : n_total , n_obs + n_pset : n_total ]
return predictive_distributions, a
def compute_PPESMOC_approximation(predictionEP, obj_models_dict, con_models, unconstrainedVariances, constrainedVariances, acq):
predictionEP_obj = predictionEP[ 'vf' ]
predictionEP_cons = predictionEP[ 'vc' ]
# DHL changed fill_diag, because that was updating the a structure and screwing things up later on
for obj in obj_models_dict:
predictionEP_obj[ obj ] = predictionEP_obj[ obj ] + np.eye(predictionEP_obj[ obj ].shape[ 0 ]) * obj_models_dict[ obj ].noise_value()
constrainedVariances[ obj ] = predictionEP_obj[ obj ]
for cons in con_models:
        predictionEP_cons[ cons ] = predictionEP_cons[ cons ] + np.eye(predictionEP_cons[ cons ].shape[ 0 ]) * con_models[ cons ].noise_value()
constrainedVariances[ cons ] = predictionEP_cons[ cons ]
# We only care about the variances because the means do not affect the entropy
    # The summation of the acquisition over the tasks (t) is done in a higher-level method. Do not do it here.
for t in unconstrainedVariances:
# DHL replaced np.log(np.linalg.det()) to avoid precision errors
value = 0.5 * np.linalg.slogdet(unconstrainedVariances[t])[ 1 ] - 0.5 * np.linalg.slogdet(constrainedVariances[t])[ 1 ]
# We set negative values of the acquisition function to zero because the
# entropy cannot be increased when conditioning
        value = np.maximum(value, 0)
acq[t] += value
return acq
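# Background for the value computed above (illustrative): the differential entropy of a
# d-dimensional Gaussian N(m, V) is 0.5 * log((2*pi*e)^d * det(V)), so the entropy
# reduction from conditioning is 0.5 * (logdet(V_unconstrained) - logdet(V_constrained));
# the (2*pi*e)^d terms cancel because both covariances have the same dimension.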
def update_full_Factors_only_test_factors(a, damping, minimize=True, no_negative_variances_nor_nands = False, no_negatives = True):
# used to switch between minimizing and maximizing
sgn = -1.0 if minimize else 1.0
# We update the h factors
all_tasks = a['objs']
all_constraints = a['cons']
n_obs = a['n_obs']
n_pset = a['n_pset']
n_test = a['n_test']
n_total = a['n_total']
q = a['q']
c = a['c']
alpha = np.zeros(a['q'])
s = np.zeros(a['q'])
ratio_cons = np.zeros(c)
# First we update the factors corresponding to the observed data
# We compute an "old" distribution
# Data structures for objective npset ntest cavities (a, b).
m_pset = np.array([])
#m_pset = np.zeros((q, n_pset, n_test))
m_test = np.array([])
#m_test = np.zeros((q, n_pset, n_test))
v_pset = np.array([])
#v_pset = np.zeros((q, n_pset, n_test))
v_test = np.array([])
#v_test = np.zeros((q, n_pset, n_test))
v_cov = np.array([])
#v_cov = np.zeros((q, n_pset, n_test))
# Data structures for constraint npset nobs cavities (c_a, c_b).
c_m = np.array([])
#c_m = np.zeros((c, n_pset, n_test))
c_v = np.array([])
#c_v = np.zeros((c, n_pset, n_test))
# Update marginals: a['m'] , a['V']
#n_task = 0
for obj in all_tasks: #OK
m_test = np.append(m_test, np.tile(a['m'][ obj ][ n_obs + n_pset : n_total ], n_pset).reshape((n_pset, n_test))) #OK
#m_test[ n_task, :, : ] = np.tile(a['m'][ obj ][ n_obs + n_pset : n_total ], n_pset).reshape((n_pset, n_test))
m_pset = np.append(m_pset, np.tile(a['m'][ obj ][ n_obs : n_obs + n_pset ], n_test).reshape((n_test, n_pset)).T) #OK
#m_pset[ n_task, :, : ] = np.tile(a['m'][ obj ][ n_obs : n_obs + n_pset ], n_test).reshape((n_test, n_pset)).T
v_cov = np.append(v_cov, a['V'][ obj ][ n_obs : n_obs + n_pset, n_obs + n_pset : n_total ])
#v_cov[ n_task, :, : ] = a['V'][ obj ][ n_obs : n_obs + n_pset, n_obs + n_pset : n_total ]
        v_test = np.append(v_test, np.tile(np.diag(a['V'][ obj ])[ n_obs + n_pset : n_total ], n_pset).reshape((n_pset, n_test))) #CASE 1: OK.
#v_test[ n_task, :, : ] = np.tile(np.diag(a['V'][ obj ])[ n_obs + n_pset : n_total ], n_pset).reshape((n_pset, n_test))
        v_pset = np.append(v_pset, np.tile(np.diag(a['V'][ obj ])[ n_obs : n_obs + n_pset ], n_test).reshape((n_test, n_pset)).T) #CASE 2: OK.
#v_pset[ n_task, :, : ] = np.tile(np.diag(a['V'][ obj ])[ n_obs : n_obs + n_pset ], n_test).reshape((n_test, n_pset)).T
#n_task += 1
m_test = m_test.reshape((q, n_pset, n_test))
m_pset = m_pset.reshape((q, n_pset, n_test))
v_cov = v_cov.reshape((q, n_pset, n_test))
v_test = v_test.reshape((q, n_pset, n_test))
v_pset = v_pset.reshape((q, n_pset, n_test))
#n_task = 0
for cons in all_constraints: #OK
c_m = np.append(c_m, np.tile(a['m_cons'][ cons ][ n_obs + n_pset : n_total ], n_pset))
#c_m[ n_task, :, : ] = a['m_cons'][ cons ][ n_obs + n_pset : n_total ]
c_v = np.append(c_v, np.tile(np.diag(a['V_cons'][ cons ])[ n_obs + n_pset : n_total ], n_pset))
#c_v[ n_task, :, : ] = np.diag(a['V_cons'][ cons ])[ n_obs + n_pset : n_total ]
#n_task += 1
c_m = c_m.reshape((c, n_pset, n_test)) #OK
c_v = c_v.reshape((c, n_pset, n_test)) #OK
vTilde_test = a['ghfhat'][ :, :, :, 0, 0 ].T #OK
vTilde_pset = a['ghfhat'][ :, :, :, 1, 1 ].T #OK
vTilde_cov = a['ghfhat'][ :, :, :, 0, 1 ].T #OK
mTilde_test = a['hhfhat'][ :, :, :, 0 ].T #OK
mTilde_pset = a['hhfhat'][ :, :, :, 1 ].T #OK
vTilde_test_cons = a['g_c_hfhat'][:, :, :].T #OK
mTilde_test_cons = a['h_c_hfhat'][:, :, :].T #OK
# Obtaining cavities.
inv_v_test, inv_v_pset, inv_v_cov = two_by_two_symmetric_matrix_inverse(v_test, v_pset, v_cov) #OK
inv_c_v = 1.0 / c_v #OK
inv_vOld_test = inv_v_test - vTilde_test #OK
inv_vOld_pset = inv_v_pset - vTilde_pset #OK
inv_vOld_cov = inv_v_cov - vTilde_cov #OK
inv_c_vOld = inv_c_v - vTilde_test_cons #OK
vOld_test, vOld_pset, vOld_cov = two_by_two_symmetric_matrix_inverse(inv_vOld_test, inv_vOld_pset, inv_vOld_cov) #OK
c_vOld = 1.0 / inv_c_vOld #OK
mOld_test, mOld_pset = two_by_two_symmetric_matrix_product_vector(inv_v_test, inv_v_pset, inv_v_cov, m_test, m_pset) #OK
mOld_test = mOld_test - mTilde_test #OK
mOld_pset = mOld_pset - mTilde_pset #OK
mOld_test, mOld_pset = two_by_two_symmetric_matrix_product_vector(vOld_test, vOld_pset, vOld_cov, mOld_test, mOld_pset) #OK
c_mOld = c_vOld * (c_m / c_v - mTilde_test_cons)
# Computing factors.
s = vOld_pset + vOld_test - 2 * vOld_cov
s_cons = c_vOld
if np.any(vOld_pset < 0):
#raise npla.linalg.LinAlgError("Negative variance in the sqrt!")
vOld_pset = np.where(vOld_pset < 0, -vOld_pset, vOld_pset)
        print('Careful!!! Negative variances have appeared before the sqrt!')
if np.any(vOld_test < 0):
vOld_test = np.where(vOld_test < 0, -vOld_test, vOld_test)
#raise npla.linalg.LinAlgError("Negative variance in the sqrt!")
        print('Careful!!! Negative variances have appeared before the sqrt!')
if np.any(c_vOld < 0):
c_vOld = np.where(c_vOld < 0, -c_vOld, c_vOld)
#raise npla.linalg.LinAlgError("Negative value in the sqrt!")
        print('Careful!!! Negative variances have appeared before the sqrt!')
alpha_cons = c_mOld / np.sqrt(c_vOld) #OK
scale = 1.0 - 1e-4
while np.any(s / (vOld_pset + vOld_test) < 1e-6): #OK
scale = scale**2
s = vOld_pset + vOld_test - 2 * vOld_cov * scale
s = np.where(s==0.0, 1.0, s)
ss = np.sqrt(s)
ss = np.where(ss==1.0, 1e-15, ss)
alpha = (mOld_test - mOld_pset) / ss * sgn #OK
log_phi = logcdf_robust(alpha)
log_phi_cons = logcdf_robust(alpha_cons) #OK
logZ_orig = log_1_minus_exp_x(np.sum(log_phi, axis = 0)) #OK
    #This block of code has to be replaced by an np.where.
logZ_orig = np.where(logZ_orig == -np.inf, logcdf_robust(-np.min(alpha, axis = 0)), logZ_orig) #OK
logZ_term1 = np.sum(log_phi_cons, axis = 0) + logZ_orig #OK
logZ_term2 = log_1_minus_exp_x(np.sum(log_phi_cons, axis = 0)) #OK
logZ_term2 = np.where(logZ_term2 == -np.inf, logcdf_robust(-np.min(alpha_cons, axis = 0)), logZ_term2) #OK
max_value = np.maximum(logZ_term1, logZ_term2) #OK
logZ = my_log(np.exp(logZ_term1 - max_value) + np.exp(logZ_term2 - max_value)) + max_value
for i in range(q-1):
logZ = np.hstack((logZ, my_log(np.exp(logZ_term1 - max_value) + np.exp(logZ_term2 - max_value)) + max_value))
logZ = logZ.reshape((n_pset, q, n_test)).swapaxes(0, 1)
#logZ = np.tile(np.log(np.exp(logZ_term1 - max_value) + np.exp(logZ_term2 - max_value)) + \
    #max_value, q).reshape((n_pset, q, n_test)).swapaxes(0, 1) #SUSPICIOUS, CAN BE SIMULATED.
logZ_cons = my_log(np.exp(logZ_term1 - max_value) + np.exp(logZ_term2 - max_value)) + max_value
for i in range(c-1):
logZ_cons = np.hstack((logZ_cons, my_log(np.exp(logZ_term1 - max_value) + np.exp(logZ_term2 - max_value)) + max_value))
logZ_cons = logZ_cons.reshape((n_pset, c, n_test)).swapaxes(0, 1)
#logZ_cons = np.tile(np.log(np.exp(logZ_term1 - max_value) + np.exp(logZ_term2 - max_value)) + \
#max_value, c).reshape((n_pset, c, n_test)).swapaxes(0, 1) #OK
log_phi_sum = np.sum(log_phi, axis = 0)
for i in range(q-1):
log_phi_sum = np.hstack((log_phi_sum, np.sum(log_phi, axis = 0)))
log_phi_sum = log_phi_sum.reshape((n_pset, q, n_test)).swapaxes(0, 1)
    #log_phi_sum = np.tile(np.sum(log_phi, axis = 0), q).reshape((n_pset, q, n_test)).swapaxes(0, 1) #SUSPICIOUS, CAN BE SIMULATED.
log_phi_sum_cons = np.sum(log_phi_cons, axis = 0)
for i in range(q-1):
log_phi_sum_cons = np.hstack((log_phi_sum_cons, np.sum(log_phi_cons, axis = 0)))
log_phi_sum_cons = log_phi_sum_cons.reshape((n_pset, q, n_test)).swapaxes(0, 1)
#log_phi_sum_cons = np.tile(np.sum(log_phi_cons, axis = 0), q).reshape((n_pset, q, n_test)).swapaxes(0, 1) #OK
ratio = - np.exp(sps.norm.logpdf(alpha) - logZ + log_phi_sum - logcdf_robust(alpha) + log_phi_sum_cons)
logZ_orig_cons = logZ_orig
for i in range(c-1):
logZ_orig_cons = np.hstack((logZ_orig_cons, logZ_orig))
logZ_orig_cons = logZ_orig_cons.reshape((n_pset, c, n_test)).swapaxes(0, 1)
#logZ_orig_cons = np.tile(logZ_orig, c).reshape((n_pset, c, n_test)).swapaxes(0, 1) #OK?
log_phi_sum_cons = np.sum(log_phi_cons, axis = 0)
for i in range(c-1):
log_phi_sum_cons = np.hstack((log_phi_sum_cons, np.sum(log_phi_cons, axis = 0)))
log_phi_sum_cons = log_phi_sum_cons.reshape((n_pset, c, n_test)).swapaxes(0, 1)
#log_phi_sum_cons = np.tile(np.sum(log_phi_cons, axis = 0), c).reshape((n_pset, c, n_test)).swapaxes(0, 1) #OK
ratio_cons = np.exp(sps.norm.logpdf(alpha_cons) - logZ_cons + logZ_orig_cons + log_phi_sum_cons - logcdf_robust(alpha_cons)) - \
np.exp(sps.norm.logpdf(alpha_cons) - logZ_cons + log_phi_sum_cons - logcdf_robust(alpha_cons)) #OK
s = np.where(s==0.0, 1.0, s)
ss = np.sqrt(s)
ss = np.where(ss==1.0, 1e-15, ss)
dlogZdmfOld_test = ratio / ss * sgn
dlogZdmfOld_pset = ratio / ss * -1.0 * sgn
dlogZdmfOld_test2 = - ratio / s * (alpha + ratio)
dlogZdmfOld_pset2 = - ratio / s * (alpha + ratio)
dlogZdmfOld_cov2 = - ratio / s * (alpha + ratio) * -1.0
s_cons = np.where(s_cons==0.0, 1.0, s_cons)
sc = np.sqrt(s_cons)
sc = np.where(sc==1.0, 1e-15, sc)
dlogZdmcOld = ratio_cons / sc #OK
dlogZdmcOld2 = - ratio_cons / s_cons * (alpha_cons + ratio_cons) #OK
a_VfOld_times_dlogZdmfOld2 = vOld_test * dlogZdmfOld_test2 + vOld_cov * dlogZdmfOld_cov2 + 1.0
b_VfOld_times_dlogZdmfOld2 = vOld_test * dlogZdmfOld_cov2 + vOld_cov * dlogZdmfOld_pset2
c_VfOld_times_dlogZdmfOld2 = vOld_cov * dlogZdmfOld_test2 + vOld_pset * dlogZdmfOld_cov2
d_VfOld_times_dlogZdmfOld2 = vOld_cov * dlogZdmfOld_cov2 + vOld_pset * dlogZdmfOld_pset2 + 1.0
a_inv, b_inv, c_inv, d_inv = two_by_two_matrix_inverse(a_VfOld_times_dlogZdmfOld2, b_VfOld_times_dlogZdmfOld2, \
c_VfOld_times_dlogZdmfOld2, d_VfOld_times_dlogZdmfOld2)
vTilde_test_new = - (dlogZdmfOld_test2 * a_inv + dlogZdmfOld_cov2 * c_inv)
vTilde_pset_new = - (dlogZdmfOld_cov2 * b_inv + dlogZdmfOld_pset2 * d_inv)
vTilde_cov_new = - (dlogZdmfOld_test2 * b_inv + dlogZdmfOld_cov2 * d_inv)
v_1, v_2 = two_by_two_symmetric_matrix_product_vector(dlogZdmfOld_test2, \
dlogZdmfOld_pset2, dlogZdmfOld_cov2, mOld_test, mOld_pset) #OK
v_1 = dlogZdmfOld_test - v_1 #OK
v_2 = dlogZdmfOld_pset - v_2 #OK
mTilde_test_new = v_1 * a_inv + v_2 * c_inv #OK
mTilde_pset_new = v_1 * b_inv + v_2 * d_inv #OK
vTilde_cons = - dlogZdmcOld2 / (1.0 + dlogZdmcOld2 * c_vOld) #OK
mTilde_cons = (dlogZdmcOld - c_mOld * dlogZdmcOld2) / (1.0 + dlogZdmcOld2 * c_vOld) #OK
if no_negative_variances_nor_nands == True: #OK full.
#finite = np.logical_and(np.logical_and(np.logical_and(np.logical_and(np.isfinite(vTilde_test_new), np.isfinite(vTilde_pset_new)), \
#np.isfinite(vTilde_cov_new)), np.isfinite(mTilde_test_new)), np.isfinite(mTilde_pset_new))
#c_finite = np.logical_and(np.isfinite(vTilde_cons), np.isfinite(mTilde_cons))
#neg1 = np.where(np.logical_or(np.logical_not(finite), vTilde_test_new < 0))
#neg2 = np.where(np.logical_or(np.logical_not(finite), vTilde_pset_new < 0))
#c_neg = np.where(np.logical_or(np.logical_not(c_finite), vTilde_cons < 0))
o_cond = np.logical_or(vTilde_test_new < 0, vTilde_pset_new < 0, \
np.logical_not(np.logical_and(np.logical_and(np.logical_and(np.logical_and( \
np.isfinite(vTilde_test_new), np.isfinite(vTilde_pset_new)), np.isfinite(vTilde_cov_new)), \
np.isfinite(mTilde_test_new)), np.isfinite(mTilde_pset_new))))
c_cond = np.logical_or(np.logical_not(np.logical_and(np.isfinite(vTilde_cons), np.isfinite(mTilde_cons))), vTilde_cons < 0)
vTilde_test_new = np.where(o_cond, 0.0, vTilde_test_new)
vTilde_pset_new = np.where(o_cond, 0.0, vTilde_pset_new)
vTilde_cov_new = np.where(o_cond, 0.0, vTilde_cov_new)
mTilde_test_new = np.where(o_cond, 0.0, mTilde_test_new)
mTilde_pset_new = np.where(o_cond, 0.0, mTilde_pset_new)
vTilde_cons = np.where(c_cond, 0.0, vTilde_cons)
mTilde_cons = np.where(c_cond, 0.0, mTilde_cons)
#vTilde_test_new[ neg1 ] = 0.0
#vTilde_test_new[ neg2 ] = 0.0
#vTilde_pset_new[ neg1 ] = 0.0
#vTilde_pset_new[ neg2 ] = 0.0
#vTilde_cov_new[ neg1 ] = 0.0
#vTilde_cov_new[ neg2 ] = 0.0
#mTilde_test_new[ neg1 ] = 0.0
#mTilde_test_new[ neg2 ] = 0.0
#mTilde_pset_new[ neg1 ] = 0.0
#mTilde_pset_new[ neg2 ] = 0.0
#vTilde_cons[ c_neg ] = 0.0
#mTilde_cons[ c_neg ] = 0.0
# We do the actual update
g_c_hfHatNew = vTilde_cons #OK
h_c_hfHatNew = mTilde_cons #OK
ghfhat = np.array([])
ghfhata = np.array([])
hhfhat = np.array([])
g00 = vTilde_test_new.T * damping + (1 - damping) * a['ghfhat'][ :, :, :, 0, 0 ]
g01 = vTilde_cov_new.T * damping + (1 - damping) * a['ghfhat'][ :, :, :, 0, 1 ]
g10 = vTilde_cov_new.T * damping + (1 - damping) * a['ghfhat'][ :, :, :, 1, 0 ]
g11 = vTilde_pset_new.T * damping + (1 - damping) * a['ghfhat'][ :, :, :, 1, 1 ]
h0 = mTilde_test_new.T * damping + (1 - damping) * a['hhfhat'][ :, :, :, 0 ] #OK
h1 = mTilde_pset_new.T * damping + (1 - damping) * a['hhfhat'][ :, :, :, 1 ] #OK
#for tp in range(n_test):
# for pp in range(n_pset):
# for qp in range(q):
    # #IT IS A TILE! According to the autograd error trace, a TILE seems to be causing serious trouble, and it is related to ghfhat.
# ghfhat = np.append(ghfhat, g00[tp, pp, qp])
# ghfhat = np.append(ghfhat, g01[tp, pp, qp])
# ghfhat = np.append(ghfhat, g10[tp, pp, qp])
# ghfhat = np.append(ghfhat, g11[tp, pp, qp])
# hhfhat = np.append(hhfhat, np.array([h0[tp, pp, qp], h1[tp, pp, qp]])) #OK
ghfhat_new = np.stack((np.stack((g00, g01), axis = 3), np.stack((g01, g11), axis = 3)), axis = 3)
hhfhat_new = np.stack((h0, h1), axis = 3)
#ghfhat = ghfhat.reshape((n_test, n_pset, q, 2, 2))
#ghfhat = ghfhat.reshape((n_test, n_pset, q, 2, 2))
#hhfhat = hhfhat.reshape((n_test, n_pset, q, 2)) #OK
#a['ghfhat'] = ghfhat
#a['hhfhat'] = hhfhat
a['ghfhat'] = ghfhat_new
a['hhfhat'] = hhfhat_new
#a['ghfhat'][ :, :, :, 0, 0 ] = vTilde_test_new.T * damping + (1 - damping) * a['ghfhat'][ :, :, :, 0, 0 ]
#a['ghfhat'][ :, :, :, 1, 1 ] = vTilde_pset_new.T * damping + (1 - damping) * a['ghfhat'][ :, :, :, 1, 1 ]
#a['ghfhat'][ :, :, :, 0, 1 ] = vTilde_cov_new.T * damping + (1 - damping) * a['ghfhat'][ :, :, :, 0, 1 ]
#a['ghfhat'][ :, :, :, 1, 0 ] = vTilde_cov_new.T * damping + (1 - damping) * a['ghfhat'][ :, :, :, 1, 0 ]
#a['hhfhat'][ :, :, :, 0 ] = mTilde_test_new.T * damping + (1 - damping) * a['hhfhat'][ :, :, :, 0 ]
#a['hhfhat'][ :, :, :, 1 ] = mTilde_pset_new.T * damping + (1 - damping) * a['hhfhat'][ :, :, :, 1 ]
a['g_c_hfhat'] = g_c_hfHatNew.T * damping + (1 - damping) * a['g_c_hfhat'][ :, :, : ]
#a['g_c_hfhat'][ :, :, : ] = g_c_hfHatNew.T * damping + (1 - damping) * a['g_c_hfhat'][ :, :, : ]
a['h_c_hfhat'] = h_c_hfHatNew.T * damping + (1 - damping) * a['h_c_hfhat'][ :, :, : ]
#a['h_c_hfhat'][ :, :, : ] = h_c_hfHatNew.T * damping + (1 - damping) * a['h_c_hfhat'][ :, :, : ]
return a
def compute_acq_fun_wrt_test_points(X_test, obj_model_dict, con_models_dict, pareto_set, info_gps, tasks):#, log, fun_log):
#Execute necessary functionality for the test factors update and the recomputation of the marginals and PPESMOC.
acq, unconstrainedVariances, constrainedVariances = compute_unconstrained_variances_and_init_acq_fun \
(obj_model_dict, X_test, con_models_dict)#, log, fun_log)
#Pareto set samples loop.
acqs = {}
for ps in pareto_set.keys():
pareto_set_sample = pareto_set[ps]
        info_gps_ps = info_gps[ps] #Verify that the factors we start from are the ones the normal execution ends with.
X, n_obs, n_pset, n_test, n_total = build_set_of_points_that_conditions_GPs(obj_model_dict, con_models_dict, \
pareto_set_sample, X_test)
mPred, Vpred, VpredInv, mPred_cons, Vpred_cons, VpredInv_cons = \
build_unconditioned_predictive_distributions(obj_model_dict, con_models_dict, X)
#Modify information of a according to previous computations.
q = len(obj_model_dict)
c = len(con_models_dict)
info_gps_ps['ghfhat'] = np.zeros((n_test, n_pset, q, 2, 2))
info_gps_ps['hhfhat'] = np.zeros((n_test, n_pset, q, 2))
info_gps_ps['g_c_hfhat'] = np.zeros((n_test, n_pset, c))
info_gps_ps['h_c_hfhat'] = np.zeros((n_test, n_pset, c))
info_gps_ps['m'] = defaultdict(lambda: np.zeros(n_total))
info_gps_ps['m_nat'] = defaultdict(lambda: np.zeros(n_total))
info_gps_ps['V'] = defaultdict(lambda: np.zeros((n_total, n_total)))
info_gps_ps['Vinv'] = defaultdict(lambda: np.zeros((n_total, n_total)))
info_gps_ps['m_cons'] = defaultdict(lambda: np.zeros(n_total))
info_gps_ps['m_nat_cons'] = defaultdict(lambda: np.zeros(n_total))
info_gps_ps['V_cons'] = defaultdict(lambda: np.zeros((n_total, n_total)))
info_gps_ps['Vinv_cons'] = defaultdict(lambda: np.zeros((n_total, n_total)))
info_gps_ps['n_obs'] = n_obs
info_gps_ps['n_pset'] = n_pset
info_gps_ps['n_test'] = n_test
info_gps_ps['n_total'] = n_total
info_gps_ps['mPred'] = mPred
info_gps_ps['VPred'] = Vpred
info_gps_ps['VpredInv'] = VpredInv
info_gps_ps['mPred_cons'] = mPred_cons
info_gps_ps['Vpred_cons'] = Vpred_cons
info_gps_ps['VpredInv_cons'] = VpredInv_cons
info_gps_ps['X'] = X
        #I think more things in info_gps may need to be changed, perhaps the test factors? Not sure.
        #To be safe, leave them as they are.
#Execute EP modification of the test factors and the PPESMOC approximation.
        #First we have to do an EP update of the marginals.
info_gps_ps = update_full_marginals(info_gps_ps)
info_gps_ps = update_full_Factors_only_test_factors(info_gps_ps, 0.1, \
minimize = True, no_negative_variances_nor_nands = True)
info_gps_ps = update_full_marginals(info_gps_ps)
predictionEP = get_test_predictive_distributions(info_gps_ps)[0]
        #The sum of the acquisitions has to be returned as well.
acqs[ps] = compute_PPESMOC_approximation(predictionEP, obj_model_dict, con_models_dict, \
unconstrainedVariances, constrainedVariances, acq)
    #Sum the acquisitions over the Pareto set samples and divide them by the number of Pareto points.
    final_acqs_dict = dict.fromkeys(acqs[next(iter(acqs))].keys())
num_samples = len(acqs)
for ps_sample in acqs:
ps_acq = acqs[ps_sample]
for bb_acq in ps_acq:
if final_acqs_dict[bb_acq] is None:
final_acqs_dict[bb_acq] = ps_acq[bb_acq]
else:
final_acqs_dict[bb_acq] += ps_acq[bb_acq]
final_acqs_dict.update((x, y / num_samples) for x, y in final_acqs_dict.items())
#fun_log('BB acqs Autograd', log, {'acq' : final_acqs_dict})
total_acq = 0.0
for task in tasks:
total_acq += final_acqs_dict[ task ]
    #Sum the acquisitions of each BB (black box).
#fun_log('total_acq Autograd', log, {'acq' : total_acq})
return total_acq
| 48.334168
| 165
| 0.575106
|
1f585fd19eccfea1eda7844562b17214f2448a0d
| 366
|
py
|
Python
|
setup.py
|
dbogdanov/beets-mpdadd
|
c6ca520714b0decffb66471188cfe260c89f4cd2
|
[
"MIT"
] | 6
|
2015-04-02T01:46:24.000Z
|
2021-10-11T08:23:56.000Z
|
setup.py
|
dbogdanov/beets-mpdadd
|
c6ca520714b0decffb66471188cfe260c89f4cd2
|
[
"MIT"
] | null | null | null |
setup.py
|
dbogdanov/beets-mpdadd
|
c6ca520714b0decffb66471188cfe260c89f4cd2
|
[
"MIT"
] | 1
|
2021-10-09T02:07:23.000Z
|
2021-10-09T02:07:23.000Z
|
from setuptools import setup
setup(
name='beets-mpdadd',
version='0.2',
description='beets plugin that adds query results to the current MPD playlist',
author='Daniel Greve',
author_email='danielgreve@users.noreply.github.com',
license='MIT',
platforms='ALL',
packages=['beetsplug'],
install_requires=['beets', 'python-mpd2'],
)
| 26.142857
| 83
| 0.68306
|
2827f813979752e4d8bf70c94402c779eabb7757
| 3,198
|
py
|
Python
|
jobs/models.py
|
MahmoudSafan/job-seeker
|
00c0fbbfd05c4ef7f78da87c254fedb51f018cb9
|
[
"MIT"
] | 1
|
2021-05-22T09:07:44.000Z
|
2021-05-22T09:07:44.000Z
|
jobs/models.py
|
MahmoudSafan/job-seeker
|
00c0fbbfd05c4ef7f78da87c254fedb51f018cb9
|
[
"MIT"
] | null | null | null |
jobs/models.py
|
MahmoudSafan/job-seeker
|
00c0fbbfd05c4ef7f78da87c254fedb51f018cb9
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils.text import slugify
# Create your models here.
jobType = (
("Architecture and Engineering Occupations","Architecture and Engineering Occupations"),
("Arts, Design, Entertainment, Sports, and Media Occupations","Arts, Design, Entertainment, Sports, and Media Occupations"),
("Building and Grounds Cleaning and Maintenance Occupations","Building and Grounds Cleaning and Maintenance Occupations"),
("Business and Financial Operations Occupations","Business and Financial Operations Occupations"),
("Community and Social Services Occupations","Community and Social Services Occupations"),
("Computer and Mathematical Occupations","Computer and Mathematical Occupations"),
("Construction and Extraction Occupations","Construction and Extraction Occupations"),
("Education, Training, and Library Occupations","Education, Training, and Library Occupations"),
("Farming, Fishing, and Forestry Occupations","Farming, Fishing, and Forestry Occupations"),
("Food Preparation and Serving Related Occupations","Food Preparation and Serving Related Occupations"),
("Healthcare Practitioners and Technical Occupations","Healthcare Practitioners and Technical Occupations"),
("Healthcare Support Occupations","Healthcare Support Occupations"),
("Installation, Maintenance, and Repair Occupations","Installation, Maintenance, and Repair Occupations"),
("Legal Occupations","Legal Occupations"),
("Life, Physical, and Social Science Occupations","Life, Physical, and Social Science Occupations"),
("Management Occupations","Management Occupations")
)
time =(
("part-time","part-time"),
("full-time","full-time")
)
def image_upload(instance, fileName):  # customize the uploaded image file path
imageName, extention = fileName.split(".")
return "jobImages/%s.%s"%(instance.id,extention)
class job(models.Model):
id = models.AutoField(primary_key= True,serialize=True)
title = models.CharField(max_length= 50 ,null= False)
gender = models.CharField(max_length=50, null = False)
job_type = models.CharField(max_length = 200,choices=jobType,default="Other")
description = models.TextField(max_length= 500, null = False)
location = models.CharField(max_length= 70, null = False)
vacancy = models.CharField(max_length = 200,choices=time,null=False)
salary = models.FloatField(default=0.0, null = False)
experiance = models.CharField(max_length = 50, null = False)
publishedAt = models.DateTimeField(auto_now_add=True)
slug = models.SlugField(blank= True, null= True)
image = models.ImageField(upload_to =image_upload)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
super(job,self).save(*args,**kwargs)
'''
class Apply(models.Model):
    name = models.CharField(max_length=50, blank=True, null=True)
    email = models.CharField(max_length=50, blank=True, null=True)
website = models.URLField( max_length=200)
cvs = models.FileField(upload_to="cvs/")
description = models.TextField(max_length=500, null= True)
'''
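# Illustrative sketch of the helpers above (runs only when executed directly).
# The fake instance below only mimics the "id" attribute that image_upload reads;
# it is not a saved job instance, and slugify works without configured settings.
if __name__ == "__main__":
    from types import SimpleNamespace
    assert slugify("Senior Backend Engineer") == "senior-backend-engineer"
    assert image_upload(SimpleNamespace(id=7), "photo.png") == "jobImages/7.png"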
| 43.216216
| 128
| 0.726079
|
48510a469f93c17d433e421704706169a0fe4bc0
| 2,736
|
py
|
Python
|
utils/KTS/cpd_auto.py
|
Lorna-Liu/ultrasound_vsumm_RL
|
469214c6c9ef7fb8ecdeb29b831307b5be877d5a
|
[
"MIT"
] | 17
|
2020-09-12T03:28:42.000Z
|
2021-11-15T12:10:31.000Z
|
utils/KTS/cpd_auto.py
|
Lorna-Liu/ultrasound_vsumm_RL
|
469214c6c9ef7fb8ecdeb29b831307b5be877d5a
|
[
"MIT"
] | 3
|
2020-10-16T08:52:05.000Z
|
2022-03-22T02:59:55.000Z
|
utils/KTS/cpd_auto.py
|
Lorna-Liu/ultrasound_vsumm_RL
|
469214c6c9ef7fb8ecdeb29b831307b5be877d5a
|
[
"MIT"
] | 1
|
2021-06-02T11:41:12.000Z
|
2021-06-02T11:41:12.000Z
|
import numpy as np
from KTS.cpd_nonlin import cpd_nonlin
def cpd_auto(K, ncp, vmax, desc_rate=1, **kwargs):
"""Main interface
Detect change points automatically selecting their number
K - kernel between each pair of frames in video
ncp - maximum ncp
vmax - special parameter
Optional arguments:
lmin - minimum segment length
lmax - maximum segment length
desc_rate - rate of descriptor sampling (vmax always corresponds to 1x)
Note:
- cps are always calculated in subsampled coordinates irrespective to
desc_rate
- lmin and m should be in agreement
---
Returns: (cps, costs)
cps - best selected change-points
costs - costs for 0,1,2,...,m change-points
Memory requirement: ~ (3*N*N + N*ncp)*4 bytes ~= 16 * N^2 bytes
    That is 1.6 GB for N=10000.
"""
m = ncp #maximum ncp
(_, scores) = cpd_nonlin(K, m, backtrack=False, **kwargs)
N = K.shape[0]
N2 = N*desc_rate # length of the video before subsampling
penalties = np.zeros(m+1)
# Prevent division by zero (in case of 0 changes)
ncp = np.arange(1, m+1)
penalties[1:] = (vmax*ncp/(2.0*N2))*(np.log(float(N2)/ncp)+1)
costs = scores/float(N) + penalties
m_best = np.argmin(costs)
(cps, scores2) = cpd_nonlin(K, m_best, **kwargs)
return (cps, costs)
# ------------------------------------------------------------------------------
# Extra functions (currently not used)
def estimate_vmax(K_stable):
"""K_stable - kernel between all frames of a stable segment"""
n = K_stable.shape[0]
vmax = np.trace(centering(K_stable)/n)
return vmax
def centering(K):
"""Apply kernel centering"""
mean_rows = np.mean(K, 1)[:, np.newaxis]
return K - mean_rows - mean_rows.T + np.mean(mean_rows)
def eval_score(K, cps):
""" Evaluate unnormalized empirical score
(sum of kernelized scatters) for the given change-points """
N = K.shape[0]
cps = [0] + list(cps) + [N]
V1 = 0
V2 = 0
for i in range(len(cps)-1):
K_sub = K[cps[i]:cps[i+1], :][:, cps[i]:cps[i+1]]
V1 += np.sum(np.diag(K_sub))
V2 += np.sum(K_sub) / float(cps[i+1] - cps[i])
return (V1 - V2)
def eval_cost(K, cps, score, vmax):
""" Evaluate cost function for automatic number of change points selection
K - kernel between all frames
cps - selected change-points
score - unnormalized empirical score (sum of kernelized scatters)
vmax - vmax parameter"""
N = K.shape[0]
penalty = (vmax*len(cps)/(2.0*N))*(np.log(float(N)/len(cps))+1)
return score/float(N) + penalty
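# Illustrative usage sketch, assuming this module's KTS package (cpd_nonlin) is on
# the import path. The frame descriptors are synthetic, with one clear change at
# frame 50, so cpd_auto should typically recover a change point near that index.
if __name__ == "__main__":
    X = np.concatenate([np.random.randn(50, 8), np.random.randn(50, 8) + 5.0])
    K = np.dot(X, X.T)  # linear kernel between frame descriptors
    cps, costs = cpd_auto(K, ncp=5, vmax=1.0)
    print("detected change points:", cps)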
| 31.448276
| 80
| 0.592471
|
0eeab57c303b999e807e39f73e26981432a52bea
| 983
|
py
|
Python
|
setup.py
|
arnaudmiribel/st
|
85ba1902fe4ebc90662b16d3ef6ed1283781448b
|
[
"MIT"
] | 9
|
2021-11-22T15:04:26.000Z
|
2022-01-04T19:57:17.000Z
|
setup.py
|
arnaudmiribel/st
|
85ba1902fe4ebc90662b16d3ef6ed1283781448b
|
[
"MIT"
] | 1
|
2021-11-23T10:14:14.000Z
|
2021-12-13T21:52:50.000Z
|
setup.py
|
arnaudmiribel/st
|
85ba1902fe4ebc90662b16d3ef6ed1283781448b
|
[
"MIT"
] | 1
|
2022-01-02T21:13:59.000Z
|
2022-01-02T21:13:59.000Z
|
from setuptools import setup, find_packages
with open("requirements.txt") as f:
requirements = f.readlines()
long_description = "st is a CLI that helps you kick-off a new Streamlit \
project so you can start crafting the app as soon as possible!"
setup(
name="st-kickoff",
version="0.2",
author="Arnaud Miribel",
author_email="arnaudmiribel@gmail.com",
url="https://github.com/arnaudmiribel/st",
description="st is a CLI that helps you kick-off a new Streamlit project \
so you can start crafting the app as soon as possible!",
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
packages=find_packages(),
entry_points={"console_scripts": ["st = source.main:go"]},
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
),
keywords="streamlit cli",
install_requires=requirements,
zip_safe=False,
)
| 33.896552
| 78
| 0.686673
|
afeffccb29bd40fdea8db2098b9cd49e1f36aaac
| 1,013
|
py
|
Python
|
tests/kyu_6_tests/test_prize_draw.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
tests/kyu_6_tests/test_prize_draw.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
tests/kyu_6_tests/test_prize_draw.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
import unittest
from katas.kyu_6.prize_draw import rank
class PrizeDrawTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(rank('Elijah,Chloe,Elizabeth,Matthew,Natalie,Jayden',
[1, 3, 5, 5, 3, 6], 2), 'Matthew')
def test_equals_2(self):
self.assertEqual(rank('COLIN,AMANDBA,AMANDAB,CAROL,PauL,JOSEPH',
[1, 4, 4, 5, 2, 1], 4), 'PauL')
def test_equals_3(self):
self.assertEqual(rank(
'Addison,Jayden,Sofia,Michael,Andrew,Lily,Benjamin',
[4, 2, 1, 4, 3, 1, 2], 4), 'Benjamin')
def test_equals_4(self):
self.assertEqual(rank('Lagon,Lily', [1, 5], 2), 'Lagon')
def test_equals_5(self):
self.assertEqual(rank(
'Addison,Jayden,Sofia,Michael,Andrew,Lily,Benjamin',
[4, 2, 1, 4, 3, 1, 2], 8), 'Not enough participants')
def test_equals_6(self):
self.assertEqual(rank('', [4, 2, 1, 4, 3, 1, 2], 6), 'No participants')
| 33.766667
| 79
| 0.579467
|
7bfbb1147c2db6cd52afe4a69c365343276f5cf3
| 4,135
|
py
|
Python
|
selenium/src/py/lib/docutils/parsers/rst/languages/es.py
|
epall/selenium
|
273260522efb84116979da2a499f64510250249b
|
[
"Apache-2.0"
] | 4
|
2015-10-26T01:43:04.000Z
|
2018-05-12T17:39:32.000Z
|
selenium/src/py/lib/docutils/parsers/rst/languages/es.py
|
hugs/selenium
|
9fa09c04294bd43099aa8461b027148ba81be7b7
|
[
"Apache-2.0"
] | 1
|
2021-06-18T00:45:52.000Z
|
2021-06-18T00:45:52.000Z
|
selenium/src/py/lib/docutils/parsers/rst/languages/es.py
|
hugs/selenium
|
9fa09c04294bd43099aa8461b027148ba81be7b7
|
[
"Apache-2.0"
] | 2
|
2016-07-11T20:19:31.000Z
|
2018-11-17T22:36:54.000Z
|
# -*- coding: utf-8 -*-
# Author: Marcelo Huerta San Martín
# Contact: richieadler@users.sourceforge.net
# Revision: $Revision: 4231 $
# Date: $Date: 2005-12-23 03:10:41 +0100 (Fri, 23 Dec 2005) $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Spanish-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
u'atenci\u00f3n': 'attention',
u'atencion': 'attention',
u'precauci\u00f3n': 'caution',
u'precaucion': 'caution',
u'peligro': 'danger',
u'error': 'error',
u'sugerencia': 'hint',
u'importante': 'important',
u'nota': 'note',
u'consejo': 'tip',
u'advertencia': 'warning',
u'exhortacion': 'admonition',
u'exhortaci\u00f3n': 'admonition',
u'nota-al-margen': 'sidebar',
u'tema': 'topic',
u'bloque-de-lineas': 'line-block',
u'bloque-de-l\u00edneas': 'line-block',
u'literal-evaluado': 'parsed-literal',
u'firma': 'rubric',
u'ep\u00edgrafe': 'epigraph',
u'epigrafe': 'epigraph',
u'destacado': 'highlights',
u'cita-destacada': 'pull-quote',
u'combinacion': 'compound',
u'combinaci\u00f3n': 'compound',
u'contenedor': 'container',
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
u'tabla': 'table',
u'tabla-vsc': 'csv-table',
u'tabla-csv': 'csv-table',
u'tabla-lista': 'list-table',
u'meta': 'meta',
#'imagemap': 'imagemap',
u'imagen': 'image',
u'figura': 'figure',
u'incluir': 'include',
u'sin-analisis': 'raw',
u'sin-an\u00e1lisis': 'raw',
u'reemplazar': 'replace',
u'unicode': 'unicode',
u'fecha': 'date',
u'clase': 'class',
u'rol': 'role',
u'rol-por-omision': 'default-role',
u'rol-por-omisi\u00f3n': 'default-role',
u'titulo': 'title',
u't\u00edtulo': 'title',
u'contenido': 'contents',
u'numseccion': 'sectnum',
u'numsecci\u00f3n': 'sectnum',
u'numeracion-seccion': 'sectnum',
u'numeraci\u00f3n-secci\u00f3n': 'sectnum',
u'notas-destino': 'target-notes',
u'cabecera': 'header',
u'pie': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
u'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Spanish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
u'abreviatura': 'abbreviation',
u'ab': 'abbreviation',
    u'acr\u00f3nimo': 'acronym',
u'acronimo': 'acronym',
u'ac': 'acronym',
u'indice': 'index',
u'i': 'index',
u'subindice': 'subscript',
u'sub\u00edndice': 'subscript',
u'superindice': 'superscript',
u'super\u00edndice': 'superscript',
u'referencia-titulo': 'title-reference',
u'titulo': 'title-reference',
u't': 'title-reference',
u'referencia-pep': 'pep-reference',
u'pep': 'pep-reference',
u'referencia-rfc': 'rfc-reference',
u'rfc': 'rfc-reference',
u'enfasis': 'emphasis',
u'\u00e9nfasis': 'emphasis',
u'destacado': 'strong',
u'literal': 'literal', # "literal" is also a word in Spanish :-)
u'referencia-con-nombre': 'named-reference',
u'referencia-anonima': 'anonymous-reference',
u'referencia-an\u00f3nima': 'anonymous-reference',
u'referencia-nota-al-pie': 'footnote-reference',
u'referencia-cita': 'citation-reference',
u'referencia-sustitucion': 'substitution-reference',
u'referencia-sustituci\u00f3n': 'substitution-reference',
u'destino': 'target',
u'referencia-uri': 'uri-reference',
u'uri': 'uri-reference',
u'url': 'uri-reference',
u'sin-analisis': 'raw',
u'sin-an\u00e1lisis': 'raw',
}
"""Mapping of Spanish role names to canonical role names for interpreted text.
"""
| 33.346774
| 81
| 0.613543
|
9f72193fc076927104fe0c024654ffc9fb3894a9
| 2,262
|
py
|
Python
|
flask_project/campaign_manager/data_providers/_abstract_osmcha_provider.py
|
russbiggs/MapCampaigner
|
05b6c6eb1bdefbe0b60313a9d3cd1a0d2df2a148
|
[
"BSD-3-Clause"
] | 24
|
2018-10-05T06:39:11.000Z
|
2022-02-22T08:54:37.000Z
|
flask_project/campaign_manager/data_providers/_abstract_osmcha_provider.py
|
russbiggs/MapCampaigner
|
05b6c6eb1bdefbe0b60313a9d3cd1a0d2df2a148
|
[
"BSD-3-Clause"
] | 384
|
2017-05-17T07:50:02.000Z
|
2018-09-20T08:18:56.000Z
|
flask_project/campaign_manager/data_providers/_abstract_osmcha_provider.py
|
russbiggs/MapCampaigner
|
05b6c6eb1bdefbe0b60313a9d3cd1a0d2df2a148
|
[
"BSD-3-Clause"
] | 16
|
2017-05-11T08:52:19.000Z
|
2018-06-08T06:55:43.000Z
|
__author__ = 'Irwan Fathurrahman <irwan@kartoza.com>'
__date__ = '12/06/17'
import json
import requests
from app_config import Config
from urllib.error import HTTPError
from campaign_manager.utilities import multi_feature_to_polygon
from campaign_manager.data_providers._abstract_data_provider import (
AbstractDataProvider
)
class AbstractOsmchaProvider(AbstractDataProvider):
"""Data from osmcha"""
limit_per_page = 15
def get_api_url(self):
""" Return url of osmcha
:return: url
:rtype:str
"""
raise NotImplementedError()
def get_data(self,
geometry,
page=1,
start_date='',
end_date='',
max_page=None):
"""Get data from osmcha.
:param geometry: geometry that used by osmcha
:type geometry: dict
:param page: page that used by osmcha
:type page: int
:param start_date: start_date that used by osmcha
:type start_date: str
:param end_date: end_date that used by osmcha
:type end_date: str
:returns: A data from osmcha
:rtype: dict
"""
try:
if max_page:
self.limit_per_page = max_page
single_geometry = multi_feature_to_polygon(geometry)
payload_geometry = json.dumps(
single_geometry['features'][0]['geometry'])
payload = {
'page': page,
'page_size': self.limit_per_page,
'geometry': payload_geometry,
'date__gte': start_date,
'date__lte': end_date,
'area_lt': 2,
'is_suspect': True
}
request = requests.get(
self.get_api_url(),
params=payload,
timeout=60,
)
data = request.json()
except HTTPError as e:
raise e
return {
'max_page': '%d' % (data['count'] / self.limit_per_page),
'previous_page': int(page) - 1,
'current_page': page,
'next_page': int(page) + 1,
'data': data,
'total': data['count']
}
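# Usage sketch of a concrete subclass. The endpoint URL below is an assumption for
# illustration only; the real providers elsewhere in this package define their own
# osmcha URLs.
class ExampleOsmchaProvider(AbstractOsmchaProvider):
    """Hypothetical provider pointing at an assumed osmcha changeset endpoint."""
    def get_api_url(self):
        return 'https://osmcha.example.org/api/v1/changesets/'  # assumed URL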
| 27.925926
| 69
| 0.538904
|
de20fca174f936a3137ecbb225e01d4fb750597d
| 2,844
|
py
|
Python
|
openstack/network/v2/rule.py
|
wangrui1121/huaweicloud-sdk-python
|
240abe00288760115d1791012d4e3c4592d77ad1
|
[
"Apache-2.0"
] | 43
|
2018-12-19T08:39:15.000Z
|
2021-07-21T02:45:43.000Z
|
openstack/network/v2/rule.py
|
wangrui1121/huaweicloud-sdk-python
|
240abe00288760115d1791012d4e3c4592d77ad1
|
[
"Apache-2.0"
] | 11
|
2019-03-17T13:28:56.000Z
|
2020-09-23T23:57:50.000Z
|
openstack/network/v2/rule.py
|
wangrui1121/huaweicloud-sdk-python
|
240abe00288760115d1791012d4e3c4592d77ad1
|
[
"Apache-2.0"
] | 47
|
2018-12-19T05:14:25.000Z
|
2022-03-19T15:28:30.000Z
|
# -*- coding:utf-8 -*-
# Copyright 2018 Huawei Technologies Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from openstack import resource2
from openstack.network import network_service
class Rule(resource2.Resource):
resource_key = 'rule'
resources_key = 'rules'
base_path = '/lbaas/l7policies/%(policy_id)s/rules'
service = network_service.NetworkService()
_query_mapping = resource2.QueryParameters("id",
"tenant_id",
"admin_state_up",
"type",
"compare_type",
"invert",
"key",
"value"
)
allow_create = True
allow_get = True
allow_update = True
allow_delete = True
allow_list = True
# the rule id
id = resource2.Body("id")
# tenant id
tenant_id = resource2.Body("tenant_id")
# Management status: true/false.
# Instructions for use: Fixed to true
admin_state_up = resource2.Body("admin_state_up", type=bool, default=True)
# Matching content: Can be HOST_NAME, PATH
type = resource2.Body("type")
# Matching method:
# EQUAL_TO when type is HOST_NAME.
# REGEX when the type is PATH, STARTS_WITH, EQUAL_TO
compare_type = resource2.Body("compare_type")
# Whether the match is reversed, true/false.
# Instructions for use: Fixed to false. This field can be updated but will not take effect
invert = resource2.Body("invert", type=bool, default=False)
# Match the content key.
# Usage note: When the current match is HOST_NAME and PATH, this field does not take effect.
# This field can be updated but will not take effect
key = resource2.Body("key")
# The value of the matching content. Its value cannot contain spaces.
# Usage note: When the type is HOST_NAME, the value range is String(100).
# The string can only contain English letters, numbers, "-" or ".",
# and must start with a letter or number.
rule_value = resource2.Body("value")
# policy id the rule belongs to
policy_id = resource2.URI("policy_id")
| 43.090909
| 96
| 0.617089
|
347df4cd3812e8697c448eb87c32733ee6f8fb18
| 5,320
|
py
|
Python
|
Incident-Response/Tools/cyphon/cyphon/engines/elasticsearch/results.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 1
|
2021-07-24T17:22:50.000Z
|
2021-07-24T17:22:50.000Z
|
Incident-Response/Tools/cyphon/cyphon/engines/elasticsearch/results.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-28T03:40:31.000Z
|
2022-02-28T03:40:52.000Z
|
Incident-Response/Tools/cyphon/cyphon/engines/elasticsearch/results.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-25T08:34:51.000Z
|
2022-03-16T17:29:44.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Defines helper functions for formatting Elasticsearch query results.
============================== =============================================
Function Description
============================== =============================================
:func:`~get_count` Get the number of matching docs.
:func:`~get_doc_ids` Get the ids of matching docs.
:func:`~get_doc_info` Get the index, doc_type, and id of a doc.
:func:`~get_found_docs` Get docs from a multi-get request.
:func:`~get_hits` Get docs from a search result.
:func:`~get_results_and_count` Get docs and doc count from a search result.
:func:`~get_source` Get `_source` fields with added `_id` fields.
:func:`~get_source_with_id` Get a doc's `_source` field with its `_id`.
============================== =============================================
"""
def get_count(results):
"""Get the number of documents in an Elasticsearch search result.
Parameters
----------
results : dict
An Elasticsearch search result.
Returns
-------
int
The total number of documents matching the search criteria.
"""
return results['hits']['total']
def get_hits(results):
"""Return a list of documents from an Elasticsearch search result.
Parameters
----------
results : dict
An Elasticsearch search result.
Returns
-------
|list| of |dict|
The list of documents in the search result.
"""
return results['hits']['hits']
def get_doc_info(result):
"""Get the index, doc_type, and id associated with a document.
Parameters
----------
result : dict
A document from an Elasticsearch search result.
Returns
-------
dict
A dictionary with keys 'index', 'doc_type', and 'id',
containing the name of the index in which the document resides,
the doc_type of the document, and its id.
"""
return {
'index': result['_index'],
'doc_type': result['_type'],
'id': result['_id']
}
def get_doc_ids(results):
"""Get the ids of documents contained in a search result.
Parameters
----------
results : dict
An Elasticsearch search result.
Returns
-------
|list| of |str|
A list of ids of the documents contained in the search result.
"""
return [hit['_id'] for hit in get_hits(results)]
def get_source_with_id(result):
"""Return a document's `_source` field with its `_id` added.
Parameters
----------
result : dict
A document from a set of Elasticsearch search results.
Returns
-------
dict
The document's `_source` field updated with the doc's `_id`.
"""
result['_source'].update({'_id': result['_id']})
return result['_source']
def get_source(results):
"""Get the documents contained in a search result.
Extracts documents from an Elasticsearch search result, adds the
document id to each document, and returns the list of documents.
Parameters
----------
results : dict
An Elasticsearch search result.
Returns
-------
|list| of |dict|
The documents from the search result, with the doc ids added.
"""
return [get_source_with_id(hit) for hit in get_hits(results)]
def get_found_docs(results):
"""Get the documents found through a multi-get request.
This method weeds out failed matches from an mget result set.
Parameters
----------
results : dict
An Elasticsearch mget (find by id) result.
Returns
-------
|list| of |dict|
The documents found by the mget request, with the doc ids added.
"""
docs = results['docs']
matched_docs = []
for doc in docs:
if doc.get('found'):
formatted_doc = get_source_with_id(doc)
matched_docs.append(formatted_doc)
return matched_docs
def get_results_and_count(data):
"""Get the documents and document count from a search result.
Parameters
----------
data : dict
An Elasticsearch search result.
Returns
-------
dict
A dictionary with keys 'count' and 'results'. The 'count' value
is the total number of documents matching the search criteria.
The 'results' value is a list of documents from the search
result, with the doc ids added to each document.
"""
count = get_count(data)
results = get_source(data)
return {
'count': count,
'results': results,
}
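# Usage sketch with a hand-built, Elasticsearch-shaped result (no cluster needed);
# the index name, id and field below are illustrative only.
if __name__ == '__main__':
    fake_results = {
        'hits': {
            'total': 1,
            'hits': [{'_index': 'logs', '_type': 'doc', '_id': 'a1',
                      '_source': {'message': 'hello'}}],
        }
    }
    assert get_results_and_count(fake_results) == {
        'count': 1,
        'results': [{'message': 'hello', '_id': 'a1'}],
    }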
| 27.142857
| 77
| 0.604135
|
9ace7246eb18ab3d18bfccb6112d2053e208e67b
| 31,008
|
py
|
Python
|
Cuba/covid19_processing.py
|
ypriverol/COVID19
|
7e8ab018f576ef7b67eee4a1a0114dd06c7a82ee
|
[
"CC0-1.0"
] | 1
|
2020-03-31T00:23:01.000Z
|
2020-03-31T00:23:01.000Z
|
Cuba/covid19_processing.py
|
ypriverol/COVID19
|
7e8ab018f576ef7b67eee4a1a0114dd06c7a82ee
|
[
"CC0-1.0"
] | null | null | null |
Cuba/covid19_processing.py
|
ypriverol/COVID19
|
7e8ab018f576ef7b67eee4a1a0114dd06c7a82ee
|
[
"CC0-1.0"
] | 2
|
2020-03-31T01:21:10.000Z
|
2020-03-31T20:50:11.000Z
|
from covid19_util import *
from matplotlib import dates as mdates
import pandas as pd
import requests
import scipy.optimize
import scipy.stats
from io import StringIO
import datetime
import geonamescache
class Covid19Processing:
def __init__(self):
self.dataframes = {}
gc = geonamescache.GeonamesCache()
gc_data = gc.get_countries()
self.country_metadata = {}
normalized_names = {
"Timor Leste": "East Timor",
"Vatican": "Vatican City",
"Democratic Republic of the Congo": "Congo (Kinshasa)",
"Republic of the Congo": "Congo (Brazzaville)",
"Cabo Verde": "Cape Verde"
}
for country_code in gc_data:
metadata = gc_data[country_code]
name = metadata["name"]
if name in normalized_names:
name = normalized_names[name]
population = metadata["population"]
area = metadata["areakm2"]
continent = continent_codes[metadata["continentcode"]]
self.country_metadata[name] = {
"population": population,
"area": area,
"continent": continent
}
for metric in data_urls.keys():
url = base_url + data_urls[metric] # Combine URL parts
r = requests.get(url) # Retrieve from URL
self.dataframes[metric] = pd.read_csv(StringIO(r.text), sep=",") # Convert into Pandas dataframe
# Display the first lines
display(Markdown("### Raw confirmed cases data, per region/state"))
with pd.option_context("display.max_rows", 10, "display.max_columns", 14):
display(self.dataframes["confirmed"])
def process(self, rows=20, debug=False):
# Clean up
for metric in data_urls.keys():
by_country = self.dataframes[metric].groupby("Country/Region").sum() # Group by country
dates = by_country.columns[2:] # Drop Lat/Long
# Convert to columns to matplotlib dates
by_country = by_country.loc[:, dates]
dates = pd.to_datetime(dates)
by_country.columns = dates
if metric == "confirmed":
# Early China data points
early_china_data = {
"1/17/20": 45,
"1/18/20": 62,
"1/20/20": 218
}
# Insert data points
for d, n in early_china_data.items():
by_country.loc["China", pd.to_datetime(d)] = n
# Retain chronological column order
by_country = by_country.reindex(list(sorted(by_country.columns)), axis=1)
by_country = by_country.fillna(0)
# Correct an odd blip in the Japanese data.
# From 2/5 to 2/7, the Johns Hopkins data for Japan goes 22, 45, 25.
# I assume that the 45 is incorrect. Replace with 23.5, halfway between the values for 2/5 and 2/7
by_country.loc["Japan", pd.to_datetime("2/06/20")] = 23.5
# Change some weird formal names to more commonly used ones
by_country = by_country.rename(index={"Republic of Korea": "South Korea",
"Holy See": "Vatican City",
"Iran (Islamic Republic of)": "Iran",
"Viet Nam": "Vietnam",
"Taipei and environs": "Taiwan",
"Republic of Moldova": "Moldova",
"Russian Federaration": "Russia",
"Korea, South": "South Korea",
"Taiwan*": "Taiwan",
"occupied Palestinian territory": "Palestine",
"West Bank and Gaza": "Palestine",
"Bahamas, The": "Bahamas",
"Cote d'Ivoire": "Ivory Coast",
"Gambia, The": "Gambia",
"US": "United States",
"Cabo Verde": "Cape Verde",
})
by_country.sort_index(inplace=True)
# Store processed results for metric
self.dataframes[metric + "_by_country"] = by_country.fillna(0).astype(int)
# Add in recovered and active
self.dataframes["recovered_by_country"] = pd.DataFrame(columns=self.dataframes["confirmed_by_country"].columns)
self.dataframes["active_by_country"] = pd.DataFrame(columns=self.dataframes["confirmed_by_country"].columns)
for country in self.dataframes["confirmed_by_country"].index:
self.dataframes["recovered_by_country"].loc[country, :] =\
self.simulate_country_history(country).recovered
self.dataframes["active_by_country"].loc[country, :] = \
self.dataframes["confirmed_by_country"].loc[country] - \
self.dataframes["deaths_by_country"].loc[country] - \
self.dataframes["recovered_by_country"].loc[country]
# Add in continents
for metric in list(data_urls.keys()) + ["recovered", "active"]:
continent_data = {}
by_country = self.dataframes[metric+"_by_country"]
for country in by_country.index:
if country in self.country_metadata:
continent = self.country_metadata[country]["continent"]
if continent in continent_data:
continent_data[continent] += by_country.loc[country, :]
else:
continent_data[continent] = by_country.loc[country, :]
elif metric == "confirmed" and debug:
print(f"Missing metadata for {country}!")
by_continent = pd.DataFrame(columns=by_country.columns)
for continent in continent_data:
by_continent.loc[continent, :] = continent_data[continent]
# Add in special regions
all_countries = by_country.sum()
by_continent.loc["All except China", :] = all_countries - by_country.loc["China", dates]
by_continent.loc["World", :] = all_countries
by_continent = by_continent
self.dataframes[metric + "_by_continent"] = by_continent.fillna(0).astype(int)
with pd.option_context("display.max_rows", rows, "display.min_rows", rows, "display.max_columns", 10):
display(Markdown("### Table of confirmed cases by country"))
display(self.dataframes["confirmed_by_country"])
display(Markdown("### Table of confirmed cases by continent/region"))
display(self.dataframes["confirmed_by_continent"])
def list_countries(self, columns=5):
confirmed_by_country = self.dataframes["confirmed_by_country"]
n_countries = len(confirmed_by_country)
display(Markdown(f"### {n_countries} countries/territories affected:\n"))
for i, k in enumerate(confirmed_by_country.index):
if len(k) > 19:
k = k[:18].strip() + "."
print(f"{k:20}", end=" " if (i + 1) % columns else "\n") # Every 5 items, end with a newline
def get_metric_data(self, metric):
if metric+"_by_country" in self.dataframes:
return pd.concat([self.dataframes[metric + "_by_country"], self.dataframes[metric + "_by_continent"]])
elif metric.startswith("new") and metric.split(" ")[1] in self.dataframes:
metric = metric.split(" ")[1]
return pd.concat([self.dataframes[metric + "_by_country"].diff(axis="columns"),
self.dataframes[metric + "_by_continent"].diff(axis="columns")]
)
else:
return
def get_country_data(self, country):
data = {}
for metric in self.dataframes.keys():
if not metric.endswith("by_country"):
continue
series = self.dataframes[metric].loc[country, :]
series.name = metric
data[metric] = series
return pd.DataFrame(data)
def get_new_cases_details(self, country, avg_n=5, median_n=3):
confirmed = self.get_metric_data("confirmed").loc[country]
deaths = self.get_metric_data("deaths").loc[country]
df = pd.DataFrame(confirmed)
df = df.rename(columns={country: "confirmed_cases"})
df.loc[:, "new_cases"] = np.maximum(0, confirmed.diff())
df.loc[:, "new_deaths"] = np.maximum(0, deaths.diff())
df = df.loc[df.new_cases > 1, :]
df.loc[:, "growth_factor"] = df.new_cases.diff() / df.new_cases.shift(1) + 1
df[~np.isfinite(df)] = np.nan
df.loc[:, "filtered_new_cases"] = \
scipy.ndimage.convolve(df.new_cases, np.ones(avg_n) / avg_n, origin=-avg_n // 2 + 1)
df.loc[:, "filtered_growth_factor"] = \
df.filtered_new_cases.diff() / df.filtered_new_cases.shift(1) + 1
df.filtered_growth_factor = scipy.ndimage.median_filter(df.filtered_growth_factor, median_n, mode="nearest")
return df
def plot(self, x_metric, y_metric, countries_to_plot, colormap=cm, use_log_scale=True,
min_cases=0, sigma=5, fixed_country_colors=False):
# layout/style stuff
markers = ["o", "^", "v", "<", ">", "s", "X", "D", "*", "$Y$", "$Z$"]
short_metric_to_long = {
"confirmed": "Confirmed cases",
"deaths": "Deaths",
"active": "Active cases",
"growth_factor": f"{sigma}-day avg growth factor",
"deaths/confirmed": "Case fatality",
"new confirmed": "Daily new cases"
}
fills = ["none", "full"] # alternate between filled and empty markers
length = None
m = len(markers)
cm = plt.cm.get_cmap(colormap)
n_colors = min(len(markers), len(countries_to_plot))
c_norm = matplotlib.colors.Normalize(vmin=0, vmax=n_colors)
scalar_map = matplotlib.cm.ScalarMappable(norm=c_norm, cmap=cm)
y_max = 0
ratio_parts = y_metric.split("/")
if self.get_metric_data(y_metric) is not None:
by_country = self.get_metric_data(y_metric)
elif y_metric == "growth_factor":
by_country = self.get_metric_data("confirmed")
elif y_metric == "active":
by_country = self.get_metric_data("confirmed") - \
self.get_metric_data("deaths") - \
self.get_metric_data("recovered")
by_country = by_country.dropna("columns").astype(int)
elif len(ratio_parts) == 2 and self.get_metric_data(ratio_parts[0]) is not None\
and self.get_metric_data(ratio_parts[1]) is not None:
denominator = self.get_metric_data(ratio_parts[1])
numerator = self.get_metric_data(ratio_parts[0])
numerator = numerator[denominator > min_cases]
denominator = denominator[denominator > min_cases]
by_country = numerator / denominator
if use_log_scale:
by_country += 0.0001
else:
print(f"{y_metric}' is an invalid y_metric!")
for i, country in enumerate(countries_to_plot):
            if country not in by_country.index:
                raise KeyError(f"Country '{country}' not found for {y_metric}!")
            country_data = by_country.loc[country].dropna()
fill = fills[i % (2 * m) < m]
if fixed_country_colors:
color = string_to_color(country)
else:
color = scalar_map.to_rgba(i % n_colors)
if y_metric == "growth_factor":
df = self.get_new_cases_details(country, sigma)
if x_metric == "day_number":
df = df[df.iloc[:, 0] >= min_cases]
country_data = df.filtered_growth_factor
is_valid = sum(np.nan_to_num(country_data)) > 0
if x_metric == "calendar_date" and is_valid:
dates = [datetime.datetime.strftime(x, '%m/%d') for x in country_data.index]
plt.plot(country_data, marker=markers[i % m], label=country,
markersize=6, color=color, alpha=1, fillstyle=fill)
elif x_metric == "day_number":
if y_metric != "growth_factor" and len(ratio_parts) < 2:
country_data = country_data[country_data >= min_cases]
if country == "Outside China":
length = len(country_data)
day_nr = list(range(len(country_data)))
if is_valid:
plt.plot(day_nr, country_data, marker=markers[i % m], label=country,
markersize=6, color=color, alpha=1, fillstyle=fill)
if country_data.max() is not np.nan:
mx = country_data.max()
if not np.isscalar(mx):
                    mx = mx.max()
y_max = max(y_max, mx)
if y_metric in short_metric_to_long:
long_y_metric = short_metric_to_long[y_metric]
else:
long_y_metric = y_metric
plt.ylabel(long_y_metric, fontsize=14)
if x_metric == "calendar_date":
plt.xlabel("Date", fontsize=14)
if countries_to_plot[0] in self.dataframes["confirmed_by_country"].index:
title = f"COVID-19 {long_y_metric} over time"
if len(countries_to_plot) > 1:
title += " in selected countries"
else:
title += f" in {countries_to_plot[0]}"
else:
title = f"COVID-19 {long_y_metric} over time by continent"
plt.title(title, fontsize=18)
plt.ylim(0.9 * use_log_scale,
by_country.loc[countries_to_plot].max().max() * (2 - 0.9 * (not use_log_scale)))
firstweekday = pd.Timestamp(country_data.index[0]).dayofweek
n_days = (country_data.index.max() - country_data.index.min()).days + 1
n_weeks = n_days//5
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))
plt.gca().xaxis.set_major_locator(mdates.WeekdayLocator(interval=n_weeks//7, byweekday=firstweekday))
elif x_metric == "day_number":
if y_metric != "growth_factor" and len(ratio_parts) < 2:
floor = 10 ** math.floor(math.log(min_cases) / math.log(10))
floor = floor * (1 - (not use_log_scale)) * .9
ceil = 10 ** math.ceil(math.log(by_country.loc[countries_to_plot].max().max()) / math.log(10))
ceil = ceil * 1.2
plt.ylim(floor, ceil)
plt.xlim(0, length)
plt.xlabel("Day Number", fontsize=14)
if len(ratio_parts) < 2:
title = f"COVID-19 {long_y_metric}, from the first day with ≥{min_cases} cases"
else:
title = f"COVID-19 {long_y_metric} ratio in selected countries"
plt.title(title, fontsize=18)
plt.legend(frameon=False)
if y_metric == "growth_factor":
plt.gca().get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: f"{x:,.2f}"))
elif len(ratio_parts) > 1:
plt.gca().get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: f"{x:.1%}"))
if use_log_scale:
plt.yscale("log")
plt.gca().get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: f"{x:.3f}"))
# plt.ylim((0.001, 0.12))
else:
pass
else:
set_y_axis_format(y_max, use_log_scale)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.gca().tick_params(which="major", color=light_grey)
set_plot_style()
plt.show()
def plot_pie(self, y_metrics, mode="country"):
plt.figure(figsize=(8, 8*len(y_metrics)))
for i, y_metric in enumerate(y_metrics):
plt.subplot(len(y_metrics), 1, i+1)
short_y = y_metric.split()[0]
data_for_pie = self.dataframes[short_y + "_by_"+mode].iloc[:, -1]
data_for_pie = data_for_pie[~data_for_pie.index.isin(["All except China", "World"])]
data_for_pie = data_for_pie.sort_values(ascending=False).fillna(0)
data_for_pie = np.maximum(0, data_for_pie)
country_names = [x if data_for_pie[x] / data_for_pie.sum() > .015 else "" for x in data_for_pie.index]
data_for_pie.plot.pie(startangle=270, autopct=get_pie_label, labels=country_names,
counterclock=False, pctdistance=.75,
colors=[string_to_color(x) for x in data_for_pie.index],
textprops={'fontsize': 12})
plt.ylabel("")
plt.title(f"{y_metric.capitalize()} as of {data_for_pie.name.date()}", fontsize=16)
plt.show()
def curve_fit(self, country="All except China", days=100, do_plot=True):
country_data = self.get_metric_data("confirmed").loc[country, :]
country_data = country_data[np.isfinite(country_data)]
x = np.arange(days + len(country_data))
current_day = country_data.index[-1]
if country in self.country_metadata:
population = self.country_metadata[country]["population"]
else:
population = 1e8
[L, k, x0], pcov = scipy.optimize.curve_fit(logistic_func, np.arange(len(country_data)),
country_data, maxfev=10000,
p0=[population*0.75, 0.5, np.clip(len(country_data), 1, 200 )],
bounds=([1, 0.1, 1], [population, 0.999, 600]),
method="trf"
)
# dates up to 100 days after start
model_date_list = [country_data.index[0] + datetime.timedelta(days=n) for n in range(0, days + len(country_data))]
model_date_list = [mdates.date2num(x) for x in model_date_list]
n = len(model_date_list)
logistic = logistic_func(x - 2, L, k, x0)
if do_plot:
plt.plot(country_data, label="Confirmed cases in " + country, markersize=3, zorder=1)
plt.plot(model_date_list[-days+1:], np.round(logistic)[-days+1:],
label=f"{L:.0f} / (1 + e^(-{k:.3f} * (x - {x0:.1f})))", zorder=1)
plt.legend(loc="upper left")
plt.title(f"Logistic curve fit and extrapolation for {country}", fontsize=18)
plt.xlabel("Date", fontsize=14)
plt.ylabel("Cases", fontsize=14)
plt.scatter(mdates.date2num(current_day), country_data[-1], s=20, c="C00", zorder=2)
plt.annotate(
f"{datetime.datetime.strftime(current_day, '%m/%d')}: {kmb_number_format(country_data[-1], 3, 0)}",
(mdates.date2num(current_day) - 1, country_data[-1]), fontsize=18, ha="right")
plt.scatter(model_date_list[-1], logistic[-1], s=20, c="C01", zorder=2)
plt.annotate(
f"{mdates.num2date(model_date_list[-1]).strftime('%m/%d')}: {kmb_number_format(logistic[-1], 3, 0)}",
(model_date_list[-1] - 1, logistic[-1] * 1.08), fontsize=18, ha="right")
set_y_axis_format(logistic.max(), True)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.gca().tick_params(which="both", color=light_grey)
for spine in plt.gca().spines.values():
spine.set_visible(False)
bottom, top = plt.ylim()
plt.ylim((bottom, max(bottom+1, top)))
set_plot_style()
plt.show()
def simulate_country_history(self, country, history_length=40, show_result=False):
if country in self.country_metadata:
population = self.country_metadata[country]["population"]
else:
population = np.nan
confirmed = self.dataframes["confirmed_by_country"].loc[country]
deaths = self.dataframes["deaths_by_country"].loc[country]
recovered = np.zeros(len(confirmed))
active = np.zeros(len(confirmed))
uninfected = (population - confirmed).fillna(population)
simulation = pd.DataFrame(data=[confirmed, deaths, recovered, active, uninfected],
index=["confirmed", "deaths", "recovered", "active", "uninfected"]).transpose()
simulation = simulation.fillna(0)
daily_death_distribution = death_chance_per_day(cfr=0.04, s=1.75, mu=0.5, sigma=10, length=history_length)
# reconstruct recovered and active case durations using fatality by case duration stats
for i, day in enumerate(confirmed.index):
case_history = np.zeros(history_length)
if i == 0:
new_recovered = 0
else:
new_cases = simulation.confirmed.diff()[i]
new_deaths = simulation.deaths.diff()[i]
new_deaths_by_case_duration = (new_deaths * daily_death_distribution)
case_history[0] = new_cases
case_history[1:] = simulation.iloc[i - 1, -history_length:-1]
case_history = case_history[:history_length]
case_history -= new_deaths_by_case_duration
case_history = np.maximum(0, case_history)
new_recovered = simulation.recovered.iloc[i - 1] + case_history[-1]
for h in range(history_length):
simulation.at[day.to_datetime64(), f"active_{h}"] = case_history[h]
simulation.at[day.to_datetime64(), f"recovered"] = new_recovered
simulation.at[day.to_datetime64(), f"active"] = sum(case_history)
simulation = simulation.fillna(0).astype(int)
if show_result:
display(Markdown(f"<br>**Last 10 days in {country}, showing a 7-day case duration history:**"))
display(simulation.iloc[-10:, :])
return simulation
def simulate_country(
self,
country, # name of the country to simulate
days=30, # how many days into the future to simulate
cfr=0.03, # case fatality rate, 0 to 1
critical_rate=0.18, # https://jamanetwork.com/journals/jama/fullarticle/2763188, https://www.cdc.gov/mmwr/volumes/69/wr/mm6912e2.htm
cfr_without_icu=0.80, # unknown but high
icu_beds_per_100k=30, # https://www.forbes.com/sites/niallmccarthy/2020/03/12/the-countries-with-the-most-critical-care-beds-per-capita-infographic
icu_availability=0.2, #
history_length=28, # Length of case history
sigma_death_days=6, # Standard deviation in mortality over time distribution
r0=2.5,
mitigation_trend=(1.0, 0.5), # Mitigation factor development over time. This will be linearly
# interpolated to a vector of length {days},
from_day=-1
):
population = self.country_metadata[country]["population"]
country_history = self.simulate_country_history(country, history_length)
if from_day != -1:
country_history = country_history[country_history.confirmed > 0]
country_history = country_history.iloc[:from_day+1, :]
available_icu_beds = int(population/100000 * icu_beds_per_100k * icu_availability)
daily_mitigation = np.interp(np.linspace(0, 1, days),
np.linspace(0, 1, len(mitigation_trend)),
mitigation_trend)
daily_death_chance = death_chance_per_day(cfr, 1.75, 0.5, sigma_death_days, history_length, do_plot=False)
# daily_death_chance_no_icu = death_chance_per_day(cfr_without_icu, 1.75, 0.5,
# sigma_death_days, history_length, do_plot=False)
# https://www.jwatch.org/na51083/2020/03/13/covid-19-incubation-period-update
# https://www.medrxiv.org/content/10.1101/2020.03.05.20030502v1.full.pdf
daily_transmission_chance = scipy.stats.norm.pdf(np.linspace(0, history_length, history_length+1),
loc=4.5, scale=1.6)
today = country_history.index[-1]
for d in range(days):
# column shortcuts
confirmed = country_history.confirmed
deaths = country_history.deaths
recovered = country_history.recovered
case_history = country_history.iloc[-1, -history_length:].copy()
last_day = confirmed.index[-1]
next_day = last_day + pd.DateOffset(1)
current_alive = population - deaths.iloc[-1]
current_uninfected = int(np.maximum(0, population - confirmed.iloc[-1]))
current_uninfected_ratio = current_uninfected / current_alive
current_mitigation = daily_mitigation[d]
# Infect
r_eff = r0 * current_mitigation * current_uninfected_ratio
new_cases = 0
for case_duration in range(history_length):
new_cases_for_case_duration = np.random.binomial(case_history[case_duration],
r_eff*daily_transmission_chance[case_duration])
new_cases += int(round(new_cases_for_case_duration))
# Deaths
new_deaths = 0
for case_duration in range(history_length):
cases = case_history[case_duration]
# TODO: assign patients to mild or critical only once
# critical_patients = (critical_rate * cases).round()
# critical_patients_in_icu = min(available_icu_beds, critical_patients)
# critical_patients_no_icu = max(0, critical_patients - available_icu_beds)
non_icu_patients = cases # - critical_patients
deaths_for_case_duration = np.random.binomial(non_icu_patients, # + critical_patients_in_icu,
daily_death_chance[case_duration])
# deaths_for_case_duration += np.random.binomial(critical_patients_no_icu,
# daily_death_chance_no_icu[case_duration])
case_history[case_duration] -= deaths_for_case_duration
new_deaths += deaths_for_case_duration
# Recoveries
new_recovered = case_history[-1]
# Shift case history
case_history[1:] = case_history[:-1]
case_history.iloc[0] = new_cases
country_history.at[next_day, "confirmed"] = confirmed.loc[last_day] + new_cases
country_history.at[next_day, "deaths"] = deaths.loc[last_day] + new_deaths
country_history.at[next_day, "recovered"] = recovered.loc[last_day] + new_recovered
country_history.at[next_day, "active"] = case_history.sum()
country_history.at[next_day, "uninfected"] = current_uninfected
country_history.iloc[-1, -history_length:] = case_history
return country_history, today
def plot_simulation(self, country, days, mitigation_trend, cfr=0.02, r0=2.5,
history_length=30, use_log_scale=True, scenario_name="", from_day=-1):
simulation, today = self.simulate_country(country=country, days=days, cfr=cfr,
mitigation_trend=mitigation_trend,
r0=r0,
history_length=history_length,
from_day=from_day)
plt.figure(figsize=(13, 8))
metrics = ["confirmed cases", "deaths", "active cases", "recovered cases"]
c = ["tab:blue", "r", "tab:orange", "limegreen", "tab:purple"]
for i, metric in enumerate(metrics):
short_metric = metric.split()[0]
plt.plot(simulation.loc[:today, short_metric], c=c[i], label=f"{metric.capitalize()}")
plt.plot(simulation.loc[today:, short_metric], "-.", c=c[i], alpha=0.75)
plt.plot(simulation.loc[today - pd.DateOffset(1):, "confirmed"].diff(), "-.", c=c[i + 1], alpha=0.75)
plt.plot(simulation.loc[:today, "confirmed"].diff(), c=c[-1], label="Daily new cases")
plt.legend(loc="upper left")
set_y_axis_format(simulation.loc[:, "confirmed"].max().max(), log=use_log_scale)
title = f"{days}-day Covid-19 simulation, {country}"
if scenario_name:
title += ": " + scenario_name
plt.suptitle(title, fontsize=20, y=1.03)
plt.tight_layout()
set_plot_style()
plt.show()
simulation = simulation.astype(int)
display(Markdown(f"### {scenario_name} final tally:"))
peak_active = simulation.active.max()
peak_active_date = simulation.active[simulation.active == simulation.active.max()].index[0].date()
print(f"Confirmed: {kmb_number_format(simulation.confirmed[-1], 3 , False)},\n"
f"Deaths: {kmb_number_format(simulation.deaths[-1], 3 , False)},\n"
f"Recovered: {kmb_number_format(simulation.recovered[-1], 3 , False)},\n"
f"Peak active: {kmb_number_format(peak_active, 3, False)} at {peak_active_date},\n"
f"Uninfected: {kmb_number_format(simulation.uninfected[-1], 3 , False)}"
)
return simulation
def country_highlight(self, country):
metrics = ["new_cases", "new_deaths"]
country_data = self.get_new_cases_details(country).round(2)[metrics]
display(country_data.tail(7))
for metric in metrics:
data = country_data[metric]
plt.plot(country_data.index, data, label=metric.capitalize().replace("_", " "))
plt.title(f"{country} daily changes as of {country_data.index[-1].date()}", fontsize=20)
set_plot_style()
plt.legend()
set_y_axis_format(country_data[metrics].max().max(), log=True)
plt.show()
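# Usage sketch, run only when executed directly. It assumes the helpers imported
# from covid19_util (base_url, data_urls, display, Markdown and the plotting
# utilities), network access for the Johns Hopkins CSVs, and a Jupyter-style
# display; the country names are illustrative.
if __name__ == "__main__":
    processing = Covid19Processing()  # downloads the raw case/death CSV files
    processing.process(rows=10)
    processing.plot("calendar_date", "confirmed", ["Cuba", "Italy", "South Korea"])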
| 51.08402
| 162
| 0.568982
|
f1344a2b3883e50b8e0dde0c7f57e264f2d2c2a5
| 75
|
py
|
Python
|
futurecashflow/__init__.py
|
maka89/FutureCashFlow
|
a240f8efd973854f02394338c390fb7c3432437b
|
[
"MIT"
] | 1
|
2022-01-11T16:05:20.000Z
|
2022-01-11T16:05:20.000Z
|
futurecashflow/__init__.py
|
maka89/FutureCashFlow
|
a240f8efd973854f02394338c390fb7c3432437b
|
[
"MIT"
] | null | null | null |
futurecashflow/__init__.py
|
maka89/FutureCashFlow
|
a240f8efd973854f02394338c390fb7c3432437b
|
[
"MIT"
] | null | null | null |
from .core_layers import *
from .meta_layers import *
from .model import *
| 18.75
| 26
| 0.76
|
420e3996425093c8590924e2ef792dbab2cb2b13
| 3,848
|
py
|
Python
|
samples/deepbench/conv_overhead_caffe2.py
|
khoaideptrai/deep500
|
0953038f64bc73c8d41d01796e07d3a23ca97822
|
[
"BSD-3-Clause"
] | 90
|
2019-01-02T22:49:08.000Z
|
2022-02-17T21:11:38.000Z
|
samples/deepbench/conv_overhead_caffe2.py
|
khoaideptrai/deep500
|
0953038f64bc73c8d41d01796e07d3a23ca97822
|
[
"BSD-3-Clause"
] | 4
|
2019-02-14T16:19:06.000Z
|
2022-01-11T17:54:42.000Z
|
samples/deepbench/conv_overhead_caffe2.py
|
khoaideptrai/deep500
|
0953038f64bc73c8d41d01796e07d3a23ca97822
|
[
"BSD-3-Clause"
] | 24
|
2019-01-09T18:09:44.000Z
|
2022-01-10T13:04:42.000Z
|
import time
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, core, model_helper, utils, brew
import deepbench
AVG_OVER = 100
RUNS = 30
device_option = core.DeviceOption(caffe2_pb2.CUDA) # device = core.DeviceOption(caffe2_pb2.CPU)
print('############ Convolution ############')
print('Vanilla Caffe2')
# Native Caffe2 results
with open('0_cf2_conv_deepbench.log', 'w') as fp:
fp.write('n,c,h,w,k,r,s,h_stride,w_stride,time\n')
for test in deepbench.conv_training_set:
print(test)
try:
with core.DeviceScope(device_option):
m = model_helper.ModelHelper(name="test_net")
# Create Pytorch "model"
X = np.random.rand(test.n, test.c, test.h, test.w).astype(np.float32)
W = np.random.rand(test.k, test.c, test.r, test.s).astype(np.float32)
B = np.random.rand(test.k).astype(np.float32)
workspace.FeedBlob("X", X, device_option=device_option)
workspace.FeedBlob("W", W, device_option=device_option)
workspace.FeedBlob("B", B, device_option=device_option)
order = "NCHW"
m.net.Conv(["X", "W", "B"], ["Y"],
kernels=[test.r,test.s],
strides=[test.hstride, test.wstride],
pads=[test.pad_h, test.pad_w] * 2)
times = []
# Warmup run
workspace.RunNetOnce(m.param_init_net)
workspace.RunNetOnce(m.net)
for i in range(RUNS):
s = time.time()
for j in range(AVG_OVER):
workspace.RunNetOnce(m.net)
e = time.time()
times.append((e - s) / AVG_OVER)
except Exception as ex:
print('Exception:', ex)
times = [-1.0]
with open('0_cf2_conv_deepbench.log', 'a') as fp:
fp.writelines([
'{test.n},{test.c},{test.h},{test.w},{test.k},{test.r},{test.s},{test.hstride},{test.wstride},{time:.15f}\n'.format(
test=test, time=time) for time in times])
##############################################################################
print('Deep500 Caffe2 - test_nativeop_*')
# Deep500 Caffe2
import deep500 as d5
from deep500.frameworks import caffe2 as d5cf2
with open('0_d5cf2_conv_deepbench.log', 'w') as fp:
fp.write('n,c,h,w,k,r,s,h_stride,w_stride,time,l2_error,max_error\n')
for test in deepbench.conv_training_set:
print(test)
try:
with core.DeviceScope(device_option):
m = model_helper.ModelHelper(name="test_net")
# Create Pytorch "model"
X = np.random.rand(test.n, test.c, test.h, test.w).astype(np.float32)
W = np.random.rand(test.k, test.c, test.r, test.s).astype(np.float32)
B = np.random.rand(test.k).astype(np.float32)
workspace.FeedBlob("W", W, device_option=device_option)
workspace.FeedBlob("B", B, device_option=device_option)
m.net.Conv(["X", "W", "B"], ["Y"],
kernels=[test.r, test.s],
strides=[test.hstride, test.wstride],
pads=[test.pad_h, test.pad_w] * 2)
times, = d5cf2.test_nativeop_forward(m, [("X", X)], [("Y", None)],
metrics=[d5.WallclockTime(RUNS * AVG_OVER, AVG_OVER)])
except Exception as ex:
print('Exception:', ex)
times = [-1.0]
with open('0_d5cf2_conv_deepbench.log', 'a') as fp:
fp.writelines(['{test.n},{test.c},{test.h},{test.w},'
'{test.k},{test.r},{test.s},'
'{test.hstride},{test.wstride},'
'{time:.15f}\n'.format(test=test, time=time)
for time in times])
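# Optional post-processing sketch: summarize one of the CSV logs written above.
# pandas is an extra assumption here, not a dependency of the benchmark itself.
import pandas as pd
df = pd.read_csv('0_cf2_conv_deepbench.log')
df = df[df.time > 0]  # drop failed configurations logged as -1
print(df.sort_values('time').tail(5))  # five slowest convolution shapes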
| 38.48
| 142
| 0.543139
|
6a26cd48a13e381dc99dfa88325324cdadd050da
| 1,346
|
py
|
Python
|
perceptron/save_load.py
|
KSpenko/perceptron
|
d02f6cb1e9d024156e6db2291a448ccfefaf8219
|
[
"MIT"
] | null | null | null |
perceptron/save_load.py
|
KSpenko/perceptron
|
d02f6cb1e9d024156e6db2291a448ccfefaf8219
|
[
"MIT"
] | null | null | null |
perceptron/save_load.py
|
KSpenko/perceptron
|
d02f6cb1e9d024156e6db2291a448ccfefaf8219
|
[
"MIT"
] | 1
|
2021-02-25T22:29:25.000Z
|
2021-02-25T22:29:25.000Z
|
import xlsxwriter
import openpyxl
import numpy as np
from perceptron import Perceptron
def save_model(model, file_name):
""" save model weights and biases in excel format """
workbook = xlsxwriter.Workbook(file_name)
for i in range(model.nLayers-1):
worksheet = workbook.add_worksheet("Weights "+str(i))
for j in range(len(model.weights[i])):
for k in range(len(model.weights[i][j])):
worksheet.write(j, k, model.weights[i][j][k])
worksheet = workbook.add_worksheet("Biases "+str(i))
for j in range(len(model.biases[i])):
worksheet.write(j, 0, model.biases[i][j])
workbook.close()
def load_xlsx(file_name, layout):
""" import model weights and biases from excel file """
model = Perceptron(layout)
wb = openpyxl.load_workbook(file_name)
sheets = wb.sheetnames
weights = []
biases = []
for i in range(model.nLayers-1):
sheet = wb[sheets[i*2+1]]
temp_bias = []
for j in range(layout[i+1]):
temp_bias.append(np.float64(sheet.cell(row=j+1, column=1).value))
biases.append(np.array(temp_bias))
sheet = wb[sheets[i*2]]
temp_weight = np.empty((layout[i+1], layout[i]))
for j in range(layout[i+1]):
for k in range(layout[i]):
temp_weight[j, k] = np.float64(sheet.cell(row=j+1, column=k+1).value)
weights.append(np.array(temp_weight))
model.weights=weights
model.biases=biases
return model
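# Round-trip sketch; assumes Perceptron([2, 3, 1]) builds a network with initialised
# weights and biases, matching how load_xlsx constructs its model above.
if __name__ == "__main__":
    original = Perceptron([2, 3, 1])
    save_model(original, "model.xlsx")
    restored = load_xlsx("model.xlsx", [2, 3, 1])
    assert all(np.allclose(w1, w2) for w1, w2 in zip(original.weights, restored.weights))
    assert all(np.allclose(b1, b2) for b1, b2 in zip(original.biases, restored.biases))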
| 32.047619
| 73
| 0.700594
|
df91e15839d7cb5393f83e1989d3d022e989dea7
| 8,983
|
py
|
Python
|
scripts/filter/gather_filtering_stats.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | 10
|
2015-04-28T14:15:04.000Z
|
2021-03-15T00:07:38.000Z
|
scripts/filter/gather_filtering_stats.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | null | null | null |
scripts/filter/gather_filtering_stats.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | 6
|
2017-03-16T22:38:41.000Z
|
2021-08-11T00:22:52.000Z
|
#!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import os
import argparse
import numpy as np
from RouToolPa.GeneralRoutines import FileRoutines
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--sample_directory", action="store", dest="samples_dir", required=True,
type=lambda s: FileRoutines.check_path(os.path.abspath(s)),
help="Directory with samples")
parser.add_argument("-s", "--samples", action="store", dest="samples",
help="Comma-separated list of subdirectories(one per sample) to handle. "
"If not set all subdirectories will be considered as containing samples."
"In sample directory should one(in case SE reads) or two(in case PE reads) files."
"Filenames should should contain '_1.fq' or '_1.fastq' for forward(left) reads, "
" '_2.fq' or '_2.fastq' for reverse(right) reads and '.fq' or '.fastq' for SE reads")
parser.add_argument("-o", "--output_dir", action="store", dest="output_dir",
type=lambda s: FileRoutines.check_path(os.path.abspath(s)),
default="./", help="Directory to write output. Default: current directory")
"""
#parser.add_argument("-t", "--threads", action="store", dest="threads", default=1, type=int,
# help="Number of threads to use in Trimmomatic. Default - 1.")
parser.add_argument("-q", "--average_quality_threshold", action="store", dest="average_quality_threshold", default=15,
type=int,
help="Quality threshold for sliding window. Works only if -q/--average_quality_threshold is set"
"Default - 15.")
parser.add_argument("-u", "--score_type", action="store", dest="score_type", default="phred64",
help="Phred quality score type. Allowed: phred33, phred64. Default: phred64")
parser.add_argument("-n", "--name_type", action="store", dest="name_type", default="short",
help="Type of read name. Required to gather per tile filtering statistics. Default: short")
"""
args = parser.parse_args()
samples = args.samples.split(",") if args.samples else sorted(os.listdir(args.samples_dir))
FileRoutines.safe_mkdir(args.output_dir)
overall_stat_file = "%s/overall_samples.stat" % args.output_dir
overall_stat_fd = open(overall_stat_file, "w")
overall_stat_fd.write("#Sample_id\tTotal_pairs\tRetained_pairs\tRetained_pairs_percent\tMin_pairs_retained_in_tiles\n")
for sample in samples:
print("Handling %s" % sample)
sample_dir = "%s%s/" % (args.samples_dir, sample)
sample_out_dir = "%s%s/" % (args.output_dir, sample)
FileRoutines.safe_mkdir(sample_out_dir)
files_from_sample_dir = sorted(os.listdir(sample_dir))
stat_files_from_sample_dir = []
prefix_list = []
for filename in files_from_sample_dir:
if ".stat" == filename[-5:]:
stat_files_from_sample_dir.append("%s%s" % (sample_dir, filename))
prefix_list.append("%s%s" % (sample_out_dir, filename[:-5]))
number_of_stat_files = len(stat_files_from_sample_dir)
percent_total_sample_stat_file = "%s/%s.sample.percent.stats" % (sample_out_dir, sample)
sample_stat_fd = open(percent_total_sample_stat_file, "w")
sample_stat_fd.write("Read_group\t"
"Paires_retained\tForward_only_retained\tReverse_only_retained\tPairs_discarded\t"
"Tile_min_pairs_retained\tTile_min_forward_only_retained\t"
"Tile_min_reverse_only_retained\tTile_min_pairs_discarded\t"
"Tile_max_pairs_retained\tTile_max_forward_only_retained\t"
"Tile_max_reverse_only_retained\tTile_max_pairs_discarded\t"
"Tile_mean_pairs_retained\tTile_mean_forward_only_retained\t"
"Tile_mean_reverse_only_retained\tTile_mean_pairs_discarded\t"
"Tile_median_pairs_retained\tTile_median_forward_only_retained\t"
"Tile_median_reverse_only_retained\tTile_median_pairs_discarded\n")
total_reads_sample_stats = []
min_percent_retained_pairs_in_tile_list = []
for stat_file_index in range(0, number_of_stat_files):
percent_total_stat_file = "%s.total.percent.stats" % prefix_list[stat_file_index]
percent_tile_stat_file = "%s.tile.percent.stats" % prefix_list[stat_file_index]
tile_description_list = []
total_reads_list = []
tile_stats_list = []
with open(stat_files_from_sample_dir[stat_file_index], "r") as stat_fd:
try:
line = stat_fd.readline()
while line[:13] != "instrument_id":
if line[:15] == "Paires retained" or line[:14] == "Pairs retained":
pairs_retained = float(line.strip().split("\t")[-1])
elif line[:21] == "Forward only retained":
forward_only_retained = float(line.strip().split("\t")[-1])
elif line[:21] == "Reverse only retained":
reverse_only_retained = float(line.strip().split("\t")[-1])
elif line[:15] == "Pairs discarded":
pairs_discarded = float(line.strip().split("\t")[-1])
line = stat_fd.readline()
line = stat_fd.readline()
total_stats_list = np.array([pairs_retained, forward_only_retained, reverse_only_retained, pairs_discarded])
for line in stat_fd:
line_list = line.strip().split("\t")
tile_stats = list(map(float, line_list[-4:]))  # materialise as a list so it can be summed and stored below
# skip absent tile
if sum(tile_stats) == 0:
print("\tTile %s is absent in input data for %s" % (line_list[4], prefix_list[stat_file_index]))
continue
tile_description_list.append(line_list[:-4])
tile_stats_list.append(tile_stats)
total_reads_list.append(sum(tile_stats))
except StopIteration:
print("\tEmpty .stat file for %s" % prefix_list[stat_file_index])
continue
total_reads_sample_stats.append(total_stats_list)
total_reads_list = np.array(total_reads_list)
# tile_stats
tile_stats_list = np.array(tile_stats_list)
percent_stats_list = tile_stats_list / total_reads_list[:, None]
#print(percent_stats_list)
# total_stats
total_percent_stats = total_stats_list / sum(total_stats_list)
samples_mean_percent_stats = np.mean(percent_stats_list, axis=0)
samples_median_percent_stats = np.median(percent_stats_list, axis=0)
samples_max_percent_stats = np.max(percent_stats_list, axis=0)
samples_min_percent_stats = np.min(percent_stats_list, axis=0)
min_percent_retained_pairs_in_tile_list.append(samples_min_percent_stats[0])
with open(percent_total_stat_file, "w") as percent_stats_fd:
percent_stats_fd.write("Paires retained\tForward only retained\tReverse only retained\tPairs discarded\n")
percent_stats_fd.write("%s\n" % "\t".join(map(lambda f: "%.3f" % f, total_percent_stats)))
sample_stat_fd.write("%s\t%s\t%s\t%s\t%s\t%s\n" % (prefix_list[stat_file_index].split("/")[-1],
"\t".join(map(lambda f: "%.3f" % f, total_percent_stats)),
"\t".join(map(lambda f: "%.3f" % f, samples_min_percent_stats)),
"\t".join(map(lambda f: "%.3f" % f, samples_max_percent_stats)),
"\t".join(map(lambda f: "%.3f" % f, samples_mean_percent_stats)),
"\t".join(map(lambda f: "%.3f" % f, samples_median_percent_stats))))
total_reads_sample_stats = np.array(total_reads_sample_stats)
# print(total_reads_sample_stats)
summed_reads_sample_stats = np.sum(total_reads_sample_stats, axis=0)
total_number_of_pairs = np.sum(summed_reads_sample_stats)
percent_summed_reads_sample_stats = summed_reads_sample_stats/total_number_of_pairs
sample_stat_fd.write("Total\t%s\n" % ("\t".join(map(lambda f: "%.3f" % f, percent_summed_reads_sample_stats))))
sample_stat_fd.close()
overall_stat_fd.write("%s\t%.0f\t%.0f\t%.3f\t%.3f\n" % (sample, total_number_of_pairs,
summed_reads_sample_stats[0],
percent_summed_reads_sample_stats[0],
min(min_percent_retained_pairs_in_tile_list)))
overall_stat_fd.close()
| 52.841176
| 127
| 0.616275
|
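A minimal sketch of the per-tile normalisation step used in the script above, with made-up counts (array shapes and values are illustrative only, not real filtering data):
import numpy as np
# Four filtering categories per tile: pairs retained, forward only,
# reverse only, pairs discarded (toy numbers).
tile_stats_list = np.array([[90.0, 3.0, 2.0, 5.0],
                            [80.0, 5.0, 5.0, 10.0]])
total_reads_list = tile_stats_list.sum(axis=1)                     # total reads per tile
percent_stats_list = tile_stats_list / total_reads_list[:, None]   # broadcast divide, as in the script
print(np.min(percent_stats_list, axis=0))                          # per-category minimum across tiles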
66435ec65114e3ff5c04111d80b01674391bba8c
| 3,249
|
py
|
Python
|
bopt/test_problems/Exeter_CFD_Problems/pitzdaily.py
|
georgedeath/bomean
|
0dad35e0d584cf7c46c9a8cb0445f225875cfa86
|
[
"MIT"
] | 2
|
2020-05-19T15:48:37.000Z
|
2021-08-16T10:41:49.000Z
|
bopt/test_problems/Exeter_CFD_Problems/pitzdaily.py
|
georgedeath/bomean
|
0dad35e0d584cf7c46c9a8cb0445f225875cfa86
|
[
"MIT"
] | null | null | null |
bopt/test_problems/Exeter_CFD_Problems/pitzdaily.py
|
georgedeath/bomean
|
0dad35e0d584cf7c46c9a8cb0445f225875cfa86
|
[
"MIT"
] | null | null | null |
try:
from data.SnappyHexOptimise import BasicPitzDailyRun
except:
from .data.SnappyHexOptimise import BasicPitzDailyRun
try:
from interfaces import ControlPolygonInterface
except:
from .interfaces import ControlPolygonInterface
try:
from base_class import Problem
except:
from .base_class import Problem
try:
from data import support #import data.support as support
except:
from .data import support #as support
import numpy as np
class PitzDaily(Problem, ControlPolygonInterface):
def __init__(self, settings):
self.source_case = settings.get('source_case', 'data/PitzDaily/case_fine')
self.case_path = settings.get('case_path', 'data/PitzDaily/case_single/')
self.domain_files = settings.get('boundary_files', ['data/PitzDaily/boundary.csv'])
self.fixed_points_files = settings.get('fixed_points_files', ['data/PitzDaily/fixed.csv'])
self.n_control = settings.get('n_control', [5])
self.niter = settings.get('niter', 5)
self.thickness = settings.get('thickness', np.array([0, 0, 0.1]))
self.stl_dir = settings.get('stl_dir', 'constant/triSurface/')
self.stl_file_name = settings.get('stl_file_name', 'ribbon.stl')
#import pdb; pdb.set_trace()
self.setup()
def setup(self, verbose=False):
pts = [np.loadtxt(filename, delimiter=',') for filename in self.domain_files]
fixed_points = [list(np.loadtxt(filename, delimiter=',').astype(int))\
for filename in self.fixed_points_files]
cpolys = []
for i in range(len(pts)):
cpolys.append(support.ControlPolygon2D(pts[i], fixed_points[i], self.n_control[i]))
ControlPolygonInterface.__init__(self, cpolys)
problem = BasicPitzDailyRun(case_path=self.case_path)
problem.prepare_case(self.source_case, verbose)
self.problem = problem
def info(self):
raise NotImplementedError
def get_configurable_settings(self):
raise NotImplementedError
def run(self, shape, verbose=False):
curv_data = support.subdc_to_stl_mult(shape, self.niter,\
thickness=self.thickness,\
file_directory=self.case_path+self.stl_dir, \
file_name=self.stl_file_name,\
draw=False)
p = self.problem.cost_function(verbose=verbose)
if p == 0:
raise Exception('Pressure difference is exactly zero. This is a bug related to OpenFoam.')
return np.abs(p)
def evaluate(self, decision_vector, verbose=False):
if not self.constraint(decision_vector):
raise ValueError('Constraint violated. Please supply a feasible decision vector.')
shape = self.convert_decision_to_shape(decision_vector)
return self.run(shape, verbose)
if __name__=='__main__':
import numpy as np
seed = 1435
np.random.seed(seed)
prob = PitzDaily({})
lb, ub = prob.get_decision_boundary()
x = np.random.random((1000, lb.shape[0])) * (ub - lb) + lb
rand_x = []
for i in range(x.shape[0]):
if prob.constraint(x[i]):
rand_x.append(x[i])
res = prob.evaluate(rand_x[0])
print(res)
| 39.144578
| 102
| 0.658972
|
5c8d4bfd561cff4b101ecad0b2bc21af1525c582
| 376
|
py
|
Python
|
server/api/services/__init__.py
|
NUS-CS-MComp/cs-cloud-computing-music-personality
|
35cc926bef83fb8be3c6af680862343a67cd6e1c
|
[
"Apache-2.0"
] | 2
|
2021-07-13T07:57:48.000Z
|
2021-11-18T08:20:38.000Z
|
server/api/services/__init__.py
|
NUS-CS-MComp/cs-cloud-computing-music-personality
|
35cc926bef83fb8be3c6af680862343a67cd6e1c
|
[
"Apache-2.0"
] | null | null | null |
server/api/services/__init__.py
|
NUS-CS-MComp/cs-cloud-computing-music-personality
|
35cc926bef83fb8be3c6af680862343a67cd6e1c
|
[
"Apache-2.0"
] | null | null | null |
from .facebook import FacebookService
from .reddit import RedditService
from .spotify import SpotifyService
from .twitter import TwitterService
from .ibm_watson import IBMWatsonService
from .sagemaker import SagamakerService
__all__ = [
"FacebookService",
"RedditService",
"SpotifyService",
"TwitterService",
"IBMWatsonService",
"SagamakerService",
]
| 23.5
| 40
| 0.771277
|
b6c2e076bc3e25b5b0ec3450fd55a4aed0095c6f
| 437
|
py
|
Python
|
example_project/example_project/urls.py
|
holgerd77/django-public-project
|
0d5684dee9567fe1650ecad6413dd9aef71a8ec2
|
[
"BSD-3-Clause"
] | 13
|
2015-08-27T08:13:47.000Z
|
2021-11-08T11:20:29.000Z
|
example_project/example_project/urls.py
|
holgerd77/django-public-project
|
0d5684dee9567fe1650ecad6413dd9aef71a8ec2
|
[
"BSD-3-Clause"
] | null | null | null |
example_project/example_project/urls.py
|
holgerd77/django-public-project
|
0d5684dee9567fe1650ecad6413dd9aef71a8ec2
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
admin.autodiscover()
import django.views
from public_project.urls import urlpatterns
urlpatterns += [
url(r'^admin/', include(admin.site.urls)),
]
if settings.DEBUG:
urlpatterns += [
url(r'^media/(?P<path>.*)$', django.views.static.serve, {
'document_root': settings.MEDIA_ROOT,
}),
]
| 21.85
| 65
| 0.677346
|
3f7dd5e4e73dd0f6aaabfda26d0e716397343ac1
| 606
|
py
|
Python
|
tests/test_plot.py
|
resistics/resistics
|
cba60747803b6c582eaaf1a670a7f455f5724ebd
|
[
"MIT"
] | 38
|
2019-03-18T09:06:02.000Z
|
2022-02-18T15:38:54.000Z
|
tests/test_plot.py
|
resistics/resistics
|
cba60747803b6c582eaaf1a670a7f455f5724ebd
|
[
"MIT"
] | 19
|
2019-08-23T03:57:37.000Z
|
2022-03-12T01:07:09.000Z
|
tests/test_plot.py
|
resistics/resistics
|
cba60747803b6c582eaaf1a670a7f455f5724ebd
|
[
"MIT"
] | 8
|
2019-09-26T00:31:57.000Z
|
2021-09-04T09:28:15.000Z
|
import pytest
from typing import List
import numpy as np
@pytest.mark.parametrize(
"y, max_pts, x_expected, y_expected",
[
([0, 1, 3, 4, 2, 3, 4, 3, 4, 5, 5, 5], 5, [0, 3, 4, 9, 11], [0, 4, 2, 5, 5]),
],
)
def test_lttb_downsample(
y: List, max_pts: int, x_expected: List, y_expected: List
) -> None:
"""Test lttb downsampling"""
from resistics.plot import lttb_downsample
x = np.arange(len(y))
y = np.array(y)
nx, ny = lttb_downsample(x, y, max_pts=max_pts)
np.testing.assert_array_equal(nx, x_expected)
np.testing.assert_array_equal(ny, y_expected)
| 26.347826
| 85
| 0.632013
|
af7faf9d477a2a79f8072443ae2bd8fdd83da94a
| 20,407
|
py
|
Python
|
sympy/ntheory/tests/test_ntheory.py
|
Timeroot/sympy
|
f95bf4bbc548d326f4643d22faec32aca7880187
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/ntheory/tests/test_ntheory.py
|
Timeroot/sympy
|
f95bf4bbc548d326f4643d22faec32aca7880187
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/ntheory/tests/test_ntheory.py
|
Timeroot/sympy
|
f95bf4bbc548d326f4643d22faec32aca7880187
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy import Sieve, binomial_coefficients, binomial_coefficients_list, \
multinomial_coefficients, Mul, S, Pow
from sympy import factorial as fac
from sympy.ntheory import isprime, n_order, is_primitive_root, \
is_quad_residue, legendre_symbol, jacobi_symbol, npartitions, totient, \
factorint, primefactors, divisors, randprime, nextprime, prevprime, \
primerange, primepi, prime, pollard_rho, perfect_power, multiplicity, \
trailing, divisor_count, primorial, pollard_pm1
from sympy.ntheory.factor_ import smoothness, smoothness_p
from sympy.ntheory.generate import cycle_length
from sympy.ntheory.primetest import _mr_safe_helper, mr
from sympy.ntheory.bbp_pi import pi_hex_digits
from sympy.ntheory.modular import crt, crt1, crt2, solve_congruence
from sympy.utilities.pytest import raises
from sympy.utilities.iterables import capture
def test_trailing():
assert trailing(0) == 0
assert trailing(1) == 0
assert trailing(-1) == 0
assert trailing(2) == 1
assert trailing(7) == 0
assert trailing(-7) == 0
for i in range(100):
assert trailing((1<<i)) == i
assert trailing((1<<i) * 31337) == i
assert trailing((1<<1000001)) == 1000001
assert trailing((1<<273956)*7**37) == 273956
def test_multiplicity():
for b in range(2, 20):
for i in range(100):
assert multiplicity(b, b**i) == i
assert multiplicity(b, (b**i) * 23) == i
assert multiplicity(b, (b**i) * 1000249) == i
# Should be fast
assert multiplicity(10, 10**10023) == 10023
# Should exit quick
assert multiplicity(1, 1) == 1
def test_perfect_power():
assert perfect_power(0) is False
assert perfect_power(1) is False
assert perfect_power(2) is False
assert perfect_power(3) is False
assert perfect_power(4) == (2, 2)
assert perfect_power(14) is False
assert perfect_power(25) == (5, 2)
assert perfect_power(22) is False
assert perfect_power(22, [2]) is False
assert perfect_power(137**(3*5*13)) == (137, 3*5*13)
assert perfect_power(137**(3*5*13) + 1) is False
assert perfect_power(137**(3*5*13) - 1) is False
assert perfect_power(103005006004**7) == (103005006004, 7)
assert perfect_power(103005006004**7+1) is False
assert perfect_power(103005006004**7-1) is False
assert perfect_power(103005006004**12) == (103005006004, 12)
assert perfect_power(103005006004**12+1) is False
assert perfect_power(103005006004**12-1) is False
assert perfect_power(2**10007) == (2, 10007)
assert perfect_power(2**10007+1) is False
assert perfect_power(2**10007-1) is False
assert perfect_power((9**99 + 1)**60) == (9**99 + 1, 60)
assert perfect_power((9**99 + 1)**60+1) is False
assert perfect_power((9**99 + 1)**60-1) is False
assert perfect_power((10**40000)**2, big=False) == (10**40000, 2)
assert perfect_power(10**100000) == (10, 100000)
assert perfect_power(10**100001) == (10, 100001)
assert perfect_power(13**4, [3, 5]) is False
assert perfect_power(3**4, [3, 10], factor=0) is False
assert perfect_power(3**3*5**3) == (15, 3)
assert perfect_power(2**3*5**5) is False
assert perfect_power(2*13**4) is False
assert perfect_power(2**5*3**3) is False
def test_isprime():
s = Sieve()
s.extend(100000)
ps = set(s.primerange(2, 100001))
for n in range(100001):
# if (n in ps) != isprime(n): print n
assert (n in ps) == isprime(n)
assert isprime(179424673)
# Some Mersenne primes
assert isprime(2**61 - 1)
assert isprime(2**89 - 1)
assert isprime(2**607 - 1)
assert not isprime(2**601 - 1)
#Arnault's number
assert isprime(int('''
803837457453639491257079614341942108138837688287558145837488917522297\
427376533365218650233616396004545791504202360320876656996676098728404\
396540823292873879185086916685732826776177102938969773947016708230428\
687109997439976544144845341155872450633409279022275296229414984230688\
1685404326457534018329786111298960644845216191652872597534901'''))
# pseudoprime that passes the base set [2, 3, 7, 61, 24251]
assert not isprime(9188353522314541)
assert _mr_safe_helper(
"if n < 170584961: return mr(n, [350, 3958281543])") == \
' # [350, 3958281543] stot = 1 clear [2, 3, 5, 7, 29, 67, 679067]'
assert _mr_safe_helper(
"if n < 3474749660383: return mr(n, [2, 3, 5, 7, 11, 13])") == \
' # [2, 3, 5, 7, 11, 13] stot = 7 clear == bases'
def test_prime():
assert prime(1) == 2
assert prime(2) == 3
assert prime(5) == 11
assert prime(11) == 31
assert prime(57) == 269
assert prime(296) == 1949
assert prime(559) == 4051
assert prime(3000) == 27449
assert prime(4096) == 38873
assert prime(9096) == 94321
assert prime(25023) == 287341
def test_primepi():
assert primepi(1) == 0
assert primepi(2) == 1
assert primepi(5) == 3
assert primepi(11) == 5
assert primepi(57) == 16
assert primepi(296) == 62
assert primepi(559) == 102
assert primepi(3000) == 430
assert primepi(4096) == 564
assert primepi(9096) == 1128
assert primepi(25023) == 2763
def test_generate():
assert nextprime(-4) == 2
assert nextprime(2) == 3
assert nextprime(5) == 7
assert nextprime(12) == 13
assert nextprime(90) == 97
assert nextprime(10**40) == (10**40 + 121)
assert prevprime(3) == 2
assert prevprime(7) == 5
assert prevprime(13) == 11
assert prevprime(97) == 89
assert prevprime(10**40) == (10**40 - 17)
assert list(primerange(2, 7)) == [2, 3, 5]
assert list(primerange(2, 10)) == [2, 3, 5, 7]
assert list(primerange(1050, 1100)) == [1051, 1061, \
1063, 1069, 1087, 1091, 1093, 1097]
s = Sieve()
for i in range(30, 2350, 376):
for j in range(2, 5096, 1139):
A = list(s.primerange(i, i+j))
B = list(primerange(i, i+j))
assert A == B
s = Sieve()
assert s[10] == 29
assert nextprime(2, 2) == 5
raises(ValueError, 'totient(0)')
raises(ValueError, 'primorial(0)')
assert mr(1, [2]) == False
func = lambda i: (i**2 + 1) % 51
assert cycle_length(func, 4).next() == (6, 2)
assert list(cycle_length(func, 4, values=True)) == \
[17, 35, 2, 5, 26, 14, 44, 50, 2, 5, 26, 14]
assert cycle_length(func, 4, nmax=5).next() == (5, None)
assert list(cycle_length(func, 4, nmax=5, values=True)) == \
[17, 35, 2, 5, 26]
def test_randprime():
import random
random.seed(1234)
assert randprime(2, 3) == 2
assert randprime(1, 3) == 2
assert randprime(3, 5) == 3
raises(ValueError, 'randprime(20, 22)')
for a in [100, 300, 500, 250000]:
for b in [100, 300, 500, 250000]:
p = randprime(a, a+b)
assert a <= p < (a+b) and isprime(p)
def fac_multiplicity(n, p):
"""Return the power of the prime number p in the
factorization of n!"""
if p > n: return 0
if p > n//2: return 1
q, m = n, 0
while q >= p:
q //= p
m += q
return m
def multiproduct(seq=(), start=1):
"""
Return the product of a sequence of factors with multiplicities,
times the value of the parameter ``start``. The input may be a
sequence of (factor, exponent) pairs or a dict of such pairs.
>>> multiproduct({3:7, 2:5}, 4) # = 3**7 * 2**5 * 4
279936
"""
if not seq:
return start
if isinstance(seq, dict):
seq = seq.iteritems()
units = start
multi = []
for base, exp in seq:
if not exp:
continue
elif exp == 1:
units *= base
else:
if exp % 2:
units *= base
multi.append((base, exp//2))
return units * multiproduct(multi)**2
def test_factorint():
assert primefactors(123456) == [2, 3, 643]
assert factorint(0) == {0:1}
assert factorint(1) == {}
assert factorint(-1) == {-1:1}
assert factorint(-2) == {-1:1, 2:1}
assert factorint(-16) == {-1:1, 2:4}
assert factorint(2) == {2:1}
assert factorint(126) == {2:1, 3:2, 7:1}
assert factorint(123456) == {2:6, 3:1, 643:1}
assert factorint(5951757) == {3:1, 7:1, 29:2, 337:1}
assert factorint(64015937) == {7993:1, 8009:1}
assert factorint(2**(2**6) + 1) == {274177:1, 67280421310721:1}
assert multiproduct(factorint(fac(200))) == fac(200)
for b, e in factorint(fac(150)).items():
assert e == fac_multiplicity(150, b)
assert factorint(103005006059**7) == {103005006059:7}
assert factorint(31337**191) == {31337:191}
assert factorint(2**1000 * 3**500 * 257**127 * 383**60) == \
{2:1000, 3:500, 257:127, 383:60}
assert len(factorint(fac(10000))) == 1229
assert factorint(12932983746293756928584532764589230) == \
{2: 1, 5: 1, 73: 1, 727719592270351: 1, 63564265087747: 1, 383: 1}
assert factorint(727719592270351) == {727719592270351:1}
assert factorint(2**64+1, use_trial=False) == factorint(2**64+1)
for n in range(60000):
assert multiproduct(factorint(n)) == n
assert pollard_rho(2**64+1, seed=1) == 274177
assert pollard_rho(19, seed=1) is None
assert factorint(3, limit=2) == {3: 1}
assert factorint(12345) == {3: 1, 5: 1, 823: 1}
assert factorint(12345, limit=3) == {4115: 1, 3: 1} # the 5 is greater than the limit
assert factorint(1, limit=1) == {}
assert factorint(12, limit=1) == {12: 1}
assert factorint(30, limit=2) == {2: 1, 15: 1}
assert factorint(16, limit=2) == {2: 4}
assert factorint(124, limit=3) == {2: 2, 31: 1}
assert factorint(4*31**2, limit=3) == {2: 2, 31: 2}
p1 = nextprime(2**32)
p2 = nextprime(2**16)
p3 = nextprime(p2)
assert factorint(p1*p2*p3) == {p1: 1, p2: 1, p3: 1}
assert factorint(13*17*19, limit=15) == {13: 1, 17*19: 1}
assert factorint(1951*15013*15053, limit=2000) == {225990689: 1, 1951: 1}
assert factorint(primorial(17)+1, use_pm1=0) == \
{19026377261L: 1, 3467: 1, 277: 1, 105229: 1}
# when prime b is closer than approx sqrt(8*p) to prime p then they are
# "close" and have a trivial factorization
a=nextprime(2**2**8) # 78 digits
b=nextprime(a + 2**2**4)
assert 'Fermat' in capture(lambda : factorint(a*b, verbose=1))
raises(ValueError, 'pollard_rho(4)')
raises(ValueError, 'pollard_pm1(3)')
raises(ValueError, 'pollard_pm1(10, B=2)')
# verbose coverage
n = nextprime(2**16)*nextprime(2**17)*nextprime(1901)
assert 'with primes' in capture(lambda: factorint(n, verbose=1))
capture(lambda: factorint(nextprime(2**16)*1012, verbose=1))
n=nextprime(2**17)
capture(lambda: factorint(n**3, verbose=1)) # perfect power termination
capture(lambda: factorint(2*n, verbose=1)) # factoring complete msg
# exceed 1st
n=nextprime(2**17)
n*=nextprime(n)
assert '1000' in capture(lambda : factorint(n, limit=1000, verbose=1))
n*=nextprime(n)
assert len(factorint(n)) == 3
assert len(factorint(n, limit=p1)) == 3
n*=nextprime(2*n)
# exceed 2nd
assert '2001' in capture(lambda : factorint(n, limit=2000, verbose=1))
assert capture(lambda : factorint(n, limit=4000, verbose=1)).count('Pollard') == 2
# non-prime pm1 result
n=nextprime(8069)
n*=nextprime(2*n)*nextprime(2*n, 2)
capture(lambda: factorint(n, verbose=1)) # non-prime pm1 result
# factor fermat composite
p1 = nextprime(2**17)
p2 = nextprime(2*p1)
assert factorint((p1*p2**2)**3) == {p1: 3, p2: 6}
def divisors_and_divisor_count():
assert divisors(-1) == [1]
assert divisors(0) == []
assert divisors(1) == [1]
assert divisors(2) == [1, 2]
assert divisors(3) == [1, 3]
assert divisors(17) == [1, 17]
assert divisors(10) == [1, 2, 5, 10]
assert divisors(100) == [1, 2, 4, 5, 10, 20, 25, 50, 100]
assert divisors(101) == [1, 101]
assert divisor_count(0) == 0
assert divisor_count(-1) == 1
assert divisor_count(1) == 1
assert divisor_count(6) == 4
assert divisor_count(12) == 6
assert divisor_count(180, 3) == divisor_count(180//3)
assert divisor_count(2*3*5, 7) == 0
def test_totient():
assert [totient(k) for k in range(1, 12)] == \
[1, 1, 2, 2, 4, 2, 6, 4, 6, 4, 10]
assert totient(5005) == 2880
assert totient(5006) == 2502
assert totient(5009) == 5008
def test_partitions():
assert [npartitions(k) for k in range(13)] == \
[1, 1, 2, 3, 5, 7, 11, 15, 22, 30, 42, 56, 77]
assert npartitions(100) == 190569292
assert npartitions(200) == 3972999029388
assert npartitions(1000) == 24061467864032622473692149727991
assert npartitions(2000) == 4720819175619413888601432406799959512200344166
assert npartitions(10000) % 10**10 == 6916435144
assert npartitions(100000) % 10**10 == 9421098519
def test_residue():
assert n_order(2, 13) == 12
assert [n_order(a, 7) for a in range(1, 7)] == \
[1, 3, 6, 3, 6, 2]
assert n_order(5, 17) == 16
assert n_order(17, 11) == n_order(6, 11)
assert n_order(101, 119) == 6
assert is_primitive_root(2, 7) == False
assert is_primitive_root(3, 8) == False
assert is_primitive_root(11, 14) == False
assert is_primitive_root(12, 17) == is_primitive_root(29, 17)
assert is_quad_residue(3, 7) == False
assert is_quad_residue(10, 13) == True
assert is_quad_residue(12364, 139) == is_quad_residue(12364 % 139, 139)
assert is_quad_residue(207, 251) == True
assert is_quad_residue(0, 1) == True
assert is_quad_residue(1, 1) == True
assert is_quad_residue(0, 2) == is_quad_residue(1, 2) == True
assert is_quad_residue(1, 4) == True
assert is_quad_residue(2, 27) == False
assert [j for j in range(14) if is_quad_residue(j, 14)] == \
[0, 1, 2, 4, 7, 8, 9, 11]
raises(ValueError, 'is_quad_residue(1.1, 2)')
assert legendre_symbol(5, 11) == 1
assert legendre_symbol(25, 41) == 1
assert legendre_symbol(67, 101) == -1
assert legendre_symbol(0, 13) == 0
assert legendre_symbol(9, 3) == 0
raises(ValueError, 'legendre_symbol(2, 4)')
assert jacobi_symbol(25, 41) == 1
assert jacobi_symbol(-23, 83) == -1
assert jacobi_symbol(3, 9) == 0
assert jacobi_symbol(42, 97) == -1
assert jacobi_symbol(3, 5) == -1
assert jacobi_symbol(7, 9) == 1
assert jacobi_symbol(0, 3) == 0
assert jacobi_symbol(0, 1) == 1
assert jacobi_symbol(2, 1) == 1
assert jacobi_symbol(1, 3) == 1
raises(ValueError, 'jacobi_symbol(3, 8)')
def test_hex_pi_nth_digits():
assert pi_hex_digits(0) == '3243f6a8885a30'
assert pi_hex_digits(1) == '243f6a8885a308'
assert pi_hex_digits(10000) == '68ac8fcfb8016c'
def test_crt():
def mcrt(m, v, r, symmetric=False):
assert crt(m, v, symmetric)[0] == r
mm, e, s = crt1(m)
assert crt2(m, v, mm, e, s, symmetric) == (r, mm)
mcrt([2, 3, 5], [0, 0, 0], 0)
mcrt([2, 3, 5], [1, 1, 1], 1)
mcrt([2, 3, 5], [-1, -1, -1], -1, True)
mcrt([2, 3, 5], [-1, -1, -1], 2*3*5 - 1, False)
assert crt([656, 350], [811, 133], symmetric=True) == (-56917, 114800)
def test_binomial_coefficients_list():
assert binomial_coefficients_list(0) == [1]
assert binomial_coefficients_list(1) == [1, 1]
assert binomial_coefficients_list(2) == [1, 2, 1]
assert binomial_coefficients_list(3) == [1, 3, 3, 1]
assert binomial_coefficients_list(4) == [1, 4, 6, 4, 1]
assert binomial_coefficients_list(5) == [1, 5, 10, 10, 5, 1]
assert binomial_coefficients_list(6) == [1, 6, 15, 20, 15, 6, 1]
def test_binomial_coefficients():
for n in range(15):
c = binomial_coefficients(n)
l = [c[k] for k in sorted(c)]
assert l == binomial_coefficients_list(n)
def test_multinomial_coefficients():
assert multinomial_coefficients(1, 1) == {(1,): 1}
assert multinomial_coefficients(1, 2) == {(2,): 1}
assert multinomial_coefficients(1, 3) == {(3,): 1}
assert multinomial_coefficients(2, 1) == {(0, 1): 1, (1, 0): 1}
assert multinomial_coefficients(2, 2) == {(2, 0): 1, (0, 2): 1, (1, 1): 2}
assert multinomial_coefficients(2, 3) == {(3, 0): 1, (1, 2): 3, (0, 3): 1,
(2, 1): 3}
assert multinomial_coefficients(3, 1) == {(1, 0, 0): 1, (0, 1, 0): 1,
(0, 0, 1): 1}
assert multinomial_coefficients(3, 2) == {(0, 1, 1): 2, (0, 0, 2): 1,
(1, 1, 0): 2, (0, 2, 0): 1, (1, 0, 1): 2, (2, 0, 0): 1}
assert multinomial_coefficients(3, 3) == {(2, 1, 0): 3, (0, 3, 0): 1,
(1, 0, 2): 3, (0, 2, 1): 3, (0, 1, 2): 3, (3, 0, 0): 1,
(2, 0, 1): 3, (1, 2, 0): 3, (1, 1, 1): 6, (0, 0, 3): 1}
def test_issue1257():
assert factorint(1030903) == {53: 2, 367: 1}
def test_divisors():
assert divisors(28) == [1, 2, 4, 7, 14, 28]
assert [x for x in divisors(3*5*7, 1)] == [1, 3, 5, 15, 7, 21, 35, 105]
assert divisors(0) == []
def test_divisor_count():
assert divisor_count(0) == 0
assert divisor_count(6) == 4
def test_primorial():
assert primorial(1) == 2
assert primorial(1, nth=0) == 1
assert primorial(2) == 6
assert primorial(2, nth=0) == 2
assert primorial(4, nth=0) == 6
def test_smoothness_and_smoothness_p():
assert smoothness(1) == (1, 1)
assert smoothness(2**4*3**2) == (3, 16)
assert smoothness_p(10431, m=1) == \
(1, [(3, (2, 2, 4)), (19, (1, 5, 5)), (61, (1, 31, 31))])
assert smoothness_p(10431) == \
(-1, [(3, (2, 2, 2)), (19, (1, 3, 9)), (61, (1, 5, 5))])
assert smoothness_p(10431, power=1) == \
(-1, [(3, (2, 2, 2)), (61, (1, 5, 5)), (19, (1, 3, 9))])
assert smoothness_p(21477639576571, visual=1) == \
'p**i=4410317**1 has p-1 B=1787, B-pow=1787\n' + \
'p**i=4869863**1 has p-1 B=2434931, B-pow=2434931'
def test_visual_factorint():
assert factorint(1, visual=1) == 1
forty2 = factorint(42, visual=True)
assert type(forty2) == Mul
assert str(forty2) == '2**1*3**1*7**1'
assert factorint(1, visual=True) is S.One
no = dict(evaluate=False)
assert factorint(42**2, visual=True) == Mul(Pow(2, 2, **no),
Pow(3, 2, **no),
Pow(7, 2, **no), **no)
assert Pow(-1, 1, **no) in factorint(-42, visual=True).args
def test_visual_io():
sm = smoothness_p
fi = factorint
# with smoothness_p
n = 124
d = fi(n)
m = fi(d, visual=True)
t = sm(n)
s = sm(t)
for th in [d, s, t, n, m]:
assert sm(th, visual=True) == s
assert sm(th, visual=1) == s
for th in [d, s, t, n, m]:
assert sm(th, visual=False) == t
assert [sm(th, visual=None) for th in [d, s, t, n, m]] == [s, d, s, t, t]
assert [sm(th, visual=0) for th in [d, s, t, n, m]] == [s, d, s, t, t]
# with factorint
for th in [d, m, n]:
assert fi(th, visual=True) == m
assert fi(th, visual=1) == m
for th in [d, m, n]:
assert fi(th, visual=False) == d
assert [fi(th, visual=None) for th in [d, m, n]] == [m, d, d]
assert [fi(th, visual=0) for th in [d, m, n]] == [m, d, d]
# test reevaluation
no = dict(evaluate=False)
assert sm({4: 2}, visual=False) == sm(16)
assert sm(Mul(*[Pow(k, v, **no) for k, v in {4: 2, 2: 6}.items()], **no),
visual=False) == sm(2**10)
assert fi({4: 2}, visual=False) == fi(16)
assert fi(Mul(*[Pow(k, v, **no) for k, v in {4: 2, 2: 6}.items()], **no),
visual=False) == fi(2**10)
def test_modular():
assert solve_congruence(*zip([3, 4, 2], [12, 35, 17])) == (1719, 7140)
assert solve_congruence(*zip([3, 4, 2], [12, 6, 17])) is None
assert solve_congruence(*zip([3, 4, 2], [13, 7, 17])) == (172, 1547)
assert solve_congruence(*zip([-10, -3, -15], [13, 7, 17])) == (172, 1547)
assert solve_congruence(*zip([-10, -3, 1, -15], [13, 7, 7, 17])) is None
assert solve_congruence(*zip([-10, -5, 2, -15], [13, 7, 7, 17])) == (835, 1547)
assert solve_congruence(*zip([-10, -5, 2, -15], [13, 7, 14, 17])) == (2382, 3094)
assert solve_congruence(*zip([-10, 2, 2, -15], [13, 7, 14, 17])) == (2382, 3094)
assert solve_congruence(*zip((1, 1, 2),(3, 2, 4))) is None
raises(ValueError, 'solve_congruence(*zip([3, 4, 2], [12.1, 35, 17]))')
| 38.287054
| 89
| 0.59803
|
a1610af0678557c58919054704ed03866fd3419a
| 2,465
|
py
|
Python
|
tests/basics/Importing.py
|
jayvdb/Nuitka
|
0ff702e065b1b53231ba0cae451385a3da0fe766
|
[
"Apache-2.0"
] | 1
|
2019-03-31T09:56:11.000Z
|
2019-03-31T09:56:11.000Z
|
tests/basics/Importing.py
|
jayvdb/Nuitka
|
0ff702e065b1b53231ba0cae451385a3da0fe766
|
[
"Apache-2.0"
] | null | null | null |
tests/basics/Importing.py
|
jayvdb/Nuitka
|
0ff702e065b1b53231ba0cae451385a3da0fe766
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
def localImporter1():
import os
return os
def localImporter1a():
import os as my_os_name
return my_os_name
def localImporter2():
from os import path
return path
def localImporter2a():
from os import path as renamed
return renamed
print("Direct module import", localImporter1())
print("Direct module import using rename", localImporter1a())
print("From module import", localImporter2())
print("From module import using rename", localImporter2a())
from os import * # isort:skip
print("Star import gave us", path)
import os.path as myname # isort:skip
print("As import gave", myname)
def localImportFailure():
try:
from os import path, lala, listdir
except Exception as e:
print("gives", type(e), repr(e))
try:
print(listdir)
except UnboundLocalError:
print("and listdir was not imported", end=" ")
print("but path was", path)
print("From import that fails in the middle", end=" ")
localImportFailure()
def nonPackageImportFailure():
try:
# Not allowed without being a package, should raise ValueError
from . import whatever
except Exception as e:
print(type(e), repr(e))
print("Package import fails in non-package:", end=" ")
nonPackageImportFailure()
def importBuiltinTupleFailure():
try:
value = ("something",)
# Not allowed to not be constant string, optimization might be fooled
# though.
__import__(value)
except Exception as e:
print(type(e), repr(e))
print("The __import__ built-in optimization can handle tuples:", end=" ")
importBuiltinTupleFailure()
| 23.932039
| 79
| 0.688844
|
3c3b1de536652435f887e53d20b7282f96c09e19
| 5,098
|
py
|
Python
|
four.py
|
gto76/n-in-the-row
|
18291a2a9fee1eb47153cbef9bd4764ed9615d59
|
[
"MIT"
] | 1
|
2021-05-10T15:18:22.000Z
|
2021-05-10T15:18:22.000Z
|
four.py
|
gto76/n-in-the-row
|
18291a2a9fee1eb47153cbef9bd4764ed9615d59
|
[
"MIT"
] | null | null | null |
four.py
|
gto76/n-in-the-row
|
18291a2a9fee1eb47153cbef9bd4764ed9615d59
|
[
"MIT"
] | 2
|
2019-02-19T12:02:29.000Z
|
2019-02-21T10:50:53.000Z
|
#!/usr/bin/python3
#
# Usage: four.py
#
from enum import Enum
import copy
SIZE = 6
GOAL = 4
SEARCH_DEPTH = 5
PRUNE_FACTOR = 0.1
class D(Enum):
HOR = 0
VER = 1
DIA = 2
DIA2 = 3
class F(Enum):
E = 0
X = 1
O = 2
class Board:
def __init__(self, size):
self.brd = [size*[F.E] for _ in range(size)]
def __repr__(self):
out = ""
for row in self.brd:
for cell in row:
symbol = "."
if cell == F.X:
symbol = "X"
elif cell == F.O:
symbol = "O"
out += " {}".format(symbol)
out += '\n'
return out
def main():
board = Board(SIZE)
sign = F.X
scr = 0
print(scr)
print(board)
while -float("inf") < scr < float("inf") and \
len(get_coordinates_of(board, F.E)) > 0:
cell = get_next_move(board, sign, SEARCH_DEPTH)[1]
board.brd[cell[0]][cell[1]] = sign
scr = score(board, F.X)
print(scr)
print(board)
input()
sign = get_oposite(sign)
def get_next_move(board, sign, depth):
empty_cells = get_coordinates_of(board, F.E)
scores = [score_for_move(board, sign, cell) for cell in empty_cells]
options = sorted([list(a) for a in zip(scores, empty_cells)], reverse=True)
if options[0][0] in [-float("inf"), float("inf")]:
return options[0]
if depth == 0:
return options[0]
pruned_size = int(SIZE*SIZE*PRUNE_FACTOR)
if pruned_size < 1:
pruned_size = 1
options = options[:pruned_size]
for option in options:
next_board = copy.deepcopy(board)
next_cell = option[1]
next_board.brd[next_cell[0]][next_cell[1]] = sign
option[0] = -get_next_move(next_board, get_oposite(sign), depth-1)[0]
return sorted(options, reverse=True)[0]
def score_for_move(board, sign, cell):
next_board = copy.deepcopy(board)
next_board.brd[cell[0]][cell[1]] = sign
return score(next_board, sign)
def score(board, sign):
if sign == F.X:
return score_for(board, F.X) - score_for(board, F.O)
return score_for(board, F.O) - score_for(board, F.X)
def score_for(board, sign):
out = 0
for cell in get_coordinates_of(board, sign):
for direction in D:
out += get_score_for_cell(board, cell, sign, direction)
return out
def get_coordinates_of(board, sign):
out = []
for i, row in enumerate(board.brd):
for j, cell in enumerate(row):
if cell == sign:
out.append([i, j])
return out
def get_score_for_cell(board, cell, sign, direction):
out = 0
for delta in range(-(GOAL-1), 1):
coordinates = get_coordinates(cell, delta, direction)
if not coordinates:
continue
signs = get_window(board, coordinates)
out += score_window(signs, sign)
return out
def score_window(signs, sign):
oposite_sign = get_oposite(sign)
if signs.count(sign) == 0:
out = -signs.count(oposite_sign)
if out == -GOAL:
return -float("inf")
return out
elif signs.count(oposite_sign) == 0:
out = signs.count(sign)
if out == GOAL:
return float("inf")
return out
return 0
def get_coordinates(cell, delta, direction):
if direction == D.HOR:
start = cell[0] + delta
end = start + GOAL - 1
if start < 0:
return
if end >= SIZE:
return
return list(zip(list(range(start, end+1)), GOAL*[cell[1]]))
elif direction == D.VER:
start = cell[1] + delta
end = start + GOAL - 1
if start < 0:
return
if end >= SIZE:
return
return list(zip(GOAL*[cell[0]], list(range(start, end+1))))
elif direction == D.DIA:
start_x = cell[0] + delta
start_y = cell[1] + delta
end_x = start_x + GOAL - 1
end_y = start_y + GOAL - 1
if start_x < 0 or start_y < 0:
return
if end_x >= SIZE or end_y >= SIZE:
return
return list(zip(list(range(start_x, end_x+1)),
list(range(start_y, end_y+1))))
else:
start_x = cell[0] + delta
start_y = cell[1] - delta
end_x = start_x + GOAL - 1
end_y = start_y - GOAL + 1
extremes = [start_x, start_y, end_x, end_y]
if any(extreme < 0 or extreme >= SIZE for extreme in extremes):
return
return list(zip(uni_range(start_x, end_x),
uni_range(start_y, end_y)))
###
## UTIL
#
def uni_range(a, b):
if b >= a:
return list(range(a, b+1))
return list(reversed(range(b, a+1)))
def get_window(board, cells):
out = []
for cell in cells:
out.append(board.brd[cell[0]][cell[1]])
return out
def column(matrix, i):
return [row[i] for row in matrix]
def get_oposite(sign):
if sign == F.X:
return F.O
return F.X
if __name__ == '__main__':
main()
| 23.934272
| 79
| 0.549235
|
bca481f94a931fb4fc4b00567b9402e7f78ad0d8
| 3,084
|
py
|
Python
|
simba/similarities/mcsg.py
|
babylonhealth/simba
|
b0124281c0efd59b088520e28add36d1038bce3b
|
[
"Apache-2.0"
] | 18
|
2019-11-05T10:30:33.000Z
|
2021-06-04T00:57:38.000Z
|
simba/similarities/mcsg.py
|
babylonhealth/simba
|
b0124281c0efd59b088520e28add36d1038bce3b
|
[
"Apache-2.0"
] | null | null | null |
simba/similarities/mcsg.py
|
babylonhealth/simba
|
b0124281c0efd59b088520e28add36d1038bce3b
|
[
"Apache-2.0"
] | 2
|
2020-06-27T17:44:27.000Z
|
2021-01-19T00:32:18.000Z
|
import numpy as np
from ..utils.gaussian import aic_spherical, tic, tic_spherical
from ..utils.vmf import vmf_aic, vmf_tic
def von_mises_correction_aic(Dnew, Dc):
"""
:param Dnew[nxd matrix]: set 1
:param Dc[mxd matrix]: set 2
:return [float]: semantic similarity measure
(approximation of the bayes factors)
"""
D = np.concatenate((Dnew, Dc), axis=0)
aic_x = -vmf_aic(Dnew)
aic_y = -vmf_aic(Dc)
aic_xy = -vmf_aic(D)
similarity = aic_xy - (aic_x + aic_y)
return similarity
def von_mises_correction_tic(Dnew, Dc):
"""
:param Dnew[nxd matrix]: set 1
:param Dc[mxd matrix]: set 2
:return [float]: semantic similarity measure
(approximation of the bayes factors)
"""
D = np.concatenate((Dnew, Dc), axis=0)
tic_x = -vmf_tic(Dnew)
tic_y = -vmf_tic(Dc)
tic_xy = -vmf_tic(D)
similarity = tic_xy - (tic_x + tic_y)
return similarity
def gaussian_correction_aic(Dnew, Dc):
"""
:param Dnew[nxd matrix]: set 1
:param Dc[mxd matrix]: set 2
:return [float]: semantic similarity measure
(approximation of the bayes factors)
"""
Dnew = np.array(Dnew)
Dc = np.array(Dc)
K, D = Dnew.shape
L, D = Dc.shape
mu_1 = np.mean(Dnew, axis=0)
mu_2 = np.mean(Dc, axis=0)
mu_1_sq = np.mean(Dnew ** 2, axis=0)
mu_2_sq = np.mean(Dc ** 2, axis=0)
p1 = K * 1.0 / (K + L)
mu_3 = p1 * mu_1 + (1 - p1) * mu_2
mu_3_sq = p1 * mu_1_sq + (1 - p1) * mu_2_sq
reg = 1e-5
v_1 = mu_1_sq - mu_1 ** 2 + reg
v_2 = mu_2_sq - mu_2 ** 2 + reg
v_3 = mu_3_sq - mu_3 ** 2 + reg
ll_fast_x = K * np.sum(np.log(v_1))
ll_fast_y = L * np.sum(np.log(v_2))
ll_fast_xy = (K + L) * np.sum(np.log(v_3))
similarity_fast = - ll_fast_xy + ll_fast_x + ll_fast_y
return similarity_fast + 4 * D
def spherical_gaussian_correction_aic(Dnew, Dc):
"""
:param Dnew[nxd matrix]: set 1
:param Dc[mxd matrix]: set 2
:return [float]: semantic similarity measure
(approximation of the bayes factors)
"""
D = np.concatenate((Dnew, Dc), axis=0)
aic_x = -aic_spherical(Dnew)
aic_y = -aic_spherical(Dc)
aic_xy = -aic_spherical(D)
return aic_xy - (aic_x + aic_y)
def gaussian_correction_tic(Dnew, Dc):
"""
:param Dnew[nxd matrix]: set 1
:param Dc[mxd matrix]: set 2
:return [float]: semantic similarity measure
(approximation of the bayes factors)
"""
D = np.concatenate((Dnew, Dc), axis=0)
tic_x = -tic(Dnew)
tic_y = -tic(Dc)
tic_xy = -tic(D)
similarity = tic_xy - (tic_x + tic_y)
return similarity
def spherical_gaussian_correction_tic(Dnew, Dc):
"""
:param Dnew[nxd matrix]: set 1
:param Dc[mxd matrix]: set 2
:return [float]: semantic similarity measure
(approximation of the bayes factors)
"""
D = np.concatenate((Dnew, Dc), axis=0)
tic_x = -tic_spherical(Dnew)
tic_y = -tic_spherical(Dc)
tic_xy = -tic_spherical(D)
similarity = tic_xy - (tic_x + tic_y)
return similarity
| 24.870968
| 62
| 0.613813
|
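A usage sketch for the similarity functions above, assuming the simba package shown in this record is importable; the embedding matrices and dimensions are toy values:
import numpy as np
# Hypothetical usage; gaussian_correction_aic is defined in the module above.
from simba.similarities.mcsg import gaussian_correction_aic
rng = np.random.default_rng(0)
set_1 = rng.normal(size=(6, 50))   # 6 token vectors of dimension 50 (toy data)
set_2 = rng.normal(size=(8, 50))   # 8 token vectors from a second sentence
score = gaussian_correction_aic(set_1, set_2)  # larger value = more similar under this measure
print(score)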
222d8d2eb1e36538c12a7512b906fcdcb63a8436
| 2,293
|
py
|
Python
|
dummydf/sql/group.py
|
moriyoshi/dummydf
|
39d82f0022ea9d072ce56724f16bf363a37b1bbf
|
[
"MIT"
] | null | null | null |
dummydf/sql/group.py
|
moriyoshi/dummydf
|
39d82f0022ea9d072ce56724f16bf363a37b1bbf
|
[
"MIT"
] | null | null | null |
dummydf/sql/group.py
|
moriyoshi/dummydf
|
39d82f0022ea9d072ce56724f16bf363a37b1bbf
|
[
"MIT"
] | null | null | null |
# coding: utf-8
#
# Copyright 2018 Moriyoshi Koizumi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .column import _Function, _AggregationStarColumn, _DataFrameColumn, infer_data_type  # _DataFrameColumn is referenced in agg() below; assumed to live in .column
from .types import StructType, StructField
class GroupedData(object):
def __init__(self, df, cols):
self.df = df
self.cols = cols
def agg(self, *exprs):
if len(exprs) == 1 and isinstance(exprs[0], dict):
exprs = [
_Function(
fn_name, [
_DataFrameColumn(
self.df,
self.df.schema[col_name] if col_name != '*'
else _AggregationStarColumn(self.df)  # self.df; the bare name df is not defined in this scope
)
]
)
for col_name, fn_name in exprs[0].items()
]
from .dataframe import _Aggregation, DataFrame
return DataFrame(
self.df.sql_ctx,
schema=StructType([
StructField(
name=str(expr),
dataType=infer_data_type(expr)
)
for expr in self.cols + exprs
]),
modifier=_Aggregation(
self,
exprs
)
)
| 38.216667
| 80
| 0.606629
|
d03de6e76e96ec69f66ea9325eea852d42d3d278
| 4,233
|
py
|
Python
|
master_thesis/generative_models/wgan/wgan_utils.py
|
HitLuca/Master-thesis
|
359d1af662af70c26d1c6babfddf266a75893043
|
[
"MIT"
] | 9
|
2018-11-27T17:42:53.000Z
|
2021-04-22T08:29:52.000Z
|
master_thesis/generative_models/wgan/wgan_utils.py
|
HitLuca/Master-thesis
|
359d1af662af70c26d1c6babfddf266a75893043
|
[
"MIT"
] | 2
|
2019-07-26T14:34:31.000Z
|
2019-07-26T14:37:32.000Z
|
master_thesis/generative_models/wgan/wgan_utils.py
|
jeykang/TimeSeries-WGAN-TF2
|
81a8d2b5b229530f3e39b1b1d9a2328ea7b819ef
|
[
"MIT"
] | 5
|
2018-10-26T17:20:41.000Z
|
2021-01-15T05:09:46.000Z
|
import keras
from keras import Model
from keras.layers import *
from keras.optimizers import RMSprop
from generative_models import utils
def build_generator(latent_dim, timesteps):
generator_inputs = Input((latent_dim,))
generated = generator_inputs
generated = Dense(15)(generated)
generated = utils.BatchNormalization()(generated)
generated = LeakyReLU(0.2)(generated)
generated = Lambda(lambda x: K.expand_dims(x))(generated)
generated = Conv1D(32, 3, padding='same')(generated)
generated = utils.BatchNormalization()(generated)
generated = LeakyReLU(0.2)(generated)
generated = UpSampling1D(2)(generated)
generated = Conv1D(32, 3, padding='same')(generated)
generated = utils.BatchNormalization()(generated)
generated = LeakyReLU(0.2)(generated)
generated = UpSampling1D(2)(generated)
generated = Conv1D(32, 3, padding='same')(generated)
generated = utils.BatchNormalization()(generated)
generated = LeakyReLU(0.2)(generated)
generated = UpSampling1D(2)(generated)
generated = Conv1D(1, 3, padding='same')(generated)
generated = utils.BatchNormalization()(generated)
generated = LeakyReLU(0.2)(generated)
generated = Lambda(lambda x: K.squeeze(x, -1))(generated)
generated = Dense(timesteps, activation='tanh')(generated)
generator = Model(generator_inputs, generated, 'generator')
return generator
def build_critic(timesteps):
kernel_initializer = keras.initializers.RandomNormal(0, 0.02)
critic_inputs = Input((timesteps,))
criticized = Lambda(lambda x: K.expand_dims(x, -1))(critic_inputs)
criticized = Conv1D(32, 3, padding='same', kernel_initializer=kernel_initializer)(criticized)
criticized = LeakyReLU(0.2)(criticized)
criticized = MaxPooling1D(2, padding='same')(criticized)
criticized = Conv1D(32, 3, padding='same', kernel_initializer=kernel_initializer)(criticized)
criticized = LeakyReLU(0.2)(criticized)
criticized = MaxPooling1D(2, padding='same')(criticized)
criticized = Conv1D(32, 3, padding='same', kernel_initializer=kernel_initializer)(criticized)
criticized = LeakyReLU(0.2)(criticized)
criticized = MaxPooling1D(2, padding='same')(criticized)
criticized = Conv1D(32, 3, padding='same', kernel_initializer=kernel_initializer)(criticized)
criticized = LeakyReLU(0.2)(criticized)
criticized = Flatten()(criticized)
criticized = Dense(50, kernel_initializer=kernel_initializer)(criticized)
criticized = LeakyReLU(0.2)(criticized)
criticized = Dense(15, kernel_initializer=kernel_initializer)(criticized)
criticized = LeakyReLU(0.2)(criticized)
criticized = Dense(1, kernel_initializer=kernel_initializer)(criticized)
critic = Model(critic_inputs, criticized, 'critic')
return critic
def build_generator_model(generator, critic, generator_lr, latent_dim):
utils.set_model_trainable(generator, True)
utils.set_model_trainable(critic, False)
noise_samples = Input((latent_dim,))
generated_samples = generator(noise_samples)
generated_criticized = critic(generated_samples)
generator_model = Model(noise_samples, generated_criticized, 'generator_model')
generator_model.compile(loss=utils.wasserstein_loss, optimizer=RMSprop(generator_lr))
return generator_model
def build_critic_model(generator, critic, critic_lr, latent_dim, timesteps):
utils.set_model_trainable(generator, False)
utils.set_model_trainable(critic, True)
noise_samples = Input((latent_dim,))
real_samples = Input((timesteps,))
generated_samples = generator(noise_samples)
generated_criticized = critic(generated_samples)
real_criticized = critic(real_samples)
critic_model = Model([real_samples, noise_samples],
[real_criticized, generated_criticized], 'critic_model')
critic_model.compile(loss=[utils.wasserstein_loss, utils.wasserstein_loss], optimizer=RMSprop(critic_lr),
loss_weights=[1 / 2, 1 / 2])
return critic_model
def clip_weights(model, clip_value):
for l in model.layers:
weights = [np.clip(w, -clip_value, clip_value) for w in l.get_weights()]
l.set_weights(weights)
| 36.491379
| 109
| 0.733759
|
527064f97f4234b096bc6c6c6416cd1e406facf8
| 630
|
py
|
Python
|
manage.py
|
mohsenamoon1160417237/recipe-API
|
8eebb20b6b56cc76670985beb89de4556db94a28
|
[
"MIT"
] | null | null | null |
manage.py
|
mohsenamoon1160417237/recipe-API
|
8eebb20b6b56cc76670985beb89de4556db94a28
|
[
"MIT"
] | 9
|
2019-12-04T23:09:28.000Z
|
2022-02-10T09:27:36.000Z
|
manage.py
|
BuilderTron/recipe_app
|
5a06244d2677a08104df72624008aa8d2f296da8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'recipe_app.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.636364
| 74
| 0.684127
|
8f72224ba525e2ff9036edcc67c6b5c70e3f4e72
| 1,745
|
py
|
Python
|
core/src/autogluon/core/features/types.py
|
songqiang/autogluon
|
529d7cc65fad411622072aa0349215a15e1e901c
|
[
"Apache-2.0"
] | 8
|
2021-05-17T08:02:05.000Z
|
2022-03-05T13:03:17.000Z
|
core/src/autogluon/core/features/types.py
|
songqiang/autogluon
|
529d7cc65fad411622072aa0349215a15e1e901c
|
[
"Apache-2.0"
] | null | null | null |
core/src/autogluon/core/features/types.py
|
songqiang/autogluon
|
529d7cc65fad411622072aa0349215a15e1e901c
|
[
"Apache-2.0"
] | 2
|
2021-01-04T21:38:53.000Z
|
2021-12-10T06:29:54.000Z
|
# Raw types: Raw data type information grouped into families.
# For example: uint8, int8, int16, int32, and int64 features all map to 'int'
R_INT = 'int'
R_FLOAT = 'float'
R_OBJECT = 'object'
R_CATEGORY = 'category'
R_DATETIME = 'datetime'
R_BOOL = 'bool' # TODO: R_BOOL/R_BOOLEAN?
# TODO: R_FLOAT_SPARSE/R_INT_SPARSE/R_CATEGORY_SPARSE?
# Special types: Meta information about the special meaning of a feature that is not present in the raw data.
# feature has been binned into discrete integer values from its original representation
S_BINNED = 'binned'
# feature was originally a datetime type that was converted to numeric
S_DATETIME_AS_INT = 'datetime_as_int'
# feature is a datetime in object form (string dates), which can be converted to datetime via pd.to_datetime
S_DATETIME_AS_OBJECT = 'datetime_as_object'
# feature is an object type that contains text information that can be utilized in natural language processing
S_TEXT = 'text'
# feature is a categorical that was originally text information. It may or may not still contain the raw text in its data.
S_TEXT_AS_CATEGORY = 'text_as_category'
# feature is a generated feature based off of a text feature but is not an ngram. Examples include character count, word count, symbol count, etc.
S_TEXT_SPECIAL = 'text_special'
# feature is a generated feature based off of a text feature that is an ngram.
S_TEXT_NGRAM = 'text_ngram'
# feature is an object type that contains a string path to an image that can be utilized in computer vision
S_IMAGE_PATH = 'image_path'
# feature is a generated feature based off of a ML model's prediction probabilities of the label column for the row.
# Any model which takes a stack feature as input is a stack ensemble.
S_STACK = 'stack'
| 43.625
| 146
| 0.781662
|
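An illustrative sketch of how the type constants above might be used, assuming they are imported from the module shown in this record; the column names and mapping are invented for the example:
from autogluon.core.features.types import R_INT, R_OBJECT, R_CATEGORY, S_DATETIME_AS_INT, S_TEXT, S_TEXT_AS_CATEGORY
# Hand-built mapping from column name to (raw type, special types), mirroring
# how the constants group dtype families and feature annotations.
feature_types = {
    'age':        (R_INT,      []),
    'signup_ts':  (R_INT,      [S_DATETIME_AS_INT]),   # datetime converted to numeric
    'review':     (R_OBJECT,   [S_TEXT]),              # free-text column
    'review_cat': (R_CATEGORY, [S_TEXT_AS_CATEGORY]),  # text binned into categories
}
text_columns = [name for name, (_, special) in feature_types.items() if S_TEXT in special]
print(text_columns)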
b5bd00b37a33a29065853b426b08c21061df3775
| 1,235
|
py
|
Python
|
tests/utils.py
|
nkato/psd-tools2
|
58838cc0a0274ca11507906f0b39d5faf42f445b
|
[
"MIT"
] | null | null | null |
tests/utils.py
|
nkato/psd-tools2
|
58838cc0a0274ca11507906f0b39d5faf42f445b
|
[
"MIT"
] | null | null | null |
tests/utils.py
|
nkato/psd-tools2
|
58838cc0a0274ca11507906f0b39d5faf42f445b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import psd_tools.reader
import psd_tools.decoder
DATA_PATH = os.path.join(
os.path.abspath(os.path.dirname(__file__)), 'psd_files')
def full_name(filename):
return os.path.join(DATA_PATH, filename)
def load_psd(filename):
with open(full_name(filename), 'rb') as f:
return psd_tools.reader.parse(f)
def decode_psd(filename):
return psd_tools.decoder.parse(load_psd(filename))
# see http://lukeplant.me.uk/blog/posts/fuzzy-testing-with-assertnumqueries/
class FuzzyInt(int):
def __new__(cls, lowest, highest):
obj = super(FuzzyInt, cls).__new__(cls, highest)
obj.lowest = lowest
obj.highest = highest
return obj
def __eq__(self, other):
return other >= self.lowest and other <= self.highest
def __repr__(self):
return str("[%d..%d]") % (self.lowest, self.highest)
def with_psb(fixtures):
psb_fixtures = []
for fixture in fixtures:
psb_fixtures.append(
type(fixture)([fixture[0].replace('.psd', '.psb')]) + fixture[1:])
print(fixtures + type(fixtures)(psb_fixtures))
return fixtures + type(fixtures)(psb_fixtures)
| 26.847826
| 78
| 0.676923
|
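A small usage sketch for the FuzzyInt helper defined above, assuming it is in scope; it compares equal to any value in the inclusive [lowest, highest] range:
approx = FuzzyInt(3, 5)
assert approx == 3 and approx == 4 and approx == 5
assert not (approx == 6)
print(repr(approx))  # "[3..5]"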
0858df061a0e808578a02cf05a80a3a237545502
| 3,849
|
py
|
Python
|
3DMatch/trainset_d3feat.py
|
dahliau/3D-PointCloud
|
863b8fa27f2c4f350ebcbbc7a44576d662e8bfe7
|
[
"IJG"
] | 691
|
2020-09-17T04:47:31.000Z
|
2022-03-31T02:13:42.000Z
|
3DMatch/trainset_d3feat.py
|
zhulf0804/Segmentation-Papers
|
62248f9d1e820ad0a8d23a53e96d21028b6080c3
|
[
"IJG"
] | 3
|
2021-12-18T07:58:02.000Z
|
2022-03-14T01:40:49.000Z
|
3DMatch/trainset_d3feat.py
|
zhulf0804/Segmentation-Papers
|
62248f9d1e820ad0a8d23a53e96d21028b6080c3
|
[
"IJG"
] | 99
|
2020-09-24T07:06:53.000Z
|
2022-03-29T06:05:50.000Z
|
import os
import numpy as np
import open3d as o3d
import pickle
import random
def vis_npys(npys):
pcds = []
colors = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
for i, npy in enumerate(npys):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(npy)
if i < 3:
color = colors[i]
else:
color = [random.random() for _ in range(3)]
pcd.paint_uniform_color(color)
pcds.append(pcd)
o3d.visualization.draw_geometries(pcds)
def decode_points(pts_filename):
'''
# 3DMatch_train_0.030_points.pkl: dict
# key: str, sun3d-brown_bm_1-brown_bm_1/seq-01/cloud_bin_0
# value: np.ndarray n x 3, where n varies per cloud
# 3933 point clouds in total
# min: (850, 3), max: (197343, 3), mean: 13565
'''
with open(pts_filename, 'rb') as file:
data = pickle.load(file)
points = [*data.values()]
ids_list = [*data.keys()]
dims = []
for i in range(len(points)):
dims.append(points[i].shape[0])
print('npts min: {}, npts max: {}, npts mean: {}'.
format(min(dims), max(dims), np.mean(dims)))
print('Total number of point cloud: {}'.format(len(dims)))
return data
def decode_overlap(overlap_filename):
'''
# 3DMatch_train_0.030_overlap.pkl: dict
# 35297
# key: str, '7-scenes-pumpkin/seq-07/cloud_bin_11@7-scenes-pumpkin/seq-08/cloud_bin_2'
# val: float, 0.6015544397826535
# min: 0.30000815461143276, max: 0.9954887218045113, mean: 0.5150335449363996
'''
with open(overlap_filename, 'rb') as file:
overlap = pickle.load(file)
scores = []
for k, v in overlap.items():
scores.append(v)
print('overlap min: {}, overlap max: {}, overlap mean: {}'
.format(min(scores), max(scores), np.mean(scores)))
print('Total pairs: {}'.format(len(scores)))
return overlap
def decode_keypts(keypts_filename):
'''
# 3DMatch_train_0.030_keypts.pkl: dict
# 35297
# key: str, analysis-by-synthesis-office2-5b/seq-01/cloud_bin_34@analysis-by-synthesis-office2-5b/seq-01/cloud_bin_35
# val: np.ndarray, m x 2; m varies per pair
# min: 445, max: 76307, mean: 8487
'''
with open(keypts_filename, 'rb') as file:
correspondences = pickle.load(file)
pairs = []
for k, v in correspondences.items():
pairs.append(v.shape[0])
print('min: {}, max: {}, mean: {}'.format(min(pairs), max(pairs), np.mean(pairs)))
print('Total pairs: {}'.format(len(pairs)))
return correspondences
if __name__ == '__main__':
root = '/Users/zhulf/Downloads/data/backup'
pts_filename = os.path.join(root, f'3DMatch_train_0.030_points.pkl')
overlap_filename = os.path.join(root, f'3DMatch_train_0.030_overlap.pkl')
keypts_filename = os.path.join(root, f'3DMatch_train_0.030_keypts.pkl')
assert os.path.exists(pts_filename)
print('='*20, '3DMatch_train_0.030_points.pkl', '='*20)
data = decode_points(pts_filename)
print('=' * 20, '3DMatch_train_0.030_points.pkl completed', '=' * 20, '\n')
assert os.path.exists(keypts_filename)
print('=' * 20, '3DMatch_train_0.030_overlap.pkl', '=' * 20)
overlap = decode_overlap(overlap_filename)
print('=' * 20, '3DMatch_train_0.030_overlap.pkl completed', '=' * 20, '\n')
assert os.path.exists(overlap_filename)
print('=' * 20, '3DMatch_train_0.030_keypts.pkl', '=' * 20)
correspondences = decode_keypts(keypts_filename)
print('=' * 20, '3DMatch_train_0.030_keypts.pkl completed', '=' * 20)
f1f2 = list(correspondences.keys())[222]
correspondence = correspondences[f1f2]
path1, path2 = f1f2.split('@')
npy1, npy2 = data[path1], data[path2]
npy3, npy4 = npy1[correspondence[:, 0]], npy2[correspondence[:, 1]]
vis_npys([npy1, npy2])
vis_npys([npy1, npy2, npy3])
vis_npys([npy1, npy2, npy3, npy4])  # second view including the matched points from both clouds
| 32.897436
| 121
| 0.633151
|
6a93a360c6d14d5678b8652336686c341a79c635
| 327
|
py
|
Python
|
test_area/write-a-function.py
|
umiphos/python-exercises
|
a834fb63d3e447e3df096543c0e1850ecc020ffe
|
[
"Apache-2.0"
] | null | null | null |
test_area/write-a-function.py
|
umiphos/python-exercises
|
a834fb63d3e447e3df096543c0e1850ecc020ffe
|
[
"Apache-2.0"
] | null | null | null |
test_area/write-a-function.py
|
umiphos/python-exercises
|
a834fb63d3e447e3df096543c0e1850ecc020ffe
|
[
"Apache-2.0"
] | null | null | null |
# https://www.hackerrank.com/challenges/write-a-function
def is_leap(year):
#years=[2100,2400]
#for i in range(len(years)):
# leap = (years[i]%4==0) and (years[i]%400==0 or years[i]%100!=0)
# print leap
# Write your logic here
leap = (year%4==0) and (year%400==0 or year%100!=0)
return leap
| 27.25
| 72
| 0.599388
|
832d3155a910396c13881bf829f8f818ca2cbd55
| 7,050
|
py
|
Python
|
Ansible-AWS-Provisioning/collections/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py
|
ginigangadharan/ansible-real-life
|
897c2fc0d05babbb540768b336b6ad399dad5bfa
|
[
"MIT"
] | 22
|
2021-07-16T08:11:22.000Z
|
2022-03-31T07:15:34.000Z
|
Ansible-AWS-Provisioning/collections/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py
|
premsagar0228/ansible-real-life
|
1a51193b833ab6ad320100472333b9ffb0da39d4
|
[
"MIT"
] | null | null | null |
Ansible-AWS-Provisioning/collections/ansible_collections/community/aws/plugins/modules/ec2_ami_copy.py
|
premsagar0228/ansible-real-life
|
1a51193b833ab6ad320100472333b9ffb0da39d4
|
[
"MIT"
] | 39
|
2021-07-05T02:31:42.000Z
|
2022-03-31T02:46:03.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_ami_copy
short_description: copies AMI between AWS regions, returns new image id
description:
- Copies AMI from a source region to a destination region. B(Since version 2.3 this module depends on boto3.)
options:
source_region:
description:
- The source region the AMI should be copied from.
required: true
type: str
source_image_id:
description:
- The ID of the AMI in source region that should be copied.
required: true
type: str
name:
description:
- The name of the new AMI to copy. (As of 2.3 the default is 'default', in prior versions it was 'null'.)
default: "default"
type: str
description:
description:
- An optional human-readable string describing the contents and purpose of the new AMI.
type: str
encrypted:
description:
- Whether or not the destination snapshots of the copied AMI should be encrypted.
type: bool
kms_key_id:
description:
- KMS key id used to encrypt the image. If not specified, uses default EBS Customer Master Key (CMK) for your account.
type: str
wait:
description:
- Wait for the copied AMI to be in state 'available' before returning.
type: bool
default: 'no'
wait_timeout:
description:
- How long before wait gives up, in seconds. Prior to 2.3 the default was 1200.
- From 2.3-2.5 this option was deprecated in favor of boto3 waiter defaults.
This was reenabled in 2.6 to allow timeouts greater than 10 minutes.
default: 600
type: int
tags:
description:
- 'A hash/dictionary of tags to add to the new copied AMI: C({"key":"value"}) and C({"key":"value","key":"value"})'
type: dict
tag_equality:
description:
- Whether to use tags if the source AMI already exists in the target region. If this is set, and all tags match
in an existing AMI, the AMI will not be copied again.
default: false
type: bool
author:
- Amir Moulavi (@amir343) <amir.moulavi@gmail.com>
- Tim C (@defunctio) <defunct@defunct.io>
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
requirements:
- boto3
'''
EXAMPLES = '''
# Basic AMI Copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
# AMI copy wait until available
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
wait: yes
wait_timeout: 1200 # Default timeout is 600
register: image_id
# Named AMI copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
name: My-Awesome-AMI
description: latest patch
# Tagged AMI copy (will not copy the same AMI twice)
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
tags:
Name: My-Super-AMI
Patch: 1.2.3
tag_equality: yes
# Encrypted AMI copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
encrypted: yes
# Encrypted AMI copy with specified key
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
encrypted: yes
kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
'''
RETURN = '''
image_id:
description: AMI ID of the copied AMI
returned: always
type: str
sample: ami-e689729e
'''
from ansible_collections.amazon.aws.plugins.module_utils.aws.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list
from ansible.module_utils._text import to_native
try:
from botocore.exceptions import ClientError, NoCredentialsError, WaiterError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
def copy_image(module, ec2):
"""
Copies an AMI
module : AnsibleModule object
ec2: ec2 connection object
"""
image = None
changed = False
tags = module.params.get('tags')
params = {'SourceRegion': module.params.get('source_region'),
'SourceImageId': module.params.get('source_image_id'),
'Name': module.params.get('name'),
'Description': module.params.get('description'),
'Encrypted': module.params.get('encrypted'),
}
if module.params.get('kms_key_id'):
params['KmsKeyId'] = module.params.get('kms_key_id')
try:
if module.params.get('tag_equality'):
filters = [{'Name': 'tag:%s' % k, 'Values': [v]} for (k, v) in module.params.get('tags').items()]
filters.append(dict(Name='state', Values=['available', 'pending']))
images = ec2.describe_images(Filters=filters)
if len(images['Images']) > 0:
image = images['Images'][0]
if not image:
image = ec2.copy_image(**params)
image_id = image['ImageId']
if tags:
ec2.create_tags(Resources=[image_id],
Tags=ansible_dict_to_boto3_tag_list(tags))
changed = True
if module.params.get('wait'):
delay = 15
max_attempts = module.params.get('wait_timeout') // delay
image_id = image.get('ImageId')
ec2.get_waiter('image_available').wait(
ImageIds=[image_id],
WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(image))
except WaiterError as e:
module.fail_json_aws(e, msg='An error occurred waiting for the image to become available')
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Could not copy AMI")
except Exception as e:
module.fail_json(msg='Unhandled exception. (%s)' % to_native(e))
def main():
argument_spec = dict(
source_region=dict(required=True),
source_image_id=dict(required=True),
name=dict(default='default'),
description=dict(default=''),
encrypted=dict(type='bool', default=False, required=False),
kms_key_id=dict(type='str', required=False),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=600),
tags=dict(type='dict'),
tag_equality=dict(type='bool', default=False))
module = AnsibleAWSModule(argument_spec=argument_spec)
# TODO: Check botocore version
ec2 = module.client('ec2')
copy_image(module, ec2)
if __name__ == '__main__':
main()
| 31.473214 | 124 | 0.65844 |

83bbc77cfd078076cbfd8c5747ba2b325a848b8a | 7,488 | py | Python |
src/smirnoff_hack.py | bieniekmateusz/forcebalance | 593791866e622ab4eae23ce29a0bed27499a118d | ["BSD-3-Clause"] | null | null | null |
src/smirnoff_hack.py | bieniekmateusz/forcebalance | 593791866e622ab4eae23ce29a0bed27499a118d | ["BSD-3-Clause"] | 6 | 2021-07-20T00:07:12.000Z | 2022-01-14T16:39:25.000Z |
src/smirnoff_hack.py | bieniekmateusz/forcebalance | 593791866e622ab4eae23ce29a0bed27499a118d | ["BSD-3-Clause"] | null | null | null |
## HACK: Improve the performance of the openff forcefield.create_openmm_system()
from openff.toolkit.utils.toolkits import OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper
from openff.toolkit.topology.molecule import Molecule
# time based on total 540s evaluation
# cache for OE find_smarts_matches (save 300+ s)
oe_original_find_smarts_matches = OpenEyeToolkitWrapper.find_smarts_matches
OE_TOOLKIT_CACHE_find_smarts_matches = {}
def oe_cached_find_smarts_matches(self, molecule, smarts, aromaticity_model='OEAroModel_MDL'):
cache_key = hash((molecule, smarts, aromaticity_model))
if cache_key not in OE_TOOLKIT_CACHE_find_smarts_matches:
OE_TOOLKIT_CACHE_find_smarts_matches[cache_key] = oe_original_find_smarts_matches(self, molecule, smarts, aromaticity_model=aromaticity_model)
return OE_TOOLKIT_CACHE_find_smarts_matches[cache_key]
# replace the original function with new one
OpenEyeToolkitWrapper.find_smarts_matches = oe_cached_find_smarts_matches
# cache for RDK find_smarts_matches
rdk_original_find_smarts_matches = RDKitToolkitWrapper.find_smarts_matches
RDK_TOOLKIT_CACHE_find_smarts_matches = {}
def rdk_cached_find_smarts_matches(self, molecule, smarts, aromaticity_model='OEAroModel_MDL'):
cache_key = hash((molecule, smarts, aromaticity_model))
if cache_key not in RDK_TOOLKIT_CACHE_find_smarts_matches:
RDK_TOOLKIT_CACHE_find_smarts_matches[cache_key] = rdk_original_find_smarts_matches(self, molecule, smarts, aromaticity_model=aromaticity_model)
return RDK_TOOLKIT_CACHE_find_smarts_matches[cache_key]
# replace the original function with new one
RDKitToolkitWrapper.find_smarts_matches = rdk_cached_find_smarts_matches
# cache for the validate function (save 94s)
from openff.toolkit.typing.chemistry.environment import ChemicalEnvironment
original_validate = ChemicalEnvironment.validate
TOOLKIT_CACHE_ChemicalEnvironment_validate = {}
def cached_validate(smirks, validate_valence_type=True, toolkit_registry=OpenEyeToolkitWrapper):
cache_key = hash((smirks, validate_valence_type, toolkit_registry))
if cache_key not in TOOLKIT_CACHE_ChemicalEnvironment_validate:
TOOLKIT_CACHE_ChemicalEnvironment_validate[cache_key] = original_validate(smirks, validate_valence_type=validate_valence_type, toolkit_registry=toolkit_registry)
return TOOLKIT_CACHE_ChemicalEnvironment_validate[cache_key]
ChemicalEnvironment.validate = cached_validate
# cache for compute_partial_charges_am1bcc (save 69s)
# No longer needed as of 0.7.0 since all partial charge assignment is routed through ToolkitWrapper.assign_partial_charges
# original_compute_partial_charges_am1bcc = OpenEyeToolkitWrapper.compute_partial_charges_am1bcc
# TOOLKIT_CACHE_compute_partial_charges_am1bcc = {}
# def cached_compute_partial_charges_am1bcc(self, molecule, use_conformers=None, strict_n_conformers=False):
# cache_key = hash(molecule, use_conformers, strict_n_conformers)
# if cache_key not in TOOLKIT_CACHE_compute_partial_charges_am1bcc:
# TOOLKIT_CACHE_compute_partial_charges_am1bcc[cache_key] = original_compute_partial_charges_am1bcc(self, molecule, use_conformers=use_conformers, strict_n_conformers=strict_n_conformers)
# return TOOLKIT_CACHE_compute_partial_charges_am1bcc[cache_key]
# OpenEyeToolkitWrapper.compute_partial_charges_am1bcc = cached_compute_partial_charges_am1bcc
# Cache for OETK assign_partial_charges
oe_original_assign_partial_charges = OpenEyeToolkitWrapper.assign_partial_charges
OE_TOOLKIT_CACHE_assign_partial_charges = {}
def oe_cached_assign_partial_charges(self, molecule, partial_charge_method=None, use_conformers=None, strict_n_conformers=False, _cls=Molecule):
cache_key = hash((molecule, partial_charge_method, str(use_conformers), strict_n_conformers))
if cache_key not in OE_TOOLKIT_CACHE_assign_partial_charges:
oe_original_assign_partial_charges(self, molecule, partial_charge_method=partial_charge_method, use_conformers=use_conformers, strict_n_conformers=strict_n_conformers, _cls=_cls)
OE_TOOLKIT_CACHE_assign_partial_charges[cache_key] = molecule.partial_charges
else:
molecule.partial_charges = OE_TOOLKIT_CACHE_assign_partial_charges[cache_key]
return
OpenEyeToolkitWrapper.assign_partial_charges = oe_cached_assign_partial_charges
# Cache for AmberTools assign_partial_charges
at_original_assign_partial_charges = AmberToolsToolkitWrapper.assign_partial_charges
AT_TOOLKIT_CACHE_assign_partial_charges = {}
def at_cached_assign_partial_charges(self, molecule, partial_charge_method=None, use_conformers=None, strict_n_conformers=False, _cls=Molecule):
cache_key = hash((molecule, partial_charge_method, str(use_conformers), strict_n_conformers))
if cache_key not in AT_TOOLKIT_CACHE_assign_partial_charges:
at_original_assign_partial_charges(self, molecule, partial_charge_method=partial_charge_method, use_conformers=use_conformers, strict_n_conformers=strict_n_conformers, _cls=_cls)
AT_TOOLKIT_CACHE_assign_partial_charges[cache_key] = molecule.partial_charges
else:
molecule.partial_charges = AT_TOOLKIT_CACHE_assign_partial_charges[cache_key]
return
AmberToolsToolkitWrapper.assign_partial_charges = at_cached_assign_partial_charges
# cache the OE generate_conformers function (save 15s)
OE_TOOLKIT_CACHE_molecule_conformers = {}
oe_original_generate_conformers = OpenEyeToolkitWrapper.generate_conformers
def oe_cached_generate_conformers(self, molecule, n_conformers=1, rms_cutoff=None, clear_existing=True):
cache_key = hash((molecule, n_conformers, str(rms_cutoff), clear_existing))
if cache_key not in OE_TOOLKIT_CACHE_molecule_conformers:
oe_original_generate_conformers(self, molecule, n_conformers=n_conformers, rms_cutoff=rms_cutoff, clear_existing=clear_existing)
OE_TOOLKIT_CACHE_molecule_conformers[cache_key] = molecule._conformers
molecule._conformers = OE_TOOLKIT_CACHE_molecule_conformers[cache_key]
OpenEyeToolkitWrapper.generate_conformers = oe_cached_generate_conformers
# cache the RDKit generate_conformers function
RDK_TOOLKIT_CACHE_molecule_conformers = {}
rdk_original_generate_conformers = RDKitToolkitWrapper.generate_conformers
def rdk_cached_generate_conformers(self, molecule, n_conformers=1, rms_cutoff=None, clear_existing=True):
cache_key = hash((molecule, n_conformers, str(rms_cutoff), clear_existing))
if cache_key not in RDK_TOOLKIT_CACHE_molecule_conformers:
rdk_original_generate_conformers(self, molecule, n_conformers=n_conformers, rms_cutoff=rms_cutoff, clear_existing=clear_existing)
RDK_TOOLKIT_CACHE_molecule_conformers[cache_key] = molecule._conformers
molecule._conformers = RDK_TOOLKIT_CACHE_molecule_conformers[cache_key]
RDKitToolkitWrapper.generate_conformers = rdk_cached_generate_conformers
# final timing: 56s
# cache the ForceField creation (no longer needed since using OpenFF API for parameter modifications)
# import hashlib
# from openff.toolkit.typing.engines.smirnoff import ForceField
# SMIRNOFF_FORCE_FIELD_CACHE = {}
# def getForceField(*ffpaths):
# hasher = hashlib.md5()
# for path in ffpaths:
# with open(path, 'rb') as f:
# hasher.update(f.read())
# cache_key = hasher.hexdigest()
# if cache_key not in SMIRNOFF_FORCE_FIELD_CACHE:
# SMIRNOFF_FORCE_FIELD_CACHE[cache_key] = ForceField(*ffpaths, allow_cosmetic_attributes=True)
# return SMIRNOFF_FORCE_FIELD_CACHE[cache_key]
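
# The caching blocks above all repeat the same memoize-then-monkeypatch pattern. Below is
# a minimal, hypothetical sketch (added for illustration, not part of the original module)
# of how the return-value caches could be factored into a single helper; the charge and
# conformer caches also mutate the molecule in place, so they would need a small variant.
import functools

def _memoize_method(original, make_key):
    """Wrap a toolkit method so repeated calls with the same key reuse the first result."""
    cache = {}

    @functools.wraps(original)
    def wrapper(self, *args, **kwargs):
        key = make_key(*args, **kwargs)
        if key not in cache:
            cache[key] = original(self, *args, **kwargs)
        return cache[key]

    wrapper._cache = cache  # exposed so the cache can be inspected or cleared
    return wrapper

# Hypothetical usage mirroring the explicit find_smarts_matches caches above:
# RDKitToolkitWrapper.find_smarts_matches = _memoize_method(
#     rdk_original_find_smarts_matches,
#     lambda molecule, smarts, aromaticity_model='OEAroModel_MDL':
#         hash((molecule, smarts, aromaticity_model)))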
| 61.884298 | 195 | 0.840812 |

b41c118beace63fb535b9fe2d17e4a6a3494d68d | 10,804 | py | Python |
model/config.py | qkaren/converse_reading_cmr | d06d981be12930cff8458e2b1b81be4f5df3a329 | ["MIT"] | 87 | 2019-06-07T18:16:30.000Z | 2021-11-27T08:18:45.000Z |
model/config.py | qkaren/converse_reading_cmr | d06d981be12930cff8458e2b1b81be4f5df3a329 | ["MIT"] | 11 | 2019-06-19T20:53:27.000Z | 2021-05-07T01:05:01.000Z |
model/config.py | qkaren/converse_reading_cmr | d06d981be12930cff8458e2b1b81be4f5df3a329 | ["MIT"] | 17 | 2019-06-08T01:50:23.000Z | 2022-02-16T07:12:15.000Z |
# /usr/bin/env python3
import argparse
import multiprocessing
import torch
"""
Configuration file
"""
def model_config(parser):
parser.add_argument('--vocab_size', type=int, default=0)
parser.add_argument('--wemb_dim', type=int, default=300)
parser.add_argument('--covec_on', action='store_false')
parser.add_argument('--embedding_dim', type=int, default=300)
# pos
parser.add_argument('--no_pos', dest='pos_on', action='store_false')
parser.add_argument('--pos_vocab_size', type=int, default=56)
parser.add_argument('--pos_dim', type=int, default=12)
parser.add_argument('--no_ner', dest='ner_on', action='store_false')
parser.add_argument('--ner_vocab_size', type=int, default=19)
parser.add_argument('--ner_dim', type=int, default=8)
parser.add_argument('--no_feat', dest='feat_on', action='store_false')
parser.add_argument('--num_features', type=int, default=4)
# q->p
parser.add_argument('--prealign_on', action='store_false')
parser.add_argument('--prealign_head', type=int, default=1)
parser.add_argument('--prealign_att_dropout', type=float, default=0)
parser.add_argument('--prealign_norm_on', action='store_true')
parser.add_argument('--prealign_proj_on', action='store_true')
parser.add_argument('--prealign_bidi', action='store_true')
parser.add_argument('--prealign_hidden_size', type=int, default=64)
parser.add_argument('--prealign_share', action='store_false')
parser.add_argument('--prealign_residual_on', action='store_true')
parser.add_argument('--prealign_scale_on', action='store_false')
parser.add_argument('--prealign_sim_func', type=str, default='dotproductproject')
parser.add_argument('--prealign_activation', type=str, default='relu')
parser.add_argument('--pwnn_on', action='store_false')
parser.add_argument('--pwnn_hidden_size', type=int, default=64)
##contextual encoding
parser.add_argument('--contextual_hidden_size', type=int, default=64)
parser.add_argument('--contextual_cell_type', type=str, default='lstm')
parser.add_argument('--contextual_weight_norm_on', action='store_true')
parser.add_argument('--contextual_maxout_on', action='store_true')
parser.add_argument('--contextual_residual_on', action='store_true')
parser.add_argument('--contextual_encoder_share', action='store_true')
parser.add_argument('--contextual_num_layers', type=int, default=2)
## mem setting
parser.add_argument('--msum_hidden_size', type=int, default=64)
parser.add_argument('--msum_cell_type', type=str, default='lstm')
parser.add_argument('--msum_weight_norm_on', action='store_true')
parser.add_argument('--msum_maxout_on', action='store_true')
parser.add_argument('--msum_residual_on', action='store_true')
parser.add_argument('--msum_lexicon_input_on', action='store_true')
parser.add_argument('--msum_num_layers', type=int, default=1)
# attention
parser.add_argument('--deep_att_lexicon_input_on', action='store_false')
parser.add_argument('--deep_att_hidden_size', type=int, default=64)
parser.add_argument('--deep_att_sim_func', type=str, default='dotproductproject')
parser.add_argument('--deep_att_activation', type=str, default='relu')
parser.add_argument('--deep_att_norm_on', action='store_false')
parser.add_argument('--deep_att_proj_on', action='store_true')
parser.add_argument('--deep_att_residual_on', action='store_true')
parser.add_argument('--deep_att_share', action='store_false')
parser.add_argument('--deep_att_opt', type=int, default=0)
# self attn
parser.add_argument('--self_attention_on', action='store_false')
parser.add_argument('--self_att_hidden_size', type=int, default=64)
parser.add_argument('--self_att_sim_func', type=str, default='dotproductproject')
parser.add_argument('--self_att_activation', type=str, default='relu')
parser.add_argument('--self_att_norm_on', action='store_true')
parser.add_argument('--self_att_proj_on', action='store_true')
parser.add_argument('--self_att_residual_on', action='store_true')
parser.add_argument('--self_att_dropout', type=float, default=0.1)
parser.add_argument('--self_att_drop_diagonal', action='store_false')
parser.add_argument('--self_att_share', action='store_false')
# query summary
parser.add_argument('--query_sum_att_type', type=str, default='linear',
help='linear/mlp')
parser.add_argument('--query_sum_norm_on', action='store_true')
parser.add_argument('--san_on', action='store_true')
parser.add_argument('--max_len', type=int, default=30)
parser.add_argument('--decoder_hidden_size', type=int, default=512)
parser.add_argument('--decoder_ptr_update_on', action='store_true')
parser.add_argument('--decoder_num_turn', type=int, default=5)
parser.add_argument('--decoder_mem_type', type=int, default=3)
parser.add_argument('--decoder_mem_drop_p', type=float, default=0.2)
parser.add_argument('--decoder_opt', type=int, default=0)
parser.add_argument('--decoder_att_type', type=str, default='bilinear',
help='bilinear/simple/default')
parser.add_argument('--decoder_rnn_type', type=str, default='gru',
help='rnn/gru/lstm')
parser.add_argument('--decoder_sum_att_type', type=str, default='bilinear',
help='bilinear/simple/default')
parser.add_argument('--decoder_weight_norm_on', action='store_true')
return parser
def data_config(parser):
parser.add_argument('--log_file', default='./log/reddit.log', help='path for log file.')
parser.add_argument('--data_dir', default='data')
parser.add_argument('--raw_data_dir', default='./raw_data')
parser.add_argument('--meta', default='reddit_meta.pick', help='path to preprocessed meta file.')
parser.add_argument('--train_data', default='train_100k.json',
help='path to preprocessed training data file.')
parser.add_argument('--dev_data', default='dev_100k.json',
help='path to preprocessed validation data file.')
parser.add_argument('--dev_gold', default='dev_seq_answer',
help='path to preprocessed validation data file.')
parser.add_argument('--covec_path', default='full_test/MT-LSTM.pt')
parser.add_argument('--glove', default='data_processing/glove.840B.300d.txt',
help='path to word vector file.')
parser.add_argument('--glove_dim', type=int, default=300,
help='word vector dimension.')
parser.add_argument('--sort_all', action='store_true',
help='sort the vocabulary by frequencies of all words.'
'Otherwise consider question words first.')
parser.add_argument('--threads', type=int, default=multiprocessing.cpu_count(),
help='number of threads for preprocessing.')
parser.add_argument('--dev_full', default='dev.full')
parser.add_argument('--test_full', default='test.full')
parser.add_argument('--test_data', default='test.json')
parser.add_argument('--test_output', default='test_output')
return parser
def train_config(parser):
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available(),
help='whether to use GPU acceleration.')
parser.add_argument('--log_per_updates', type=int, default=150)
parser.add_argument('--epoches', type=int, default=400)
parser.add_argument('--eval_step', type=int, default=3000)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--resume', type=str, default='')
parser.add_argument('--optimizer', default='adam',
help='supported optimizer: adamax, sgd, adadelta, adam')
parser.add_argument('--grad_clipping', type=float, default=5)
parser.add_argument('--weight_decay', type=float, default=0)
parser.add_argument('--learning_rate', type=float, default=0.002)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--vb_dropout', action='store_false')
parser.add_argument('--dropout_p', type=float, default=0.4)
parser.add_argument('--dropout_emb', type=float, default=0.4)
parser.add_argument('--dropout_w', type=float, default=0.05)
parser.add_argument('--unk_id', type=int, default=1)
parser.add_argument('--decoding', type=str, default='greedy', help='greedy/sample')
parser.add_argument('--temperature', type=float, default=1.0)
parser.add_argument('--top_k', type=int, default=1)
parser.add_argument('--if_train', type=int, default=1)
parser.add_argument('--curve_file', type=str, default='dev_curve.csv')
parser.add_argument('--smooth', type=int, default=-1)
parser.add_argument('--max_doc', type=int, default=100)
parser.add_argument('--is_rep', type=float, default=0)
parser.add_argument('--decoding_topk', type=int, default=8)
parser.add_argument('--decoding_bleu_lambda', type=float, default=0.5)
parser.add_argument('--decoding_bleu_normalize', action='store_true')
parser.add_argument('--model_type', type=str, default='san', help='[san|seq2seq|memnet]')
parser.add_argument('--weight_type', type=str, default='bleu', help='[bleu|nist]')
parser.add_argument('--no_lr_scheduler', dest='have_lr_scheduler', action='store_true')
parser.add_argument('--multi_step_lr', type=str, default='10,20,30')
parser.add_argument('--lr_gamma', type=float, default=0.5)
parser.add_argument('--scheduler_type', type=str, default='ms', help='ms/rop/exp')
parser.add_argument('--fix_embeddings', action='store_true', help='if true, `tune_partial` will be ignored.')
parser.add_argument('--tune_partial', type=int, default=1000,
help='finetune top-x embeddings (including <PAD>, <UNK>).')
parser.add_argument('--model_dir', default='checkpoint')
parser.add_argument('--seed', type=int, default=2018,
help='random seed for data shuffling, embedding init, etc.')
return parser
def decoding_config(parser):
parser.add_argument('--skip_tokens_file', type=str, default="")
parser.add_argument('--skip_tokens_first_file', type=str, default="")
return parser
def set_args():
parser = argparse.ArgumentParser()
parser = data_config(parser)
parser = model_config(parser)
parser = train_config(parser)
parser = decoding_config(parser)
args = parser.parse_args()
return args
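
# Minimal usage sketch (added for illustration, not part of the original file); the
# command-line flags shown in the comment are hypothetical examples:
#     python config.py --batch_size 64 --model_type seq2seq
if __name__ == '__main__':
    args = set_args()
    print('batch_size=%d, learning_rate=%g, model_type=%s'
          % (args.batch_size, args.learning_rate, args.model_type))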
| 55.405128 | 114 | 0.687153 |

c52c1009a629571bb04630939f016a8fea775010 | 2,405 | py | Python |
python/IECore/CapturingMessageHandler.py | bradleyhenke/cortex | f8245cc6c9464b1de9e6c6e57068248198e63de0 | ["BSD-3-Clause"] | 386 | 2015-01-02T11:10:43.000Z | 2022-03-10T15:12:20.000Z |
python/IECore/CapturingMessageHandler.py | bradleyhenke/cortex | f8245cc6c9464b1de9e6c6e57068248198e63de0 | ["BSD-3-Clause"] | 484 | 2015-01-09T18:28:06.000Z | 2022-03-31T16:02:04.000Z |
python/IECore/CapturingMessageHandler.py | bradleyhenke/cortex | f8245cc6c9464b1de9e6c6e57068248198e63de0 | ["BSD-3-Clause"] | 99 | 2015-01-28T23:18:04.000Z | 2022-03-27T00:59:39.000Z |
##########################################################################
#
# Copyright (c) 2008, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from IECore import MessageHandler, Struct
## The CapturingMessageHandler simply stores all messages passed to it
# in an attribute called messages. It's useful for verifying expected
# message output during testing. Each message in the messages list is a Struct
# with "level", "context" and "message" attributes.
# \ingroup python
class CapturingMessageHandler( MessageHandler ) :

	def __init__( self ) :
		MessageHandler.__init__( self )
		self.messages = []

	def handle( self, level, context, message ) :
		s = Struct()
		s.level = level
		s.context = context
		s.message = message
		self.messages.append( s )
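
# Minimal usage sketch (added for illustration, not part of the original file). It only
# exercises handle() directly; in real tests the handler would be installed through the
# usual IECore message-handler mechanism. MessageHandler.Level.Warning is assumed from
# the IECore API.
if __name__ == "__main__" :

	h = CapturingMessageHandler()
	h.handle( MessageHandler.Level.Warning, "testContext", "something happened" )

	assert len( h.messages ) == 1
	assert h.messages[0].context == "testContext"
	assert h.messages[0].message == "something happened"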
| 40.762712 | 78 | 0.703534 |

9d637d0ca591a9cdae3ff1f5ab2445741b710389 | 874 | py | Python |
test/pytest/Service/test_services.py | michellab/BioSimSpaceCloud | 456b146a2131565e354352872d3e75a08c3652d1 | ["Apache-2.0"] | 2 | 2019-02-15T16:04:19.000Z | 2019-02-19T15:42:27.000Z |
test/pytest/Service/test_services.py | michellab/BioSimSpaceCloud | 456b146a2131565e354352872d3e75a08c3652d1 | ["Apache-2.0"] | null | null | null |
test/pytest/Service/test_services.py | michellab/BioSimSpaceCloud | 456b146a2131565e354352872d3e75a08c3652d1 | ["Apache-2.0"] | null | null | null |
import pytest
from Acquire.Service import get_remote_service_info
skip_slow = True
@pytest.mark.slow
def test_login_to_service():
    if skip_slow:
        return
    root_url = "http://130.61.60.88:8080/t"
    identity_service_url = "%s/identity" % root_url
    access_service_url = "%s/access" % root_url
    accounting_service_url = "%s/accounting" % root_url
    identity_service = get_remote_service_info(identity_service_url)
    # check whois
    username = "chryswoods"
    (get_username, uid) = identity_service.whois(username=username)
    assert(username == get_username)
    (get_username, get_uid) = identity_service.whois(user_uid=uid)
    assert(username == get_username)
    assert(uid == get_uid)
    access_service_url = get_remote_service_info(access_service_url)
    accounting_service_url = get_remote_service_info(accounting_service_url)
| 24.277778 | 76 | 0.741419 |

2bde139c294b4f2f55cb33466245f210a3e83645 | 17,271 | py | Python |
py2neo/admin/install.py | bwmetz/py2neo | 93653a2428693cf29ff65647b703c60d763e1808 | ["Apache-2.0"] | null | null | null |
py2neo/admin/install.py | bwmetz/py2neo | 93653a2428693cf29ff65647b703c60d763e1808 | ["Apache-2.0"] | null | null | null |
py2neo/admin/install.py | bwmetz/py2neo | 93653a2428693cf29ff65647b703c60d763e1808 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# Copyright 2011-2018, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hashlib import sha256
from os import curdir, getenv, kill, listdir, makedirs, rename
from os.path import abspath, dirname, expanduser, isdir, isfile, join as path_join
from random import randint
from re import compile as re_compile
from shutil import rmtree
from socket import create_connection
from subprocess import check_call, check_output, CalledProcessError
from tarfile import TarFile, ReadError
from time import sleep, time
from warnings import warn
from py2neo.admin.dist import Distribution, archive_format
from py2neo.internal.compat import bstr
from py2neo.internal.util import hex_bytes, unhex_bytes
class Warehouse(object):
""" A local storage area for Neo4j installations.
"""
def __init__(self, home=None):
self.home = home or getenv("PY2NEO_HOME") or expanduser("~/.py2neo")
self.dist = path_join(self.home, "dist")
self.run = path_join(self.home, "run")
self.cc = path_join(self.home, "cc")
def get(self, name, database=None, role=None, member=None):
""" Obtain a Neo4j installation by name.
:param name:
:param database:
:param role:
:param member:
:return:
"""
if database and role and member is not None:
container = path_join(self.cc, name, database, role, str(member))
else:
container = path_join(self.run, name)
for dir_name in listdir(container):
dir_path = path_join(container, dir_name)
if isdir(dir_path):
return Installation(dir_path)
raise IOError("Could not locate installation directory")
def install(self, name, edition=None, version=None, database=None, role=None, member=None):
""" Install Neo4j.
:param name:
:param edition:
:param version:
:param database:
:param role:
:param member:
:return:
"""
if database and role and member is not None:
container = path_join(self.cc, name, database, role, str(member))
else:
container = path_join(self.run, name)
rmtree(container, ignore_errors=True)
makedirs(container)
archive_file = Distribution(edition, version).download(self.dist)
try:
with TarFile.open(archive_file, "r:{}".format(archive_format)) as archive:
archive.extractall(container)
except ReadError:
# The tarfile module sometimes has trouble with certain tar
# files for unknown reasons. This workaround falls back to
# command line.
check_call(["tar", "x", "-C", container, "-f", archive_file])
return self.get(name, database, role, member)
def uninstall(self, name, database=None, role=None, member=None):
""" Remove a Neo4j installation.
:param name:
:param database:
:param role:
:param member:
:return:
"""
if database and role and member is not None:
container = path_join(self.cc, name, database, role, str(member))
else:
container = path_join(self.run, name)
rmtree(container, ignore_errors=True)
def directory(self):
""" Fetch a dictionary of :class:`.Installation` objects, keyed
by name, for all available Neo4j installations.
"""
try:
return {name: self.get(name) for name in listdir(self.run) if not name.startswith(".")}
except OSError:
return {}
def rename(self, name, new_name):
""" Rename a Neo4j installation.
:param name:
:param new_name:
:return:
"""
rename(path_join(self.run, name), path_join(self.run, new_name))
class Installation(object):
""" A Neo4j 3.0+ server installation.
"""
config_file = "neo4j.conf"
def __init__(self, home=None):
self.home = home or abspath(curdir)
self.server = Server(self)
self.auth = AuthFile(path_join(self.home, "data", "dbms", "auth"))
def __repr__(self):
return "<%s home=%r>" % (self.__class__.__name__, self.home)
@property
def store_path(self):
""" The location of the graph database store on disk.
"""
return path_join(self.home, "data", "databases",
self.get_config("dbms.active_database", "graph.db"))
def get_config(self, key, default=None):
""" Retrieve the value of a configuration item.
:param key:
:param default:
:return:
"""
config_file_path = path_join(self.home, "conf", self.config_file)
with open(config_file_path, "r") as f_in:
for line in f_in:
if line.startswith(key + "="):
return line.strip().partition("=")[-1]
return default
def set_config(self, key, value):
""" Update a single configuration value.
:param key:
:param value:
"""
self.update_config({key: value})
def update_config(self, properties):
""" Update multiple configuration values.
:param properties:
"""
config_file_path = path_join(self.home, "conf", self.config_file)
with open(config_file_path, "r") as f_in:
lines = f_in.readlines()
with open(config_file_path, "w") as f_out:
properties2 = dict(properties)
for line in lines:
for key, value in properties2.items():
if line.startswith(key + "=") or \
(line.startswith("#") and line[1:].lstrip().startswith(key + "=")):
if value is True:
value = "true"
if value is False:
value = "false"
f_out.write("%s=%s\n" % (key, value))
del properties2[key]
break
else:
f_out.write(line)
for key, value in properties2.items():
if value is True:
value = "true"
if value is False:
value = "false"
f_out.write("%s=%s\n" % (key, value))
@property
def auth_enabled(self):
""" Settable boolean property for enabling and disabling auth
on this server.
"""
return self.get_config("dbms.security.auth_enabled", "true") == "true"
@auth_enabled.setter
def auth_enabled(self, value):
self.set_config("dbms.security.auth_enabled", value)
def _get_protocol_address(self, protocol, default_port):
if self.get_config("dbms.connector.%s.enabled" % protocol, "true") != "true":
raise ValueError("Protocol %r not enabled" % protocol)
address = self.get_config("dbms.connector.%s.listen_address" % protocol)
if address:
host, _, port = address.partition(":")
try:
port = int(port)
except (TypeError, ValueError):
pass
return host or "localhost", port
return "localhost", default_port
def _set_protocol_address(self, protocol, address):
host, port = address
self.set_config("dbms.connector.%s.listen_address" % protocol, "%s:%s" % (host, port))
@property
def http_address(self):
""" The host and port on which this server expects HTTP communication.
:returns: 2-tuple of (host, port)
"""
return self._get_protocol_address("http", 7474)
@http_address.setter
def http_address(self, address):
""" Set the host and port on which this server expects HTTP communication.
"""
self._set_protocol_address("http", address)
@property
def https_address(self):
""" The host and port on which this server expects HTTPS communication.
:returns: 2-tuple of (host, port)
"""
return self._get_protocol_address("https", 7473)
@https_address.setter
def https_address(self, address):
""" Set the host and port on which this server expects HTTPS communication.
"""
self._set_protocol_address("https", address)
@property
def bolt_address(self):
""" The host and port on which this server expects Bolt communication.
:returns: 2-tuple of (host, port)
"""
return self._get_protocol_address("bolt", 7687)
@bolt_address.setter
def bolt_address(self, address):
""" Set the host and port on which this server expects Bolt communication.
"""
self._set_protocol_address("bolt", address)
@property
def http_uri(self):
""" The full HTTP URI for this server.
"""
host, port = self.http_address
return "http://%s:%s" % (host, port)
@property
def https_uri(self):
""" The full HTTPS URI for this server.
"""
host, port = self.https_address
return "https://%s:%s" % (host, port)
@property
def bolt_uri(self):
""" The full Bolt URI for this server.
"""
host, port = self.bolt_address
return "bolt://%s:%s" % (host, port)
@property
def bolt_routing_uri(self):
""" The full Bolt URI for this server.
"""
host, port = self.bolt_address
return "bolt+routing://%s:%s" % (host, port)
def delete_store(self, force=False):
""" Delete the store directory for this server.
:param force:
"""
if force or not self.server.running():
try:
rmtree(self.store_path, ignore_errors=force)
except FileNotFoundError:
pass
else:
raise RuntimeError("Refusing to drop database store while server is running")
class Server(object):
""" Represents a Neo4j server process that can be started and stopped.
"""
def __init__(self, installation):
self.installation = installation
@property
def control_script(self):
return path_join(self.installation.home, "bin", "neo4j")
def start(self, wait=120.0, verbose=False):
""" Start the server.
"""
try:
out = check_output("%s start" % self.control_script, shell=True)
except CalledProcessError as error:
if error.returncode == 2:
raise OSError("Another process is listening on the server port")
elif error.returncode == 512:
raise OSError("Another server process is already running")
else:
raise OSError("An error occurred while trying to start "
"the server [%s]" % error.returncode)
else:
pid = None
for line in out.decode("utf-8").splitlines(False):
if verbose:
print(line)
if line.startswith("process"):
                    number_in_brackets = re_compile(r"\[(\d+)\]")
numbers = number_in_brackets.search(line).groups()
if numbers:
pid = int(numbers[0])
elif "(pid " in line:
pid = int(line.partition("(pid ")[-1].partition(")")[0])
running = False
address = self.installation.bolt_address or self.installation.http_address
t0 = time()
while not running and (time() - t0) < wait:
try:
s = create_connection(address)
except IOError:
sleep(0.5)
else:
s.close()
running = True
if not running:
warn("Timed out waiting for server to start")
return pid
def stop(self):
""" Stop the server.
"""
pid = self.running()
if not pid:
return
try:
check_output(("%s stop" % self.control_script), shell=True)
except CalledProcessError as error:
raise OSError("An error occurred while trying to stop the server "
"[%s]" % error.returncode)
while pid:
try:
kill(pid, 0)
except OSError:
pid = 0
else:
pass
def restart(self):
""" Restart the server.
"""
self.stop()
return self.start()
def running(self):
""" The PID of the current executing process for this server.
"""
try:
out = check_output(("%s status" % self.control_script), shell=True)
except CalledProcessError as error:
if error.returncode == 3:
return None
else:
raise OSError("An error occurred while trying to query the "
"server status [%s]" % error.returncode)
else:
p = None
for line in out.decode("utf-8").splitlines(False):
if "running" in line:
p = int(line.rpartition(" ")[-1])
return p
class AuthFile(object):
""" A Neo4j auth file, generally located at data/dbms/auth.
"""
def __init__(self, name):
self.name = name
if not isfile(self.name):
d = dirname(self.name)
try:
makedirs(d)
except OSError:
pass
with open(self.name, "wb"):
pass
def __iter__(self):
with open(self.name, "rb") as f:
for line in f:
yield AuthUser.load(line)
def remove(self, user_name):
""" Remove a user.
"""
user_name = bstr(user_name)
with open(self.name, "rb") as f:
lines = [line for line in f.readlines() if not AuthUser.match(line, user_name)]
with open(self.name, "wb") as f:
f.writelines(lines)
def update(self, user_name, password):
""" Add or update a user.
"""
user_name = bstr(user_name)
password = bstr(password)
updated = False
with open(self.name, "rb") as f:
lines = []
for line in f.readlines():
if AuthUser.match(line, user_name):
lines.append(AuthUser.create(user_name, password).dump())
updated = True
else:
lines.append(line)
if not updated:
lines.append(AuthUser.create(user_name, password).dump())
with open(self.name, "wb") as f:
f.writelines(lines)
class AuthUser(object):
#: Name of user
name = None
#: The hash algorithm mused to encode the user data
hash_algorithm = None
#:
digest = None
#:
salt = None
@classmethod
def create(cls, user_name, password):
user_name = bstr(user_name)
password = bstr(password)
inst = cls(user_name, b"SHA-256", None, None)
inst.set_password(password)
return inst
@classmethod
def load(cls, s):
s = bstr(s)
fields = s.rstrip().split(b":")
name = fields[0]
hash_algorithm, digest, salt = fields[1].split(b",")
return cls(name, hash_algorithm, unhex_bytes(digest), unhex_bytes(salt))
@classmethod
def match(cls, s, user_name):
s = bstr(s)
user_name = bstr(user_name)
candidate_user_name, _, _ = s.partition(b":")
return candidate_user_name == user_name
def dump(self, eol=b"\r\n"):
return self.name + b":" + self.hash_algorithm + b"," + hex_bytes(self.digest) + b"," + \
hex_bytes(self.salt) + b":" + bstr(eol)
def __init__(self, name, hash_algorithm, digest, salt):
assert hash_algorithm == b"SHA-256"
self.name = bstr(name)
self.hash_algorithm = bstr(hash_algorithm)
self.digest = digest
self.salt = salt
def __repr__(self):
return "<AuthUser name=%r>" % self.name
def set_password(self, password):
assert self.hash_algorithm == b"SHA-256"
salt = bytearray(randint(0x00, 0xFF) for _ in range(16))
m = sha256()
m.update(salt)
m.update(bstr(password))
self.digest = m.digest()
self.salt = salt
def check_password(self, password):
assert self.hash_algorithm == b"SHA-256"
m = sha256()
m.update(self.salt)
m.update(bstr(password))
return m.digest() == self.digest
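
# Minimal usage sketch (added for illustration, not part of the original module). The
# installation name, edition and version below are hypothetical examples; running this
# for real would download the requested Neo4j distribution into ~/.py2neo.
if __name__ == "__main__":
    warehouse = Warehouse()
    installation = warehouse.install("demo", edition="community", version="3.5.0")
    installation.auth.update("neo4j", "password")   # seed the auth file
    pid = installation.server.start()
    print("Bolt available at %s (pid %s)" % (installation.bolt_uri, pid))
    installation.server.stop()
    warehouse.uninstall("demo")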
| 33.086207 | 99 | 0.566093 |

b5f9c9aca52d6b5f8329bf18df3bb3a0460b9b6f | 3,584 | py | Python |
mylinebot/mylinebot/settings.py | tom-lin1128/stocklinebot | e2873e8d21fb4cb98b8776a0fc394b05f228ba04 | ["MIT"] | null | null | null |
mylinebot/mylinebot/settings.py | tom-lin1128/stocklinebot | e2873e8d21fb4cb98b8776a0fc394b05f228ba04 | ["MIT"] | null | null | null |
mylinebot/mylinebot/settings.py | tom-lin1128/stocklinebot | e2873e8d21fb4cb98b8776a0fc394b05f228ba04 | ["MIT"] | null | null | null |
"""
Django settings for mylinebot project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-#aj)h^x(_i23ck+ix&31)g5=ose5_l6y1b*@4&5mh!&2qkl30+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'78d6a9516f05.ngrok.io'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'stocklinebot.apps.StocklinebotConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mylinebot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mylinebot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LINE_CHANNEL_ACCESS_TOKEN = 'VhdGQu8tSttV0efVvEY09UTcEqRh5XsBLcom/WezzBZncKu4gjOmzBzrwx7kNa+TkPX+RYT1EiAy+NeE6q5p5WkAYMLkVzlTe5lwKa1PeJcJFeJXb3ft1++sTOju2pdGN4DhzHUITZOWL2J8X7YvewdB04t89/1O/w1cDnyilFU='
LINE_CHANNEL_SECRET = '4eeed9db999545f557100c0de784bd71'
| 27.358779 | 202 | 0.717355 |

a33898b2e08b6ff1766e0bd884c475d52abc4998 | 28,490 | py | Python |
scripts/cascades/cluster_cascades/sensory_cascades_integration_clusters.py | mwinding/connectome_analysis | dbc747290891805863c9481921d8080dc2043d21 | ["MIT"] | 1 | 2021-06-10T05:48:16.000Z | 2021-06-10T05:48:16.000Z |
cascades/cluster_cascades/sensory_cascades_integration_clusters.py | mwinding/connectome_tools | 0392f6b1e924194299ea7760d8386eb01f3371a3 | ["MIT"] | 2 | 2022-01-21T11:48:45.000Z | 2022-01-21T11:48:45.000Z |
scripts/cascades/cluster_cascades/sensory_cascades_integration_clusters.py | mwinding/connectome_analysis | dbc747290891805863c9481921d8080dc2043d21 | ["MIT"] | 1 | 2022-02-02T15:39:52.000Z | 2022-02-02T15:39:52.000Z |
#%%
import os
import sys
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
# %%
import sys
sys.path.append('/Volumes/GoogleDrive/My Drive/python_code/maggot_models/')
sys.path.append('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
from pymaid_creds import url, name, password, token
import pymaid
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from graspy.plot import gridplot, heatmap
from graspy.utils import binarize, pass_to_ranks
from src.data import load_metagraph
from src.visualization import CLASS_COLOR_DICT, adjplot
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams.update({'font.size': 6})
rm = pymaid.CatmaidInstance(url, token, name, password)
mg = load_metagraph("Gad", version="2020-06-10", path = '/Volumes/GoogleDrive/My Drive/python_code/maggot_models/data/processed/')
mg.calculate_degrees(inplace=True)
adj = mg.adj # adjacency matrix from the "mg" object
clusters = pd.read_csv('cascades/data/meta-method=color_iso-d=8-bic_ratio=0.95-min_split=32.csv', index_col = 0, header = 0)
order = pd.read_csv('cascades/data/signal_flow_order_lvl7.csv').values
# make array from list of lists
order_delisted = []
for sublist in order:
order_delisted.append(sublist[0])
order = np.array(order_delisted)
#%%
# pull sensory annotations and then pull associated skids
input_names = pymaid.get_annotated('mw brain inputs').name
input_skids_list = list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw brain inputs').name))
input_skids = [val for sublist in input_skids_list for val in sublist]
output_names = pymaid.get_annotated('mw brain outputs').name
output_skids_list = list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw brain outputs').name))
output_skids = [val for sublist in output_skids_list for val in sublist]
# order names and skids in desired way for the rest of analysis
sensory_order = [0, 3, 4, 1, 2, 6, 5]
input_names_format = ['ORN', 'thermo', 'photo', 'AN', 'MN', 'vtd', 'A00c']
input_names_format_reordered = [input_names_format[i] for i in sensory_order]
input_skids_list_reordered = [input_skids_list[i] for i in sensory_order]
#%%
# cascades from each sensory modality
from src.traverse import Cascade, to_transmission_matrix
from src.traverse import TraverseDispatcher
# convert skids to indices
input_indices_list = []
for input_skids in input_skids_list_reordered:
indices = np.where([x in input_skids for x in mg.meta.index])[0]
input_indices_list.append(indices)
output_indices_list = []
for input_skids in output_skids_list:
indices = np.where([x in input_skids for x in mg.meta.index])[0]
output_indices_list.append(indices)
all_input_indices = np.where([x in input_skids for x in mg.meta.index])[0]
output_indices = np.where([x in output_skids for x in mg.meta.index])[0]
p = 0.05
max_hops = 10
n_init = 100
simultaneous = True
transition_probs = to_transmission_matrix(adj, p)
cdispatch = TraverseDispatcher(
Cascade,
transition_probs,
stop_nodes = output_indices,
max_hops=max_hops,
allow_loops = False,
n_init=n_init,
simultaneous=simultaneous,
)
input_hit_hist_list = []
for input_indices in input_indices_list:
hit_hist = cdispatch.multistart(start_nodes = input_indices)
input_hit_hist_list.append(hit_hist)
all_input_hit_hist = cdispatch.multistart(start_nodes = all_input_indices)
# %%
# identifying locations of intersecting signals in individual neurons
# which clusters and which neurons?
# tested a few different methods to define "integration neurons"
# settled on summing all visits across hops and then 50% signal threshold
# (majority of the time the signal goes through each particular neuron)
# *** this is summed_sensory_hits_all
threshold = n_init/2
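# Worked illustration of this rule (hypothetical visit counts, not taken from the data):
# with n_init = 100 the threshold is 50 visits, so a neuron whose hit histogram for one
# modality over hops 0-7 is [0, 10, 25, 20, 5, 0, 0, 0] sums to 60 > 50 and counts as
# receiving that modality, while a histogram summing to 40 would not.
_example_hits = np.array([0, 10, 25, 20, 5, 0, 0, 0])
assert sum(_example_hits[0:8]) > threshold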
# intersection between all sensory modalities, including hops
sensory_hits_all_hops = []
for hop in range(0, len(input_hit_hist_list[0][0, :])):
hops = []
for i in range(0, len(input_hit_hist_list[0])):
sensory_hits = []
for input_hit_hist in input_hit_hist_list:
sensory_hits.append(input_hit_hist[i, hop]>threshold)
hops.append(sensory_hits)
hops = pd.DataFrame(hops, columns = input_names_format_reordered,
index = mg.meta.index)
sensory_hits_all_hops.append(hops)
# intersection between all sensory modalities, thresholding then ignoring hops
sensory_hits_all = []
for i in range(0, len(input_hit_hist_list[0])):
sensory_hits = []
for input_hit_hist in input_hit_hist_list:
sensory_hits.append(sum(input_hit_hist[i]>threshold)>0)
sensory_hits_all.append(sensory_hits)
sensory_hits_all = pd.DataFrame(sensory_hits_all, columns = input_names_format_reordered, index = mg.meta.index)
# intersection between all sensory modalities, summing hops then thresholding
summed_sensory_hits_all = []
for i in range(0, len(input_hit_hist_list[0])):
sensory_hits = []
for input_hit_hist in input_hit_hist_list:
sensory_hits.append(sum(input_hit_hist[i][0:8])>threshold)
summed_sensory_hits_all.append(sensory_hits)
summed_sensory_hits_all = pd.DataFrame(summed_sensory_hits_all, columns = input_names_format_reordered, index = mg.meta.index)
# intersection between all sensory modalities, summing hops (all 10 hops) then thresholding
summed_sensory_hits_all2 = []
for i in range(0, len(input_hit_hist_list[0])):
sensory_hits = []
for input_hit_hist in input_hit_hist_list:
sensory_hits.append(sum(input_hit_hist[i][0:10])>threshold)
summed_sensory_hits_all2.append(sensory_hits)
summed_sensory_hits_all2 = pd.DataFrame(summed_sensory_hits_all2, columns = input_names_format_reordered, index = mg.meta.index)
# intersection between all sensory modalities, sliding window sum hops then thresholding
summed_window_sensory_hits_all = []
for i in range(0, len(input_hit_hist_list[0])):
sensory_hits = []
for input_hit_hist in input_hit_hist_list:
max_sum = max([sum(input_hit_hist[i][0:3]),
sum(input_hit_hist[i][1:4]),
sum(input_hit_hist[i][2:5]),
sum(input_hit_hist[i][3:6]),
sum(input_hit_hist[i][4:7]),
sum(input_hit_hist[i][5:8])])
sensory_hits.append(max_sum>threshold)
summed_window_sensory_hits_all.append(sensory_hits)
summed_window_sensory_hits_all = pd.DataFrame(summed_window_sensory_hits_all, columns = input_names_format_reordered, index = mg.meta.index)
summed_window_small_sensory_hits_all = []
for i in range(0, len(input_hit_hist_list[0])):
sensory_hits = []
for input_hit_hist in input_hit_hist_list:
max_sum = max([sum(input_hit_hist[i][0:2]),
sum(input_hit_hist[i][1:3]),
sum(input_hit_hist[i][2:4]),
sum(input_hit_hist[i][3:5]),
sum(input_hit_hist[i][4:6]),
sum(input_hit_hist[i][5:7]),
sum(input_hit_hist[i][6:8])])
sensory_hits.append(max_sum>threshold)
summed_window_small_sensory_hits_all.append(sensory_hits)
summed_window_small_sensory_hits_all = pd.DataFrame(summed_window_small_sensory_hits_all, columns = input_names_format_reordered, index = mg.meta.index)
# %%
# all permutations of sensory integration, ignoring hops
from upsetplot import plot
from upsetplot import from_contents
from upsetplot import from_memberships
mi = pd.MultiIndex.from_frame(sum(sensory_hits_all_hops)>0)
data = pd.DataFrame(sensory_hits_all_hops[0].index, index = mi)
plot(data, sort_by = 'cardinality', sort_categories_by = None)
plt.savefig('cascades/cluster_plots/threshold_types_on-hops.pdf', format='pdf', bbox_inches='tight')
mi = pd.MultiIndex.from_frame(summed_sensory_hits_all)
data = pd.DataFrame(sensory_hits_all_hops[0].index, index = mi)
sum_plot = plot(data, sort_by = 'cardinality', sort_categories_by = None)
plt.savefig('cascades/cluster_plots/threshold_types_summed.pdf', format='pdf', bbox_inches='tight')
mi = pd.MultiIndex.from_frame(summed_sensory_hits_all2)
data = pd.DataFrame(sensory_hits_all_hops[0].index, index = mi)
sum_plot = plot(data, sort_by = 'cardinality', sort_categories_by = None)
plt.savefig('cascades/cluster_plots/threshold_types_summed2.pdf', format='pdf', bbox_inches='tight')
mi = pd.MultiIndex.from_frame(summed_window_sensory_hits_all)
data = pd.DataFrame(sensory_hits_all_hops[0].index, index = mi)
plot(data, sort_by = 'cardinality', sort_categories_by = None)
plt.savefig('cascades/cluster_plots/threshold_types_summed-window.pdf', format='pdf', bbox_inches='tight')
mi = pd.MultiIndex.from_frame(summed_window_small_sensory_hits_all)
data = pd.DataFrame(sensory_hits_all_hops[0].index, index = mi)
plot(data, sort_by = 'cardinality', sort_categories_by = None)
plt.savefig('cascades/cluster_plots/threshold_types_summed-small-window.pdf', format='pdf', bbox_inches='tight')
# %%
# identify skids in each category of sensory combination
import itertools
from tqdm import tqdm
permut7 = list(itertools.product([True, False], repeat=7))
permut7 = [permut7[x] for x in range(len(permut7)-1)] # remove the all False scenario
permut_members = []
for i, permut in enumerate(permut7):
skids = []
for row in summed_sensory_hits_all.iterrows():
if((row[1][0]==permut[0]) &
(row[1][1]==permut[1]) &
(row[1][2]==permut[2]) &
(row[1][3]==permut[3]) &
(row[1][4]==permut[4]) &
(row[1][5]==permut[5]) &
(row[1][6]==permut[6])):
skids.append(row[1].name)
permut_members.append(skids)
'''
permut_members2 = []
for i, permut in enumerate(permut7):
skids = []
for row in summed_sensory_hits_all2.iterrows():
if((row[1][0]==permut[0]) &
(row[1][1]==permut[1]) &
(row[1][2]==permut[2]) &
(row[1][3]==permut[3]) &
(row[1][4]==permut[4]) &
(row[1][5]==permut[5]) &
(row[1][6]==permut[6])):
skids.append(row[1].name)
permut_members2.append(skids)
'''
# where does sensory integration occur? for each type?
def skid_to_index(skid, mg):
index_match = np.where(mg.meta.index == skid)[0]
if(len(index_match)==1):
return(index_match[0])
if(len(index_match)!=1):
print('Not one match for skid %i!' %skid)
def index_to_skid(index, mg):
return(mg.meta.iloc[index, :].name)
def counts_to_list(count_list):
expanded_counts = []
for i, count in enumerate(count_list):
expanded = np.repeat(i, count)
expanded_counts.append(expanded)
return([x for sublist in expanded_counts for x in sublist])
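# Example (added for illustration): counts_to_list([0, 2, 1]) expands per-hop visit counts
# into one entry per visit, i.e. [1, 1, 2], so the np.median() call below is taken over
# individual visits rather than over hop bins.
assert counts_to_list([0, 2, 1]) == [1, 1, 2]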
# identify median hop number per skid
hops_from_sens = []
for i, input_hit_hist in enumerate(input_hit_hist_list):
for j, row in enumerate(input_hit_hist):
median_hop = np.median(counts_to_list(row))
skid = index_to_skid(j, mg)
if(summed_sensory_hits_all.loc[skid, input_names_format_reordered[i]] == True):
hops_from_sens.append([skid, median_hop, input_names_format_reordered[i]])
hops_from_sens = pd.DataFrame(hops_from_sens, columns = ['skid', 'hops_from_sens', 'sensory_modality'])
hops_from_sens_skid = hops_from_sens.groupby('skid')
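# each skid group holds that neuron's hop count for every modality that reached it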
hops_keys = list(hops_from_sens_skid.groups.keys())
# %%
# UpSet plot for figure
length_permut_members = [len(x) for x in permut_members]
sort = sorted(range(len(length_permut_members)), reverse = True, key = lambda k: length_permut_members[k])
#length_permut_members2 = [len(x) for x in permut_members2]
#sort = sorted(range(len(length_permut_members2)), reverse = True, key = lambda k: length_permut_members2[k])
#subset = [permut_members[x] for x in [0, 1, 95, 111, 63, 15, 3, 123, 125, 126, 119]] # sort[0:8] + all sensory-specific
subset = [permut_members[x] for x in sort[0:17]]
subset = [item for sublist in subset for item in sublist]
subset_upset = summed_sensory_hits_all.loc[subset]
mi = pd.MultiIndex.from_frame(subset_upset)
data = pd.DataFrame(subset, index = mi)
sum_plot = plot(data, sort_categories_by = None)
plt.savefig('cascades/cluster_plots/Integration_Upset_Plot.pdf', format='pdf', bbox_inches='tight')
# %%
# integrative vs nonintegrative
indices_nonintegrative = [i for i, x in enumerate(permut7) if sum(x)==1]
indices_integrative = [i for i, x in enumerate(permut7) if sum(x)>1]
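# labelled line = signal from exactly one modality; integrative = signal from more than one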
# note: this tally does not distinguish brain from non-brain neurons
# TODO: restrict to brain neurons only?
total_nonintegrative = sum([len(permut_members[x]) for x in indices_nonintegrative])
total_integrative = sum([len(permut_members[x]) for x in indices_integrative])
no_signal = len(summed_sensory_hits_all.iloc[:, 0]) - total_nonintegrative - total_integrative
integrative_skids = [permut_members[x] for x in indices_integrative]
integrative_skids = [item for sublist in integrative_skids for item in sublist]
nonintegrative_skids = [permut_members[x] for x in indices_nonintegrative]
nonintegrative_skids = [item for sublist in nonintegrative_skids for item in sublist]
# how many hops from each sensory type
mean_hops_integrative = []
for skid in integrative_skids:
mean_hop = np.mean(hops_from_sens.iloc[hops_from_sens_skid.groups[skid], 1])
mean_hops_integrative.append([skid, mean_hop, 'Integrative'])
#mean_hops = pd.DataFrame(np.concatenate([mean_hops_integrative, mean_hops_nonintegrative]),
# columns = ['skid', 'mean_hops', 'integration'])
#mean_hops['mean_hops'] = mean_hops['mean_hops'].astype('float64')
all_hops_integrative = []
for skid in integrative_skids:
hops = hops_from_sens.iloc[hops_from_sens_skid.groups[skid], 1]
for hop in hops:
all_hops_integrative.append([skid, hop, 'Integrative'])
all_hops_nonintegrative = []
for skid in nonintegrative_skids:
hop = hops_from_sens.iloc[hops_from_sens_skid.groups[skid], 1]
all_hops_nonintegrative.append([skid, hop, 'Labelled Line'])
all_hops = pd.DataFrame(np.concatenate([all_hops_integrative, all_hops_nonintegrative]),
columns = ['skid', 'all_hops', 'integration'])
all_hops['all_hops'] = all_hops['all_hops'].astype('float64')
fig, axs = plt.subplots(
1, 1, figsize = (1.5,1.75)
)
fig.tight_layout(pad = 2.0)
ax = axs
sns.violinplot(data = all_hops, x = 'integration', y = 'all_hops', ax = ax, linewidth = 0.5)
ax.set_ylabel('Hops from Sensory', fontsize = 6)
ax.set_xlabel('')
ax.set_yticks([0, 1, 2, 3, 4, 5, 6, 7, 8])
ax.set_xticklabels( ('Integrative\nN=%i' %len(mean_hops_integrative), 'Labelled Line\nN=%i' %len(all_hops_nonintegrative)) )
fig.savefig('cascades/cluster_plots/Integrative_LabelledLine_plot.pdf', format='pdf', bbox_inches='tight')
plt.rcParams["font.family"] = "Arial"
# %%
# Where do integrations occur?
all_hops_integrative_first_vs_other = []
for skid in integrative_skids:
hops = hops_from_sens.iloc[hops_from_sens_skid.groups[skid], 1]
gate = 0
for hop in hops:
        # the minimum-hop signal counts as 'First' exactly once; everything else is 'Additional'
        if (hop == min(hops)) and (gate == 0):
            all_hops_integrative_first_vs_other.append([skid, hop, 'Integrative', 'First'])
            gate += 1
        else:
            all_hops_integrative_first_vs_other.append([skid, hop, 'Integrative', 'Additional'])
all_hops_integrative_first_vs_other = pd.DataFrame(all_hops_integrative_first_vs_other,
columns = ['skid', 'all_hops', 'integration', 'step'])
all_hops_integrative_first_vs_other.index = all_hops_integrative_first_vs_other.skid
fig, axs = plt.subplots(
1, 1, figsize = (2,2)
)
fig.tight_layout(pad = 2.0)
ax = axs
vplt = sns.violinplot(data = all_hops_integrative_first_vs_other, x = 'integration', y = 'all_hops',
hue='step', split = True, ax = ax, linewidth = 0.5, legend_out=True, hue_order = ['First', 'Additional'])
ax.set_ylabel('Hops from Sensory', fontsize = 6)
ax.set_xlabel('')
ax.set_xticklabels( ('Integrative\nN=%i' %len(mean_hops_integrative), 'Labelled Line\nN=%i' %len(all_hops_nonintegrative)) )
fig.savefig('cascades/cluster_plots/Integrative_detail_plot.pdf', format='pdf')
plt.rcParams["font.family"] = "Arial"
# %%
# integration detail hop plots for each type in UpSet plot
# permutations to sensory names
# names for plot
permut_names = []
for permut in permut7:
names = []
for i in range(0, len(permut)):
if(permut[i]==True):
names.append(input_names_format_reordered[i])
sep = ' + '
permut_names.append(sep.join(names))
permut_names = np.array(permut_names)
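# e.g. a permutation that is True only for the ORN and AN modalities yields the name 'ORN + AN'
# (names follow the order of input_names_format_reordered defined earlier)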
# identifying indices to be used in permut_members
nonintegrative_indices = [i for i, x in enumerate(permut7) if sum(x)==1]
integrative_indices = [x for x in sort[0:17] if (x not in nonintegrative_indices)]
permut_col = []
for skid in all_hops_integrative_first_vs_other.skid:
for i, permut in enumerate(permut_members):
for permut_skid in permut:
if(skid == permut_skid):
permut_col.append([skid, i])
permut_col = pd.DataFrame(permut_col, columns = ['skid', 'permut_index'])
all_hops_integrative_first_vs_other['permut_index'] = permut_col.permut_index.values
all_hops_integrative_first_vs_other['permut_name'] = permut_names[permut_col.permut_index.values]
all_hops_integrative_first_vs_other.index = permut_col.permut_index.values
fig, axs = plt.subplots(
1, 1, figsize = (3.75,1.25)
)
fig.tight_layout(pad = 2.0)
ax = axs
sns.violinplot(data = all_hops_integrative_first_vs_other.loc[integrative_indices],
x = 'permut_name', y = 'all_hops', scale = 'width', hue = 'step', split=True,
hue_order=['First','Additional'], ax = ax, linewidth = 0.5)
ax.set_ylabel('Hops from Sensory', fontsize = 6)
ax.set_xlabel('')
plt.xticks(rotation=45, ha = 'right')
ax.set(ylim = (0, 8))
ax.set_yticks(np.arange(0, 9, 1))
plt.savefig('cascades/cluster_plots/Integrative_hop_violinplots_labels.pdf', format='pdf', bbox_inches='tight')
fig, axs = plt.subplots(
1, 1, figsize = (3.75,1.25)
)
fig.tight_layout(pad = 2.0)
ax = axs
sns.violinplot(data = all_hops_integrative_first_vs_other.loc[integrative_indices],
x = 'permut_name', y = 'all_hops', scale = 'width', hue = 'step', split=True,
hue_order=['First','Additional'], ax = ax, linewidth = 0.5)
ax.set_ylabel('Hops from Sensory', fontsize = 6)
ax.set_xlabel('')
plt.xticks([])
ax.set(ylim = (0, 8))
ax.set_yticks(np.arange(0, 9, 1))
plt.savefig('cascades/cluster_plots/Integrative_hop_violinplots_nolabels.pdf', format='pdf', bbox_inches='tight')
# %%
# same but with nonintegrative
all_hops_nonintegrative = []
for skid in nonintegrative_skids:
hops = hops_from_sens.iloc[hops_from_sens_skid.groups[skid], 1]
gate = 0
for hop in hops:
        # as above: the minimum-hop signal counts as 'First' once, all remaining hops as 'Additional'
        if (hop == min(hops)) and (gate == 0):
            all_hops_nonintegrative.append([skid, hop, 'Labelled Line', 'First'])
            gate += 1
        else:
            all_hops_nonintegrative.append([skid, hop, 'Labelled Line', 'Additional'])
all_hops_nonintegrative = pd.DataFrame(all_hops_nonintegrative,
columns = ['skid', 'all_hops', 'integration', 'step'])
permut_col_nonintegrative = []
for skid in all_hops_nonintegrative.skid:
for i, permut in enumerate(permut_members):
for permut_skid in permut:
if(skid == permut_skid):
permut_col_nonintegrative.append([skid, i])
permut_col_nonintegrative = pd.DataFrame(permut_col_nonintegrative, columns = ['skid', 'permut_index'])
all_hops_nonintegrative['permut_index'] = permut_col_nonintegrative.permut_index.values
all_hops_nonintegrative['permut_name'] = permut_names[permut_col_nonintegrative.permut_index.values]
all_hops_nonintegrative.index = permut_col_nonintegrative.permut_index.values
fig, axs = plt.subplots(
1, 1, figsize = (3.75,1.25)
)
fig.tight_layout(pad = 2.0)
ax = axs
sns.violinplot(data = all_hops_nonintegrative.loc[nonintegrative_indices],
x = 'permut_name', y = 'all_hops', scale = 'width', ax = ax, linewidth = 0.5)
ax.set_ylabel('Hops from Sensory', fontsize = 6)
ax.set_xlabel('')
ax.set(ylim = (0, 8))
ax.set_yticks(np.arange(0, 9, 1))
plt.savefig('cascades/cluster_plots/Labelled_line_hop_violinplots.pdf', format='pdf', bbox_inches='tight')
# %%
# how many outputs associated with each type?
labelledline_descendings = []
for i in nonintegrative_indices:
skids = np.unique(all_hops_nonintegrative.loc[i].skid)
labelledline_descendings.append([i, sum(mg.meta.loc[skids].dVNC), sum(mg.meta.loc[skids].dSEZ), sum(mg.meta.loc[skids].RG)])
labelledline_descendings = pd.DataFrame(labelledline_descendings, columns = ['permut_number', 'dVNCs', 'dSEZs', 'RG'])
labelledline_descendings.index = labelledline_descendings.permut_number
labelledline_descendings['permut_name'] = permut_names[nonintegrative_indices]
integrative_descendings = []
for i in integrative_indices:
skids = np.unique(all_hops_integrative_first_vs_other.loc[i].skid)
integrative_descendings.append([i,
sum(mg.meta.loc[skids].dVNC), sum(mg.meta.loc[skids].dSEZ), sum(mg.meta.loc[skids].RG)])
integrative_descendings = pd.DataFrame(integrative_descendings, columns = ['permut_number', 'dVNCs', 'dSEZs', 'RG'])
integrative_descendings.index = integrative_descendings.permut_number
integrative_descendings['permut_name'] = permut_names[integrative_indices]
fig, axs = plt.subplots(
1, 1, figsize=(3.75,.4)
)
ax = axs
sns.heatmap(labelledline_descendings.iloc[:, 1:4].T, ax = ax, annot=True, cmap = 'Blues')
fig.savefig('cascades/cluster_plots/Labelled_line_hop_violinplots_bottom.pdf', format='pdf', bbox_inches='tight')
fig, axs = plt.subplots(
1, 1, figsize=(3.75,.4)
)
ax = axs
#ax.set_xticklabels(integrative_descendings.permut_name.values)
sns.heatmap(integrative_descendings.iloc[:, 1:4].T, ax = ax, annot=True, cmap = 'Oranges')
fig.savefig('cascades/cluster_plots/Integrative_hop_violinplots_bottom.pdf', format='pdf', bbox_inches='tight')
# order in main figure
# 31, 79, 15, 47, 13, 3, 7, 1, 0
fig, axs = plt.subplots(
1, 1, figsize=(3.75,.4)
)
ax = axs
sns.heatmap(integrative_descendings.loc[[31, 79, 15, 47, 13, 3, 7, 1, 0], ['dVNCs', 'dSEZs', 'RG']].T, ax = ax, annot=True, cmap = 'Oranges')
fig.savefig('cascades/cluster_plots/Integrative_hop_violinplots_bottom_alt.pdf', format='pdf', bbox_inches='tight')
# %%
# Questions
# which clusters contain these neuron types?
# which clusters display which types of sensory input and how much?
# %%
# plotting sensory integration make-up per cluster
# level 7 clusters
lvl7 = clusters.groupby('lvl7_labels')
# integration types per cluster
cluster_lvl7 = []
for key in lvl7.groups.keys():
for i, permut in enumerate(permut_members):
for permut_skid in permut:
if((permut_skid in lvl7.groups[key].values) & (i in nonintegrative_indices)):
cluster_lvl7.append([key, permut_skid, i, permut_names[i], 'labelled_line'])
if((permut_skid in lvl7.groups[key].values) & (i not in nonintegrative_indices)):
cluster_lvl7.append([key, permut_skid, i, permut_names[i], 'integrative'])
cluster_lvl7 = pd.DataFrame(cluster_lvl7, columns = ['key', 'skid', 'permut_index', 'permut_name', 'integration'])
cluster_lvl7_groups = cluster_lvl7.groupby('key')
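# per-cluster composition: the fraction of each level-7 cluster that is integrative vs labelled line,
# and which specific modality combinations account for those fractions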
percent_integrative = [sum(x[1].integration == 'integrative')/len(x[1].integration) for x in list(cluster_lvl7_groups)]
percent_labelled_line = [sum(x[1].integration == 'labelled_line')/len(x[1].integration) for x in list(cluster_lvl7_groups)]
percent_labelled_line_subtypes = []
for index in nonintegrative_indices:
percent_labelled_line_subtypes.append(
[sum(x[1].permut_index == index)/len(x[1].permut_index) for x in list(cluster_lvl7_groups)]
)
percent_integrative_subtypes = []
for index in integrative_indices:
percent_integrative_subtypes.append(
[sum(x[1].permut_index == index)/len(x[1].permut_index) for x in list(cluster_lvl7_groups)]
)
cluster_character = pd.DataFrame([percent_integrative, percent_labelled_line], columns = lvl7.groups.keys(), index = ['integrative', 'labelled_line']).T
cluster_character_sub_labelled_line = pd.DataFrame(percent_labelled_line_subtypes, columns = lvl7.groups.keys(),
index = [permut_names[x] for x in nonintegrative_indices]).T
cluster_character_sub_integrative = pd.DataFrame(percent_integrative_subtypes, columns = lvl7.groups.keys(),
index = integrative_indices).T
import cmasher as cmr
fig, axs = plt.subplots(
1, 1, figsize = (1.5, 2)
)
ax = axs
ax.set_ylabel('Individual Clusters')
ax.set_yticks([])
ax.set_xticks([])
sns.heatmap(cluster_character.loc[order, ['labelled_line', 'integrative']], cmap = 'Greens', rasterized = True, ax = ax)
fig.savefig('cascades/cluster_plots/Clusters_ll_vs_integrative.pdf', format='pdf', bbox_inches='tight')
ind = np.arange(0, len(cluster_character.index))
data1 = cluster_character.loc[order, ['labelled_line']].values
data1 = [x for sublist in data1 for x in sublist]
data2 = cluster_character.loc[order, ['integrative']].values
data2 = [x for sublist in data2 for x in sublist]
#data2 = np.array(data1) + np.array(data2)
plt.figure(figsize=(5, 2))  # quick overview on its own figure (this figure call and its size are an assumption)
plt.bar(ind, data1, color = 'orange', alpha = 0.5)
plt.bar(ind, data2, bottom = data1, color = 'blue', alpha = 0.5)
fig, axs = plt.subplots(
1, 1, figsize = (5, 5)
)
ax = axs
ax.set_ylabel('Individual Clusters')
ax.set_yticks([])
ax.set_xticks([])
sns.heatmap(cluster_character_sub_labelled_line.loc[order], cmap = 'Greens', rasterized = True, ax = ax)
fig.savefig('cascades/cluster_plots/Clusters_labelled_line_character.pdf', format='pdf', bbox_inches='tight')
fig, axs = plt.subplots(
1, 1, figsize = (5, 5)
)
ax = axs
ax.set_ylabel('Individual Clusters')
ax.set_yticks([])
ax.set_xticks([])
sns.heatmap(cluster_character_sub_integrative.loc[order], cmap = 'Greens', rasterized = True, ax = ax)
fig.savefig('cascades/cluster_plots/Clusters_integrative_character.pdf', format='pdf', bbox_inches='tight')
# stacked bar plot for all types of integrative and non integrative
ORN_frac = cluster_character_sub_labelled_line.loc[order,'ORN'].values
AN_frac = cluster_character_sub_labelled_line.loc[order,'AN'].values
MN_frac = cluster_character_sub_labelled_line.loc[order,'MN'].values
thermo_frac = cluster_character_sub_labelled_line.loc[order,'thermo'].values
photo_frac = cluster_character_sub_labelled_line.loc[order,'photo'].values
A00c_frac = cluster_character_sub_labelled_line.loc[order,'A00c'].values
vtd_frac = cluster_character_sub_labelled_line.loc[order,'vtd'].values
labelledline_frac = ORN_frac + AN_frac + MN_frac + thermo_frac + photo_frac + A00c_frac + vtd_frac
all_integrative_frac = cluster_character_sub_integrative.loc[order, 0].values
most_integrative_frac = cluster_character_sub_integrative.loc[order, 1].values
OR_AN_MN_integrative_frac = cluster_character_sub_integrative.loc[order, 15].values
rest_integrative_frac = cluster_character_sub_integrative.loc[order, :].sum(axis = 1) - all_integrative_frac - most_integrative_frac - OR_AN_MN_integrative_frac
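# stacked bars: layer the seven labelled-line fractions first, then three named integrative categories
# (all modalities, all-but-one, and ORN+AN+MN), with every other integrative combination in a final 'rest' layer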
# start a fresh figure so the stacked bars are not drawn over the previous heatmap axes
# (this subplots call and its size are an assumption, not in the original script)
fig, ax = plt.subplots(1, 1, figsize = (5, 2))
plt.bar(ind, ORN_frac, color = 'blue')
plt.bar(ind, AN_frac, bottom = ORN_frac, color = 'tab:blue')
plt.bar(ind, MN_frac, bottom = ORN_frac + AN_frac, color = 'tab:cyan')
plt.bar(ind, thermo_frac, bottom = ORN_frac + AN_frac + MN_frac, color = 'purple')
plt.bar(ind, photo_frac, bottom = ORN_frac + AN_frac + MN_frac + thermo_frac, color = 'tab:purple')
plt.bar(ind, A00c_frac, bottom = ORN_frac + AN_frac + MN_frac + thermo_frac + photo_frac, color = 'mediumorchid')
plt.bar(ind, vtd_frac, bottom = ORN_frac + AN_frac + MN_frac + thermo_frac + photo_frac + A00c_frac, color = 'plum')
plt.bar(ind, all_integrative_frac, bottom = labelledline_frac, color = 'maroon')
plt.bar(ind, most_integrative_frac, bottom = labelledline_frac + all_integrative_frac, color = 'firebrick')
plt.bar(ind, OR_AN_MN_integrative_frac, bottom = labelledline_frac + all_integrative_frac + most_integrative_frac, color = 'salmon')
plt.bar(ind, rest_integrative_frac, bottom = labelledline_frac + all_integrative_frac + most_integrative_frac + OR_AN_MN_integrative_frac, color = 'lightsalmon')
plt.savefig('cascades/cluster_plots/Clusters_multisensory_character.pdf', format='pdf', bbox_inches='tight')
# %%
| 40.816619
| 161
| 0.726079
|
cffb3cc192da039cb574d4e5f9d4ed0d81b383b5
| 1,427
|
py
|
Python
|
app/recipe/serializers.py
|
demirtaserdem/django-rest-adv
|
a8259fa4308c71dcc80456d9f0266fd40c22480c
|
[
"MIT"
] | null | null | null |
app/recipe/serializers.py
|
demirtaserdem/django-rest-adv
|
a8259fa4308c71dcc80456d9f0266fd40c22480c
|
[
"MIT"
] | null | null | null |
app/recipe/serializers.py
|
demirtaserdem/django-rest-adv
|
a8259fa4308c71dcc80456d9f0266fd40c22480c
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from core.models import Tag, Ingredient, Recipe
class TagSerializer(serializers.ModelSerializer):
"""Serializer for tag objects"""
class Meta:
model = Tag
fields = ('id', 'name')
read_only_fields = ('id',)
class IngredientSerializer(serializers.ModelSerializer):
"""Serializer for ingredient objects"""
class Meta:
model = Ingredient
fields = ('id', 'name')
read_only_fields = ('id',)
class RecipeSerializer(serializers.ModelSerializer):
"""Serializer a recipe"""
ingredients = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Ingredient.objects.all()
)
tags = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Tag.objects.all()
)
class Meta:
model = Recipe
fields = (
'id', 'title', 'ingredients', 'tags', 'time_minutes',
'price', 'link'
)
read_only_fields = ('id',)
class RecipeDetailSerializer(RecipeSerializer):
"""Serialize a recipe detail"""
ingredients = IngredientSerializer(many=True, read_only=True)
tags = TagSerializer(many=True, read_only=True)
class RecipeImageSerializer(serializers.ModelSerializer):
"""Serializer for uploading images to recipes"""
class Meta:
model = Recipe
fields = ('id', 'image')
read_only_fields = ('id',)
| 26.924528
| 65
| 0.641205
|
3f84670f98832589ea47852a7d369f54dc76c8ed
| 566
|
py
|
Python
|
rcsystem/migrations/0040_auto_20200831_2100.py
|
perought/recommender-web-site
|
ce62c71ca86756cc2810966cacd0e4597918c7dd
|
[
"MIT"
] | null | null | null |
rcsystem/migrations/0040_auto_20200831_2100.py
|
perought/recommender-web-site
|
ce62c71ca86756cc2810966cacd0e4597918c7dd
|
[
"MIT"
] | null | null | null |
rcsystem/migrations/0040_auto_20200831_2100.py
|
perought/recommender-web-site
|
ce62c71ca86756cc2810966cacd0e4597918c7dd
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-08-31 18:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rcsystem', '0039_auto_20200831_2056'),
]
operations = [
migrations.AlterField(
model_name='moviedatabase',
name='vote_average',
field=models.CharField(max_length=250),
),
migrations.AlterField(
model_name='moviedatabase',
name='vote_count',
field=models.CharField(max_length=250),
),
]
| 23.583333
| 51
| 0.59364
|
cba1e2ab5d29278fc05617e07688fb57047860d0
| 196
|
py
|
Python
|
touch_time_test.py
|
goldang01/Python_Example
|
f0de3384278762d7935885ca0bc08303346ad5a1
|
[
"MIT"
] | null | null | null |
touch_time_test.py
|
goldang01/Python_Example
|
f0de3384278762d7935885ca0bc08303346ad5a1
|
[
"MIT"
] | null | null | null |
touch_time_test.py
|
goldang01/Python_Example
|
f0de3384278762d7935885ca0bc08303346ad5a1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import datetime
import pandas as pd
now = datetime.datetime.now()
df = pd.DataFrame({
"time":[now]
})
df.to_csv("time_test.csv", mode = "a")
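# note: mode="a" appends a new row (header included) to time_test.csv on every run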
| 13.066667
| 38
| 0.637755
|
119f82b56d2ea87640cd03670ca01ed078091aff
| 367
|
py
|
Python
|
tests/test_reddit.py
|
bwlodarski277/reddit-async
|
c7a54c539eb1b71dc2160e08294ca7f18d66fe32
|
[
"MIT"
] | null | null | null |
tests/test_reddit.py
|
bwlodarski277/reddit-async
|
c7a54c539eb1b71dc2160e08294ca7f18d66fe32
|
[
"MIT"
] | null | null | null |
tests/test_reddit.py
|
bwlodarski277/reddit-async
|
c7a54c539eb1b71dc2160e08294ca7f18d66fe32
|
[
"MIT"
] | null | null | null |
import pytest
from core.reddit import Reddit
class TestInit:
@pytest.mark.asyncio
async def test_invalid_input(self):
with pytest.raises(ValueError):
reddit = await Reddit.init('', '', '', '')
@pytest.mark.asyncio
async def test_valid_input(self):
reddit = await Reddit.init()
assert 'error' not in reddit.auth
| 22.9375
| 54
| 0.645777
|
468de0a71f818fd7009b36c60d3996346d2a5a68
| 1,717
|
py
|
Python
|
wap_mms_message/__init__.py
|
whardier/wap-mms-message
|
8c217d9e05ff0338f3cbd8ab910737ea9258a014
|
[
"MIT"
] | null | null | null |
wap_mms_message/__init__.py
|
whardier/wap-mms-message
|
8c217d9e05ff0338f3cbd8ab910737ea9258a014
|
[
"MIT"
] | null | null | null |
wap_mms_message/__init__.py
|
whardier/wap-mms-message
|
8c217d9e05ff0338f3cbd8ab910737ea9258a014
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
# Copyright (c) 2020 Shane R. Spencer <spencersr@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# SPDX-License-Identifier: MIT
# Author: Shane R. Spencer <spencersr@gmail.com>
"""Extremely simple encoder for application/vnd.wap.mms-message payloads"""
# version is a human-readable version number.
# version_info is a four-tuple for programmatic comparison. The first
# three numbers are the components of the version number. The fourth
# is zero for an official release, positive for a development branch,
# or negative for a release candidate or beta (after the base version
# number has been incremented)
version = "2020.4.0a1"
version_info = (2020, 4, 0, -200)
| 41.878049
| 79
| 0.771112
|
18e8d395a3da8868f071c5ed0749270bf3c5420b
| 5,712
|
py
|
Python
|
Agent/Agent.py
|
OctThe16th/BetterTrainingDataMnist_RL_GAN
|
fcc75c9ddf768d7c66c9fade3e86973a4c828624
|
[
"MIT"
] | null | null | null |
Agent/Agent.py
|
OctThe16th/BetterTrainingDataMnist_RL_GAN
|
fcc75c9ddf768d7c66c9fade3e86973a4c828624
|
[
"MIT"
] | null | null | null |
Agent/Agent.py
|
OctThe16th/BetterTrainingDataMnist_RL_GAN
|
fcc75c9ddf768d7c66c9fade3e86973a4c828624
|
[
"MIT"
] | null | null | null |
import os
import numba as nb
import numpy as np
import math
import keras.backend as K
from PriorityExperienceReplay.PriorityExperienceReplay import Experience
import tensorflow as tf
from Actor import ActorNetwork
from Critic import CriticNetwork
from Environnement.Environnement import Environnement
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class Agent:
def __init__(self, amount_per_class=10):
self.environnement = Environnement(amount_per_class=amount_per_class)
self.batch_size = amount_per_class * 10
# Bunch of placeholders values
self.dummy_value = np.zeros((self.batch_size, 1))
self.dummy_predictions = np.zeros((self.batch_size, 784))
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
K.set_learning_phase(1)
K.set_session(self.sess)
self.atoms = 51
self.v_max = 2
self.v_min = -2
self.delta_z = (self.v_max - self.v_min) / float(self.atoms - 1)
self.z_steps = np.array([self.v_min + i * self.delta_z for i in range(self.atoms)]).astype(np.float32)
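        # z_steps is the fixed support of the value distribution: `atoms` evenly spaced points
        # between v_min and v_max, as used by distributional (C51-style) critics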
self.actor = ActorNetwork(self.sess, 28*28 + 10, 28*28, tau=0.001, lr=5*10e-5)
self.critic = CriticNetwork(self.sess, 28*28 + 10, 28*28, tau=0.001, lr=5*10e-5)
self.memory = Experience(memory_size=1000000, batch_size=self.batch_size*10, alpha=0.5)
def train(self, epoch):
e = 0
while e <= epoch:
done = False
print('Epoch :', e)
batch_num = 0
while self.memory.tree.size < 10000:
self.add_values_to_memory()
while done is False:
if batch_num % 4 == 0:
self.add_values_to_memory()
self.train_loop()
batch_num += 1
if batch_num % (100000//self.batch_size) == 0:
batch_x, batch_f1, batch_y = self.environnement.query_state()
batch_y_prime = self.flatten_digit_class(batch_y)
pred_x = np.reshape(batch_x, (self.batch_size, 28 * 28))
pred_x = np.concatenate([pred_x, batch_y_prime], axis=1)
old_predictions = self.actor.model.predict([pred_x])
values, test_values = self.get_values(np.reshape(batch_x, old_predictions.shape) + 2 * old_predictions, batch_f1, batch_y)
print('Batch num :', batch_num, '\tValues :', np.mean(values), '\tTest values :', np.mean(test_values))
e += 1
def train_loop(self):
states, actions, reward, indices = self.make_dataset()
loss = self.critic.model.train_on_batch([states, actions], reward)
a_for_grad = self.actor.model.predict(states)
grads = self.critic.gradients(states, a_for_grad)
self.actor.train(states, grads)
# self.actor.target_train()
# self.critic.target_train()
self.memory.priority_update(indices, [loss for _ in range(len(indices))])
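        # note: every sampled transition receives the same updated priority (the critic's batch loss)
        # rather than a per-sample TD error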
def add_values_to_memory(self):
batch_x, batch_f1, batch_y = self.environnement.query_state()
batch_y_prime = self.flatten_digit_class(batch_y)
pred_x = np.reshape(batch_x, (self.batch_size, 28 * 28))
pred_x = np.concatenate([pred_x, batch_y_prime], axis=1)
old_predictions = self.actor.model.predict(pred_x)
actions, new_predictions = self.get_actions(old_predictions, batch_x)
values, test = self.get_values(new_predictions, batch_f1, batch_y)
for i in range(pred_x.shape[0]):
self.memory.add([pred_x[i], actions[i], values[i]], 5)
@nb.jit
def flatten_digit_class(self, batch_y):
batch_y_prime = np.zeros(shape=(self.batch_size, 10))
for i in range(batch_y.shape[0]):
            # one-hot encode: set the class column of row i (indexing the row only was a bug)
            batch_y_prime[i, batch_y[i]] = 1
return batch_y_prime
@nb.jit
def get_values(self, actions, batch_f1, batch_y):
class_values, test = self.environnement.get_values(actions, batch_y)
class_values -= batch_f1
values = np.zeros(batch_y.shape)
test -= batch_f1
for i in range(values.shape[0]):
values[i] = class_values[int(batch_y[i])]
normalizing_factor = np.nanmean(batch_f1)/2
return values/normalizing_factor, test/normalizing_factor
@nb.jit
def get_actions(self, old_predictions, old_state):
actions = old_predictions + np.random.normal(loc=0, scale=1, size=old_predictions.shape)
new_predictions = np.reshape(old_state, (actions.shape)) + actions
actions = np.clip(actions, -1, 1)
new_predictions = np.clip(new_predictions, -1, 1)
return actions, new_predictions
def make_dataset(self):
data, weights, indices = self.memory.select(0.6)
states, reward, actions = [], [], []
for i in range(self.batch_size):
states.append(data[i][0])
actions.append(data[i][1])
reward.append(data[i][2])
states = np.array(states)
reward = np.array(reward)
actions = np.array(actions)
return states, actions, reward, indices
@nb.jit
def update_m_prob(self, reward, m_prob, z):
for i in range(self.batch_size):
for j in range(self.atoms):
Tz = min(self.v_max, max(self.v_min, reward[i]))
bj = (Tz - self.v_min) / self.delta_z
m_l, m_u = math.floor(bj), math.ceil(bj)
m_prob[i, int(m_l)] += z[i, j] * (m_u - bj)
m_prob[i, int(m_u)] += z[i, j] * (bj - m_l)
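        # the loops above project each clipped reward onto the fixed support: the mass z[i, j]
        # is split between the two atoms nearest the projected value (categorical projection)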
if __name__ == '__main__':
agent = Agent(amount_per_class=1)
agent.train(epoch=1)
| 38.33557
| 142
| 0.619398
|
370db6095ef8c70f32ea1c65e37c09fba4546d28
| 1,701
|
py
|
Python
|
tests/base.py
|
vtrbtf/http-prompt
|
b814f6a36a0c724ce60a21f52ef5c71708d39395
|
[
"MIT"
] | null | null | null |
tests/base.py
|
vtrbtf/http-prompt
|
b814f6a36a0c724ce60a21f52ef5c71708d39395
|
[
"MIT"
] | null | null | null |
tests/base.py
|
vtrbtf/http-prompt
|
b814f6a36a0c724ce60a21f52ef5c71708d39395
|
[
"MIT"
] | 1
|
2018-05-03T18:14:01.000Z
|
2018-05-03T18:14:01.000Z
|
import os
import shutil
import sys
import tempfile
import unittest
import six
class TempAppDirTestCase(unittest.TestCase):
"""Set up temporary app data and config directories before every test
method, and delete them afterwards.
"""
def setUp(self):
# Create a temp dir that will contain data and config directories
self.temp_dir = tempfile.mkdtemp()
if sys.platform == 'win32':
self.homes = {
# subdir_name: envvar_name
'data': 'LOCALAPPDATA',
'config': 'LOCALAPPDATA'
}
else:
self.homes = {
# subdir_name: envvar_name
'data': 'XDG_DATA_HOME',
'config': 'XDG_CONFIG_HOME'
}
# Used to restore
self.orig_envvars = {}
for subdir_name, envvar_name in self.homes.items():
if envvar_name in os.environ:
self.orig_envvars[envvar_name] = os.environ[envvar_name]
os.environ[envvar_name] = os.path.join(self.temp_dir, subdir_name)
def tearDown(self):
# Restore envvar values
for name in self.homes.values():
if name in self.orig_envvars:
os.environ[name] = self.orig_envvars[name]
else:
del os.environ[name]
shutil.rmtree(self.temp_dir)
def make_tempfile(self, data=''):
"""Create a file under self.temp_dir and return the path."""
if isinstance(data, six.text_type):
data = data.encode('utf-8')
with tempfile.NamedTemporaryFile(dir=self.temp_dir, delete=False) as f:
f.write(data)
return f.name
| 29.842105
| 79
| 0.579071
|
cfd7f5a623a3f00ebf0656522e04773827883c1c
| 9,500
|
py
|
Python
|
utils/utils_bbox.py
|
bubbliiiing/faster-rcnn-tf2
|
c3982016c7c209a3fa71a30facbe53396e6340ee
|
[
"MIT"
] | 133
|
2020-11-23T05:43:08.000Z
|
2022-03-30T08:10:50.000Z
|
utils/utils_bbox.py
|
Whale1024/faster-rcnn-tf2
|
6343860e5d44e8441fa736620762e4c256b27083
|
[
"MIT"
] | 15
|
2020-12-01T12:07:14.000Z
|
2021-09-30T02:27:37.000Z
|
utils/utils_bbox.py
|
Whale1024/faster-rcnn-tf2
|
6343860e5d44e8441fa736620762e4c256b27083
|
[
"MIT"
] | 40
|
2020-11-24T13:01:41.000Z
|
2022-03-30T08:10:53.000Z
|
import math
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
class BBoxUtility(object):
def __init__(self, num_classes, rpn_pre_boxes = 12000, rpn_nms = 0.7, nms_iou = 0.3, min_k = 300):
#---------------------------------------------------#
# 种类数量
#---------------------------------------------------#
self.num_classes = num_classes
#---------------------------------------------------#
        #   Number of proposal boxes kept before non-maximum suppression
#---------------------------------------------------#
self.rpn_pre_boxes = rpn_pre_boxes
#---------------------------------------------------#
        #   IoU threshold for non-maximum suppression
#---------------------------------------------------#
self.rpn_nms = rpn_nms
self.nms_iou = nms_iou
#---------------------------------------------------#
        #   Number of proposal boxes kept after non-maximum suppression
#---------------------------------------------------#
self._min_k = min_k
def decode_boxes(self, mbox_loc, anchors, variances):
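        # Decode RPN regression offsets into normalized [xmin, ymin, xmax, ymax] boxes,
        # using each anchor's center and size together with the variance scaling factors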
        # Get the widths and heights of the anchors
anchor_width = anchors[:, 2] - anchors[:, 0]
anchor_height = anchors[:, 3] - anchors[:, 1]
        # Get the center points of the anchors
anchor_center_x = 0.5 * (anchors[:, 2] + anchors[:, 0])
anchor_center_y = 0.5 * (anchors[:, 3] + anchors[:, 1])
        # Offsets of the predicted box centers from the anchor centers along x and y
detections_center_x = mbox_loc[:, 0] * anchor_width * variances[0]
detections_center_x += anchor_center_x
detections_center_y = mbox_loc[:, 1] * anchor_height * variances[1]
detections_center_y += anchor_center_y
        # Compute the widths and heights of the predicted boxes
detections_width = np.exp(mbox_loc[:, 2] * variances[2])
detections_width *= anchor_width
detections_height = np.exp(mbox_loc[:, 3] * variances[3])
detections_height *= anchor_height
        # Get the top-left and bottom-right corners of the predicted boxes
detections_xmin = detections_center_x - 0.5 * detections_width
detections_ymin = detections_center_y - 0.5 * detections_height
detections_xmax = detections_center_x + 0.5 * detections_width
detections_ymax = detections_center_y + 0.5 * detections_height
        # Stack the top-left and bottom-right corners together
detections = np.concatenate((detections_xmin[:, None],
detections_ymin[:, None],
detections_xmax[:, None],
detections_ymax[:, None]), axis=-1)
        # Clip so the boxes stay within 0 and 1
detections = np.minimum(np.maximum(detections, 0.0), 1.0)
return detections
def detection_out_rpn(self, predictions, anchors, variances = [0.25, 0.25, 0.25, 0.25]):
#---------------------------------------------------#
        #   Class confidence for each anchor
#---------------------------------------------------#
mbox_conf = predictions[0]
#---------------------------------------------------#
        #   mbox_loc holds the regression predictions
#---------------------------------------------------#
mbox_loc = predictions[1]
results = []
        # Process each image; predict.py only feeds a single image, so this loop runs once
for i in range(len(mbox_loc)):
#--------------------------------#
            #   Decode the anchors using the regression results
#--------------------------------#
detections = self.decode_boxes(mbox_loc[i], anchors, variances)
#--------------------------------#
            #   Probability that each anchor contains an object
#--------------------------------#
c_confs = mbox_conf[i, :, 0]
c_confs_argsort = np.argsort(c_confs)[::-1][:self.rpn_pre_boxes]
#------------------------------------#
            #   There are many raw predictions, so keep only the highest-scoring boxes first
#------------------------------------#
confs_to_process = c_confs[c_confs_argsort]
boxes_to_process = detections[c_confs_argsort, :]
#--------------------------------#
            #   Apply IoU-based non-maximum suppression
#--------------------------------#
idx = tf.image.non_max_suppression(boxes_to_process, confs_to_process, self._min_k, iou_threshold = self.rpn_nms).numpy()
#--------------------------------#
            #   Keep the boxes that survive non-maximum suppression
#--------------------------------#
good_boxes = boxes_to_process[idx]
results.append(good_boxes)
return np.array(results)
def frcnn_correct_boxes(self, box_xy, box_wh, input_shape, image_shape):
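        # Convert normalized (y-first) box centers and sizes back into corner coordinates
        # scaled to the original image shape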
#-----------------------------------------------------------------#
        #   The y axis is put first so the boxes can be multiplied by the image height and width directly
#-----------------------------------------------------------------#
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
input_shape = np.array(input_shape)
image_shape = np.array(image_shape)
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
boxes = np.concatenate([box_mins[..., 0:1], box_mins[..., 1:2], box_maxes[..., 0:1], box_maxes[..., 1:2]], axis=-1)
boxes *= np.concatenate([image_shape, image_shape], axis=-1)
return boxes
def detection_out_classifier(self, predictions, rpn_results, image_shape, input_shape, confidence = 0.5, variances = [0.125, 0.125, 0.25, 0.25]):
#---------------------------------------------------#
        #   proposal_conf holds the class confidences
#---------------------------------------------------#
proposal_conf = predictions[0]
#---------------------------------------------------#
        #   proposal_loc holds the regression predictions
#---------------------------------------------------#
proposal_loc = predictions[1]
results = []
#------------------------------------------------------------------------------------------------------------------#
        #   Process each image; predict.py only feeds a single image, so this loop runs once
#------------------------------------------------------------------------------------------------------------------#
for i in range(len(proposal_conf)):
results.append([])
#------------------------------------------#
            #   Use the classifier predictions to decode the proposals
            #   and determine the object class
#------------------------------------------#
detections = []
#---------------------------------------------------#
            #   Compute the proposal centers, widths and heights
#---------------------------------------------------#
rpn_results[i, :, 2] = rpn_results[i, :, 2] - rpn_results[i, :, 0]
rpn_results[i, :, 3] = rpn_results[i, :, 3] - rpn_results[i, :, 1]
rpn_results[i, :, 0] = rpn_results[i, :, 0] + rpn_results[i, :, 2] / 2
rpn_results[i, :, 1] = rpn_results[i, :, 1] + rpn_results[i, :, 3] / 2
for j in range(proposal_conf[i].shape[0]):
#---------------------------------------------------#
                #   Determine the class of each proposal
#---------------------------------------------------#
score = np.max(proposal_conf[i][j, :-1])
label = np.argmax(proposal_conf[i][j, :-1])
if score < confidence:
continue
#---------------------------------------------------#
                #   Adjust the proposal center and size to obtain the predicted box
#---------------------------------------------------#
x, y, w, h = rpn_results[i, j, :]
tx, ty, tw, th = proposal_loc[i][j, 4 * label: 4 * (label + 1)]
x1 = tx * variances[0] * w + x
y1 = ty * variances[1] * h + y
w1 = math.exp(tw * variances[2]) * w
h1 = math.exp(th * variances[3]) * h
xmin = x1 - w1/2.
ymin = y1 - h1/2.
xmax = x1 + w1/2
ymax = y1 + h1/2
detections.append([xmin, ymin, xmax, ymax, score, label])
detections = np.array(detections)
if len(detections) > 0:
for c in range(self.num_classes):
c_confs_m = detections[:, -1] == c
if len(detections[c_confs_m]) > 0:
boxes_to_process = detections[:, :4][c_confs_m]
confs_to_process = detections[:, 4][c_confs_m]
#-----------------------------------------#
                        #   Apply IoU-based non-maximum suppression
#-----------------------------------------#
idx = tf.image.non_max_suppression(boxes_to_process, confs_to_process, self._min_k, iou_threshold = self.nms_iou).numpy()
#-----------------------------------------#
                        #   Keep the boxes that survive non-maximum suppression
#-----------------------------------------#
results[-1].extend(detections[c_confs_m][idx])
if len(results[-1]) > 0:
results[-1] = np.array(results[-1])
box_xy, box_wh = (results[-1][:, 0:2] + results[-1][:, 2:4])/2, results[-1][:, 2:4] - results[-1][:, 0:2]
results[-1][:, :4] = self.frcnn_correct_boxes(box_xy, box_wh, input_shape, image_shape)
return results
| 48.969072
| 150
| 0.380105
|
dda6d852e5b6190979d444e0ea78a2263892ef33
| 20,605
|
py
|
Python
|
tb/test_axis_switch_4x4_64.py
|
wufubao/verilog-axis
|
7c6da337b05e5eea675159798aa979f459d89095
|
[
"MIT"
] | 2
|
2018-08-25T03:10:41.000Z
|
2018-08-25T22:52:30.000Z
|
tb/test_axis_switch_4x4_64.py
|
nqHITSZ/verilog-axis
|
7c6da337b05e5eea675159798aa979f459d89095
|
[
"MIT"
] | null | null | null |
tb/test_axis_switch_4x4_64.py
|
nqHITSZ/verilog-axis
|
7c6da337b05e5eea675159798aa979f459d89095
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Copyright (c) 2016-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
module = 'axis_switch_4x4'
testbench = 'test_%s_64' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/arbiter.v")
srcs.append("../rtl/priority_encoder.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
DATA_WIDTH = 64
KEEP_ENABLE = (DATA_WIDTH>8)
KEEP_WIDTH = (DATA_WIDTH/8)
ID_ENABLE = 1
ID_WIDTH = 8
DEST_WIDTH = 3
USER_ENABLE = 1
USER_WIDTH = 1
OUT_0_BASE = 0
OUT_0_TOP = 0
OUT_0_CONNECT = 0xf
OUT_1_BASE = 1
OUT_1_TOP = 1
OUT_1_CONNECT = 0xf
OUT_2_BASE = 2
OUT_2_TOP = 2
OUT_2_CONNECT = 0xf
OUT_3_BASE = 3
OUT_3_TOP = 3
OUT_3_CONNECT = 0xf
ARB_TYPE = "ROUND_ROBIN"
LSB_PRIORITY = "HIGH"
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
input_0_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
input_0_axis_tkeep = Signal(intbv(1)[KEEP_WIDTH:])
input_0_axis_tvalid = Signal(bool(0))
input_0_axis_tlast = Signal(bool(0))
input_0_axis_tid = Signal(intbv(0)[ID_WIDTH:])
input_0_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
input_0_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
input_1_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
input_1_axis_tkeep = Signal(intbv(1)[KEEP_WIDTH:])
input_1_axis_tvalid = Signal(bool(0))
input_1_axis_tlast = Signal(bool(0))
input_1_axis_tid = Signal(intbv(0)[ID_WIDTH:])
input_1_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
input_1_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
input_2_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
input_2_axis_tkeep = Signal(intbv(1)[KEEP_WIDTH:])
input_2_axis_tvalid = Signal(bool(0))
input_2_axis_tlast = Signal(bool(0))
input_2_axis_tid = Signal(intbv(0)[ID_WIDTH:])
input_2_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
input_2_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
input_3_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
input_3_axis_tkeep = Signal(intbv(1)[KEEP_WIDTH:])
input_3_axis_tvalid = Signal(bool(0))
input_3_axis_tlast = Signal(bool(0))
input_3_axis_tid = Signal(intbv(0)[ID_WIDTH:])
input_3_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
input_3_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
output_0_axis_tready = Signal(bool(0))
output_1_axis_tready = Signal(bool(0))
output_2_axis_tready = Signal(bool(0))
output_3_axis_tready = Signal(bool(0))
# Outputs
input_0_axis_tready = Signal(bool(0))
input_1_axis_tready = Signal(bool(0))
input_2_axis_tready = Signal(bool(0))
input_3_axis_tready = Signal(bool(0))
output_0_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
output_0_axis_tkeep = Signal(intbv(1)[KEEP_WIDTH:])
output_0_axis_tvalid = Signal(bool(0))
output_0_axis_tlast = Signal(bool(0))
output_0_axis_tid = Signal(intbv(0)[ID_WIDTH:])
output_0_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
output_0_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
output_1_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
output_1_axis_tkeep = Signal(intbv(1)[KEEP_WIDTH:])
output_1_axis_tvalid = Signal(bool(0))
output_1_axis_tlast = Signal(bool(0))
output_1_axis_tid = Signal(intbv(0)[ID_WIDTH:])
output_1_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
output_1_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
output_2_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
output_2_axis_tkeep = Signal(intbv(1)[KEEP_WIDTH:])
output_2_axis_tvalid = Signal(bool(0))
output_2_axis_tlast = Signal(bool(0))
output_2_axis_tid = Signal(intbv(0)[ID_WIDTH:])
output_2_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
output_2_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
output_3_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
output_3_axis_tkeep = Signal(intbv(1)[KEEP_WIDTH:])
output_3_axis_tvalid = Signal(bool(0))
output_3_axis_tlast = Signal(bool(0))
output_3_axis_tid = Signal(intbv(0)[ID_WIDTH:])
output_3_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
output_3_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
# sources and sinks
source_0_pause = Signal(bool(0))
source_1_pause = Signal(bool(0))
source_2_pause = Signal(bool(0))
source_3_pause = Signal(bool(0))
sink_0_pause = Signal(bool(0))
sink_1_pause = Signal(bool(0))
sink_2_pause = Signal(bool(0))
sink_3_pause = Signal(bool(0))
source_0 = axis_ep.AXIStreamSource()
source_0_logic = source_0.create_logic(
clk,
rst,
tdata=input_0_axis_tdata,
tkeep=input_0_axis_tkeep,
tvalid=input_0_axis_tvalid,
tready=input_0_axis_tready,
tlast=input_0_axis_tlast,
tid=input_0_axis_tid,
tdest=input_0_axis_tdest,
tuser=input_0_axis_tuser,
pause=source_0_pause,
name='source_0'
)
source_1 = axis_ep.AXIStreamSource()
source_1_logic = source_1.create_logic(
clk,
rst,
tdata=input_1_axis_tdata,
tkeep=input_1_axis_tkeep,
tvalid=input_1_axis_tvalid,
tready=input_1_axis_tready,
tlast=input_1_axis_tlast,
tid=input_1_axis_tid,
tdest=input_1_axis_tdest,
tuser=input_1_axis_tuser,
pause=source_1_pause,
name='source_1'
)
source_2 = axis_ep.AXIStreamSource()
source_2_logic = source_2.create_logic(
clk,
rst,
tdata=input_2_axis_tdata,
tkeep=input_2_axis_tkeep,
tvalid=input_2_axis_tvalid,
tready=input_2_axis_tready,
tlast=input_2_axis_tlast,
tid=input_2_axis_tid,
tdest=input_2_axis_tdest,
tuser=input_2_axis_tuser,
pause=source_2_pause,
name='source_2'
)
source_3 = axis_ep.AXIStreamSource()
source_3_logic = source_3.create_logic(
clk,
rst,
tdata=input_3_axis_tdata,
tkeep=input_3_axis_tkeep,
tvalid=input_3_axis_tvalid,
tready=input_3_axis_tready,
tlast=input_3_axis_tlast,
tid=input_3_axis_tid,
tdest=input_3_axis_tdest,
tuser=input_3_axis_tuser,
pause=source_3_pause,
name='source_3'
)
sink_0 = axis_ep.AXIStreamSink()
sink_0_logic = sink_0.create_logic(
clk,
rst,
tdata=output_0_axis_tdata,
tkeep=output_0_axis_tkeep,
tvalid=output_0_axis_tvalid,
tready=output_0_axis_tready,
tlast=output_0_axis_tlast,
tid=output_0_axis_tid,
tdest=output_0_axis_tdest,
tuser=output_0_axis_tuser,
pause=sink_0_pause,
name='sink_0'
)
sink_1 = axis_ep.AXIStreamSink()
sink_1_logic = sink_1.create_logic(
clk,
rst,
tdata=output_1_axis_tdata,
tkeep=output_1_axis_tkeep,
tvalid=output_1_axis_tvalid,
tready=output_1_axis_tready,
tlast=output_1_axis_tlast,
tid=output_1_axis_tid,
tdest=output_1_axis_tdest,
tuser=output_1_axis_tuser,
pause=sink_1_pause,
name='sink_1'
)
sink_2 = axis_ep.AXIStreamSink()
sink_2_logic = sink_2.create_logic(
clk,
rst,
tdata=output_2_axis_tdata,
tkeep=output_2_axis_tkeep,
tvalid=output_2_axis_tvalid,
tready=output_2_axis_tready,
tlast=output_2_axis_tlast,
tid=output_2_axis_tid,
tdest=output_2_axis_tdest,
tuser=output_2_axis_tuser,
pause=sink_2_pause,
name='sink_2'
)
sink_3 = axis_ep.AXIStreamSink()
sink_3_logic = sink_3.create_logic(
clk,
rst,
tdata=output_3_axis_tdata,
tkeep=output_3_axis_tkeep,
tvalid=output_3_axis_tvalid,
tready=output_3_axis_tready,
tlast=output_3_axis_tlast,
tid=output_3_axis_tid,
tdest=output_3_axis_tdest,
tuser=output_3_axis_tuser,
pause=sink_3_pause,
name='sink_3'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
input_0_axis_tdata=input_0_axis_tdata,
input_0_axis_tkeep=input_0_axis_tkeep,
input_0_axis_tvalid=input_0_axis_tvalid,
input_0_axis_tready=input_0_axis_tready,
input_0_axis_tlast=input_0_axis_tlast,
input_0_axis_tid=input_0_axis_tid,
input_0_axis_tdest=input_0_axis_tdest,
input_0_axis_tuser=input_0_axis_tuser,
input_1_axis_tdata=input_1_axis_tdata,
input_1_axis_tkeep=input_1_axis_tkeep,
input_1_axis_tvalid=input_1_axis_tvalid,
input_1_axis_tready=input_1_axis_tready,
input_1_axis_tlast=input_1_axis_tlast,
input_1_axis_tid=input_1_axis_tid,
input_1_axis_tdest=input_1_axis_tdest,
input_1_axis_tuser=input_1_axis_tuser,
input_2_axis_tdata=input_2_axis_tdata,
input_2_axis_tkeep=input_2_axis_tkeep,
input_2_axis_tvalid=input_2_axis_tvalid,
input_2_axis_tready=input_2_axis_tready,
input_2_axis_tlast=input_2_axis_tlast,
input_2_axis_tid=input_2_axis_tid,
input_2_axis_tdest=input_2_axis_tdest,
input_2_axis_tuser=input_2_axis_tuser,
input_3_axis_tdata=input_3_axis_tdata,
input_3_axis_tkeep=input_3_axis_tkeep,
input_3_axis_tvalid=input_3_axis_tvalid,
input_3_axis_tready=input_3_axis_tready,
input_3_axis_tlast=input_3_axis_tlast,
input_3_axis_tid=input_3_axis_tid,
input_3_axis_tdest=input_3_axis_tdest,
input_3_axis_tuser=input_3_axis_tuser,
output_0_axis_tdata=output_0_axis_tdata,
output_0_axis_tkeep=output_0_axis_tkeep,
output_0_axis_tvalid=output_0_axis_tvalid,
output_0_axis_tready=output_0_axis_tready,
output_0_axis_tlast=output_0_axis_tlast,
output_0_axis_tid=output_0_axis_tid,
output_0_axis_tdest=output_0_axis_tdest,
output_0_axis_tuser=output_0_axis_tuser,
output_1_axis_tdata=output_1_axis_tdata,
output_1_axis_tkeep=output_1_axis_tkeep,
output_1_axis_tvalid=output_1_axis_tvalid,
output_1_axis_tready=output_1_axis_tready,
output_1_axis_tlast=output_1_axis_tlast,
output_1_axis_tid=output_1_axis_tid,
output_1_axis_tdest=output_1_axis_tdest,
output_1_axis_tuser=output_1_axis_tuser,
output_2_axis_tdata=output_2_axis_tdata,
output_2_axis_tkeep=output_2_axis_tkeep,
output_2_axis_tvalid=output_2_axis_tvalid,
output_2_axis_tready=output_2_axis_tready,
output_2_axis_tlast=output_2_axis_tlast,
output_2_axis_tid=output_2_axis_tid,
output_2_axis_tdest=output_2_axis_tdest,
output_2_axis_tuser=output_2_axis_tuser,
output_3_axis_tdata=output_3_axis_tdata,
output_3_axis_tkeep=output_3_axis_tkeep,
output_3_axis_tvalid=output_3_axis_tvalid,
output_3_axis_tready=output_3_axis_tready,
output_3_axis_tlast=output_3_axis_tlast,
output_3_axis_tid=output_3_axis_tid,
output_3_axis_tdest=output_3_axis_tdest,
output_3_axis_tuser=output_3_axis_tuser
)
@always(delay(4))
def clkgen():
clk.next = not clk
def wait_normal():
while input_0_axis_tvalid or input_1_axis_tvalid or input_2_axis_tvalid or input_3_axis_tvalid:
yield clk.posedge
def wait_pause_source():
while input_0_axis_tvalid or input_1_axis_tvalid or input_2_axis_tvalid or input_3_axis_tvalid:
source_0_pause.next = True
source_1_pause.next = True
source_2_pause.next = True
source_3_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
source_0_pause.next = False
source_1_pause.next = False
source_2_pause.next = False
source_3_pause.next = False
yield clk.posedge
def wait_pause_sink():
while input_0_axis_tvalid or input_1_axis_tvalid or input_2_axis_tvalid or input_3_axis_tvalid:
sink_0_pause.next = True
sink_1_pause.next = True
sink_2_pause.next = True
sink_3_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
sink_0_pause.next = False
sink_1_pause.next = False
sink_2_pause.next = False
sink_3_pause.next = False
yield clk.posedge
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
# testbench stimulus
yield clk.posedge
print("test 1: 0123 -> 0123")
current_test.next = 1
test_frame0 = axis_ep.AXIStreamFrame(b'\x01\x00\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x01\x01\x01\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=1, dest=1)
test_frame2 = axis_ep.AXIStreamFrame(b'\x01\x02\x02\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=2, dest=2)
test_frame3 = axis_ep.AXIStreamFrame(b'\x01\x03\x03\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=3, dest=3)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source_0.send(test_frame0)
source_1.send(test_frame1)
source_2.send(test_frame2)
source_3.send(test_frame3)
yield clk.posedge
yield clk.posedge
yield wait()
yield clk.posedge
yield clk.posedge
rx_frame0 = sink_0.recv()
assert rx_frame0 == test_frame0
rx_frame1 = sink_1.recv()
assert rx_frame1 == test_frame1
rx_frame2 = sink_2.recv()
assert rx_frame2 == test_frame2
rx_frame3 = sink_3.recv()
assert rx_frame3 == test_frame3
yield delay(100)
yield clk.posedge
print("test 2: 0123 -> 3210")
current_test.next = 2
test_frame0 = axis_ep.AXIStreamFrame(b'\x02\x00\x03\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=3)
test_frame1 = axis_ep.AXIStreamFrame(b'\x02\x01\x02\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=1, dest=2)
test_frame2 = axis_ep.AXIStreamFrame(b'\x02\x02\x01\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=2, dest=1)
test_frame3 = axis_ep.AXIStreamFrame(b'\x02\x03\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=3, dest=0)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source_0.send(test_frame0)
source_1.send(test_frame1)
source_2.send(test_frame2)
source_3.send(test_frame3)
yield clk.posedge
yield clk.posedge
yield wait()
yield clk.posedge
yield clk.posedge
rx_frame0 = sink_0.recv()
assert rx_frame0 == test_frame3
rx_frame1 = sink_1.recv()
assert rx_frame1 == test_frame2
rx_frame2 = sink_2.recv()
assert rx_frame2 == test_frame1
rx_frame3 = sink_3.recv()
assert rx_frame3 == test_frame0
yield delay(100)
yield clk.posedge
print("test 3: 0000 -> 0123")
current_test.next = 3
test_frame0 = axis_ep.AXIStreamFrame(b'\x02\x00\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x02\x00\x01\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=1)
test_frame2 = axis_ep.AXIStreamFrame(b'\x02\x00\x02\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=2)
test_frame3 = axis_ep.AXIStreamFrame(b'\x02\x00\x03\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=3)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source_0.send(test_frame0)
source_0.send(test_frame1)
source_0.send(test_frame2)
source_0.send(test_frame3)
yield clk.posedge
yield clk.posedge
yield wait()
yield clk.posedge
yield clk.posedge
rx_frame0 = sink_0.recv()
assert rx_frame0 == test_frame0
rx_frame1 = sink_1.recv()
assert rx_frame1 == test_frame1
rx_frame2 = sink_2.recv()
assert rx_frame2 == test_frame2
rx_frame3 = sink_3.recv()
assert rx_frame3 == test_frame3
yield delay(100)
yield clk.posedge
print("test 4: 0123 -> 0000")
current_test.next = 4
test_frame0 = axis_ep.AXIStreamFrame(b'\x02\x00\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x02\x01\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=1, dest=0)
test_frame2 = axis_ep.AXIStreamFrame(b'\x02\x02\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=2, dest=0)
test_frame3 = axis_ep.AXIStreamFrame(b'\x02\x03\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=3, dest=0)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source_0.send(test_frame0)
yield clk.posedge
source_1.send(test_frame1)
source_2.send(test_frame2)
source_3.send(test_frame3)
yield clk.posedge
yield wait()
yield clk.posedge
yield clk.posedge
rx_frame0 = sink_0.recv()
assert rx_frame0 == test_frame0
rx_frame1 = sink_0.recv()
assert rx_frame1 == test_frame1
rx_frame2 = sink_0.recv()
assert rx_frame2 == test_frame2
rx_frame3 = sink_0.recv()
assert rx_frame3 == test_frame3
yield delay(100)
yield clk.posedge
print("test 1: bad decoding")
current_test.next = 1
test_frame0 = axis_ep.AXIStreamFrame(b'\x01\x00\x00\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=0, dest=0)
test_frame1 = axis_ep.AXIStreamFrame(b'\x01\x01\x01\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=1, dest=1)
test_frame2 = axis_ep.AXIStreamFrame(b'\x01\x02\x04\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=2, dest=4)
test_frame3 = axis_ep.AXIStreamFrame(b'\x01\x03\x05\xFF\x01\x02\x03\x04\x05\x06\x07\x08', id=3, dest=5)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source_0.send(test_frame0)
source_1.send(test_frame1)
source_2.send(test_frame2)
source_3.send(test_frame3)
yield clk.posedge
yield clk.posedge
yield wait()
yield clk.posedge
yield clk.posedge
rx_frame0 = sink_0.recv()
assert rx_frame0 == test_frame0
rx_frame1 = sink_1.recv()
assert rx_frame1 == test_frame1
yield delay(100)
raise StopSimulation
return dut, source_0_logic, source_1_logic, source_2_logic, source_3_logic, sink_0_logic, sink_1_logic, sink_2_logic, sink_3_logic, clkgen, check
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| 33.504065
| 149
| 0.663771
|
77a700343b7a547884d858e3ecd1e416636b7bd3
| 2,012
|
py
|
Python
|
pyisis/tests/test_parser.py
|
rodsenra/pyisis
|
f5815fd096a463902893f87f309f8117b5705621
|
[
"MIT"
] | null | null | null |
pyisis/tests/test_parser.py
|
rodsenra/pyisis
|
f5815fd096a463902893f87f309f8117b5705621
|
[
"MIT"
] | null | null | null |
pyisis/tests/test_parser.py
|
rodsenra/pyisis
|
f5815fd096a463902893f87f309f8117b5705621
|
[
"MIT"
] | 2
|
2019-11-08T20:51:54.000Z
|
2021-08-17T23:49:48.000Z
|
# -*- coding: utf-8 -*-
"""
File to test Formatting Language Parser
"""
__created__ = "2008-05-27"
__updated__ = "2008-05-26"
__author__ = "Rodrigo Senra <rsenra@acm.org>"
import tempfile
from pyisis.records import MasterRecord
from pyisis.lexer import PftLexer
from pyisis.parser import PftParser
from pyisis.ast import *
from pyisis.tests.config_tests import test_data, Lyer, initialize, run_list
config = initialize()
tmpdir = tempfile.gettempdir()
lexer = PftLexer()
parser = PftParser(lexer=lexer.lexer, debug=False, outputdir=tmpdir)
compiler = PftCompiler()
def setup():
"Fixture: Called before every test to reset the lexer"
pass
def test_empty():
"""Parse empty expression"""
try:
pft_ast = parser.parse("")
except RuntimeError:
assert False, "Incorrect handling of empty expression"
else:
pass
def test_field():
"""Parse field access"""
ast = parser.parse("V70")
assert type(ast)==Field, "AST has no FieldAccess node as first node"
def test_compile_code():
"""Compile a simple ast into Python executable code"""
rec = MasterRecord()
data = "Paper on: <plant physiology><plant transpiration><measurement and instruments>"
rec.v69 = data
ast = parser.parse("V69")
chain = flatten(ast)
pft = compiler.compile_code(chain)
result = pft(rec=rec, mst=None)
#print "result=", repr(result), "\ndata=", repr(data)
# remove line braks prior to comparison with original data
assert result.replace("\n","")==data
def test_ilit():
"""Test Inconditional literal"""
rec = MasterRecord()
rec.v26 = "^aParis^bUnesco^c-1965"
rec.v30 = "^ap. 211-224^billus."
ast = parser.parse("'Pais:'v26^a,v30^a")
def debug_format(fmt):
    lexer.input(fmt)
    print("Lexer\n")
    for tok in lexer:
        print(tok)
    print("Parser\n")
    ast = parser.parse(fmt)
    print(ast)
if __name__ == "__main__":
#test_empty()
#test_field()
#test_ilit()
#test_compile_code()
pass
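# A minimal sketch (not part of the original test suite) of exercising the
# same pipeline as test_compile_code(): parse a format, flatten it, compile
# it and apply it to a record. The field number and value are placeholders.
def example_manual_run():
    rec = MasterRecord()
    rec.v24 = "Example title"
    ast = parser.parse("V24")
    chain = flatten(ast)
    pft = compiler.compile_code(chain)
    # The compiled format is a callable taking the record (and optionally
    # the master file) and returning the formatted output.
    return pft(rec=rec, mst=None)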
| 24.839506 | 91 | 0.668489 |
5d23324bebbb96bcd86062de4dbb3f8f58d2f232 | 40 | py | Python | bot_facebook/views/__init__.py | lariodiniz/BootPlayGame | 1ba40611eb085e2deaff63a36bdb669b594c7c02 | ["MIT"] | null | null | null | bot_facebook/views/__init__.py | lariodiniz/BootPlayGame | 1ba40611eb085e2deaff63a36bdb669b594c7c02 | ["MIT"] | null | null | null | bot_facebook/views/__init__.py | lariodiniz/BootPlayGame | 1ba40611eb085e2deaff63a36bdb669b594c7c02 | ["MIT"] | null | null | null |
from .ativa_bot_view import ativaBotView
| 40 | 40 | 0.9 |
d6087e3e49d300f1770548713421b41f7906d0a8 | 5,713 | py | Python | arcade/examples/sprite_collect_coins_diff_levels.py | KommentatorForAll/arcade | 88343fe87efc1bd619412261a725a8f04de08fc7 | ["MIT"] | null | null | null | arcade/examples/sprite_collect_coins_diff_levels.py | KommentatorForAll/arcade | 88343fe87efc1bd619412261a725a8f04de08fc7 | ["MIT"] | null | null | null | arcade/examples/sprite_collect_coins_diff_levels.py | KommentatorForAll/arcade | 88343fe87efc1bd619412261a725a8f04de08fc7 | ["MIT"] | null | null | null |
"""
Sprite Collect Coins with Different Levels
Simple program to show basic sprite usage.
Artwork from https://kenney.nl
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.sprite_collect_coins_diff_levels
"""
import random
import arcade
import os
SPRITE_SCALING = 0.5
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Sprite Collect Coins with Different Levels Example"
class FallingCoin(arcade.Sprite):
""" Simple sprite that falls down """
def update(self):
""" Move the coin """
# Fall down
self.center_y -= 2
# Did we go off the screen? If so, pop back to the top.
if self.top < 0:
self.bottom = SCREEN_HEIGHT
class RisingCoin(arcade.Sprite):
""" Simple sprite that falls up """
def update(self):
""" Move the coin """
# Move up
self.center_y += 2
# Did we go off the screen? If so, pop back to the bottom.
if self.bottom > SCREEN_HEIGHT:
self.top = 0
class MyGame(arcade.Window):
"""
Main application class.
"""
def __init__(self, width, height, title):
""" Initialize """
# Call the parent class initializer
super().__init__(width, height, title)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Variables that will hold sprite lists
self.player_list = None
self.coin_list = None
# Set up the player info
self.player_sprite = None
self.score = 0
self.level = 1
# Don't show the mouse cursor
self.set_mouse_visible(False)
# Set the background color
arcade.set_background_color(arcade.color.AMAZON)
def level_1(self):
for i in range(20):
# Create the coin instance
coin = arcade.Sprite(":resources:images/items/coinGold.png", SPRITE_SCALING / 3)
# Position the coin
coin.center_x = random.randrange(SCREEN_WIDTH)
coin.center_y = random.randrange(SCREEN_HEIGHT)
# Add the coin to the lists
self.coin_list.append(coin)
def level_2(self):
for i in range(30):
# Create the coin instance
coin = FallingCoin(":resources:images/items/coinBronze.png" , SPRITE_SCALING / 2)
# Position the coin
coin.center_x = random.randrange(SCREEN_WIDTH)
coin.center_y = random.randrange(SCREEN_HEIGHT, SCREEN_HEIGHT * 2)
# Add the coin to the lists
self.coin_list.append(coin)
def level_3(self):
for i in range(30):
# Create the coin instance
coin = RisingCoin(":resources:images/items/coinSilver.png" , SPRITE_SCALING / 2)
# Position the coin
coin.center_x = random.randrange(SCREEN_WIDTH)
coin.center_y = random.randrange(-SCREEN_HEIGHT, 0)
# Add the coin to the lists
self.coin_list.append(coin)
def setup(self):
""" Set up the game and initialize the variables. """
self.score = 0
self.level = 1
# Sprite lists
self.player_list = arcade.SpriteList()
self.coin_list = arcade.SpriteList()
# Set up the player
self.player_sprite = arcade.Sprite(":resources:images/animated_characters/female_person/femalePerson_idle.png",
SPRITE_SCALING)
self.player_sprite.center_x = 50
self.player_sprite.center_y = 50
self.player_list.append(self.player_sprite)
self.level_1()
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Draw all the sprites.
self.player_sprite.draw()
self.coin_list.draw()
# Put the text on the screen.
output = f"Score: {self.score}"
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 15)
output = f"Level: {self.level}"
arcade.draw_text(output, 10, 35, arcade.color.WHITE, 15)
def on_mouse_motion(self, x, y, dx, dy):
"""
Called whenever the mouse moves.
"""
self.player_sprite.center_x = x
self.player_sprite.center_y = y
def on_update(self, delta_time):
""" Movement and game logic """
# Call update on all sprites (The sprites don't do much in this
# example though.)
self.coin_list.update()
# Generate a list of all sprites that collided with the player.
hit_list = arcade.check_for_collision_with_list(self.player_sprite, self.coin_list)
# Loop through each colliding sprite, remove it, and add to the score.
for coin in hit_list:
coin.remove_from_sprite_lists()
self.score += 1
# See if we should go to level 2
if len(self.coin_list) == 0 and self.level == 1:
self.level += 1
self.level_2()
# See if we should go to level 3
elif len(self.coin_list) == 0 and self.level == 2:
self.level += 1
self.level_3()
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
| 28.142857 | 119 | 0.605986 |
135e63503762daca2c8b5e855e2ffd35ebaefde4 | 809 | py | Python | website/cloud_site/twitter_app/urls.py | arshmankhalid/twitter_sentiment_analysis | b67dd3b1dd81b2efd67e1de2d4de289b9987dc28 | ["MIT"] | null | null | null | website/cloud_site/twitter_app/urls.py | arshmankhalid/twitter_sentiment_analysis | b67dd3b1dd81b2efd67e1de2d4de289b9987dc28 | ["MIT"] | null | null | null | website/cloud_site/twitter_app/urls.py | arshmankhalid/twitter_sentiment_analysis | b67dd3b1dd81b2efd67e1de2d4de289b9987dc28 | ["MIT"] | 3 | 2020-05-25T13:38:12.000Z | 2020-05-26T01:30:35.000Z |
from django.conf.urls import url
from . import views
app_name = 'twitter_app'
urlpatterns = [
url(r'^$', views.HomeTemplateView.as_view(), name='home_page'),
url(r'^map/$', views.MapTemplateView.as_view(), name='map_page'),
url(r'^team/$', views.TeamTemplateView.as_view(), name='team_page'),
url(r'^day_sum_table/$', views.DaySumTableTemplateView.as_view(), name='day_sum_table'),
url(r'^hour_sum_table/$', views.HourSumTableTemplateView.as_view(), name='hour_sum_table'),
url(r'^map/$', views.MapTemplateView.as_view(), name='map'),
url(r'^hash_tag/$', views.HashTagTemplateView.as_view(), name='hash_tag'),
url(r'^region/$', views.LocationTemplateView.as_view(), name='region'),
url(r'^avg_length/$', views.LengthTemplateView.as_view(), name='avg_length'),
]
| 31.115385 | 95 | 0.690977 |
b1b3ba6737fd921cbef7489aa07d0b5c6891f332 | 2,725 | py | Python | mtp_api/apps/credit/management/commands/send_prisoner_credit_notices.py | ministryofjustice/money-to-prisoners-api | 7d86791c24f2cea19dd9ca3f69fad7693c841949 | ["MIT"] | 5 | 2016-01-05T12:21:35.000Z | 2020-10-28T17:06:02.000Z | mtp_api/apps/credit/management/commands/send_prisoner_credit_notices.py | ministryofjustice/money-to-prisoners-api | 7d86791c24f2cea19dd9ca3f69fad7693c841949 | ["MIT"] | 209 | 2015-06-12T09:39:41.000Z | 2022-03-21T16:01:19.000Z | mtp_api/apps/credit/management/commands/send_prisoner_credit_notices.py | ministryofjustice/money-to-prisoners-api | 7d86791c24f2cea19dd9ca3f69fad7693c841949 | ["MIT"] | 1 | 2021-04-11T06:19:23.000Z | 2021-04-11T06:19:23.000Z |
import pathlib
import shutil
import tempfile
from django.core.management import BaseCommand, call_command
from mtp_common.tasks import send_email
from credit.management.commands.create_prisoner_credit_notices import parsed_date_or_yesterday
from prison.models import PrisonerCreditNoticeEmail
class Command(BaseCommand):
"""
Emails a PDF bundle of credit notices to prisons
"""
help = __doc__.strip().splitlines()[0]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.verbosity = 1
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument('--prison', help='NOMIS id of prison, defaults to all prisons.')
parser.add_argument('--date', help='Credited date, defaults to yesterday.')
def handle(self, prison=None, date=None, **options):
self.verbosity = options.get('verbosity', self.verbosity)
if not prison:
credit_notice_emails = PrisonerCreditNoticeEmail.objects.all()
else:
credit_notice_emails = PrisonerCreditNoticeEmail.objects.filter(prison=prison)
if not credit_notice_emails.exists():
if prison:
self.stderr.write('No email address found for %s' % prison)
else:
self.stderr.write('No known email addresses')
return
bundle_dir = pathlib.Path(tempfile.mkdtemp())
try:
for credit_notice_email in credit_notice_emails:
path = bundle_dir / ('prison-credits-%s.pdf' % credit_notice_email.prison.nomis_id)
self.handle_prison(credit_notice_email, path, date, **options)
finally:
if bundle_dir.exists():
shutil.rmtree(str(bundle_dir))
def handle_prison(self, credit_notice_email, path, date, **options):
call_command(
'create_prisoner_credit_notices',
path, credit_notice_email.prison.nomis_id,
date=date, **options
)
date_reference = parsed_date_or_yesterday(date).strftime('%Y-%m-%d')
if not path.exists():
if self.verbosity:
self.stdout.write('Nothing to send to %s' % credit_notice_email)
return
if self.verbosity:
self.stdout.write('Sending prisoner notice email to %s' % credit_notice_email)
send_email(
template_name='api-prisoner-notice-email',
to=credit_notice_email.email,
personalisation={
'attachment': path.read_bytes(),
},
reference=f'credit-notices-{date_reference}-{credit_notice_email.prison.nomis_id}',
staff_email=True,
)
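# A minimal sketch (not part of the original module) of how this management
# command could be invoked programmatically; the prison NOMIS id and the date
# below are placeholder values, not taken from the project.
def example_invocation():
    # call_command is already imported at the top of this module.
    call_command('send_prisoner_credit_notices',
                 prison='ABC', date='2020-01-01', verbosity=1)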
| 37.328767 | 99 | 0.64 |
718a082d3ed2e87592944ecbf791edcdfbedd4c7 | 1,551 | py | Python | backend/tasks/utils.py | dbauducco/DistributedReplays | 07e6f4c2bf104e98102b092d8a1a3ce2ac7ab291 | ["Apache-2.0"] | 69 | 2018-07-17T19:40:21.000Z | 2022-02-25T14:23:53.000Z | backend/tasks/utils.py | dbauducco/DistributedReplays | 07e6f4c2bf104e98102b092d8a1a3ce2ac7ab291 | ["Apache-2.0"] | 335 | 2018-07-25T19:34:55.000Z | 2022-02-26T06:04:32.000Z | backend/tasks/utils.py | dbauducco/DistributedReplays | 07e6f4c2bf104e98102b092d8a1a3ce2ac7ab291 | ["Apache-2.0"] | 42 | 2018-07-21T00:04:23.000Z | 2022-02-25T14:23:42.000Z |
from backend.database.startup import get_strict_redis
PRIORITY_SEP = '\x06\x16'
DEFAULT_PRIORITY_STEPS = [0, 3, 6, 9]
def make_queue_name_for_pri(queue, pri):
"""Make a queue name for redis
Celery uses PRIORITY_SEP to separate different priorities of tasks into
different queues in Redis. Each queue-priority combination becomes a key in
redis with names like:
- batch1\x06\x163 <-- P3 queue named batch1
There's more information about this in Github, but it doesn't look like it
will change any time soon:
- https://github.com/celery/kombu/issues/422
In that ticket the code below, from the Flower project, is referenced:
- https://github.com/mher/flower/blob/master/flower/utils/broker.py#L135
:param queue: The name of the queue to make a name for.
:param pri: The priority to make a name with.
:return: A name for the queue-priority pair.
"""
if pri not in DEFAULT_PRIORITY_STEPS:
raise ValueError('Priority not in priority steps')
return '{0}{1}{2}'.format(*((queue, PRIORITY_SEP, pri) if pri else
(queue, '', '')))
def get_queue_length(queue_name='celery'):
"""Get the number of tasks in a celery queue.
:param queue_name: The name of the queue you want to inspect.
    :return: a list with the number of tasks in each priority sub-queue.
"""
priority_names = [make_queue_name_for_pri(queue_name, pri) for pri in
DEFAULT_PRIORITY_STEPS]
r = get_strict_redis()
return [r.llen(x) for x in priority_names]
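# A minimal sketch (not part of the original module) illustrating the naming
# scheme documented in make_queue_name_for_pri(): priority 0 maps to the bare
# queue name, while other priorities get the PRIORITY_SEP suffix, e.g.
# 'batch1\x06\x163' for priority 3. 'batch1' is a placeholder queue name.
if __name__ == '__main__':
    for priority in DEFAULT_PRIORITY_STEPS:
        print(repr(make_queue_name_for_pri('batch1', priority)))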
| 34.466667 | 79 | 0.681496 |
b81df8793b206a4a3f1ef1db8d3506c983974c4f | 3,485 | py | Python | nussl/separation/factorization/ica.py | ZhaoJY1/nussl | 57aabeabca3b2e75849e1659a522e3c2f77e9172 | ["MIT"] | 259 | 2016-10-04T19:21:25.000Z | 2020-04-15T14:39:54.000Z | nussl/separation/factorization/ica.py | titocaco/nussl | af7d0c50e01d107f4ef3305b89eb130d95d0a7cd | ["MIT"] | 115 | 2016-10-26T01:58:04.000Z | 2020-04-15T07:53:00.000Z | nussl/separation/factorization/ica.py | titocaco/nussl | af7d0c50e01d107f4ef3305b89eb130d95d0a7cd | ["MIT"] | 64 | 2017-04-11T04:10:35.000Z | 2020-04-08T11:23:48.000Z |
import copy
import numpy as np
import sklearn
from .. import SeparationBase
from ... import AudioSignal
from ...core import utils
class ICA(SeparationBase):
"""
Separate sources using the Independent Component Analysis, given
observations of the audio scene. nussl's ICA is a wrapper for sci-kit learn's
implementation of FastICA, and provides a way to interop between
nussl's :ref:`AudioSignal` objects and FastICA.
References:
`sci-kit learn FastICA <http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.fastica.html>`_
Args:
audio_signals: list of AudioSignal objects containing the observations
of the mixture. Will be converted into a single multichannel AudioSignal.
max_iterations (int): Max number of iterations to run ICA for. Defaults to 200.
**kwargs: Additional keyword arguments that will be passed to
`sklearn.decomposition.FastICA`
"""
def __init__(self, audio_signals, max_iterations=200, **kwargs):
super().__init__(input_audio_signal=audio_signals)
# FastICA setup attributes
self.num_components = self.audio_signal.num_channels
self.kwargs = kwargs
self.max_iterations = max_iterations
# Results attributes
self.estimated_sources = None
self.estimated_mixing_params = None
self.mean = None
self.ica_output = None
@property
def audio_signal(self):
"""
Copy of AudioSignal that is made on initialization.
"""
return self._audio_signal
@audio_signal.setter
def audio_signal(self, audio_signals):
"""
Takes a list of audio signals and constructs a single multichannel audio signal
object.
Args:
audio_signal (list or AudioSignal): Either a multichannel audio signal,
or a list of AudioSignals containing the observations.
"""
if isinstance(audio_signals, list):
audio_signals = utils.verify_audio_signal_list_strict(audio_signals)
audio_data = np.vstack([s.audio_data for s in audio_signals])
audio_signal = audio_signals[0].make_copy_with_audio_data(audio_data)
self._audio_signal = audio_signal
elif isinstance(audio_signals, AudioSignal):
self._audio_signal = copy.deepcopy(audio_signals)
def run(self):
ica = sklearn.decomposition.FastICA(
n_components=self.num_components, max_iter=self.max_iterations,
**self.kwargs)
# save for normalizing the estimated signals
max_input_amplitude = np.max(np.abs(self.audio_signal.audio_data))
# run ICA
ica_output = ica.fit_transform(self.audio_signal.audio_data.T).T
# now normalize the estimated signals
max_output_amplitude = np.max(np.abs(ica_output))
ica_output /= max_output_amplitude
ica_output *= max_input_amplitude
# store the resultant computations
self.estimated_mixing_params = ica.mixing_
self.mean = ica.mean_
self.ica_output = ica_output
return self.ica_output
def make_audio_signals(self):
estimated_sources = [
AudioSignal(
audio_data_array=self.ica_output[i, :],
sample_rate=self.audio_signal.sample_rate)
for i in range(self.ica_output.shape[0])
]
return estimated_sources
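# A minimal sketch (not part of the original module) of the workflow described
# in the class docstring: wrap the observations in AudioSignal objects, run
# FastICA, and collect the estimated sources. The file paths are placeholders.
def example_ica_separation(observation_paths):
    observations = [AudioSignal(path) for path in observation_paths]
    separator = ICA(observations, max_iterations=200)
    separator.run()
    return separator.make_audio_signals()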
| 34.166667 | 118 | 0.670301 |
f162f3c0ca3235cf11da6f3280fa78a64d00294d | 98 | py | Python | theme_kit/models/__init__.py | agenterpgmbh/misc-addons | 27e36d119b1e73089a2ebfcd8d4cfc706c8f1f41 | ["MIT"] | null | null | null | theme_kit/models/__init__.py | agenterpgmbh/misc-addons | 27e36d119b1e73089a2ebfcd8d4cfc706c8f1f41 | ["MIT"] | 1 | 2020-05-03T04:27:29.000Z | 2020-05-03T04:27:29.000Z | theme_kit/models/__init__.py | eneldoserrata/misc-addons | 6f3b94d8a71d603d9ad449f96edfc66385e78080 | ["MIT"] | 1 | 2022-02-04T11:27:12.000Z | 2022-02-04T11:27:12.000Z |
# License MIT (https://opensource.org/licenses/MIT).
from . import theme
from . import res_config
| 24.5 | 52 | 0.755102 |
f5481bedd0e79aa891329306b3c259a798e235cc | 843 | py | Python | user/emails.py | Ilmanfordinner/registration | d729f9a990d6f63dc29e14c1657dba87c3233971 | ["MIT"] | null | null | null | user/emails.py | Ilmanfordinner/registration | d729f9a990d6f63dc29e14c1657dba87c3233971 | ["MIT"] | null | null | null | user/emails.py | Ilmanfordinner/registration | d729f9a990d6f63dc29e14c1657dba87c3233971 | ["MIT"] | null | null | null |
from app import emails
def create_verify_email(user, activate_url):
c = {
'user': user,
'activate_url': activate_url
}
return emails.render_mail('mails/verify_email',
user.email, c)
def create_password_reset_email(user, reset_url):
c = {
'user': user,
'reset_url': reset_url
}
return emails.render_mail('mails/password_reset',
user.email, c)
def create_sponsor_link_email(user, user_sponsor_url, app_sponsor_url, sponsor_name):
c = {
'user': user,
'user_sponsor_url': user_sponsor_url,
'app_sponsor_url': app_sponsor_url,
'sponsor_name': sponsor_name,
}
return emails.render_mail('mails/sponsor_link',
user.email, c)
| 27.193548 | 86 | 0.571767 |
c7cc50d2ba6c162b8d8546cd6263ae75f3c991f2 | 23,246 | py | Python | Lib/site-packages/tensorflow_estimator/python/estimator/keras.py | caiyongji/py36-tf2.0rc | c5b4b364ba14214534228570e58ef96b1a8bb6dc | ["CNRI-Python-GPL-Compatible"] | 2 | 2019-08-04T20:28:14.000Z | 2019-10-27T23:26:42.000Z | Lib/site-packages/tensorflow_estimator/python/estimator/keras.py | caiyongji/py36-tf2.0rc | c5b4b364ba14214534228570e58ef96b1a8bb6dc | ["CNRI-Python-GPL-Compatible"] | null | null | null | Lib/site-packages/tensorflow_estimator/python/estimator/keras.py | caiyongji/py36-tf2.0rc | c5b4b364ba14214534228570e58ef96b1a8bb6dc | ["CNRI-Python-GPL-Compatible"] | 1 | 2020-11-04T03:16:29.000Z | 2020-11-04T03:16:29.000Z |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Home of estimator related functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from tensorflow.python.client import session
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import models
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import util as trackable_util
from tensorflow_estimator.python.estimator import estimator as estimator_lib
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator.export import export_lib
from tensorflow_estimator.python.estimator.mode_keys import ModeKeys
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def _cast_tensor_to_floatx(x):
"""Cast tensor to keras's floatx dtype if it is not already the same dtype."""
if x.dtype == K.floatx():
return x
else:
return math_ops.cast(x, K.floatx())
def _convert_tensor(x):
"""Create or cast tensor if needed."""
if not tensor_util.is_tensor(x):
# x is a numpy array
x = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(x)
return x
def _any_weight_initialized(keras_model):
"""Check if any weights has been initialized in the Keras model.
Args:
keras_model: An instance of compiled keras model.
Returns:
boolean, True if at least one weight has been initialized, else False.
  Currently keras initializes all weights at get_session().
"""
if keras_model is None:
return False
if ops.executing_eagerly_outside_functions():
return True
for layer in keras_model.layers:
for weight in layer.weights:
if hasattr(weight, '_keras_initialized'):
return True
return False
def _convert_estimator_io_to_keras(keras_model, features, labels):
"""Converts estimator features and labels to keras input and target tensors.
Args:
keras_model: a compiled `tf.keras.Model` instance, used to determine the
order of the returned lists.
features: Dict of tensors or `None`.
labels: Dict of tensors, a single tensor, or `None`.
Returns:
Tuple of (
list of input tensors or `None`,
list of target tensors or `None`,
list of sample weight tensors or `None`)
The order of tensors is determined by the order set in the keras model.
"""
def _to_ordered_tensor_list(obj, key_order, obj_name, order_name):
"""Convert obj to an ordered list of tensors.
Args:
obj: List, dict, or single tensor. May be `None`.
key_order: List of strings with the order to return (used if obj is a
dict).
obj_name: String name of object (e.g. "features" or "labels")
order_name: String name of the key order (e.g. "inputs" or "outputs")
Returns:
List of tensors, or `None`
Raises:
KeyError: If obj has invalid keys.
"""
if obj is None:
return None
elif isinstance(obj, (list, tuple)):
return [_convert_tensor(x) for x in obj]
elif isinstance(obj, dict):
# Ensure that the obj keys and keys in key_order are exactly the same.
different_keys = set(obj.keys()) ^ set(key_order)
if different_keys:
raise KeyError(
'The dictionary passed into {obj_name} does not have the expected '
'{order_name} keys defined in the keras model.'
'\n\tExpected keys: {order_keys}'
'\n\t{obj_name} keys: {obj_keys}'
'\n\tDifference: {different_keys}'.format(
order_name=order_name, order_keys=set(key_order),
obj_name=obj_name, obj_keys=set(obj.keys()),
different_keys=different_keys))
return [_convert_tensor(obj[key]) for key in key_order]
else: # Assume obj is a tensor.
return [_convert_tensor(obj)]
features, sample_weight_tensors = _extract_sample_weight_tensors(features)
input_names = None
output_names = None
if isinstance(features, dict):
input_names = (
keras_model.input_names if keras_model._is_graph_network else
['input_%d' % i for i in range(1, len(features) + 1)])
if isinstance(labels, dict):
output_names = (
keras_model.output_names if keras_model._is_graph_network else
['output_%d' % i for i in range(1, len(labels) + 1)])
input_tensors = _to_ordered_tensor_list(
features, input_names, 'features', 'inputs')
target_tensors = _to_ordered_tensor_list(
labels, output_names, 'labels', 'outputs')
return input_tensors, target_tensors, sample_weight_tensors
def _extract_sample_weight_tensors(features):
if isinstance(features, dict) and set(features.keys()) == {
'features', 'sample_weights'}:
feature_tensor = features['features']
sample_weight_tensors = features['sample_weights']
else:
feature_tensor = features
sample_weight_tensors = None
return feature_tensor, sample_weight_tensors
def _clone_and_build_model(mode,
keras_model,
custom_objects,
features=None,
labels=None,
optimizer_config=None):
"""Clone and build the given keras_model.
Args:
mode: training mode.
keras_model: an instance of compiled keras model.
custom_objects: Dictionary for custom objects.
features: Dict of tensors.
labels: Dict of tensors, or single tensor instance.
optimizer_config: Optimizer config dictionary, returned by
`optimizer.get_config()`. This is used when cloning a model with
an optimizer. Since `_clone_and_build_model` is called in a different
graph and session from the model, `optimizer.get_config()` may raise an
error during the attempt to serialize the optimizer hyperparameter values.
Returns:
The newly built model.
"""
# Set to True during training, False for inference or testing.
K.set_learning_phase(mode == ModeKeys.TRAIN)
input_tensors, target_tensors, sample_weight_tensors = (
_convert_estimator_io_to_keras(keras_model, features, labels))
compile_clone = (mode != ModeKeys.PREDICT)
global_step = None
if compile_clone:
# Set iterations to the global step created by tf.train.create_global_step()
# which is automatically run in the estimator framework.
global_step = training_util.get_or_create_global_step()
K.track_variable(global_step)
clone = models.clone_and_build_model(
keras_model, input_tensors, target_tensors, custom_objects,
compile_clone=compile_clone,
in_place_reset=(not keras_model._is_graph_network),
optimizer_iterations=global_step,
optimizer_config=optimizer_config)
if sample_weight_tensors is not None:
sample_weight_tensors = training_utils.standardize_sample_weights(
sample_weight_tensors, clone.output_names)
# Update calculated loss (model.total_loss) to include sample weights.
clone._compile_weights_loss_and_weighted_metrics(sample_weight_tensors)
return clone
def _convert_keras_metrics_to_estimator(model):
"""Convert metrics from a Keras model to ops used by the Estimator framework.
Args:
model: A `tf.keras.Model` object.
Returns:
Dictionary mapping metric names to tuples of (value, update) ops. May return
`None` if the model does not contain any metrics.
"""
if not getattr(model, '_compile_metrics', None):
return None
# We are not using model.metrics here because we want to exclude the metrics
# added using `add_metric` API.
# TODO(psv): Remove this condition here after
# _compile_stateful_metric_functions becomes obsolete with the Estimator repo.
if hasattr(model, '_compile_stateful_metric_functions'):
return {m.name: m for m in model._compile_stateful_metric_functions}
return {m.name: m for m in model._compile_metric_functions}
def _create_keras_model_fn(keras_model, custom_objects=None,
save_object_ckpt=False):
"""Creates model_fn for keras Estimator.
Args:
keras_model: an instance of compiled keras model.
custom_objects: Dictionary for custom objects.
save_object_ckpt: Whether to save an object-based checkpoint.
Returns:
The model_fn for a keras Estimator.
"""
# Get optimizer config in the current context (since model_fn is called in the
# estimator graph and session). OptimizerV2 objects serialize variable/tensor
  # hyperparameters in their configs, resulting in wrong-session errors during
# model cloning.
try:
optimizer_config = keras_model.optimizer.get_config()
except (NotImplementedError, AttributeError):
# TFOptimizers and other custom optimizers do not have a config.
optimizer_config = None
def model_fn(features, labels, mode):
"""model_fn for keras Estimator."""
model = _clone_and_build_model(
mode=mode,
keras_model=keras_model,
custom_objects=custom_objects,
features=features,
labels=labels,
optimizer_config=optimizer_config)
model_output_names = []
# We need to make sure that the output names of the last layer in the model
# is the same for each of the cloned models. This is required for mirrored
# strategy when we call regroup.
if distribution_strategy_context.has_strategy():
for name in model.output_names:
name = re.compile(r'_\d$').sub('', name)
model_output_names.append(name)
else:
model_output_names = model.output_names
# Get inputs to EstimatorSpec
predictions = dict(zip(model_output_names, model.outputs))
loss = None
train_op = None
eval_metric_ops = None
# Set loss and metric only during train and evaluate.
if mode is not ModeKeys.PREDICT:
if mode is ModeKeys.TRAIN:
model._make_train_function() # pylint: disable=protected-access
else:
model._make_test_function() # pylint: disable=protected-access
loss = model.total_loss
eval_metric_ops = _convert_keras_metrics_to_estimator(model)
# Set train_op only during train.
if mode is ModeKeys.TRAIN:
train_op = model.train_function.updates_op
if not model._is_graph_network:
# Reset model state to original state,
# to avoid `model_fn` being destructive for the initial model argument.
models.in_place_subclassed_model_state_restoration(keras_model)
scaffold = None
if save_object_ckpt:
model._track_trackable(training_util.get_global_step(),
'estimator_global_step')
# Create saver that maps variable names to object-checkpoint keys.
object_graph = graph_view.ObjectGraphView(model)
var_list = object_graph.frozen_saveable_objects()
saver = saver_lib.Saver(var_list=var_list, sharded=True)
saver._object_restore_saver = trackable_util.frozen_saver(model)
scaffold = monitored_session.Scaffold(saver=saver)
return model_fn_lib.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs={
_DEFAULT_SERVING_KEY:
export_lib.PredictOutput(predictions)
},
scaffold=scaffold
)
return model_fn
def _save_first_checkpoint(keras_model, custom_objects, config,
save_object_ckpt):
"""Save first checkpoint for the keras Estimator.
Args:
keras_model: an instance of compiled keras model.
custom_objects: Dictionary for custom objects.
config: Estimator config.
save_object_ckpt: Whether to save an object-based checkpoint.
Returns:
The path where keras model checkpoint is saved.
"""
# save checkpoint into subdirectory to allow warm start
keras_model_dir = os.path.join(config.model_dir, 'keras')
# Load weights and save to checkpoint if there is no checkpoint
latest_path = checkpoint_management.latest_checkpoint(keras_model_dir)
if not latest_path:
keras_weights = None
if _any_weight_initialized(keras_model):
keras_weights = keras_model.get_weights()
if not gfile.IsDirectory(keras_model_dir):
gfile.MakeDirs(keras_model_dir)
with ops.Graph().as_default():
random_seed.set_random_seed(config.tf_random_seed)
training_util.create_global_step()
model = _clone_and_build_model(ModeKeys.TRAIN, keras_model,
custom_objects)
# Init the train_function outside of the context of session. This is due
# to the fact that train function will update the graph by adding backprop
      # parts. This will potentially try to update a node in the forward graph,
      # which will fail if it is done within the same session.
# Always create the train_function here since the model is just cloned.
# See https://github.com/tensorflow/tensorflow/issues/27750 for details.
model._make_train_function() # pylint: disable=protected-access
# save to checkpoint
with session.Session(config=config.session_config) as sess:
if keras_weights:
model.set_weights(keras_weights)
# model._make_train_function() will potentially create the optimizer
# variable, which will require another variable initialization.
K._initialize_variables(sess) # pylint: disable=protected-access
if save_object_ckpt:
model._track_trackable( # pylint: disable=protected-access
training_util.get_global_step(), 'estimator_global_step')
latest_path = os.path.join(keras_model_dir, 'keras_model.ckpt')
model.save_weights(latest_path)
else:
saver = saver_lib.Saver()
latest_path = os.path.join(keras_model_dir, 'keras_model.ckpt')
saver.save(sess, latest_path)
return latest_path
def _get_file_from_google_storage(keras_model_path, model_dir):
"""Get file from google storage and download to local file.
Args:
keras_model_path: a google storage path for compiled keras model.
model_dir: the directory from estimator config.
Returns:
The path where keras model is saved.
Raises:
ValueError: if storage object name does not end with .h5.
"""
try:
from google.cloud import storage # pylint:disable=g-import-not-at-top
except ImportError:
raise TypeError('Could not save model to Google cloud storage; please '
'install `google-cloud-storage` via '
'`pip install google-cloud-storage`.')
storage_client = storage.Client()
path, blob_name = os.path.split(keras_model_path)
_, bucket_name = os.path.split(path)
keras_model_dir = os.path.join(model_dir, 'keras')
if not gfile.Exists(keras_model_dir):
gfile.MakeDirs(keras_model_dir)
file_name = os.path.join(keras_model_dir, 'keras_model.h5')
try:
blob = storage_client.get_bucket(bucket_name).blob(blob_name)
blob.download_to_filename(file_name)
except:
raise ValueError('Failed to download keras model, please check '
'environment variable GOOGLE_APPLICATION_CREDENTIALS '
'and model path storage.googleapis.com/{bucket}/{object}.')
logging.info('Saving model to {}'.format(file_name))
del storage_client
return file_name
# LINT.IfChange
def model_to_estimator(keras_model=None,
keras_model_path=None,
custom_objects=None,
model_dir=None,
config=None,
checkpoint_format=None,
use_v2_estimator=False):
# LINT.ThenChange(//tensorflow/python/keras/estimator/__init__.py)
"""Constructs an `Estimator` instance from given keras model.
For usage example, please see:
[Creating estimators from Keras
Models](https://tensorflow.org/guide/estimators#model_to_estimator).
__Sample Weights__
Estimators returned by `model_to_estimator` are configured to handle sample
weights (similar to `keras_model.fit(x, y, sample_weights)`). To pass sample
weights when training or evaluating the Estimator, the first item returned by
the input function should be a dictionary with keys `features` and
`sample_weights`. Example below:
```
keras_model = tf.keras.Model(...)
keras_model.compile(...)
estimator = tf.keras.estimator.model_to_estimator(keras_model)
def input_fn():
return dataset_ops.Dataset.from_tensors(
({'features': features, 'sample_weights': sample_weights},
targets))
estimator.train(input_fn, steps=1)
```
Args:
keras_model: A compiled Keras model object. This argument is mutually
exclusive with `keras_model_path`.
keras_model_path: Path to a compiled Keras model saved on disk, in HDF5
format, which can be generated with the `save()` method of a Keras model.
This argument is mutually exclusive with `keras_model`.
custom_objects: Dictionary for custom objects.
model_dir: Directory to save `Estimator` model parameters, graph, summary
files for TensorBoard, etc.
config: `RunConfig` to config `Estimator`.
checkpoint_format: Sets the format of the checkpoint saved by the estimator
when training. May be `saver` or `checkpoint`, depending on whether to
save checkpoints from `tf.compat.v1.train.Saver` or `tf.train.Checkpoint`.
The default is `checkpoint`. Estimators use name-based `tf.train.Saver`
checkpoints, while Keras models use object-based checkpoints from
`tf.train.Checkpoint`. Currently, saving object-based checkpoints from
`model_to_estimator` is only supported by Functional and Sequential
models.
use_v2_estimator: Whether to convert the model to a V2 Estimator or V1
Estimator.
Returns:
An Estimator from given keras model.
Raises:
ValueError: if neither keras_model nor keras_model_path was given.
ValueError: if both keras_model and keras_model_path was given.
ValueError: if the keras_model_path is a GCS URI.
ValueError: if keras_model has not been compiled.
ValueError: if an invalid checkpoint_format was given.
"""
if not (keras_model or keras_model_path):
raise ValueError(
'Either `keras_model` or `keras_model_path` needs to be provided.')
if keras_model and keras_model_path:
raise ValueError(
        'Please specify either `keras_model` or `keras_model_path`, '
'but not both.')
config = estimator_lib.maybe_overwrite_model_dir_and_session_config(
config, model_dir)
if not keras_model:
if keras_model_path.startswith(
'gs://') or 'storage.googleapis.com' in keras_model_path:
keras_model_path = _get_file_from_google_storage(keras_model_path,
config.model_dir)
logging.info('Loading models from %s', keras_model_path)
keras_model = models.load_model(keras_model_path)
else:
logging.info('Using the Keras model provided.')
keras_model = keras_model
if checkpoint_format is None or checkpoint_format == 'checkpoint':
if not (keras_model._is_graph_network or
isinstance(keras_model, models.Sequential)):
raise ValueError('Object-based checkpoints are currently not supported '
'with subclassed models.')
save_object_ckpt = True
elif checkpoint_format == 'saver':
save_object_ckpt = False
else:
raise ValueError(
'Checkpoint format must be one of "checkpoint" or "saver". Got {}'
.format(checkpoint_format))
if not hasattr(keras_model, 'optimizer') or not keras_model.optimizer:
raise ValueError(
'The given keras model has not been compiled yet. '
'Please compile the model with `model.compile()` '
'before calling `model_to_estimator()`.')
keras_model_fn = _create_keras_model_fn(keras_model, custom_objects,
save_object_ckpt)
if _any_weight_initialized(keras_model):
# Warn if config passed to estimator tries to update GPUOptions. If a
# session has already been created, the GPUOptions passed to the first
# session sticks.
if config.session_config.HasField('gpu_options'):
logging.warning(
'The Keras backend session has already been set. '
'The _session_config passed to model_to_estimator will not be used.')
else:
# Pass the config into keras backend's default session.
sess = session.Session(config=config.session_config)
K.set_session(sess)
warm_start_path = None
if keras_model._is_graph_network:
warm_start_path = _save_first_checkpoint(keras_model, custom_objects,
config, save_object_ckpt)
elif keras_model.built:
logging.warning('You are creating an Estimator from a Keras model manually '
'subclassed from `Model`, that was already called on some '
'inputs (and thus already had weights). We are currently '
'unable to preserve the model\'s state (its weights) as '
'part of the estimator in this case. Be warned that the '
'estimator has been created using a freshly initialized '
'version of your model.\n'
'Note that this doesn\'t affect the state of the model '
'instance you passed as `keras_model` argument.')
if use_v2_estimator:
estimator_cls = estimator_lib.EstimatorV2
else:
estimator_cls = estimator_lib.Estimator
estimator = estimator_cls(
keras_model_fn, config=config, warm_start_from=warm_start_path)
return estimator
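# A minimal sketch (not part of the original module) of the conversion flow
# described in model_to_estimator()'s docstring: the Keras model must be
# compiled before it is converted. The layer sizes, optimizer and loss are
# placeholders, and the layers import is assumed to be available here.
def _example_model_to_estimator():
  from tensorflow.python.keras import layers  # assumed importable alongside this module
  model = models.Sequential([layers.Dense(1, input_shape=(4,))])
  model.compile(optimizer='rmsprop', loss='mse')
  return model_to_estimator(keras_model=model, checkpoint_format='saver')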
| 39.601363 | 80 | 0.711004 |
4f9a96e90e4645ad96c1b598be4e64678c847ff0 | 3,577 | py | Python | chb/x86/opcodes/X86PushRegisters.py | kestreltechnology/CodeHawk-Binary | aa0b2534e0318e5fb3770ec7b4d78feb0feb2394 | ["MIT"] | null | null | null | chb/x86/opcodes/X86PushRegisters.py | kestreltechnology/CodeHawk-Binary | aa0b2534e0318e5fb3770ec7b4d78feb0feb2394 | ["MIT"] | null | null | null | chb/x86/opcodes/X86PushRegisters.py | kestreltechnology/CodeHawk-Binary | aa0b2534e0318e5fb3770ec7b4d78feb0feb2394 | ["MIT"] | null | null | null |
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020 Henny Sipma
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from typing import List, TYPE_CHECKING
from chb.app.InstrXData import InstrXData
import chb.simulation.SimUtil as SU
import chb.simulation.SimValue as SV
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
from chb.x86.X86DictionaryRecord import x86registry
from chb.x86.X86Opcode import X86Opcode
if TYPE_CHECKING:
from chb.x86.X86Dictionary import X86Dictionary
from chb.x86.simulation.X86SimulationState import X86SimulationState
@x86registry.register_tag("pusha", X86Opcode)
class X86PushRegisters(X86Opcode):
"""PUSHA. """
def __init__(
self,
x86d: "X86Dictionary",
ixval: IndexedTableValue) -> None:
X86Opcode.__init__(self, x86d, ixval)
def get_annotation(self, xdata: InstrXData) -> str:
return 'push eax,ecx,edx,ebx,esp,ebp,esi,edi'
# --------------------------------------------------------------------------
# Pushes the contents of the general-purpose registers onto the stack. The
# registers are stored on the stack in the following order: EAX, ECX, EDX,
# EBX, ESP (original value), EBP, ESI, and EDI (if the current operand-size
# attribute is 32) and AX, CX, DX, BX, SP (original value), BP, SI, and DI
# (if the operand-size attribute is 16).
#
# Flags affected: None
# --------------------------------------------------------------------------
def simulate(self, iaddr: str, simstate: "X86SimulationState") -> None:
espval = simstate.get_regval(iaddr, 'esp')
simstate.push_value(iaddr, simstate.get_regval(iaddr, 'eax'))
simstate.push_value(iaddr, simstate.get_regval(iaddr, 'ecx'))
simstate.push_value(iaddr, simstate.get_regval(iaddr, 'edx'))
simstate.push_value(iaddr, simstate.get_regval(iaddr, 'ebx'))
simstate.push_value(iaddr, espval)
simstate.push_value(iaddr, simstate.get_regval(iaddr, 'ebp'))
simstate.push_value(iaddr, simstate.get_regval(iaddr, 'esi'))
simstate.push_value(iaddr, simstate.get_regval(iaddr, 'edi'))
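# A small reference sketch (not part of the original module): the push order
# implemented by simulate() above, matching the documented PUSHA semantics.
PUSHA_PUSH_ORDER = [
    'eax', 'ecx', 'edx', 'ebx',
    'esp',  # the value of ESP captured before any of the pushes
    'ebp', 'esi', 'edi',
]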
| 44.160494 | 80 | 0.649147 |
a4a661d0b8ca0871df53e909b8f2f1490ebd600d | 5,487 | py | Python | khan/pipeline/tests/test_images.py | zachariahmilby/keck-hires-aurora-pipeline | 51aae65cc6be396b0ef3de12f76220576248fea9 | ["MIT"] | null | null | null | khan/pipeline/tests/test_images.py | zachariahmilby/keck-hires-aurora-pipeline | 51aae65cc6be396b0ef3de12f76220576248fea9 | ["MIT"] | null | null | null | khan/pipeline/tests/test_images.py | zachariahmilby/keck-hires-aurora-pipeline | 51aae65cc6be396b0ef3de12f76220576248fea9 | ["MIT"] | null | null | null |
import pytest
from astropy.io import fits
import numpy as np
from numpy.testing import assert_array_equal
from khan.pipeline.images import parse_mosaic_detector_slice, \
determine_detector_layout, get_mosaic_detector_corner_coordinates, \
reformat_observers, CCDImage
np.random.seed(1701)
class TestParseMosaicDetectorSlice:
@pytest.fixture
def good_slice_string(self):
yield '[5:683,1:4096]'
def test_correct_slice_returns_correct_slice(self, good_slice_string):
assert parse_mosaic_detector_slice(good_slice_string) == \
(slice(4, 683, 1), slice(0, 4096, 1))
class TestDetermineDetectorLayout:
@pytest.fixture
def legacy_hdul(self):
yield fits.HDUList([fits.PrimaryHDU()])
@pytest.fixture
def mosaic_hdul(self):
yield fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(),
fits.ImageHDU(), fits.ImageHDU()])
@pytest.fixture
def one_detector_mosaic_hdul(self):
yield fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU()])
@pytest.fixture
def two_detector_mosaic_hdul(self):
yield fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(),
fits.ImageHDU()])
@pytest.fixture
def four_detector_mosaic_hdul(self):
yield fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(),
fits.ImageHDU(), fits.ImageHDU(), fits.ImageHDU()])
def test_if_legacy_identified(self, legacy_hdul):
assert determine_detector_layout(legacy_hdul) == 'legacy'
def test_if_mosaic_identified(self, mosaic_hdul):
assert determine_detector_layout(mosaic_hdul) == 'mosaic'
def test_if_one_detector_mosaic_raises_exception(self,
one_detector_mosaic_hdul):
with pytest.raises(Exception):
determine_detector_layout(one_detector_mosaic_hdul)
def test_if_two_detector_mosaic_raises_exception(self,
two_detector_mosaic_hdul):
with pytest.raises(Exception):
determine_detector_layout(two_detector_mosaic_hdul)
def test_if_four_detector_mosaic_raises_exception(
self, four_detector_mosaic_hdul):
with pytest.raises(Exception):
determine_detector_layout(four_detector_mosaic_hdul)
class TestGetMosaicDetectorCornerCoordinates:
@pytest.fixture
def image_header_whole_pixel(self):
yield fits.Header({'CRVAL1G': 4141.0, 'CRVAL2G': 1.0})
@pytest.fixture
def image_header_half_pixel(self):
yield fits.Header({'CRVAL1G': 4141.5, 'CRVAL2G': 0.5})
def test_correct_position_with_2x_spatial_binning(
self, image_header_whole_pixel):
assert assert_array_equal(
get_mosaic_detector_corner_coordinates(
image_header_whole_pixel, binning=np.array([2, 1])),
np.array([1047, 0])) is None
def test_correct_position_with_2x_spatial_binning_and_decimal_coordinate(
self, image_header_half_pixel):
assert assert_array_equal(
get_mosaic_detector_corner_coordinates(
image_header_half_pixel, binning=np.array([2, 1])),
np.array([1047, 0])) is None
def test_correct_position_with_3x_spatial_binning(
self, image_header_whole_pixel):
assert assert_array_equal(
get_mosaic_detector_corner_coordinates(
image_header_whole_pixel, binning=np.array([3, 1])),
np.array([698, 0])) is None
def test_correct_position_with_3x_spatial_binning_and_decimal_coordinate(
self, image_header_half_pixel):
assert assert_array_equal(
get_mosaic_detector_corner_coordinates(
image_header_half_pixel, binning=np.array([3, 1])),
np.array([698, 0])) is None
class TestReformatObservers:
@pytest.fixture
def observers_with_commas_no_spaces(self):
yield 'de Kleer,Milby,Camarca,Schmidt,Brown'
@pytest.fixture
def observers_with_commas_and_spaces(self):
yield 'de Kleer, Milby, Camarca, Schmidt, Brown'
def test_observers_with_commas_no_spaces_returns_with_spaces(
self, observers_with_commas_no_spaces):
assert reformat_observers(observers_with_commas_no_spaces) == \
'de Kleer, Milby, Camarca, Schmidt, Brown'
def test_observers_with_commas_and_spaces_returns_unchanged(
self, observers_with_commas_and_spaces):
assert reformat_observers(observers_with_commas_and_spaces) == \
observers_with_commas_and_spaces
class TestCCDImage:
@pytest.fixture
def sample_data(self):
yield np.random.rand(2, 3)
@pytest.fixture
def sample_anc(self):
yield {'test': 0.0}
def test_if_data_type_is_array(self, sample_data, sample_anc):
assert type(CCDImage(sample_data, sample_anc).data) is np.ndarray
def test_if_data_matches_expected_array(self, sample_data, sample_anc):
assert assert_array_equal(CCDImage(sample_data, sample_anc).data,
sample_data) is None
def test_if_ancillary_type_is_dict(self, sample_data, sample_anc):
assert type(CCDImage(sample_data, sample_anc).anc) is dict
def test_if_ancillary_information_accessible(
self, sample_data, sample_anc):
assert CCDImage(sample_data, sample_anc).anc['test'] == 0.0
| 35.62987 | 79 | 0.684892 |
80ee6d47654abf9c9d5b373f0491dade87360bda | 430 | py | Python | Corona World Map/Goodle Maps.py | CoderAryanAnand/fun_projects | 6891320a373bad539b6e7926ca868d56c560bc20 | ["MIT"] | null | null | null | Corona World Map/Goodle Maps.py | CoderAryanAnand/fun_projects | 6891320a373bad539b6e7926ca868d56c560bc20 | ["MIT"] | null | null | null | Corona World Map/Goodle Maps.py | CoderAryanAnand/fun_projects | 6891320a373bad539b6e7926ca868d56c560bc20 | ["MIT"] | null | null | null |
import pandas as pd
import gmaps
from IPython.display import display
data = pd.read_csv("data.csv")
df = pd.DataFrame(data)
df.head()
print(df.shape)
mylist = ["5/27/20"]
df2 = df[df[mylist].ne(0).all(1)]
print(df2)
gmaps.configure(api_key="")
locations = df2[['Lat', 'Long']]
weights = df2["5/27/20"]
fig = gmaps.figure()
fig.add_layer(gmaps.heatmap_layer(locations, weights=weights, max_intensity=100, point_radius=20.0))
fig
| 22.631579 | 100 | 0.718605 |
56fb58c8e59e3e99af1cb33271fb1afe2510ec1a | 17,309 | py | Python | redash/tasks/queries.py | pkuyangchao/redash | 1640b1e927a4d10ce9ae5c24b2d015734c696b08 | ["BSD-2-Clause"] | null | null | null | redash/tasks/queries.py | pkuyangchao/redash | 1640b1e927a4d10ce9ae5c24b2d015734c696b08 | ["BSD-2-Clause"] | 4 | 2020-06-18T15:31:02.000Z | 2021-03-25T23:31:41.000Z | redash/tasks/queries.py | pkuyangchao/redash | 1640b1e927a4d10ce9ae5c24b2d015734c696b08 | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import signal
import time
import redis
from celery.exceptions import SoftTimeLimitExceeded, TimeLimitExceeded
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from six import text_type
from redash import models, redis_connection, settings, statsd_client
from redash.models.parameterized_query import InvalidParameterError, QueryDetachedFromDataSourceError
from redash.query_runner import InterruptException
from redash.tasks.alerts import check_alerts_for_query
from redash.tasks.failure_report import notify_of_failure
from redash.utils import gen_query_hash, json_dumps, utcnow, mustache_render
from redash.worker import celery
logger = get_task_logger(__name__)
TIMEOUT_MESSAGE = "Query exceeded Redash query execution time limit."
def _job_lock_id(query_hash, data_source_id):
return "query_hash_job:%s:%s" % (data_source_id, query_hash)
def _unlock(query_hash, data_source_id):
redis_connection.delete(_job_lock_id(query_hash, data_source_id))
class QueryTask(object):
# TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
STATUSES = {
'PENDING': 1,
'STARTED': 2,
'SUCCESS': 3,
'FAILURE': 4,
'REVOKED': 4
}
def __init__(self, job_id=None, async_result=None):
if async_result:
self._async_result = async_result
else:
self._async_result = AsyncResult(job_id, app=celery)
@property
def id(self):
return self._async_result.id
def to_dict(self):
task_info = self._async_result._get_task_meta()
result, task_status = task_info['result'], task_info['status']
if task_status == 'STARTED':
updated_at = result.get('start_time', 0)
else:
updated_at = 0
status = self.STATUSES[task_status]
if isinstance(result, (TimeLimitExceeded, SoftTimeLimitExceeded)):
error = TIMEOUT_MESSAGE
status = 4
elif isinstance(result, Exception):
error = result.message
status = 4
elif task_status == 'REVOKED':
error = 'Query execution cancelled.'
else:
error = ''
if task_status == 'SUCCESS' and not error:
query_result_id = result
else:
query_result_id = None
return {
'id': self._async_result.id,
'updated_at': updated_at,
'status': status,
'error': error,
'query_result_id': query_result_id,
}
@property
def is_cancelled(self):
return self._async_result.status == 'REVOKED'
@property
def celery_status(self):
return self._async_result.status
def ready(self):
return self._async_result.ready()
def cancel(self):
return self._async_result.revoke(terminate=True, signal='SIGINT')
def enqueue_query(query, data_source, user_id, is_api_key=False, scheduled_query=None, metadata={}):
query_hash = gen_query_hash(query)
logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
try_count = 0
job = None
while try_count < 5:
try_count += 1
pipe = redis_connection.pipeline()
try:
pipe.watch(_job_lock_id(query_hash, data_source.id))
job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
if job_id:
logging.info("[%s] Found existing job: %s", query_hash, job_id)
job = QueryTask(job_id=job_id)
if job.ready():
logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
redis_connection.delete(_job_lock_id(query_hash, data_source.id))
job = None
if not job:
pipe.multi()
if scheduled_query:
queue_name = data_source.scheduled_queue_name
scheduled_query_id = scheduled_query.id
else:
queue_name = data_source.queue_name
scheduled_query_id = None
args = (query, data_source.id, metadata, user_id, scheduled_query_id, is_api_key)
argsrepr = json_dumps({
'org_id': data_source.org_id,
'data_source_id': data_source.id,
'enqueue_time': time.time(),
'scheduled': scheduled_query_id is not None,
'query_id': metadata.get('Query ID'),
'user_id': user_id
})
time_limit = settings.dynamic_settings.query_time_limit(scheduled_query, user_id, data_source.org_id)
result = execute_query.apply_async(args=args,
argsrepr=argsrepr,
queue=queue_name,
soft_time_limit=time_limit)
job = QueryTask(async_result=result)
logging.info("[%s] Created new job: %s", query_hash, job.id)
pipe.set(_job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
pipe.execute()
break
except redis.WatchError:
continue
if not job:
logging.error("[Manager][%s] Failed adding job for query.", query_hash)
return job
@celery.task(name="redash.tasks.empty_schedules")
def empty_schedules():
logger.info("Deleting schedules of past scheduled queries...")
queries = models.Query.past_scheduled_queries()
for query in queries:
query.schedule = None
models.db.session.commit()
logger.info("Deleted %d schedules.", len(queries))
@celery.task(name="redash.tasks.refresh_queries")
def refresh_queries():
logger.info("Refreshing queries...")
outdated_queries_count = 0
query_ids = []
with statsd_client.timer('manager.outdated_queries_lookup'):
for query in models.Query.outdated_queries():
if settings.FEATURE_DISABLE_REFRESH_QUERIES:
logging.info("Disabled refresh queries.")
elif query.org.is_disabled:
logging.debug("Skipping refresh of %s because org is disabled.", query.id)
elif query.data_source is None:
logging.debug("Skipping refresh of %s because the datasource is none.", query.id)
elif query.data_source.paused:
logging.debug("Skipping refresh of %s because datasource - %s is paused (%s).",
query.id, query.data_source.name, query.data_source.pause_reason)
else:
query_text = query.query_text
parameters = {p['name']: p.get('value') for p in query.parameters}
if any(parameters):
try:
query_text = query.parameterized.apply(parameters).query
except InvalidParameterError as e:
error = u"Skipping refresh of {} because of invalid parameters: {}".format(query.id, e.message)
track_failure(query, error)
continue
except QueryDetachedFromDataSourceError as e:
error = ("Skipping refresh of {} because a related dropdown "
"query ({}) is unattached to any datasource.").format(query.id, e.query_id)
track_failure(query, error)
continue
enqueue_query(query_text, query.data_source, query.user_id,
scheduled_query=query,
metadata={'Query ID': query.id, 'Username': 'Scheduled'})
query_ids.append(query.id)
outdated_queries_count += 1
statsd_client.gauge('manager.outdated_queries', outdated_queries_count)
logger.info("Done refreshing queries. Found %d outdated queries: %s" % (outdated_queries_count, query_ids))
status = redis_connection.hgetall('redash:status')
now = time.time()
redis_connection.hmset('redash:status', {
'outdated_queries_count': outdated_queries_count,
'last_refresh_at': now,
'query_ids': json_dumps(query_ids)
})
statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))
@celery.task(name="redash.tasks.cleanup_query_results")
def cleanup_query_results():
"""
Job to cleanup unused query results -- such that no query links to them anymore, and older than
settings.QUERY_RESULTS_MAX_AGE (a week by default, so it's less likely to be open in someone's browser and be used).
Each time the job deletes only settings.QUERY_RESULTS_CLEANUP_COUNT (100 by default) query results so it won't choke
the database in case of many such results.
"""
logging.info("Running query results clean up (removing maximum of %d unused results, that are %d days old or more)",
settings.QUERY_RESULTS_CLEANUP_COUNT, settings.QUERY_RESULTS_CLEANUP_MAX_AGE)
unused_query_results = models.QueryResult.unused(settings.QUERY_RESULTS_CLEANUP_MAX_AGE).limit(settings.QUERY_RESULTS_CLEANUP_COUNT)
deleted_count = models.QueryResult.query.filter(
models.QueryResult.id.in_(unused_query_results.subquery())
).delete(synchronize_session=False)
models.db.session.commit()
logger.info("Deleted %d unused query results.", deleted_count)
@celery.task(name="redash.tasks.refresh_schema", time_limit=90, soft_time_limit=60)
def refresh_schema(data_source_id):
ds = models.DataSource.get_by_id(data_source_id)
logger.info(u"task=refresh_schema state=start ds_id=%s", ds.id)
start_time = time.time()
try:
ds.get_schema(refresh=True)
logger.info(u"task=refresh_schema state=finished ds_id=%s runtime=%.2f", ds.id, time.time() - start_time)
statsd_client.incr('refresh_schema.success')
except SoftTimeLimitExceeded:
logger.info(u"task=refresh_schema state=timeout ds_id=%s runtime=%.2f", ds.id, time.time() - start_time)
statsd_client.incr('refresh_schema.timeout')
except Exception:
logger.warning(u"Failed refreshing schema for the data source: %s", ds.name, exc_info=1)
statsd_client.incr('refresh_schema.error')
logger.info(u"task=refresh_schema state=failed ds_id=%s runtime=%.2f", ds.id, time.time() - start_time)
@celery.task(name="redash.tasks.refresh_schemas")
def refresh_schemas():
"""
Refreshes the data sources schemas.
"""
blacklist = [int(ds_id) for ds_id in redis_connection.smembers('data_sources:schema:blacklist') if ds_id]
global_start_time = time.time()
logger.info(u"task=refresh_schemas state=start")
for ds in models.DataSource.query:
if ds.paused:
logger.info(u"task=refresh_schema state=skip ds_id=%s reason=paused(%s)", ds.id, ds.pause_reason)
elif ds.id in blacklist:
logger.info(u"task=refresh_schema state=skip ds_id=%s reason=blacklist", ds.id)
elif ds.org.is_disabled:
logger.info(u"task=refresh_schema state=skip ds_id=%s reason=org_disabled", ds.id)
else:
refresh_schema.apply_async(args=(ds.id,), queue=settings.SCHEMAS_REFRESH_QUEUE)
logger.info(u"task=refresh_schemas state=finish total_runtime=%.2f", time.time() - global_start_time)
def signal_handler(*args):
raise InterruptException
class QueryExecutionError(Exception):
pass
def _resolve_user(user_id, is_api_key, query_id):
if user_id is not None:
if is_api_key:
api_key = user_id
if query_id is not None:
q = models.Query.get_by_id(query_id)
else:
q = models.Query.by_api_key(api_key)
return models.ApiUser(api_key, q.org, q.groups)
else:
return models.User.get_by_id(user_id)
else:
return None
def track_failure(query, error):
logging.debug(error)
query.schedule_failures += 1
models.db.session.add(query)
models.db.session.commit()
notify_of_failure(error, query)
# We could have created this as a celery.Task-derived class and had it act as the task itself, but that might result in
# weird issues since the task class is created once per process, so we decided to use a plain object instead.
class QueryExecutor(object):
def __init__(self, task, query, data_source_id, user_id, is_api_key, metadata,
scheduled_query):
self.task = task
self.query = query
self.data_source_id = data_source_id
self.metadata = metadata
self.data_source = self._load_data_source()
self.user = _resolve_user(user_id, is_api_key, metadata.get('Query ID'))
# Close DB connection to prevent holding a connection for a long time while the query is executing.
models.db.session.close()
self.query_hash = gen_query_hash(self.query)
self.scheduled_query = scheduled_query
# Load existing tracker or create a new one if the job was created before code update:
if scheduled_query:
models.scheduled_queries_executions.update(scheduled_query.id)
def run(self):
signal.signal(signal.SIGINT, signal_handler)
started_at = time.time()
logger.debug("Executing query:\n%s", self.query)
self._log_progress('executing_query')
query_runner = self.data_source.query_runner
annotated_query = self._annotate_query(query_runner)
try:
data, error = query_runner.run_query(annotated_query, self.user)
except Exception as e:
if isinstance(e, SoftTimeLimitExceeded):
error = TIMEOUT_MESSAGE
else:
error = text_type(e)
data = None
logging.warning('Unexpected error while running query:', exc_info=1)
run_time = time.time() - started_at
logger.info(u"task=execute_query query_hash=%s data_length=%s error=[%s]", self.query_hash, data and len(data), error)
_unlock(self.query_hash, self.data_source.id)
if error is not None and data is None:
result = QueryExecutionError(error)
if self.scheduled_query is not None:
self.scheduled_query = models.db.session.merge(self.scheduled_query, load=False)
track_failure(self.scheduled_query, error)
raise result
else:
if (self.scheduled_query and self.scheduled_query.schedule_failures > 0):
self.scheduled_query = models.db.session.merge(self.scheduled_query, load=False)
self.scheduled_query.schedule_failures = 0
models.db.session.add(self.scheduled_query)
query_result, updated_query_ids = models.QueryResult.store_result(
self.data_source.org_id, self.data_source,
self.query_hash, self.query, data,
run_time, utcnow())
models.db.session.commit() # make sure that alert sees the latest query result
self._log_progress('checking_alerts')
for query_id in updated_query_ids:
check_alerts_for_query.delay(query_id)
self._log_progress('finished')
result = query_result.id
models.db.session.commit()
return result
def _annotate_query(self, query_runner):
self.metadata['Task ID'] = self.task.request.id
self.metadata['Query Hash'] = self.query_hash
self.metadata['Queue'] = self.task.request.delivery_info['routing_key']
self.metadata['Scheduled'] = self.scheduled_query is not None
return query_runner.annotate_query(self.query, self.metadata)
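    # Clarifying note (added): annotate_query typically embeds this metadata into the query text itself
    # (for most SQL-speaking runners, as a leading comment), so the Task ID, queue and user show up in
    # the database's own query log next to the statement being executed.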
def _log_progress(self, state):
logger.info(
u"task=execute_query state=%s query_hash=%s type=%s ds_id=%d "
"task_id=%s queue=%s query_id=%s username=%s",
state, self.query_hash, self.data_source.type, self.data_source.id,
self.task.request.id,
self.task.request.delivery_info['routing_key'],
self.metadata.get('Query ID', 'unknown'),
self.metadata.get('Username', 'unknown'))
def _load_data_source(self):
logger.info("task=execute_query state=load_ds ds_id=%d", self.data_source_id)
return models.DataSource.query.get(self.data_source_id)
# user_id is added last as a keyword argument for backward compatibility -- to support executing jobs that were
# submitted before the upgrade to this version.
@celery.task(name="redash.tasks.execute_query", bind=True, track_started=True)
def execute_query(self, query, data_source_id, metadata, user_id=None,
scheduled_query_id=None, is_api_key=False):
if scheduled_query_id is not None:
scheduled_query = models.Query.query.get(scheduled_query_id)
else:
scheduled_query = None
return QueryExecutor(self, query, data_source_id, user_id, is_api_key, metadata,
scheduled_query).run()
| 39.428246
| 136
| 0.644462
|
1e92dc4caef3d76b13ec9a6dc22d1eadbb34d6db
| 57
|
py
|
Python
|
geochemistrypy/utils/__init__.py
|
WenyuMolly/geochemistrypy
|
7127d98eb4b1610769a859dfbfb21457ae8de37c
|
[
"MIT"
] | 13
|
2021-11-03T05:44:06.000Z
|
2022-03-27T02:50:45.000Z
|
geochemistrypy/utils/__init__.py
|
WenyuMolly/geochemistrypy
|
7127d98eb4b1610769a859dfbfb21457ae8de37c
|
[
"MIT"
] | 20
|
2022-02-19T14:06:08.000Z
|
2022-03-28T10:25:48.000Z
|
geochemistrypy/utils/__init__.py
|
WenyuMolly/geochemistrypy
|
7127d98eb4b1610769a859dfbfb21457ae8de37c
|
[
"MIT"
] | 4
|
2021-11-03T05:44:12.000Z
|
2022-03-12T15:14:22.000Z
|
# -*- coding: utf-8 -*-
import sys
sys.path.append('..')
| 14.25
| 23
| 0.561404
|
35419e763cdd6878d443d2421030d1d31eda8330
| 1,157
|
py
|
Python
|
kubernetes/test/test_v1beta1_custom_resource_definition_condition.py
|
kevingessner/python
|
3f4d09d260cf0839fae8173852c69e0419188454
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1beta1_custom_resource_definition_condition.py
|
kevingessner/python
|
3f4d09d260cf0839fae8173852c69e0419188454
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1beta1_custom_resource_definition_condition.py
|
kevingessner/python
|
3f4d09d260cf0839fae8173852c69e0419188454
|
[
"Apache-2.0"
] | 1
|
2018-07-19T16:37:20.000Z
|
2018-07-19T16:37:20.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.9.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_custom_resource_definition_condition import V1beta1CustomResourceDefinitionCondition
class TestV1beta1CustomResourceDefinitionCondition(unittest.TestCase):
""" V1beta1CustomResourceDefinitionCondition unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1CustomResourceDefinitionCondition(self):
"""
Test V1beta1CustomResourceDefinitionCondition
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1beta1_custom_resource_definition_condition.V1beta1CustomResourceDefinitionCondition()
pass
if __name__ == '__main__':
unittest.main()
| 25.711111
| 129
| 0.757995
|
a9bf1272492e1110917b8864bddcfa05dcec8baa
| 285
|
py
|
Python
|
database/restclientfactory.py
|
tomdoel/pyxnatbrowser
|
573701e34538d6bae488d0a2d2a8864e974e5a8a
|
[
"BSD-2-Clause"
] | null | null | null |
database/restclientfactory.py
|
tomdoel/pyxnatbrowser
|
573701e34538d6bae488d0a2d2a8864e974e5a8a
|
[
"BSD-2-Clause"
] | null | null | null |
database/restclientfactory.py
|
tomdoel/pyxnatbrowser
|
573701e34538d6bae488d0a2d2a8864e974e5a8a
|
[
"BSD-2-Clause"
] | null | null | null |
# https://github.com/tomdoel/pyxnatbrowser
# Author: Tom Doel www.tomdoel.com
# Distributed under the Simplified BSD License.
from database.restclient import RestClient
class RestClientFactory:
@staticmethod
def create_rest_client(config):
return RestClient(config)
| 23.75
| 47
| 0.77193
|
8c349faa049ad9811cf692eb9d539e322c40efd4
| 920
|
py
|
Python
|
tools/mo/openvino/tools/mo/front/tf/topk_ext.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 1,127
|
2018-10-15T14:36:58.000Z
|
2020-04-20T09:29:44.000Z
|
tools/mo/openvino/tools/mo/front/tf/topk_ext.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 439
|
2018-10-20T04:40:35.000Z
|
2020-04-19T05:56:25.000Z
|
tools/mo/openvino/tools/mo/front/tf/topk_ext.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 414
|
2018-10-17T05:53:46.000Z
|
2020-04-16T17:29:53.000Z
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.ops.topk import TopK
from openvino.tools.mo.front.extractor import FrontExtractorOp
class TopKExtractor(FrontExtractorOp):
op = 'TopK'
enabled = True
@classmethod
def extract(cls, node):
sort = 'value' if node.pb.attr['sorted'] else 'none'
TopK.update_node_stat(node, {'mode': 'max', 'axis': -1, 'sort': sort, 'k': node.pb.attr['k'].i,
'index_element_type': np.int32})
return cls.enabled
class TopKV2Extractor(FrontExtractorOp):
op = 'TopKV2'
enabled = True
@classmethod
def extract(cls, node):
sort = 'value' if node.pb.attr['sorted'] else 'none'
TopK.update_node_stat(node, {'mode': 'max', 'axis': -1, 'sort': sort, 'index_element_type': np.int32})
return cls.enabled
| 28.75
| 110
| 0.634783
|
8e4a741186b9be252c5e00b12492c5484497df50
| 1,082
|
py
|
Python
|
blibs/timestamps.py
|
theleftsock/blibs
|
a4a1b2eacf5bed9ed42e0e7f651f17e9549b34e6
|
[
"MIT"
] | null | null | null |
blibs/timestamps.py
|
theleftsock/blibs
|
a4a1b2eacf5bed9ed42e0e7f651f17e9549b34e6
|
[
"MIT"
] | null | null | null |
blibs/timestamps.py
|
theleftsock/blibs
|
a4a1b2eacf5bed9ed42e0e7f651f17e9549b34e6
|
[
"MIT"
] | null | null | null |
__author__ = 'biagio'
import datetime
import time
from time import gmtime, strftime
def get_ts(mode=0):
    date_list = datetime.datetime.now().time()
    # print "date_list: ", date_list
    if mode == 0:    # 2015-06-02 09:52
        ts = strftime("%Y-%m-%d %H:%M")
    elif mode == 4:  # 2015-06-02 09:52:30
        ts = strftime("%Y-%m-%d %H:%M:%S")
    elif mode == 1:  # 2015-06-02 13:52:41 (UTC, via gmtime)
        ts = strftime("%Y-%m-%d %H:%M:%S", gmtime())
    elif mode == 2:  # 201506020952
        ts = strftime("%Y%m%d%H%M")
    elif mode == 3:  # 20150602095306
        ts = strftime("%Y%m%d%H%M%S")
    else:            # unknown mode: fall back to the mode-0 format instead of raising UnboundLocalError
        ts = strftime("%Y-%m-%d %H:%M")
    #print "ts: ", ts
    return ts
def seconds_to_time(secs):  # takes a number of seconds and returns a tuple (secs, mins, hours, days)
    # integer division (//) is used so the results stay whole numbers on Python 3 as well
    if secs > 60:
        mins = int(secs) // 60
        secs = int(secs) % 60
    else:
        mins = 0
    if mins > 60:
        hours = int(mins) // 60
        mins = int(mins) % 60
    else:
        hours = 0
    if hours > 24:
        days = int(hours) // 24
        hours = int(hours) % 24
    else:
        days = 0
    return (secs, mins, hours, days)
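# Added usage note (not in the original module):
#   seconds_to_time(90061) -> (1, 1, 1, 1)   i.e. 1 second, 1 minute, 1 hour, 1 day,
# since the returned tuple is ordered (secs, mins, hours, days).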
| 28.473684
| 92
| 0.525878
|
f8f3000de868711e2b03c21f66f0b4d8b848299c
| 458
|
py
|
Python
|
Python/pico/utils.py
|
DSeeLP/LXBot9800
|
a53524b49fedffb3bd7e01d510b0318a602729e4
|
[
"MIT"
] | null | null | null |
Python/pico/utils.py
|
DSeeLP/LXBot9800
|
a53524b49fedffb3bd7e01d510b0318a602729e4
|
[
"MIT"
] | null | null | null |
Python/pico/utils.py
|
DSeeLP/LXBot9800
|
a53524b49fedffb3bd7e01d510b0318a602729e4
|
[
"MIT"
] | null | null | null |
from machine import Pin, SPI
from lib.ST7735 import TFT
import utime
buzzer = Pin(4, Pin.OUT)
def buzz(time_ms: int = 250):
buzzer.value(1)
utime.sleep_ms(time_ms)
buzzer.value(0)
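# Added usage note (illustrative): buzz() drives the buzzer pin high for time_ms milliseconds and
# then low again, e.g. buzz(500) sounds the buzzer for half a second.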
spi = SPI(1, baudrate=20000000, polarity=0, phase=0,
sck=Pin(10), mosi=Pin(11), miso=None)
# def __init__( self, spi, aDC, aReset, aCS) :
tft = TFT(spi, 14, 15, 13)
offset = 25
led1 = Pin(18, Pin.OUT)
led2 = Pin(19, Pin.OUT)
led3 = Pin(20, Pin.OUT)
| 19.083333
| 52
| 0.648472
|
3b5b683f9bd896753ed6835e8df39b02d123cd96
| 10,260
|
py
|
Python
|
deli/manager/cli/commands/run.py
|
sandwichcloud/deli
|
a08bf3da59e435493326ff3c8b435a7a9e762c00
|
[
"MIT"
] | 5
|
2018-02-07T05:49:09.000Z
|
2020-03-02T20:33:13.000Z
|
deli/manager/cli/commands/run.py
|
sandwichcloud/deli
|
a08bf3da59e435493326ff3c8b435a7a9e762c00
|
[
"MIT"
] | 2
|
2018-02-07T01:50:49.000Z
|
2020-10-27T21:18:04.000Z
|
deli/manager/cli/commands/run.py
|
sandwichcloud/deli
|
a08bf3da59e435493326ff3c8b435a7a9e762c00
|
[
"MIT"
] | null | null | null |
import argparse
import datetime
import enum
import ipaddress
import json
import os
import time
import uuid
from threading import RLock
import arrow
import urllib3
from clify.daemon import Daemon
from dotenv import load_dotenv
from go_defer import with_defer, defer
from k8scontroller.election.elector import LeaderElector
from kubernetes import config, client
from kubernetes.client import Configuration
from deli.cache import cache_client
from deli.kubernetes.resources.v1alpha1.flavor.controller import FlavorController
from deli.kubernetes.resources.v1alpha1.flavor.model import Flavor
from deli.kubernetes.resources.v1alpha1.iam_policy.controller import IAMPolicyController
from deli.kubernetes.resources.v1alpha1.iam_policy.model import IAMPolicy
from deli.kubernetes.resources.v1alpha1.iam_role.controller import IAMSystemRoleController, IAMProjectRoleController
from deli.kubernetes.resources.v1alpha1.iam_role.model import IAMSystemRole, IAMProjectRole
from deli.kubernetes.resources.v1alpha1.iam_service_account.controller import SystemServiceAccountController, \
ProjectServiceAccountController
from deli.kubernetes.resources.v1alpha1.iam_service_account.model import SystemServiceAccount, ProjectServiceAccount
from deli.kubernetes.resources.v1alpha1.image.controller import ImageController
from deli.kubernetes.resources.v1alpha1.image.model import Image
from deli.kubernetes.resources.v1alpha1.instance.controller import InstanceController
from deli.kubernetes.resources.v1alpha1.instance.model import Instance
from deli.kubernetes.resources.v1alpha1.keypair.controller import KeypairController
from deli.kubernetes.resources.v1alpha1.keypair.keypair import Keypair
from deli.kubernetes.resources.v1alpha1.network.controller import NetworkController, NetworkPortController
from deli.kubernetes.resources.v1alpha1.network.model import Network, NetworkPort
from deli.kubernetes.resources.v1alpha1.project_quota.controller import ProjectQuotaController
from deli.kubernetes.resources.v1alpha1.project_quota.model import ProjectQuota
from deli.kubernetes.resources.v1alpha1.region.controller import RegionController
from deli.kubernetes.resources.v1alpha1.region.model import Region
from deli.kubernetes.resources.v1alpha1.volume.controller import VolumeController
from deli.kubernetes.resources.v1alpha1.volume.model import Volume
from deli.kubernetes.resources.v1alpha1.zone.controller import ZoneController
from deli.kubernetes.resources.v1alpha1.zone.model import Zone
from deli.manager.vmware import VMWare
class EnvDefault(argparse.Action):
def __init__(self, envvar, required=True, default=None, help=None, **kwargs):
if envvar in os.environ:
default = os.environ.get(envvar, default)
if required and default:
required = False
if help is not None:
help += " [Environment Variable: $" + envvar + "]"
if default is None:
default = argparse.SUPPRESS
super(EnvDefault, self).__init__(default=default, required=required, help=help, **kwargs)
def __call__(self, parser, namespace, values, option_string=None): # pragma: no cover
setattr(namespace, self.dest, values)
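# Added note (not in the original file): EnvDefault lets an environment variable supply the value of a
# CLI flag, e.g. --redis-url falls back to $REDIS_URL when the flag is omitted, which is how the
# required_group arguments in setup_arguments below are wired up.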
class RunManager(Daemon):
def __init__(self):
super().__init__('run', 'Run the Sandwich Cloud Manager')
self.menu_url = None
self.vmware = None
self.leader_elector = None
self.controllers = []
self.lock = RLock()
def setup_arguments(self, parser):
load_dotenv(os.path.join(os.getcwd(), '.env'))
parser.add_argument("--kube-config", action=EnvDefault, envvar="KUBECONFIG", required=False, default="",
help="Path to a kubeconfig. Only required if out-of-cluster.")
parser.add_argument('--kube-master', action=EnvDefault, envvar="KUBEMASTER", required=False, default="",
help="The address of the Kubernetes API server (overrides any value in kubeconfig)")
required_group = parser.add_argument_group("required named arguments")
required_group.add_argument("--vcenter-host", action=EnvDefault, envvar="VCENTER_HOST", required=True,
help="The address to use to connect to VCenter")
required_group.add_argument("--vcenter-port", action=EnvDefault, envvar="VCENTER_PORT", default="443",
help="The port to use to connect to VCenter")
required_group.add_argument("--vcenter-username", action=EnvDefault, envvar="VCENTER_USERNAME", required=True,
help="The username to use to connect to VCenter")
required_group.add_argument("--vcenter-password", action=EnvDefault, envvar="VCENTER_PASSWORD", required=True,
help="The password to use to connect to VCenter")
required_group.add_argument("--menu-url", action=EnvDefault, envvar="MENU_URL", required=True,
help="Telnet URL to the menu server")
required_group.add_argument("--redis-url", action=EnvDefault, envvar="REDIS_URL", required=True,
help="URL to the redis server for caching")
def run(self, args) -> int:
cache_client.connect(url=args.redis_url)
if args.kube_config != "" or args.kube_master != "":
self.logger.info("Using kube-config configuration")
Configuration.set_default(Configuration())
if args.kube_config != "":
config.load_kube_config(config_file=args.kube_config)
if args.kube_master != "":
Configuration._default.host = args.kube_master
else:
self.logger.info("Using in-cluster configuration")
config.load_incluster_config()
while True:
try:
client.CoreV1Api().list_namespace()
break
except urllib3.exceptions.HTTPError as e:
self.logger.error("Error connecting to the Kubernetes API. Trying again in 5 seconds. Error: " + str(e))
time.sleep(5)
old_json_encoder = json.JSONEncoder.default
def json_encoder(self, o): # pragma: no cover
if isinstance(o, uuid.UUID):
return str(o)
if isinstance(o, arrow.Arrow):
return o.isoformat()
if isinstance(o, ipaddress.IPv4Network):
return str(o)
if isinstance(o, ipaddress.IPv4Address):
return str(o)
if isinstance(o, enum.Enum):
return o.value
if isinstance(o, datetime.datetime):
return str(o.isoformat())
return old_json_encoder(self, o)
json.JSONEncoder.default = json_encoder
self.logger.info("Creating CRDs")
IAMSystemRole.create_crd()
IAMSystemRole.wait_for_crd()
IAMProjectRole.create_crd()
IAMProjectRole.wait_for_crd()
IAMPolicy.create_crd()
IAMPolicy.wait_for_crd()
IAMPolicy.create_system_policy()
SystemServiceAccount.create_crd()
SystemServiceAccount.wait_for_crd()
ProjectServiceAccount.create_crd()
ProjectServiceAccount.wait_for_crd()
IAMSystemRole.create_default_roles()
SystemServiceAccount.create_admin_sa()
ProjectQuota.create_crd()
ProjectQuota.wait_for_crd()
Region.create_crd()
Region.wait_for_crd()
Zone.create_crd()
Zone.wait_for_crd()
Network.create_crd()
Network.wait_for_crd()
NetworkPort.create_crd()
NetworkPort.wait_for_crd()
Image.create_crd()
Image.wait_for_crd()
Flavor.create_crd()
Flavor.wait_for_crd()
Volume.create_crd()
Volume.wait_for_crd()
Instance.create_crd()
Instance.wait_for_crd()
Keypair.create_crd()
Keypair.wait_for_crd()
self.logger.info("CRDs have been created")
self.menu_url = args.menu_url
self.vmware = VMWare(args.vcenter_host, args.vcenter_port, args.vcenter_username, args.vcenter_password)
self.leader_elector = LeaderElector("sandwich-controller", "kube-system", self.on_started_leading,
self.on_stopped_leading)
self.leader_elector.start()
return 0
def launch_controller(self, controller):
self.controllers.append(controller)
controller.start()
@with_defer
def on_started_leading(self):
if self.leader_elector.shutting_down:
return
self.lock.acquire()
defer(self.lock.release)
self.logger.info("Started leading... starting controllers")
self.launch_controller(RegionController(1, 30, self.vmware))
self.launch_controller(ZoneController(1, 30, self.vmware))
self.launch_controller(IAMSystemRoleController(1, 30))
self.launch_controller(IAMProjectRoleController(1, 30))
self.launch_controller(IAMPolicyController(1, 30))
self.launch_controller(ProjectQuotaController(1, 30))
self.launch_controller(NetworkController(1, 30, self.vmware))
self.launch_controller(NetworkPortController(1, 30))
self.launch_controller(ImageController(4, 30, self.vmware))
self.launch_controller(SystemServiceAccountController(1, 30))
self.launch_controller(ProjectServiceAccountController(1, 30))
self.launch_controller(FlavorController(1, 30))
self.launch_controller(VolumeController(4, 30, self.vmware))
self.launch_controller(InstanceController(4, 30, self.vmware, self.menu_url))
self.launch_controller(KeypairController(4, 30))
@with_defer
def on_stopped_leading(self):
self.logger.info("Stopped leading... stopping controllers")
self.lock.acquire()
defer(self.lock.release)
for controller in self.controllers:
controller.stop()
self.controllers = []
def on_shutdown(self, signum=None, frame=None):
self.logger.info("Shutting down the Manager")
if self.leader_elector is not None:
self.leader_elector.shutdown()
| 44.034335
| 120
| 0.69191
|
e21c12b439b117cae017d8a3ac1d634ca8690518
| 616
|
py
|
Python
|
config/celery.py
|
juusokor/osmcha-django
|
5daafa015d5e341aa8ad6f847be2b7cc1a204e2b
|
[
"BSD-2-Clause"
] | 27
|
2015-09-06T00:39:39.000Z
|
2021-12-09T10:30:52.000Z
|
config/celery.py
|
juusokor/osmcha-django
|
5daafa015d5e341aa8ad6f847be2b7cc1a204e2b
|
[
"BSD-2-Clause"
] | 494
|
2015-09-10T19:39:38.000Z
|
2022-03-29T08:07:37.000Z
|
config/celery.py
|
juusokor/osmcha-django
|
5daafa015d5e341aa8ad6f847be2b7cc1a204e2b
|
[
"BSD-2-Clause"
] | 17
|
2015-08-10T22:58:56.000Z
|
2021-09-24T17:03:16.000Z
|
#-*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from django.conf import settings
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
app = Celery('config',
backend='rpc://',
broker=settings.BROKER_URL
)
app.conf.update(
CELERY_TASK_RESULT_EXPIRES=10000,
)
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
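# Added usage note (illustrative, not in the original file): with autodiscover_tasks, any
# INSTALLED_APPS module named tasks.py is scanned, so a function decorated with @app.task there
# becomes callable via .delay() once a worker is started with something like
#   celery -A config.celery worker -l info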
| 23.692308
| 72
| 0.762987
|
258bce67d8def13a16778a93d09e9a2f1d76bb7a
| 628
|
py
|
Python
|
examples/titanic/assets/algo_constant/algo.py
|
cupcicm/substra
|
19eeec1dda02cce0e10ef6ed285636e974a6e77a
|
[
"Apache-2.0"
] | null | null | null |
examples/titanic/assets/algo_constant/algo.py
|
cupcicm/substra
|
19eeec1dda02cce0e10ef6ed285636e974a6e77a
|
[
"Apache-2.0"
] | null | null | null |
examples/titanic/assets/algo_constant/algo.py
|
cupcicm/substra
|
19eeec1dda02cce0e10ef6ed285636e974a6e77a
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
import pickle
import substratools as tools
def constant_clf(X):
return pd.DataFrame(columns=['Survived'], data=np.zeros(len(X)), dtype=np.int64)
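# Added note: constant_clf ignores the features entirely and predicts "did not survive" (0) for every
# row, which is what makes this a constant baseline algorithm.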
class TitanicAlgo(tools.algo.Algo):
def train(self, X, y, models, rank):
return (constant_clf(X), constant_clf)
def predict(self, X, models):
return constant_clf(X)
def load_model(self, path):
return constant_clf
def save_model(self, model, path):
with open(path, 'wb') as f:
pickle.dump(constant_clf, f)
if __name__ == '__main__':
tools.algo.execute(TitanicAlgo())
| 21.655172
| 84
| 0.664013
|
d42b8ae8b23ffd56028125948fe72c3fc526082a
| 82
|
py
|
Python
|
utility/GeneralUtility5.py
|
hkuadithya/general-dynamics-data-analytics
|
d3bb2fa8abdc0a8d654941794f65abddc5e85cf1
|
[
"Apache-2.0"
] | 1
|
2017-12-23T20:11:19.000Z
|
2017-12-23T20:11:19.000Z
|
utility/GeneralUtility5.py
|
hkuadithya/general-dynamics-data-analytics
|
d3bb2fa8abdc0a8d654941794f65abddc5e85cf1
|
[
"Apache-2.0"
] | null | null | null |
utility/GeneralUtility5.py
|
hkuadithya/general-dynamics-data-analytics
|
d3bb2fa8abdc0a8d654941794f65abddc5e85cf1
|
[
"Apache-2.0"
] | null | null | null |
# Analysis of First Dataset...
import pandas as pd
emp_df = pd.read_pickle("../")  # NOTE: the original path argument appears incomplete; it is quoted here as-is so the line parses
| 13.666667
| 30
| 0.695122
|
628a01ae3a3195e0cbe02733e0432ac7ab2f45c4
| 406
|
py
|
Python
|
src/supplier/migrations/0002_auto_20190915_2139.py
|
vandana0608/Pharmacy-Managament
|
f99bdec11c24027a432858daa19247a21cecc092
|
[
"bzip2-1.0.6"
] | null | null | null |
src/supplier/migrations/0002_auto_20190915_2139.py
|
vandana0608/Pharmacy-Managament
|
f99bdec11c24027a432858daa19247a21cecc092
|
[
"bzip2-1.0.6"
] | null | null | null |
src/supplier/migrations/0002_auto_20190915_2139.py
|
vandana0608/Pharmacy-Managament
|
f99bdec11c24027a432858daa19247a21cecc092
|
[
"bzip2-1.0.6"
] | null | null | null |
# Generated by Django 2.0.7 on 2019-09-15 16:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('supplier', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='supplier',
name='supplierphonenumber',
field=models.CharField(default='+91', max_length=12),
),
]
| 21.368421
| 65
| 0.603448
|
c434264d41de2823c4e5d1cbdc7a7a374c2e5ab1
| 1,340
|
py
|
Python
|
web/model/aclImdb.py
|
java-lang-programming/web_and_machine_learning
|
e76109b63f1c6b3a380bb7d4ce6ed132c0cddd1e
|
[
"Apache-2.0"
] | null | null | null |
web/model/aclImdb.py
|
java-lang-programming/web_and_machine_learning
|
e76109b63f1c6b3a380bb7d4ce6ed132c0cddd1e
|
[
"Apache-2.0"
] | null | null | null |
web/model/aclImdb.py
|
java-lang-programming/web_and_machine_learning
|
e76109b63f1c6b3a380bb7d4ce6ed132c0cddd1e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pickle
import os
import numpy as np
import pandas as pd
import sys
class Model():
def save_classifier(self, clf, clf_filename):
        parent_dir_path = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../')
        # parent_path = os.path.abspath(os.path.join(os.getcwd(), os.path.pardir))
        print(parent_dir_path)
        model_dir_path = parent_dir_path + '/models/'
dest = os.path.join(model_dir_path, 'movieclassifier', 'pkl_objects')
print(dest)
if not os.path.exists(dest):
os.makedirs(dest)
pickle.dump(clf,
open(os.path.join(dest, clf_filename + '.pkl'),'wb'),
protocol=4)
def save_stopwords(self, object, filename, clf, clf_filename):
        parent_dir_path = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../')
        # parent_path = os.path.abspath(os.path.join(os.getcwd(), os.path.pardir))
        print(parent_dir_path)
        model_dir_path = parent_dir_path + '/models/'
dest = os.path.join(model_dir_path, 'stopwords', 'pkl_objects')
print(dest)
if not os.path.exists(dest):
os.makedirs(dest)
pickle.dump(object,
open(os.path.join(dest, filename + '.pkl'),'wb'),
protocol=4)
| 34.358974
| 94
| 0.60597
|
785a48ad516ec31c41816ed6aa51e585e5498825
| 1,947
|
py
|
Python
|
label_passages_pkl.py
|
Louis-udm/Adapting-regulations-for-an-automated-reading-system
|
4e0ee08397dfed95a1fb4fc4429390291fdb0331
|
[
"Apache-2.0"
] | 2
|
2019-08-22T19:52:40.000Z
|
2019-08-22T19:54:11.000Z
|
label_passages_pkl.py
|
Louis-udm/Adapting-regulations-for-an-automated-reading-system
|
4e0ee08397dfed95a1fb4fc4429390291fdb0331
|
[
"Apache-2.0"
] | null | null | null |
label_passages_pkl.py
|
Louis-udm/Adapting-regulations-for-an-automated-reading-system
|
4e0ee08397dfed95a1fb4fc4429390291fdb0331
|
[
"Apache-2.0"
] | null | null | null |
#%%
import os, pickle
from glob import glob
from document import Document, Paragraph, Table
from utils import *
#%%
import spacy
nlp = spacy.load("en_core_web_lg")
from spacy.matcher import Matcher
matcher = Matcher(nlp.vocab)
#%%
roles=['insurance representative',
'representative in insurance of person',
'group insurance representative',
'damage insurance agent',
'damage insurance broker',
'claims adjuster',
'financial planner',
'firm','independent representative','independent partnership']
patterns = [
[{"LEMMA": "insurance"}, {"LEMMA": "representative"}],
[{"LEMMA": "representative"}, {"LEMMA": "in"}, {"LEMMA": "insurance"}, {"LEMMA": "of"}, {"LEMMA": "person"}],
[{"LEMMA": "group"}, {"LEMMA": "insurance"}, {"LEMMA": "representative"}],
[{"LEMMA": "damage"}, {"LEMMA": "insurance"}, {"LEMMA": "agent"}],
[{"LEMMA": "damage"}, {"LEMMA": "insurance"}, {"LEMMA": "broker"}],
[{"LEMMA": "claim"}, {"LEMMA": "adjuster"}],
[{"LEMMA": "financial"}, {"LEMMA": "planner"}],
[{"LEMMA": "firm"}],
[{"LEMMA": "independent"}, {"LEMMA": "representative"}],
[{"LEMMA": "independent"}, {"LEMMA": "partnership"}] ]
if len(matcher)==0:
for r,p in zip(roles,patterns):
matcher.add(r, None, p)
def get_matched_roles(matcher, text):
matches = matcher(nlp(text.lower()))
return list(set([nlp.vocab.strings[match_id] for match_id, start, end in matches]))
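# Added example (illustrative, not in the original script): for a passage such as
#   "The damage insurance broker and the claims adjuster reviewed the file."
# get_matched_roles(matcher, ...) would be expected to return the matched rule names,
# e.g. ['damage insurance broker', 'claims adjuster'] (order not guaranteed, since a set is used).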
print('Start label passages.pkl...')
# opening the pickle
with open('passages.pkl', 'rb') as fp:
passages = pickle.load(fp)
for k in passages.keys():
text=passages[k]['text']
roles_matched=', '.join(get_matched_roles(matcher,text))
print(k,':',roles_matched)
passages[k]['label']=roles_matched
# if roles_matched is None or roles_matched.strip()=='':
# print('\n------------------\n',k,':\n',text)
with open('passages_labeled.pkl', 'wb') as fp:
pickle.dump(passages,fp)
| 34.157895
| 113
| 0.625064
|
7c3edc2f7eb19dee3499d38f3b99731d9d5830f0
| 43,610
|
py
|
Python
|
client/Clients.py
|
MarianFr/Split_ECG_Classification
|
b6ef77a2512ae6119974e20232ce602d17d0c892
|
[
"MIT"
] | null | null | null |
client/Clients.py
|
MarianFr/Split_ECG_Classification
|
b6ef77a2512ae6119974e20232ce602d17d0c892
|
[
"MIT"
] | null | null | null |
client/Clients.py
|
MarianFr/Split_ECG_Classification
|
b6ef77a2512ae6119974e20232ce602d17d0c892
|
[
"MIT"
] | 1
|
2022-03-23T11:53:16.000Z
|
2022-03-23T11:53:16.000Z
|
import struct
import socket
import pickle
import json
from torch.optim import SGD, Adam, AdamW
import sys
import time
import random
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
#import seaborn as sns
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import accuracy_score, auc, f1_score, precision_score, recall_score, roc_auc_score
from sklearn.preprocessing import MinMaxScaler
import Metrics
import wfdb
import ast
import math
import os.path
import utils
import Models
#np.set_printoptions(threshold=np.inf)
cwd = os.path.dirname(os.path.abspath(__file__))
mlb_path = os.path.join(cwd, "..","Benchmark", "output", "mlb.pkl")
scaler_path = os.path.join(cwd, "..","Benchmark", "output", "standard_scaler.pkl")
ptb_path = os.path.join(cwd, "..", "server", "../server/PTB-XL", "ptb-xl/")
import wandb
wandb.init(project="non-IID,clean", entity="split-learning-medical")
client_num = 1
num_classes = 2
pretrain_this_client = 0
simultrain_this_client = 0
pretrain_epochs = 50
IID = 0
with open('parameter_client.json') as f:
    data = json.load(f)
# set parameters from the json file
#epoch = data["training_epochs"]
lr = data["learningrate"]
batchsize = data["batchsize"]
batch_concat = data["batch_concat"]
host = data["host"]
port = data["port"]
max_recv = data["max_recv"]
autoencoder = data["autoencoder"]
detailed_output = data["detailed_output"]
count_flops = data["count_flops"]
plots = data["plots"]
autoencoder_train = data["autoencoder_train"]
deactivate_train_after_num_epochs = data["deactivate_train_after_num_epochs"]
grad_encode = data["grad_encode"]
train_gradAE_active = data["train_gradAE_active"]
deactivate_grad_train_after_num_epochs = data["deactivate_grad_train_after_num_epochs"]
wandb.init(config={
"learning_rate": lr,
#"epochs": epoch,
"batch_size": batchsize,
"autoencoder": autoencoder
})
wandb.config.update({"learning_rate": lr, "PC: ": 2})
def print_json():
print("learningrate: ", lr)
print("grad_encode: ", grad_encode)
print("gradAE_train: ", train_gradAE_active)
print("deactivate_grad_train_after_num_epochs: ", deactivate_grad_train_after_num_epochs)
#print("Getting the metadata epoch: ", epoch)
print("Getting the metadata host: ", host)
print("Getting the metadata port: ", port)
print("Getting the metadata batchsize: ", batchsize)
print("Autoencoder: ", autoencoder)
print("detailed_output: ", detailed_output)
print("count_flops: ", count_flops)
print("plots: ", plots)
print("autoencoder_train: ", autoencoder_train)
print("deactivate_train_after_num_epochs: ", deactivate_train_after_num_epochs)
# load data from json file
class PTB_XL(Dataset):
def __init__(self, stage=None):
self.stage = stage
if self.stage == 'train':
global X_train
global y_train
self.y_train = y_train
self.X_train = X_train
if self.stage == 'val':
global y_val
global X_val
self.y_val = y_val
self.X_val = X_val
if self.stage == 'test':
global y_test
global X_test
self.y_test = y_test
self.X_test = X_test
if self.stage == 'raw':
global y_raw
global X_raw
self.y_raw = y_raw
self.X_raw = X_raw
def __len__(self):
if self.stage == 'train':
return len(self.y_train)
if self.stage == 'val':
return len(self.y_val)
if self.stage == 'test':
return len(self.y_test)
if self.stage == 'raw':
return len(self.y_raw)
def __getitem__(self, idx):
if self.stage == 'train':
sample = self.X_train[idx].transpose((1, 0)), self.y_train[idx]
if self.stage == 'val':
sample = self.X_val[idx].transpose((1, 0)), self.y_val[idx]
if self.stage == 'test':
sample = self.X_test[idx].transpose((1, 0)), self.y_test[idx]
if self.stage == 'raw':
sample = self.X_raw[idx].transpose((1, 0)), self.y_raw[idx]
return sample
def init():
train_dataset = PTB_XL('train')
val_dataset = PTB_XL('val')
if IID:
train_1, rest1 = torch.utils.data.random_split(train_dataset, [3853, 15414], generator=torch.Generator().manual_seed(42))
train_2, rest2 = torch.utils.data.random_split(rest1, [3853, 11561], generator=torch.Generator().manual_seed(42))
train_3, rest3 = torch.utils.data.random_split(rest2, [3853, 7708], generator=torch.Generator().manual_seed(42))
train_4, train_5 = torch.utils.data.random_split(rest3, [3853, 3855], generator=torch.Generator().manual_seed(42))
if client_num == 1: train_dataset = train_1
if client_num == 2: train_dataset = train_2
if client_num == 3: train_dataset = train_3
if client_num == 4: train_dataset = train_4
if client_num == 5: train_dataset = train_5
if pretrain_this_client:
raw_dataset = PTB_XL('raw')
print("len raw dataset", len(raw_dataset))
pretrain_dataset, no_dataset = torch.utils.data.random_split(raw_dataset, [963, 18304],
generator=torch.Generator().manual_seed(42))
print("pretrain_dataset length: ", len(pretrain_dataset))
global pretrain_loader
pretrain_loader = torch.utils.data.DataLoader(pretrain_dataset, batch_size=batchsize, shuffle=True)
if simultrain_this_client:
raw_dataset = PTB_XL('raw')
print("len raw dataset", len(raw_dataset))
pretrain_dataset, no_dataset = torch.utils.data.random_split(raw_dataset, [963, 18304],
generator=torch.Generator().manual_seed(42))
print("len train dataset", len(train_dataset))
train_dataset = torch.utils.data.ConcatDataset((pretrain_dataset, train_dataset))
print("len mixed-train dataset", len(train_dataset))
print("train_dataset length: ", len(train_dataset))
print("val_dataset length: ", len(train_dataset))
global train_loader
global val_loader
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batchsize, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batchsize, shuffle=True)
"""
def new_split():
global train_loader
global val_loader
train_dataset, val_dataset = torch.utils.data.random_split(training_dataset,
[size_train_dataset,
len(training_dataset) - size_train_dataset])
print("train_dataset size: ", size_train_dataset)
print("val_dataset size: ", len(training_dataset) - size_train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batchsize, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batchsize, shuffle=True)
"""
if count_flops: #Does not work on the Jetson Nano yet. The amount of FLOPs doesn't depend on the architecture. Measuring FLOPs on the PC and JetsonNano would result in the same outcome.
# The paranoid switch prevents the FLOPs count
# Solution: sudo sh -c 'echo 1 >/proc/sys/kernel/perf_event_paranoid'
# Needs to be done after every restart of the PC
from ptflops import get_model_complexity_info
from pypapi import events, papi_high as high
def str_to_number(label):
a = np.zeros(5)
if not label:
return a
for i in label:
if i == 'NORM':
a[0] = 1
if i == 'MI':
a[1] = 1
if i == 'STTC':
a[2] = 1
if i == 'HYP':
a[3] = 1
if i == 'CD':
a[4] = 1
return a
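# Added example: str_to_number(['NORM', 'CD']) -> array([1., 0., 0., 0., 1.]), i.e. a 5-element
# multi-hot vector over (NORM, MI, STTC, HYP, CD).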
# send/receive system:
def send_msg(sock, getid, content):
    """
    pickles the content (creates a bitstream), adds a 4-byte length header and sends the message via the TCP socket
    :param sock: socket
    :param getid: id of the request/handler the message is addressed to
    :param content: content to send via the TCP socket
    """
msg = [getid, content] # add getid
msg = pickle.dumps(msg)
msg = struct.pack('>I', len(msg)) + msg # add 4-byte length in network byte order
#print("communication overhead send: ", sys.getsizeof(msg), " bytes")
global data_send_per_epoch
data_send_per_epoch += sys.getsizeof(msg)
sock.sendall(msg)
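# Added note (illustrative, not in the original code): the frame written above is a 4-byte
# big-endian length prefix followed by a pickled [getid, content] list, so a minimal one-shot
# decoder for a single small frame would look like:
#   (length,) = struct.unpack('>I', sock.recv(4))
#   getid, content = pickle.loads(sock.recv(length))
# recv_msg/recvall below implement the robust version that loops until all bytes have arrived.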
def recieve_msg(sock):
"""
recieves the meassage with helper function, umpickles the message and separates the getid from the actual massage content
:param sock: socket
"""
msg = recv_msg(sock) # receive client message from socket
msg = pickle.loads(msg)
return msg
def recieve_request(sock):
"""
recieves the meassage with helper function, umpickles the message and separates the getid from the actual massage content
:param sock: socket
"""
msg = recv_msg(sock) # receive client message from socket
msg = pickle.loads(msg)
getid = msg[0]
content = msg[1]
handle_request(sock, getid, content)
def recv_msg(sock):
"""
    gets the message length (which corresponds to the first
    4 bytes of the received bytestream) with the recvall function
:param sock: socket
:return: returns the data retrieved from the recvall function
"""
# read message length and unpack it into an integer
raw_msglen = recvall(sock, 4)
if not raw_msglen:
return None
msglen = struct.unpack('>I', raw_msglen)[0]
#print("Message length:", msglen)
global data_recieved_per_epoch
data_recieved_per_epoch += msglen
# read the message data
return recvall(sock, msglen)
def recvall(sock, n):
"""
    returns the data from a received bytestream; helper function
    to receive exactly n bytes or return None if EOF is hit
:param sock: socket
:param n: length in bytes (number of bytes)
:return: message
"""
#
data = b''
while len(data) < n:
if detailed_output:
print("Start function sock.recv")
packet = sock.recv(n - len(data))
if not packet:
return None
data += packet
# print("Daten: ", data)
return data
def handle_request(sock, getid, content):
"""
    executes the requested function, depending on the getid, and passes on the received message
    :param sock: socket
    :param getid: id of the function that should be executed when the message is received
    :param content: message content
    """
    #print("request with id:", getid)
    switcher = {
        0: initialize_model,
        1: train_epoch,
        2: val_stage,
        3: test_stage,
    }
    handler = switcher.get(getid)
    if handler is None:
        # guard against unknown ids instead of calling a non-callable default
        print("invalid request received:", getid)
        return
    handler(sock, content)
def serverHandler(conn):
while True:
recieve_request(conn)
def grad_postprocessing(grad):
grad_new = grad.numpy()
for a in range(64):
#scaler.fit(grad[a])
grad_new[a] = scaler.inverse_transform(grad[a])
grad_new = torch.DoubleTensor(grad_new).to(device)
return grad_new
def train_epoch(s, pretraining):
#new_split() #new random dist between train and val
loss_grad_total = 0
global epoch
epoch += 1
flops_forward_epoch, flops_encoder_epoch, flops_backprop_epoch, flops_rest, flops_send = 0,0,0,0,0
#Specify AE configuration
train_active = 0 #default: AE is pretrained
train_grad_active = 0
if epoch < deactivate_train_after_num_epochs:
if autoencoder_train:
train_active = 1
if epoch < deactivate_grad_train_after_num_epochs:
if train_gradAE_active:
train_grad_active = 1
global data_send_per_epoch, data_recieved_per_epoch, data_send_per_epoch_total, data_recieved_per_epoch_total
data_send_per_epoch, data_recieved_per_epoch = 0, 0
correct_train, total_train, train_loss = 0, 0, 0
batches_aborted, total_train_nr, total_val_nr, total_test_nr = 0, 0, 0, 0
hamming_epoch, precision_epoch, recall_epoch, f1_epoch, auc_train = 0, 0, 0, 0, 0
#encoder_grad_server = 0
epoch_start_time = time.time()
loader = pretrain_loader if pretraining else train_loader
for b, batch in enumerate(loader):
if count_flops:
x = high.read_counters()
#print("batch: ", b)
# print("FLOPs dataloader: ", x)
# if b % 100 == 0:
# print("batch ", b, " / ", total_batch)
forward_time = time.time()
active_training_time_batch_client = 0
start_time_batch_forward = time.time()
# define labels and data per batch
x_train, label_train = batch
x_train = x_train.to(device)
# x_train = x_train.to(device)
label_train = label_train.double().to(device)
if len(x_train) != 64:
break
if count_flops:
x = high.read_counters()
flops_rest += x[0] # reset Flop Counter
optimizer.zero_grad() # sets gradients to 0 - start for backprop later
client_output_backprop = client(x_train)
client_output_train = client_output_backprop.detach().clone()
if count_flops:
x = high.read_counters()
#print("FLOPs forward: ", x)
flops_forward_epoch += x[0]
client_output_train_without_ae_send = 0
if autoencoder:
if train_active:
optimizerencode.zero_grad()
# client_output_train_without_ae = client_output_train.clone().detach().requires_grad_(False)
client_encoded = encode(client_output_train)
client_output_send = client_encoded.detach().clone()
if train_active:
client_output_train_without_ae_send = client_output_train.detach().clone()
else:
client_output_send = client_output_train.detach().clone()
# client_output_send = encode(client_output_train)
if count_flops:
x = high.read_counters()
flops_encoder_epoch += x[0]
global encoder_grad_server
msg = {
'client_output_train': client_output_send,
'client_output_train_without_ae': client_output_train_without_ae_send,
'label_train': label_train, # concat_labels,
'batch_concat': batch_concat,
'batchsize': batchsize,
'train_active': train_active,
'encoder_grad_server': encoder_grad_server,
'train_grad_active': train_grad_active,
'grad_encode': grad_encode
}
active_training_time_batch_client += time.time() - start_time_batch_forward
if detailed_output:
print("Send the message to server")
send_msg(s, 0, msg)
# while concat_counter_recv < concat_counter_send:
msg = recieve_msg(s)
# print("msg: ", msg)
if pretraining == 0:
wandb.log({"dropout_threshold": msg["dropout_threshold"]}, commit=False)
# decode grad:
client_grad_without_encode = msg["client_grad_without_encode"]
client_grad = msg["grad_client"]
global scaler
scaler = msg["scaler"]
if msg["grad_encode"]:
if train_grad_active:
# print("train_active")
optimizer_grad_decoder.zero_grad()
client_grad = Variable(client_grad, requires_grad=True)
client_grad_decode = grad_decoder(client_grad)
if train_grad_active:
loss_grad_autoencoder = error_grad_autoencoder(client_grad_without_encode, client_grad_decode)
loss_grad_total += loss_grad_autoencoder.item()
loss_grad_autoencoder.backward()
encoder_grad_server = client_grad.grad.detach().clone()#
optimizer_grad_decoder.step()
# print("loss_grad_autoencoder: ", loss_grad_autoencoder)
else:
encoder_grad_server = 0
client_grad_decode = grad_postprocessing(client_grad_decode.detach().clone().cpu())
else:
if msg["client_grad_abort"] == 0:
client_grad_decode = client_grad.detach().clone()
#else:
# client_grad = "abort"
encoder_grad_server = 0
start_time_batch_backward = time.time()
encoder_grad = msg["encoder_grad"]
if client_grad == "abort":
# print("client_grad: ", client_grad)
train_loss_add, add_correct_train, add_total_train = msg["train_loss"], msg["add_correct_train"], \
msg["add_total_train"]
correct_train += add_correct_train
total_train_nr += 1
total_train += add_total_train
train_loss += train_loss_add
batches_aborted += 1
output_train = msg["output_train"]
# print("train_loss: ", train_loss/total_train_nr)
# meter.update(output_train, label_train, train_loss/total_train_nr)
pass
else:
if train_active:
client_encoded.backward(encoder_grad)
optimizerencode.step()
# concat_tensors[concat_counter_recv].to(device)
# concat_tensors[concat_counter_recv].backward(client_grad)
# client_output_backprob.to(device)
# if b % 1000 == 999:
# print("Backprop with: ", client_grad)
if count_flops:
x = high.read_counters() # reset counter
flops_rest += x[0]
flops_send += x[0]
client_output_backprop.backward(client_grad_decode)
optimizer.step()
if count_flops:
x = high.read_counters()
# print("FLOPs backprob: ", x)
flops_backprop_epoch += x[0]
train_loss_add, add_correct_train, add_total_train = msg["train_loss"], msg["add_correct_train"], \
msg["add_total_train"]
correct_train += add_correct_train
total_train_nr += 1
total_train += add_total_train
train_loss += train_loss_add
output_train = msg["output_train"]
# print("train_loss: ", train_loss/total_train_nr)
# meter.update(output_train, label_train, train_loss/total_train_nr)
# wandb.watch(client, log_freq=100)
output = torch.round(output_train)
# if np.sum(label.cpu().detach().numpy()[0]) > 1:
# if np.sum(output.cpu().detach().numpy()[0] > 1):
# print("output[0]: ", output.cpu().detach().numpy()[0])
# print("label [0]: ", label.cpu().detach().numpy()[0])
#if (total_train_nr % 100 == 0):
# print("output[0]: ", output.cpu().detach().numpy()[0])
# print("label [0]: ", label_train.cpu().detach().numpy()[0])
#global batches_abort_rate_total
#batches_abort_rate_total.append(batches_aborted / total_train_nr)
active_training_time_batch_client += time.time() - start_time_batch_backward
#active_training_time_batch_server = msg["active_trtime_batch_server"]
#active_training_time_epoch_client += active_training_time_batch_client
#active_training_time_epoch_server += active_training_time_batch_server
#
try:
roc_auc = roc_auc_score(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu(),average='micro')
auc_train += roc_auc
except:
# print("auc_train_exception: ")
# print("label: ", label)
# print("output: ", output)
pass
hamming_epoch += Metrics.Accuracy(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu())
# accuracy_score(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu())
precision_epoch += precision_score(label_train.detach().clone().cpu(),
torch.round(output).detach().clone().cpu(),
average='micro', zero_division=0)
# recall_epoch += Plots.Recall(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
recall_epoch += recall_score(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu(),
average='micro', zero_division=0)
# f1_epoch += Plots.F1Measure(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
f1_epoch += f1_score(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu(),
average='micro', zero_division=0)
epoch_endtime = time.time() - epoch_start_time
if pretraining:
status_epoch_train = "epoch: {}, AUC_train: {:.4f}, Accuracy: {:.4f}, Precision: {:.4f}, Recall: {:.4f}, F1: {:.4f}, trainingtime for epoch: {:.6f}s, batches abortrate:{:.2f}, train_loss: {:.4f} ".format(
epoch, auc_train / total_train_nr, hamming_epoch / total_train_nr, precision_epoch / total_train_nr,
recall_epoch / total_train_nr,
f1_epoch / total_train_nr, epoch_endtime, batches_aborted / total_train_nr,
train_loss / total_train_nr)
print("status_epoch_pretrain: ", status_epoch_train)
else:
flops_client_forward_total.append(flops_forward_epoch)
flops_client_encoder_total.append(flops_encoder_epoch)
flops_client_backprop_total.append(flops_backprop_epoch)
print("data_send_per_epoch: ", data_send_per_epoch / 1000000, " MegaBytes")
print("data_recieved_per_epoch: ", data_recieved_per_epoch / 1000000, "MegaBytes")
data_send_per_epoch_total.append(data_send_per_epoch)
data_recieved_per_epoch_total.append(data_recieved_per_epoch)
status_epoch_train = "epoch: {}, AUC_train: {:.4f}, Accuracy: {:.4f}, Precision: {:.4f}, Recall: {:.4f}, F1: {:.4f}, trainingtime for epoch: {:.6f}s, batches abortrate:{:.2f}, train_loss: {:.4f} ".format(
epoch, auc_train / total_train_nr, hamming_epoch / total_train_nr, precision_epoch / total_train_nr,
recall_epoch / total_train_nr,
f1_epoch / total_train_nr, epoch_endtime, batches_aborted / total_train_nr,
train_loss / total_train_nr)
print("status_epoch_train: ", status_epoch_train)
if count_flops:
print("MegaFLOPS_forward_epoch", flops_forward_epoch / 1000000)
print("MegaFLOPS_encoder_epoch", flops_encoder_epoch / 1000000)
print("MegaFLOPS_backprop_epoch", flops_backprop_epoch / 1000000)
print("MegaFLOPS_rest", flops_rest / 1000000)
print("MegaFLOPS_send", flops_send / 1000000)
wandb.log({"Batches Abortrate": batches_aborted / total_train_nr,
"MegaFLOPS Client Encoder": flops_encoder_epoch / 1000000,
"MegaFLOPS Client Forward": flops_forward_epoch / 1000000,
"MegaFLOPS Client Backprop": flops_backprop_epoch / 1000000},
commit=False)
global auc_train_log
auc_train_log = auc_train / total_train_nr
global accuracy_train_log
accuracy_train_log = hamming_epoch / total_train_nr
global batches_abort_rate_total
batches_abort_rate_total.append(batches_aborted / total_train_nr)
initial_weights = client.state_dict()
send_msg(s, 2, initial_weights)
msg = 0
send_msg(s, 3, msg)
def val_stage(s, pretraining=0):
total_val_nr, val_loss_total, correct_val, total_val = 0, 0, 0, 0
val_losses, val_accs = [], []
hamming_epoch, precision_epoch, recall_epoch, f1_epoch, accuracy, auc_val = 0, 0, 0, 0, 0, 0
val_time = time.time()
with torch.no_grad():
for b_t, batch_t in enumerate(val_loader):
x_val, label_val = batch_t
x_val, label_val = x_val.to(device), label_val.double().to(device)
optimizer.zero_grad()
output_val = client(x_val, drop=False)
client_output_val = output_val.clone().detach().requires_grad_(True)
if autoencoder:
client_output_val = encode(client_output_val)
msg = {'client_output_val/test': client_output_val,
'label_val/test': label_val,
}
if detailed_output:
print("The msg is:", msg)
send_msg(s, 1, msg)
if detailed_output:
print("294: send_msg success!")
msg = recieve_msg(s)
if detailed_output:
print("296: recieve_msg success!")
correct_val_add = msg["correct_val/test"]
val_loss = msg["val/test_loss"]
output_val_server = msg["output_val/test_server"]
val_loss_total += val_loss
correct_val += correct_val_add
total_val_add = len(label_val)
total_val += total_val_add
total_val_nr += 1
try:
roc_auc = roc_auc_score(label_val.detach().clone().cpu(), torch.round(output_val_server).detach().clone().cpu(), average='micro')
auc_val += roc_auc
except:
# print("auc_train_exception: ")
# print("label: ", label)
# print("output: ", output)
pass
output_val_server = torch.round(output_val_server)
hamming_epoch += Metrics.Accuracy(label_val.detach().clone().cpu(), output_val_server.detach().clone().cpu())
#accuracy_score(label_val.detach().clone().cpu(),
# torch.round(output_val_server).detach().clone().cpu())
precision_epoch += precision_score(label_val.detach().clone().cpu(),
output_val_server.detach().clone().cpu(),
average='micro', zero_division=0)
# recall_epoch += Plots.Recall(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
recall_epoch += recall_score(label_val.detach().clone().cpu(), output_val_server.detach().clone().cpu(),
average='micro', zero_division=0)
# f1_epoch += Plots.F1Measure(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
f1_epoch += f1_score(label_val.detach().clone().cpu(), output_val_server.detach().clone().cpu(),
average='micro', zero_division=0)
status_epoch_val = "epoch: {},AUC_val: {:.4f} ,Accuracy: {:.4f}, Precision: {:.4f}, Recall: {:.4f}, F1: {:.4f}, val_loss: {:.4f}".format(
epoch, auc_val / total_val_nr, hamming_epoch / total_val_nr, precision_epoch / total_val_nr,
recall_epoch / total_val_nr,
f1_epoch / total_val_nr, val_loss_total / total_val_nr)
print("status_epoch_val: ", status_epoch_val)
if pretraining == 0:
wandb.log({"Loss_val": val_loss_total / total_val_nr,
"Accuracy_val_micro": hamming_epoch / total_val_nr,
"F1_val": f1_epoch / total_val_nr,
"AUC_val": auc_val / total_val_nr,
"AUC_train": auc_train_log,
"Accuracy_train_micro": accuracy_train_log})
send_msg(s, 3, 0)
def test_stage(s, epoch):
loss_test = 0.0
correct_test, total_test = 0, 0
hamming_epoch = 0
precision_epoch = 0
recall_epoch = 0
f1_epoch = 0
total_test_nr = 0
with torch.no_grad():
for b_t, batch_t in enumerate(val_loader):
x_test, label_test = batch_t
x_test, label_test = x_test.to(device), label_test.double().to(device)
optimizer.zero_grad()
output_test = client(x_test, drop=False)
client_output_test = output_test.clone().detach().requires_grad_(True)
if autoencoder:
client_output_test = encode(client_output_test)
msg = {'client_output_val/test': client_output_test,
'label_val/test': label_test,
}
if detailed_output:
print("The msg is:", msg)
send_msg(s, 1, msg)
if detailed_output:
print("294: send_msg success!")
msg = recieve_msg(s)
if detailed_output:
print("296: recieve_msg success!")
correct_test_add = msg["correct_val/test"]
test_loss = msg["val/test_loss"]
output_test_server = msg["output_val/test_server"]
loss_test += test_loss
correct_test += correct_test_add
total_test_add = len(label_test)
total_test += total_test_add
total_test_nr += 1
output_test_server = torch.round(output_test_server)
hamming_epoch += Metrics.Accuracy(label_test.detach().clone().cpu(), output_test_server.detach().clone().cpu())
#accuracy_score(label_test.detach().clone().cpu(),
#torch.round(output_test_server).detach().clone().cpu())
precision_epoch += precision_score(label_test.detach().clone().cpu(),
output_test_server.detach().clone().cpu(),
average='micro')
# recall_epoch += Plots.Recall(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
recall_epoch += recall_score(label_test.detach().clone().cpu(),
output_test_server.detach().clone().cpu(),
average='micro')
# f1_epoch += Plots.F1Measure(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
f1_epoch += f1_score(label_test.detach().clone().cpu(),
output_test_server.detach().clone().cpu(),
average='micro')
status_test = "test: hamming_epoch: {:.4f}, precision_epoch: {:.4f}, recall_epoch: {:.4f}, f1_epoch: {:.4f}".format(
hamming_epoch / total_test_nr, precision_epoch / total_test_nr, recall_epoch / total_test_nr,
f1_epoch / total_test_nr)
print("status_test: ", status_test)
global data_send_per_epoch_total
global data_recieved_per_epoch_total
global batches_abort_rate_total
data_transfer_per_epoch = 0
average_dismissal_rate = 0
total_flops_forward = 0
total_flops_encoder = 0
total_flops_backprob = 0
for data in data_send_per_epoch_total:
data_transfer_per_epoch += data
for data in data_recieved_per_epoch_total:
data_transfer_per_epoch += data
for data in batches_abort_rate_total:
average_dismissal_rate += data
for flop in flops_client_forward_total:
total_flops_forward += flop
for flop in flops_client_encoder_total:
total_flops_encoder += flop
for flop in flops_client_backprop_total:
total_flops_backprob += flop
total_flops = total_flops_backprob + total_flops_encoder + total_flops_forward
print("total FLOPs forward: ", total_flops_forward)
print("total FLOPs encoder: ", total_flops_encoder)
print("total FLOPs backprob: ", total_flops_backprob)
print("total FLOPs client: ", total_flops)
print("Average data transfer/epoch: ", data_transfer_per_epoch / epoch / 1000000, " MB")
print("Average dismissal rate: ", average_dismissal_rate / epoch)
wandb.config.update({"Average data transfer/epoch (MB): ": data_transfer_per_epoch / epoch / 1000000,
"Average dismissal rate: ": average_dismissal_rate / epoch,
"total_MegaFLOPS_forward": total_flops_forward/1000000, "total_MegaFLOPS_encoder": total_flops_encoder/1000000,
"total_MegaFLOPS_backprob": total_flops_backprob/1000000, "total_MegaFLOPS": total_flops/1000000})
msg = 0
send_msg(s, 3, msg)
def initialize_model(s, msg):
"""
if new connected client is not the first connected client,
the initial weights are fetched from the server
:param conn:
"""
#msg = recieve_msg(s)
if msg == 0:
#print("msg == 0")
pass
else:
print("msg != 0")
client.load_state_dict(msg, strict=False)
print("model successfully initialized")
#print("start_training")
# start_training(s)
#train_epoch(s)
def initIID():
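    """
    Descriptive note: load the PTB-XL 'superdiagnostic' task at sampling
    frequency 100, aggregate and one-hot encode the labels, use stratified
    folds 1-9 for training and fold 10 for validation, apply the pre-fitted
    standard scaler, and keep a copy of the training split in X_raw/y_raw.
    """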
global X_train, X_val, y_val, y_train, y_test, X_test
sampling_frequency = 100
datafolder = ptb_path
task = 'superdiagnostic'
outputfolder = mlb_path
# Load PTB-XL data
data, raw_labels = utils.load_dataset(datafolder, sampling_frequency)
# Preprocess label data
labels = utils.compute_label_aggregations(raw_labels, datafolder, task)
# Select relevant data and convert to one-hot
data, labels, Y, _ = utils.select_data(data, labels, task, min_samples=0, outputfolder=outputfolder)
input_shape = data[0].shape
print(input_shape)
# 1-9 for training
X_train = data[labels.strat_fold < 10]
y_train = Y[labels.strat_fold < 10]
# 10 for validation
X_val = data[labels.strat_fold == 10]
y_val = Y[labels.strat_fold == 10]
# X_test = data[labels.strat_fold == 10]
# y_test = Y[labels.strat_fold == 10]
num_classes = 5 # <=== number of classes in the finetuning dataset
input_shape = [1000, 12] # <=== shape of samples, [None, 12] in case of different lengths
print(X_train.shape, y_train.shape, X_val.shape, y_val.shape) # , X_test.shape, y_test.shape)
import pickle
standard_scaler = pickle.load(open(scaler_path, "rb"))
X_train = utils.apply_standardizer(X_train, standard_scaler)
X_val = utils.apply_standardizer(X_val, standard_scaler)
global X_raw, y_raw
X_raw = X_train
y_raw = y_train
def init_nonIID():
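    """
    Descriptive note: build per-superclass sample lists (norm, mi, sttc, hyp,
    cd) from the training split and replace X_train/y_train with the subset
    of classes assigned to this client (see the partitioning comment below).
    """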
global X_train, X_val, y_val, y_train, y_test, X_test
norm, mi, sttc, hyp, cd = [],[],[],[],[]
for a in range(len(y_train)):
if label_class(y_train[a], 0):
sttc.append(X_train[a])
if label_class(y_train[a], 1):
hyp.append(X_train[a])
if label_class(y_train[a], 2):
mi.append(X_train[a])
if label_class(y_train[a], 3):
norm.append(X_train[a])
if label_class(y_train[a], 4):
cd.append(X_train[a])
"""
print("norm shape: ", len(norm))
print("mi shape: ", len(mi))
print("sttc shape: ", len(sttc))
print("hyp shape: ", len(hyp))
print("cd shape: ", len(cd))
print("norm label: ", label_norm[0])
print("mi label: ", label_mi[0])
print("sttc label: ", label_sttc[0])
print("hyp label: ", label_hyp[0])
print("cd label: ", label_cd[0])
print("norm label: ", len(label_norm))
print("mi label: ", len(label_mi))
print("sttc label: ", len(label_sttc))
print("hyp label: ", len(label_hyp))
print("cd label: ", len(label_cd))
"""
if client_num == 1:
if num_classes == 1:
print("Client number: ", client_num, " Class norm")
X_train = norm
y_train = label_norm
if num_classes == 2:
print("Client number: ", client_num, " Class norm, mi")
X_train = np.concatenate((norm, mi), axis=0)
y_train = np.concatenate((label_norm, label_mi), axis=0)
if num_classes == 3:
print("Client number: ", client_num, " Class norm, mi, sttc")
X_train = np.concatenate((norm, mi), axis=0)
X_train = np.concatenate((X_train, sttc), axis=0)
y_train = np.concatenate((label_norm, label_mi), axis=0)
y_train = np.concatenate((y_train, label_sttc), axis=0)
if client_num == 2:
if num_classes == 1:
print("Client number: ", client_num, " Class mi")
X_train = mi
y_train = label_mi
if num_classes == 2:
print("Client number: ", client_num, " Class mi, sttc")
X_train = np.concatenate((mi, sttc), axis=0)
y_train = np.concatenate((label_mi, label_sttc), axis=0)
if num_classes == 3:
print("Client number: ", client_num, " Class mi, sttc, hyp")
X_train = np.concatenate((mi, sttc), axis=0)
X_train = np.concatenate((X_train, hyp), axis=0)
y_train = np.concatenate((label_mi, label_sttc), axis=0)
y_train = np.concatenate((y_train, label_hyp), axis=0)
if client_num == 3:
if num_classes == 1:
print("Client number: ", client_num, " Class sttc")
X_train = sttc
y_train = label_sttc
if num_classes == 2:
print("Client number: ", client_num, " Class sttc, hyp")
X_train = np.concatenate((sttc, hyp), axis=0)
y_train = np.concatenate((label_sttc, label_hyp), axis=0)
if num_classes == 3:
print("Client number: ", client_num, " Class sttc, hyp, cd")
X_train = np.concatenate((sttc, hyp), axis=0)
X_train = np.concatenate((X_train, cd), axis=0)
y_train = np.concatenate((label_sttc, label_hyp), axis=0)
y_train = np.concatenate((y_train, label_cd), axis=0)
if client_num == 4:
if num_classes == 1:
print("Client number: ", client_num, " Class hyp")
X_train = hyp
y_train = label_hyp
if num_classes == 2:
print("Client number: ", client_num, " Class hyp, cd")
X_train = np.concatenate((hyp, cd), axis=0)
y_train = np.concatenate((label_hyp, label_cd), axis=0)
if num_classes == 3:
print("Client number: ", client_num, " Class hyp, cd, norm")
X_train = np.concatenate((hyp, cd), axis=0)
X_train = np.concatenate((X_train, norm), axis=0)
y_train = np.concatenate((label_hyp, label_cd), axis=0)
y_train = np.concatenate((y_train, label_norm), axis=0)
if client_num == 5:
if num_classes == 1:
print("Client number: ", client_num, " Class cd")
X_train = cd
y_train = label_cd
if num_classes == 2:
print("Client number: ", client_num, " Class cd, norm")
X_train = np.concatenate((cd, norm), axis=0)
y_train = np.concatenate((label_cd, label_norm), axis=0)
if num_classes == 3:
print("Client number: ", client_num, " Class cd, norm, mi")
X_train = np.concatenate((cd, norm), axis=0)
X_train = np.concatenate((X_train, mi), axis=0)
y_train = np.concatenate((label_cd, label_norm), axis=0)
y_train = np.concatenate((y_train, label_mi), axis=0)
def label_class(label, clas):
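    """
    Return True if the one-hot `label` has class index `clas` set; as a side
    effect the label is appended to the matching per-class label list
    (0 -> label_sttc, 1 -> label_hyp, 2 -> label_mi, 3 -> label_norm,
    4 -> label_cd).
    """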
if clas == 0:
if label[0] == 1:
label_sttc.append(label)
return True
if clas == 1:
if label[1] == 1:
label_hyp.append(label)
return True
if clas == 2:
if label[2] == 1:
label_mi.append(label)
return True
if clas == 3:
if label[3] == 1:
label_norm.append(label)
return True
if clas == 4:
if label[4] == 1:
label_cd.append(label)
return True
def main():
"""
    Initialize the device, client model, optimizer and loss function (plus the
    autoencoder and gradient decoder when enabled), then start the training process.
"""
global label_sttc, label_hyp, label_mi, label_norm, label_cd
label_sttc, label_hyp, label_mi, label_norm, label_cd = [],[],[],[],[]
global X_train, X_val, y_val, y_train, y_test, X_test
initIID()
init_nonIID()
print_json()
if count_flops:
        # Start the internal FLOPs counter; if this raises an error, check the "from pypapi import events" import.
high.start_counters([events.PAPI_FP_OPS,])
global flops_client_forward_total, flops_client_encoder_total, flops_client_backprop_total
flops_client_forward_total, flops_client_encoder_total, flops_client_backprop_total = [], [], []
#X_test = utils.apply_standardizer(X_test, standard_scaler)
init()
if plots: #visualize data
Metrics.load_dataset()
Metrics.plotten()
Metrics.ecg_signals()
global epoch
epoch = 0
global encoder_grad_server
encoder_grad_server = 0
global data_send_per_epoch_total
data_send_per_epoch_total = []
global data_recieved_per_epoch_total
data_recieved_per_epoch_total = []
global batches_abort_rate_total
batches_abort_rate_total = []
global device
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
if (torch.cuda.is_available()):
print("training on gpu")
print("training on,", device)
seed = 0
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
global client
client = Models.Client()
print("Start Client")
client.double().to(device)
global optimizer
#optimizer = SGD(client.parameters(), lr=lr, momentum=0.9)
optimizer = AdamW(client.parameters(), lr=lr)
print("Start Optimizer")
global error
#error = nn.CrossEntropyLoss()
error = nn.BCELoss()
print("Start loss calcu")
global data_send_per_epoch
global data_recieved_per_epoch
data_send_per_epoch = 0
data_recieved_per_epoch = 0
#global scaler
#scaler = MinMaxScaler()
if autoencoder:
global encode
encode = Models.Encode()
print("Start Encoder")
if autoencoder_train == 0:
encode.load_state_dict(torch.load("./convencoder_medical.pth")) # CPU
print("Encoder model loaded")
encode.eval()
print("Start eval")
encode.double().to(device)
global optimizerencode
optimizerencode = Adam(encode.parameters(), lr=lr) ###
if grad_encode:
global grad_decoder
grad_decoder = Models.Grad_Decoder()
#grad_decoder.load_state_dict(torch.load("./grad_decoder_medical.pth"))
grad_decoder.double().to(device)
print("Grad decoder model loaded")
global optimizer_grad_decoder
optimizer_grad_decoder = Adam(grad_decoder.parameters(), lr=0.0001)
global error_grad_autoencoder
error_grad_autoencoder = nn.MSELoss()
s = socket.socket()
print("Start socket connect")
s.connect((host, port))
print("Socket connect success, to.", host, port)
if pretrain_this_client:
print("Pretrain active")
for a in range(pretrain_epochs):
train_epoch(s, pretraining=1)
val_stage(s, pretraining=1)
initial_weights = client.state_dict()
send_msg(s, 2, initial_weights)
send_msg(s, 3, 0)
epoch = 0
#initialize_model(s)
serverHandler(s)
if __name__ == '__main__':
main()
| 39.359206
| 213
| 0.621555
|
97b9a1329057768c4fde768d1a3502f73041fd46
| 192
|
py
|
Python
|
Chapter 8/hail2.py
|
PacktPublishing/Mastering-IPython-4
|
d752f7ba38e0c9399a83d57da406fe26152f272b
|
[
"MIT"
] | 22
|
2016-06-07T07:52:35.000Z
|
2021-11-08T13:12:21.000Z
|
Chapter 8/hail2.py
|
PacktPublishing/Mastering-IPython-4
|
d752f7ba38e0c9399a83d57da406fe26152f272b
|
[
"MIT"
] | 2
|
2016-05-23T08:20:54.000Z
|
2018-07-02T08:21:32.000Z
|
Chapter 8/hail2.py
|
PacktPublishing/Mastering-IPython-4
|
d752f7ba38e0c9399a83d57da406fe26152f272b
|
[
"MIT"
] | 27
|
2016-05-23T08:19:51.000Z
|
2021-08-31T02:46:00.000Z
|
def f(n):
    # Return the number of hailstone (Collatz) steps needed for n to reach 1.
    curr = n
    tmp = 0
    while curr != 1:
        tmp = tmp + 1
        if curr % 2 == 1:
            curr = 3 * curr + 1
        else:
            curr = curr // 2  # integer division keeps curr an int in Python 3
    return tmp
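# Illustrative usage sketch (not part of the original hail2.py listing):
# print the hailstone step count for the first ten starting values.
if __name__ == '__main__':
    for n in range(1, 11):
        print(n, f(n))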
| 16
| 31
| 0.369792
|
32e324c82d84bc04e7a8035bc2b11e130e7ffb8a
| 343
|
py
|
Python
|
SRC/Chapter_07-Using-A-Database/02_DB_conn.py
|
archeranimesh/HeadFirstPython
|
8e0c84871328a6bf3a8d723341be56298440f29b
|
[
"MIT"
] | 1
|
2020-12-26T19:37:14.000Z
|
2020-12-26T19:37:14.000Z
|
SRC/Chapter_07-Using-A-Database/02_DB_conn.py
|
archeranimesh/HeadFirstPython
|
8e0c84871328a6bf3a8d723341be56298440f29b
|
[
"MIT"
] | null | null | null |
SRC/Chapter_07-Using-A-Database/02_DB_conn.py
|
archeranimesh/HeadFirstPython
|
8e0c84871328a6bf3a8d723341be56298440f29b
|
[
"MIT"
] | null | null | null |
import mysql.connector
dbconfig = {
"host": "127.0.0.1",
"user": "vsearch",
"password": "hello",
"database": "vsearchlogDB",
}
conn = mysql.connector.connect(**dbconfig)
cursor = conn.cursor()
_SQL = """describe log"""
cursor.execute(_SQL)
res = cursor.fetchall()
for row in res:
print(row)
cursor.close()
conn.close()
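# Illustrative variant (a sketch, not part of the book's listing): the same
# query with the cursor and connection released even if execute() raises.
# conn = mysql.connector.connect(**dbconfig)
# try:
#     cursor = conn.cursor()
#     try:
#         cursor.execute("""describe log""")
#         for row in cursor.fetchall():
#             print(row)
#     finally:
#         cursor.close()
# finally:
#     conn.close()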
| 16.333333
| 42
| 0.638484
|
7c65f161dff86f5a66f0ccf7802dc3af6662b40d
| 70,148
|
py
|
Python
|
tests/test_connector.py
|
fcomuniz/aiohttp
|
5f03869652a02b373b9078dcc6e00be60ffa9101
|
[
"Apache-2.0"
] | null | null | null |
tests/test_connector.py
|
fcomuniz/aiohttp
|
5f03869652a02b373b9078dcc6e00be60ffa9101
|
[
"Apache-2.0"
] | null | null | null |
tests/test_connector.py
|
fcomuniz/aiohttp
|
5f03869652a02b373b9078dcc6e00be60ffa9101
|
[
"Apache-2.0"
] | null | null | null |
# type: ignore
# Tests of http client with custom Connector
import asyncio
import gc
import hashlib
import platform
import socket
import ssl
import sys
import uuid
from collections import deque
from typing import Any, Optional
from unittest import mock
import pytest
from conftest import needs_unix
from yarl import URL
import aiohttp
from aiohttp import client, web
from aiohttp.client import ClientRequest, ClientTimeout
from aiohttp.client_reqrep import ConnectionKey
from aiohttp.connector import Connection, DNSCacheTable, TCPConnector
from aiohttp.locks import EventResultOrError
from aiohttp.test_utils import make_mocked_coro, unused_port
from aiohttp.tracing import Trace
@pytest.fixture()
def key():
# Connection key
return ConnectionKey("localhost", 80, False, None, None, None, None)
@pytest.fixture
def key2():
# Connection key
return ConnectionKey("localhost", 80, False, None, None, None, None)
@pytest.fixture
def ssl_key():
# Connection key
return ConnectionKey("localhost", 80, True, None, None, None, None)
@pytest.fixture
def unix_server(loop: Any, unix_sockname: Any) -> None:
runners = []
async def go(app):
runner = web.AppRunner(app)
runners.append(runner)
await runner.setup()
site = web.UnixSite(runner, unix_sockname)
await site.start()
yield go
for runner in runners:
loop.run_until_complete(runner.cleanup())
@pytest.fixture
def named_pipe_server(proactor_loop: Any, pipe_name: Any) -> None:
runners = []
async def go(app):
runner = web.AppRunner(app)
runners.append(runner)
await runner.setup()
site = web.NamedPipeSite(runner, pipe_name)
await site.start()
yield go
for runner in runners:
proactor_loop.run_until_complete(runner.cleanup())
def create_mocked_conn(conn_closing_result: Optional[Any] = None, **kwargs: Any):
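    # Helper used throughout these tests: build a mock protocol object with a
    # pre-resolved `closed` future so it can stand in for a real connection.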
assert "loop" not in kwargs
loop = asyncio.get_event_loop()
proto = mock.Mock(**kwargs)
proto.closed = loop.create_future()
proto.closed.set_result(conn_closing_result)
return proto
def test_connection_del(loop: Any) -> None:
connector = mock.Mock()
key = mock.Mock()
protocol = mock.Mock()
loop.set_debug(0)
conn = Connection(connector, key, protocol, loop=loop)
exc_handler = mock.Mock()
loop.set_exception_handler(exc_handler)
with pytest.warns(ResourceWarning):
del conn
gc.collect()
connector._release.assert_called_with(key, protocol, should_close=True)
msg = {
"message": mock.ANY,
"client_connection": mock.ANY,
}
exc_handler.assert_called_with(loop, msg)
def test_connection_del_loop_debug(loop: Any) -> None:
connector = mock.Mock()
key = mock.Mock()
protocol = mock.Mock()
loop.set_debug(1)
conn = Connection(connector, key, protocol, loop=loop)
exc_handler = mock.Mock()
loop.set_exception_handler(exc_handler)
with pytest.warns(ResourceWarning):
del conn
gc.collect()
msg = {
"message": mock.ANY,
"client_connection": mock.ANY,
"source_traceback": mock.ANY,
}
exc_handler.assert_called_with(loop, msg)
def test_connection_del_loop_closed(loop: Any) -> None:
connector = mock.Mock()
key = mock.Mock()
protocol = mock.Mock()
loop.set_debug(1)
conn = Connection(connector, key, protocol, loop=loop)
exc_handler = mock.Mock()
loop.set_exception_handler(exc_handler)
loop.close()
with pytest.warns(ResourceWarning):
del conn
gc.collect()
assert not connector._release.called
assert not exc_handler.called
async def test_del(loop: Any) -> None:
conn = aiohttp.BaseConnector()
proto = create_mocked_conn(loop, should_close=False)
conn._release("a", proto)
conns_impl = conn._conns
exc_handler = mock.Mock()
loop.set_exception_handler(exc_handler)
with pytest.warns(ResourceWarning):
del conn
gc.collect()
assert not conns_impl
proto.close.assert_called_with()
msg = {
"connector": mock.ANY, # conn was deleted
"connections": mock.ANY,
"message": "Unclosed connector",
}
if loop.get_debug():
msg["source_traceback"] = mock.ANY
exc_handler.assert_called_with(loop, msg)
@pytest.mark.xfail
async def test_del_with_scheduled_cleanup(loop: Any) -> None:
loop.set_debug(True)
conn = aiohttp.BaseConnector(keepalive_timeout=0.01)
transp = create_mocked_conn(loop)
conn._conns["a"] = [(transp, 123)]
conns_impl = conn._conns
exc_handler = mock.Mock()
loop.set_exception_handler(exc_handler)
with pytest.warns(ResourceWarning):
        # `del` alone does not free the connector here because the event loop
        # still holds a strong reference to its scheduled instance method
del conn
await asyncio.sleep(0.01)
gc.collect()
assert not conns_impl
transp.close.assert_called_with()
msg = {"connector": mock.ANY, "message": "Unclosed connector"} # conn was deleted
if loop.get_debug():
msg["source_traceback"] = mock.ANY
exc_handler.assert_called_with(loop, msg)
@pytest.mark.skipif(
sys.implementation.name != "cpython", reason="CPython GC is required for the test"
)
def test_del_with_closed_loop(loop: Any) -> None:
async def make_conn():
return aiohttp.BaseConnector()
conn = loop.run_until_complete(make_conn())
transp = create_mocked_conn(loop)
conn._conns["a"] = [(transp, 123)]
conns_impl = conn._conns
exc_handler = mock.Mock()
loop.set_exception_handler(exc_handler)
loop.close()
with pytest.warns(ResourceWarning):
del conn
gc.collect()
assert not conns_impl
assert not transp.close.called
assert exc_handler.called
async def test_del_empty_connector(loop: Any) -> None:
conn = aiohttp.BaseConnector()
exc_handler = mock.Mock()
loop.set_exception_handler(exc_handler)
del conn
assert not exc_handler.called
async def test_create_conn() -> None:
conn = aiohttp.BaseConnector()
with pytest.raises(NotImplementedError):
await conn._create_connection(object(), [], object())
async def test_async_context_manager(loop: Any) -> None:
conn = aiohttp.BaseConnector()
async with conn as c:
assert conn is c
assert conn.closed
async def test_close() -> None:
proto = create_mocked_conn()
conn = aiohttp.BaseConnector()
assert not conn.closed
conn._conns[("host", 8080, False)] = [(proto, object())]
await conn.close()
assert not conn._conns
assert proto.close.called
assert conn.closed
async def test_get(loop: Any) -> None:
conn = aiohttp.BaseConnector()
assert conn._get(1) is None
proto = create_mocked_conn(loop)
conn._conns[1] = [(proto, loop.time())]
assert conn._get(1) == proto
await conn.close()
async def test_get_unconnected_proto(loop: Any) -> None:
conn = aiohttp.BaseConnector()
key = ConnectionKey("localhost", 80, False, None, None, None, None)
assert conn._get(key) is None
proto = create_mocked_conn(loop)
conn._conns[key] = [(proto, loop.time())]
assert conn._get(key) == proto
assert conn._get(key) is None
conn._conns[key] = [(proto, loop.time())]
proto.is_connected = lambda *args: False
assert conn._get(key) is None
await conn.close()
async def test_get_unconnected_proto_ssl(loop: Any) -> None:
conn = aiohttp.BaseConnector()
key = ConnectionKey("localhost", 80, True, None, None, None, None)
assert conn._get(key) is None
proto = create_mocked_conn(loop)
conn._conns[key] = [(proto, loop.time())]
assert conn._get(key) == proto
assert conn._get(key) is None
conn._conns[key] = [(proto, loop.time())]
proto.is_connected = lambda *args: False
assert conn._get(key) is None
await conn.close()
async def test_get_expired(loop: Any) -> None:
conn = aiohttp.BaseConnector()
key = ConnectionKey("localhost", 80, False, None, None, None, None)
assert conn._get(key) is None
proto = create_mocked_conn(loop)
conn._conns[key] = [(proto, loop.time() - 1000)]
assert conn._get(key) is None
assert not conn._conns
await conn.close()
async def test_get_expired_ssl(loop: Any) -> None:
conn = aiohttp.BaseConnector(enable_cleanup_closed=True)
key = ConnectionKey("localhost", 80, True, None, None, None, None)
assert conn._get(key) is None
proto = create_mocked_conn(loop)
transport = proto.transport
conn._conns[key] = [(proto, loop.time() - 1000)]
assert conn._get(key) is None
assert not conn._conns
assert conn._cleanup_closed_transports == [transport]
await conn.close()
async def test_release_acquired(key: Any) -> None:
proto = create_mocked_conn()
conn = aiohttp.BaseConnector(limit=5)
conn._release_waiter = mock.Mock()
conn._acquired.add(proto)
conn._acquired_per_host[key].add(proto)
conn._release_acquired(key, proto)
assert 0 == len(conn._acquired)
assert 0 == len(conn._acquired_per_host)
assert conn._release_waiter.called
conn._release_acquired(key, proto)
assert 0 == len(conn._acquired)
assert 0 == len(conn._acquired_per_host)
await conn.close()
async def test_release_acquired_closed(key: Any) -> None:
proto = create_mocked_conn()
conn = aiohttp.BaseConnector(limit=5)
conn._release_waiter = mock.Mock()
conn._acquired.add(proto)
conn._acquired_per_host[key].add(proto)
conn._closed = True
conn._release_acquired(key, proto)
assert 1 == len(conn._acquired)
assert 1 == len(conn._acquired_per_host[key])
assert not conn._release_waiter.called
await conn.close()
async def test_release(loop: Any, key: Any) -> None:
conn = aiohttp.BaseConnector()
conn._release_waiter = mock.Mock()
proto = create_mocked_conn(loop, should_close=False)
conn._acquired.add(proto)
conn._acquired_per_host[key].add(proto)
conn._release(key, proto)
assert conn._release_waiter.called
assert conn._cleanup_handle is not None
assert conn._conns[key][0][0] == proto
assert conn._conns[key][0][1] == pytest.approx(loop.time(), abs=0.1)
assert not conn._cleanup_closed_transports
await conn.close()
async def test_release_ssl_transport(loop: Any, ssl_key: Any) -> None:
conn = aiohttp.BaseConnector(enable_cleanup_closed=True)
conn._release_waiter = mock.Mock()
proto = create_mocked_conn(loop)
transport = proto.transport
conn._acquired.add(proto)
conn._acquired_per_host[ssl_key].add(proto)
conn._release(ssl_key, proto, should_close=True)
assert conn._cleanup_closed_transports == [transport]
await conn.close()
async def test_release_already_closed() -> None:
conn = aiohttp.BaseConnector()
proto = create_mocked_conn()
key = 1
conn._acquired.add(proto)
await conn.close()
conn._release_waiters = mock.Mock()
conn._release_acquired = mock.Mock()
conn._release(key, proto)
assert not conn._release_waiters.called
assert not conn._release_acquired.called
async def test_release_waiter_no_limit(loop: Any, key: Any, key2: Any) -> None:
# limit is 0
conn = aiohttp.BaseConnector(limit=0)
w = mock.Mock()
w.done.return_value = False
conn._waiters[key].append(w)
conn._release_waiter()
assert len(conn._waiters[key]) == 0
assert w.done.called
await conn.close()
async def test_release_waiter_first_available(loop: Any, key: Any, key2: Any) -> None:
conn = aiohttp.BaseConnector()
w1, w2 = mock.Mock(), mock.Mock()
w1.done.return_value = False
w2.done.return_value = False
conn._waiters[key].append(w2)
conn._waiters[key2].append(w1)
conn._release_waiter()
assert (
w1.set_result.called
and not w2.set_result.called
or not w1.set_result.called
and w2.set_result.called
)
await conn.close()
async def test_release_waiter_release_first(loop: Any, key: Any, key2: Any) -> None:
conn = aiohttp.BaseConnector(limit=1)
w1, w2 = mock.Mock(), mock.Mock()
w1.done.return_value = False
w2.done.return_value = False
conn._waiters[key] = deque([w1, w2])
conn._release_waiter()
assert w1.set_result.called
assert not w2.set_result.called
await conn.close()
async def test_release_waiter_skip_done_waiter(loop: Any, key: Any, key2: Any) -> None:
conn = aiohttp.BaseConnector(limit=1)
w1, w2 = mock.Mock(), mock.Mock()
w1.done.return_value = True
w2.done.return_value = False
conn._waiters[key] = deque([w1, w2])
conn._release_waiter()
assert not w1.set_result.called
assert w2.set_result.called
await conn.close()
async def test_release_waiter_per_host(loop: Any, key: Any, key2: Any) -> None:
# no limit
conn = aiohttp.BaseConnector(limit=0, limit_per_host=2)
w1, w2 = mock.Mock(), mock.Mock()
w1.done.return_value = False
w2.done.return_value = False
conn._waiters[key] = deque([w1])
conn._waiters[key2] = deque([w2])
conn._release_waiter()
assert (w1.set_result.called and not w2.set_result.called) or (
not w1.set_result.called and w2.set_result.called
)
await conn.close()
async def test_release_waiter_no_available(loop: Any, key: Any, key2: Any) -> None:
# limit is 0
conn = aiohttp.BaseConnector(limit=0)
w = mock.Mock()
w.done.return_value = False
conn._waiters[key].append(w)
conn._available_connections = mock.Mock(return_value=0)
conn._release_waiter()
assert len(conn._waiters) == 1
assert not w.done.called
await conn.close()
async def test_release_close(key: Any) -> None:
conn = aiohttp.BaseConnector()
proto = create_mocked_conn(should_close=True)
conn._acquired.add(proto)
conn._release(key, proto)
assert not conn._conns
assert proto.close.called
async def test__drop_acquire_per_host1(loop: Any) -> None:
conn = aiohttp.BaseConnector()
conn._drop_acquired_per_host(123, 456)
assert len(conn._acquired_per_host) == 0
async def test__drop_acquire_per_host2(loop: Any) -> None:
conn = aiohttp.BaseConnector()
conn._acquired_per_host[123].add(456)
conn._drop_acquired_per_host(123, 456)
assert len(conn._acquired_per_host) == 0
async def test__drop_acquire_per_host3(loop: Any) -> None:
conn = aiohttp.BaseConnector()
conn._acquired_per_host[123].add(456)
conn._acquired_per_host[123].add(789)
conn._drop_acquired_per_host(123, 456)
assert len(conn._acquired_per_host) == 1
assert conn._acquired_per_host[123] == {789}
async def test_tcp_connector_certificate_error(loop: Any) -> None:
req = ClientRequest("GET", URL("https://127.0.0.1:443"), loop=loop)
async def certificate_error(*args, **kwargs):
raise ssl.CertificateError
conn = aiohttp.TCPConnector()
conn._loop.create_connection = certificate_error
with pytest.raises(aiohttp.ClientConnectorCertificateError) as ctx:
await conn.connect(req, [], ClientTimeout())
assert isinstance(ctx.value, ssl.CertificateError)
assert isinstance(ctx.value.certificate_error, ssl.CertificateError)
assert isinstance(ctx.value, aiohttp.ClientSSLError)
async def test_tcp_connector_multiple_hosts_errors(loop: Any) -> None:
conn = aiohttp.TCPConnector()
ip1 = "192.168.1.1"
ip2 = "192.168.1.2"
ip3 = "192.168.1.3"
ip4 = "192.168.1.4"
ip5 = "192.168.1.5"
ips = [ip1, ip2, ip3, ip4, ip5]
ips_tried = []
fingerprint = hashlib.sha256(b"foo").digest()
req = ClientRequest(
"GET",
URL("https://mocked.host"),
ssl=aiohttp.Fingerprint(fingerprint),
loop=loop,
)
async def _resolve_host(host, port, traces=None):
return [
{
"hostname": host,
"host": ip,
"port": port,
"family": socket.AF_INET,
"proto": 0,
"flags": socket.AI_NUMERICHOST,
}
for ip in ips
]
conn._resolve_host = _resolve_host
os_error = certificate_error = ssl_error = fingerprint_error = False
connected = False
async def create_connection(*args, **kwargs):
nonlocal os_error, certificate_error, ssl_error, fingerprint_error
nonlocal connected
ip = args[1]
ips_tried.append(ip)
if ip == ip1:
os_error = True
raise OSError
if ip == ip2:
certificate_error = True
raise ssl.CertificateError
if ip == ip3:
ssl_error = True
raise ssl.SSLError
if ip == ip4:
fingerprint_error = True
tr = create_mocked_conn(loop)
pr = create_mocked_conn(loop)
def get_extra_info(param):
if param == "sslcontext":
return True
if param == "ssl_object":
s = create_mocked_conn(loop)
s.getpeercert.return_value = b"not foo"
return s
if param == "peername":
return ("192.168.1.5", 12345)
assert False, param
tr.get_extra_info = get_extra_info
return tr, pr
if ip == ip5:
connected = True
tr = create_mocked_conn(loop)
pr = create_mocked_conn(loop)
def get_extra_info(param):
if param == "sslcontext":
return True
if param == "ssl_object":
s = create_mocked_conn(loop)
s.getpeercert.return_value = b"foo"
return s
assert False
tr.get_extra_info = get_extra_info
return tr, pr
assert False
conn._loop.create_connection = create_connection
established_connection = await conn.connect(req, [], ClientTimeout())
assert ips == ips_tried
assert os_error
assert certificate_error
assert ssl_error
assert fingerprint_error
assert connected
established_connection.close()
async def test_tcp_connector_resolve_host(loop: Any) -> None:
conn = aiohttp.TCPConnector(use_dns_cache=True)
res = await conn._resolve_host("localhost", 8080)
assert res
for rec in res:
if rec["family"] == socket.AF_INET:
assert rec["host"] == "127.0.0.1"
assert rec["hostname"] == "localhost"
assert rec["port"] == 8080
elif rec["family"] == socket.AF_INET6:
assert rec["hostname"] == "localhost"
assert rec["port"] == 8080
if platform.system() == "Darwin":
assert rec["host"] in ("::1", "fe80::1", "fe80::1%lo0")
else:
assert rec["host"] == "::1"
@pytest.fixture
def dns_response(loop: Any):
async def coro():
# simulates a network operation
await asyncio.sleep(0)
return ["127.0.0.1"]
return coro
async def test_tcp_connector_dns_cache_not_expired(
loop: Any, dns_response: Any
) -> None:
with mock.patch("aiohttp.connector.DefaultResolver") as m_resolver:
conn = aiohttp.TCPConnector(use_dns_cache=True, ttl_dns_cache=10)
m_resolver().resolve.return_value = dns_response()
await conn._resolve_host("localhost", 8080)
await conn._resolve_host("localhost", 8080)
m_resolver().resolve.assert_called_once_with("localhost", 8080, family=0)
async def test_tcp_connector_dns_cache_forever(loop: Any, dns_response: Any) -> None:
with mock.patch("aiohttp.connector.DefaultResolver") as m_resolver:
conn = aiohttp.TCPConnector(use_dns_cache=True, ttl_dns_cache=10)
m_resolver().resolve.return_value = dns_response()
await conn._resolve_host("localhost", 8080)
await conn._resolve_host("localhost", 8080)
m_resolver().resolve.assert_called_once_with("localhost", 8080, family=0)
async def test_tcp_connector_use_dns_cache_disabled(
loop: Any, dns_response: Any
) -> None:
with mock.patch("aiohttp.connector.DefaultResolver") as m_resolver:
conn = aiohttp.TCPConnector(use_dns_cache=False)
m_resolver().resolve.side_effect = [dns_response(), dns_response()]
await conn._resolve_host("localhost", 8080)
await conn._resolve_host("localhost", 8080)
m_resolver().resolve.assert_has_calls(
[
mock.call("localhost", 8080, family=0),
mock.call("localhost", 8080, family=0),
]
)
async def test_tcp_connector_dns_throttle_requests(
loop: Any, dns_response: Any
) -> None:
with mock.patch("aiohttp.connector.DefaultResolver") as m_resolver:
conn = aiohttp.TCPConnector(use_dns_cache=True, ttl_dns_cache=10)
m_resolver().resolve.return_value = dns_response()
loop.create_task(conn._resolve_host("localhost", 8080))
loop.create_task(conn._resolve_host("localhost", 8080))
await asyncio.sleep(0)
m_resolver().resolve.assert_called_once_with("localhost", 8080, family=0)
async def test_tcp_connector_dns_throttle_requests_exception_spread(loop: Any) -> None:
with mock.patch("aiohttp.connector.DefaultResolver") as m_resolver:
conn = aiohttp.TCPConnector(use_dns_cache=True, ttl_dns_cache=10)
e = Exception()
m_resolver().resolve.side_effect = e
r1 = loop.create_task(conn._resolve_host("localhost", 8080))
r2 = loop.create_task(conn._resolve_host("localhost", 8080))
await asyncio.sleep(0)
assert r1.exception() == e
assert r2.exception() == e
async def test_tcp_connector_dns_throttle_requests_cancelled_when_close(
loop: Any, dns_response: Any
) -> None:
with mock.patch("aiohttp.connector.DefaultResolver") as m_resolver:
conn = aiohttp.TCPConnector(use_dns_cache=True, ttl_dns_cache=10)
m_resolver().resolve.return_value = dns_response()
loop.create_task(conn._resolve_host("localhost", 8080))
f = loop.create_task(conn._resolve_host("localhost", 8080))
await asyncio.sleep(0)
await conn.close()
with pytest.raises(asyncio.CancelledError):
await f
@pytest.fixture
def dns_response_error(loop: Any):
async def coro():
# simulates a network operation
await asyncio.sleep(0)
raise socket.gaierror(-3, "Temporary failure in name resolution")
return coro
async def test_tcp_connector_cancel_dns_error_captured(
loop: Any, dns_response_error: Any
) -> None:
exception_handler_called = False
def exception_handler(loop, context):
nonlocal exception_handler_called
exception_handler_called = True
loop.set_exception_handler(mock.Mock(side_effect=exception_handler))
with mock.patch("aiohttp.connector.DefaultResolver") as m_resolver:
req = ClientRequest(
method="GET", url=URL("http://temporary-failure:80"), loop=loop
)
conn = aiohttp.TCPConnector(
use_dns_cache=False,
)
m_resolver().resolve.return_value = dns_response_error()
f = loop.create_task(conn._create_direct_connection(req, [], ClientTimeout(0)))
await asyncio.sleep(0)
f.cancel()
with pytest.raises(asyncio.CancelledError):
await f
gc.collect()
assert exception_handler_called is False
async def test_tcp_connector_dns_tracing(loop: Any, dns_response: Any) -> None:
session = mock.Mock()
trace_config_ctx = mock.Mock()
on_dns_resolvehost_start = mock.Mock(side_effect=make_mocked_coro(mock.Mock()))
on_dns_resolvehost_end = mock.Mock(side_effect=make_mocked_coro(mock.Mock()))
on_dns_cache_hit = mock.Mock(side_effect=make_mocked_coro(mock.Mock()))
on_dns_cache_miss = mock.Mock(side_effect=make_mocked_coro(mock.Mock()))
trace_config = aiohttp.TraceConfig(
trace_config_ctx_factory=mock.Mock(return_value=trace_config_ctx)
)
trace_config.on_dns_resolvehost_start.append(on_dns_resolvehost_start)
trace_config.on_dns_resolvehost_end.append(on_dns_resolvehost_end)
trace_config.on_dns_cache_hit.append(on_dns_cache_hit)
trace_config.on_dns_cache_miss.append(on_dns_cache_miss)
trace_config.freeze()
traces = [Trace(session, trace_config, trace_config.trace_config_ctx())]
with mock.patch("aiohttp.connector.DefaultResolver") as m_resolver:
conn = aiohttp.TCPConnector(use_dns_cache=True, ttl_dns_cache=10)
m_resolver().resolve.return_value = dns_response()
await conn._resolve_host("localhost", 8080, traces=traces)
on_dns_resolvehost_start.assert_called_once_with(
session,
trace_config_ctx,
aiohttp.TraceDnsResolveHostStartParams("localhost"),
)
on_dns_resolvehost_end.assert_called_once_with(
session, trace_config_ctx, aiohttp.TraceDnsResolveHostEndParams("localhost")
)
on_dns_cache_miss.assert_called_once_with(
session, trace_config_ctx, aiohttp.TraceDnsCacheMissParams("localhost")
)
assert not on_dns_cache_hit.called
await conn._resolve_host("localhost", 8080, traces=traces)
on_dns_cache_hit.assert_called_once_with(
session, trace_config_ctx, aiohttp.TraceDnsCacheHitParams("localhost")
)
async def test_tcp_connector_dns_tracing_cache_disabled(
loop: Any, dns_response: Any
) -> None:
session = mock.Mock()
trace_config_ctx = mock.Mock()
on_dns_resolvehost_start = mock.Mock(side_effect=make_mocked_coro(mock.Mock()))
on_dns_resolvehost_end = mock.Mock(side_effect=make_mocked_coro(mock.Mock()))
trace_config = aiohttp.TraceConfig(
trace_config_ctx_factory=mock.Mock(return_value=trace_config_ctx)
)
trace_config.on_dns_resolvehost_start.append(on_dns_resolvehost_start)
trace_config.on_dns_resolvehost_end.append(on_dns_resolvehost_end)
trace_config.freeze()
traces = [Trace(session, trace_config, trace_config.trace_config_ctx())]
with mock.patch("aiohttp.connector.DefaultResolver") as m_resolver:
conn = aiohttp.TCPConnector(use_dns_cache=False)
m_resolver().resolve.side_effect = [dns_response(), dns_response()]
await conn._resolve_host("localhost", 8080, traces=traces)
await conn._resolve_host("localhost", 8080, traces=traces)
on_dns_resolvehost_start.assert_has_calls(
[
mock.call(
session,
trace_config_ctx,
aiohttp.TraceDnsResolveHostStartParams("localhost"),
),
mock.call(
session,
trace_config_ctx,
aiohttp.TraceDnsResolveHostStartParams("localhost"),
),
]
)
on_dns_resolvehost_end.assert_has_calls(
[
mock.call(
session,
trace_config_ctx,
aiohttp.TraceDnsResolveHostEndParams("localhost"),
),
mock.call(
session,
trace_config_ctx,
aiohttp.TraceDnsResolveHostEndParams("localhost"),
),
]
)
async def test_tcp_connector_dns_tracing_throttle_requests(
loop: Any, dns_response: Any
) -> None:
session = mock.Mock()
trace_config_ctx = mock.Mock()
on_dns_cache_hit = mock.Mock(side_effect=make_mocked_coro(mock.Mock()))
on_dns_cache_miss = mock.Mock(side_effect=make_mocked_coro(mock.Mock()))
trace_config = aiohttp.TraceConfig(
trace_config_ctx_factory=mock.Mock(return_value=trace_config_ctx)
)
trace_config.on_dns_cache_hit.append(on_dns_cache_hit)
trace_config.on_dns_cache_miss.append(on_dns_cache_miss)
trace_config.freeze()
traces = [Trace(session, trace_config, trace_config.trace_config_ctx())]
with mock.patch("aiohttp.connector.DefaultResolver") as m_resolver:
conn = aiohttp.TCPConnector(use_dns_cache=True, ttl_dns_cache=10)
m_resolver().resolve.return_value = dns_response()
loop.create_task(conn._resolve_host("localhost", 8080, traces=traces))
loop.create_task(conn._resolve_host("localhost", 8080, traces=traces))
await asyncio.sleep(0)
on_dns_cache_hit.assert_called_once_with(
session, trace_config_ctx, aiohttp.TraceDnsCacheHitParams("localhost")
)
on_dns_cache_miss.assert_called_once_with(
session, trace_config_ctx, aiohttp.TraceDnsCacheMissParams("localhost")
)
async def test_dns_error(loop: Any) -> None:
connector = aiohttp.TCPConnector()
connector._resolve_host = make_mocked_coro(
raise_exception=OSError("dont take it serious")
)
req = ClientRequest("GET", URL("http://www.python.org"), loop=loop)
with pytest.raises(aiohttp.ClientConnectorError):
await connector.connect(req, [], ClientTimeout())
async def test_get_pop_empty_conns(loop: Any) -> None:
# see issue #473
conn = aiohttp.BaseConnector()
key = ("127.0.0.1", 80, False)
conn._conns[key] = []
proto = conn._get(key)
assert proto is None
assert not conn._conns
async def test_release_close_do_not_add_to_pool(loop: Any, key: Any) -> None:
# see issue #473
conn = aiohttp.BaseConnector()
proto = create_mocked_conn(loop, should_close=True)
conn._acquired.add(proto)
conn._release(key, proto)
assert not conn._conns
async def test_release_close_do_not_delete_existing_connections(
loop: Any, key: Any
) -> None:
proto1 = create_mocked_conn(loop)
conn = aiohttp.BaseConnector()
conn._conns[key] = [(proto1, 1)]
proto = create_mocked_conn(loop, should_close=True)
conn._acquired.add(proto)
conn._release(key, proto)
assert conn._conns[key] == [(proto1, 1)]
assert proto.close.called
await conn.close()
async def test_release_not_started(loop: Any) -> None:
conn = aiohttp.BaseConnector()
proto = create_mocked_conn(should_close=False)
key = 1
conn._acquired.add(proto)
conn._release(key, proto)
# assert conn._conns == {1: [(proto, 10)]}
rec = conn._conns[1]
assert rec[0][0] == proto
assert rec[0][1] == pytest.approx(loop.time(), abs=0.05)
assert not proto.close.called
await conn.close()
async def test_release_not_opened(loop: Any, key: Any) -> None:
conn = aiohttp.BaseConnector()
proto = create_mocked_conn(loop)
conn._acquired.add(proto)
conn._release(key, proto)
assert proto.close.called
async def test_connect(loop: Any, key: Any) -> None:
proto = create_mocked_conn(loop)
proto.is_connected.return_value = True
req = ClientRequest("GET", URL("http://localhost:80"), loop=loop)
conn = aiohttp.BaseConnector()
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = create_mocked_conn(loop)
conn._create_connection.return_value = loop.create_future()
conn._create_connection.return_value.set_result(proto)
connection = await conn.connect(req, [], ClientTimeout())
assert not conn._create_connection.called
assert connection._protocol is proto
assert connection.transport is proto.transport
assert isinstance(connection, Connection)
connection.close()
async def test_connect_tracing(loop: Any) -> None:
session = mock.Mock()
trace_config_ctx = mock.Mock()
on_connection_create_start = mock.Mock(side_effect=make_mocked_coro(mock.Mock()))
on_connection_create_end = mock.Mock(side_effect=make_mocked_coro(mock.Mock()))
trace_config = aiohttp.TraceConfig(
trace_config_ctx_factory=mock.Mock(return_value=trace_config_ctx)
)
trace_config.on_connection_create_start.append(on_connection_create_start)
trace_config.on_connection_create_end.append(on_connection_create_end)
trace_config.freeze()
traces = [Trace(session, trace_config, trace_config.trace_config_ctx())]
proto = create_mocked_conn(loop)
proto.is_connected.return_value = True
req = ClientRequest("GET", URL("http://host:80"), loop=loop)
conn = aiohttp.BaseConnector()
conn._create_connection = mock.Mock()
conn._create_connection.return_value = loop.create_future()
conn._create_connection.return_value.set_result(proto)
conn2 = await conn.connect(req, traces, ClientTimeout())
conn2.release()
on_connection_create_start.assert_called_with(
session, trace_config_ctx, aiohttp.TraceConnectionCreateStartParams()
)
on_connection_create_end.assert_called_with(
session, trace_config_ctx, aiohttp.TraceConnectionCreateEndParams()
)
async def test_close_during_connect(loop: Any) -> None:
proto = create_mocked_conn(loop)
proto.is_connected.return_value = True
fut = loop.create_future()
req = ClientRequest("GET", URL("http://host:80"), loop=loop)
conn = aiohttp.BaseConnector()
conn._create_connection = mock.Mock()
conn._create_connection.return_value = fut
task = loop.create_task(conn.connect(req, None, ClientTimeout()))
await asyncio.sleep(0)
await conn.close()
fut.set_result(proto)
with pytest.raises(aiohttp.ClientConnectionError):
await task
assert proto.close.called
async def test_ctor_cleanup() -> None:
loop = mock.Mock()
loop.time.return_value = 1.5
conn = aiohttp.BaseConnector(keepalive_timeout=10, enable_cleanup_closed=True)
assert conn._cleanup_handle is None
assert conn._cleanup_closed_handle is not None
async def test_cleanup(key: Any) -> None:
testset = {
key: [(mock.Mock(), 10), (mock.Mock(), 300)],
}
testset[key][0][0].is_connected.return_value = True
testset[key][1][0].is_connected.return_value = False
loop = mock.Mock()
loop.time.return_value = 300
conn = aiohttp.BaseConnector()
conn._conns = testset
existing_handle = conn._cleanup_handle = mock.Mock()
conn._cleanup()
assert existing_handle.cancel.called
assert conn._conns == {}
assert conn._cleanup_handle is None
async def test_cleanup_close_ssl_transport(loop: Any, ssl_key: Any) -> None:
proto = create_mocked_conn(loop)
transport = proto.transport
testset = {ssl_key: [(proto, 10)]}
loop = mock.Mock()
loop.time.return_value = asyncio.get_event_loop().time() + 300
conn = aiohttp.BaseConnector(enable_cleanup_closed=True)
conn._loop = loop
conn._conns = testset
existing_handle = conn._cleanup_handle = mock.Mock()
conn._cleanup()
assert existing_handle.cancel.called
assert conn._conns == {}
assert conn._cleanup_closed_transports == [transport]
async def test_cleanup2(loop: Any) -> None:
testset = {1: [(create_mocked_conn(), 300)]}
testset[1][0][0].is_connected.return_value = True
conn = aiohttp.BaseConnector(keepalive_timeout=10)
conn._loop = mock.Mock()
conn._loop.time.return_value = 300
conn._conns = testset
conn._cleanup()
assert conn._conns == testset
assert conn._cleanup_handle is not None
conn._loop.call_at.assert_called_with(310, mock.ANY, mock.ANY)
await conn.close()
async def test_cleanup3(loop: Any, key: Any) -> None:
testset = {
key: [(create_mocked_conn(loop), 290.1), (create_mocked_conn(loop), 305.1)]
}
testset[key][0][0].is_connected.return_value = True
conn = aiohttp.BaseConnector(keepalive_timeout=10)
conn._loop = mock.Mock()
conn._loop.time.return_value = 308.5
conn._conns = testset
conn._cleanup()
assert conn._conns == {key: [testset[key][1]]}
assert conn._cleanup_handle is not None
conn._loop.call_at.assert_called_with(319, mock.ANY, mock.ANY)
await conn.close()
async def test_cleanup_closed(loop: Any, mocker: Any) -> None:
if not hasattr(loop, "__dict__"):
pytest.skip("can not override loop attributes")
mocker.spy(loop, "call_at")
conn = aiohttp.BaseConnector(enable_cleanup_closed=True)
tr = mock.Mock()
conn._cleanup_closed_handle = cleanup_closed_handle = mock.Mock()
conn._cleanup_closed_transports = [tr]
conn._cleanup_closed()
assert tr.abort.called
assert not conn._cleanup_closed_transports
assert loop.call_at.called
assert cleanup_closed_handle.cancel.called
async def test_cleanup_closed_disabled(loop: Any, mocker: Any) -> None:
conn = aiohttp.BaseConnector(enable_cleanup_closed=False)
tr = mock.Mock()
conn._cleanup_closed_transports = [tr]
conn._cleanup_closed()
assert tr.abort.called
assert not conn._cleanup_closed_transports
async def test_tcp_connector_ctor(loop: Any) -> None:
conn = aiohttp.TCPConnector()
assert conn._ssl is None
assert conn.use_dns_cache
assert conn.family == 0
async def test_invalid_ssl_param() -> None:
with pytest.raises(TypeError):
aiohttp.TCPConnector(ssl=object())
async def test_tcp_connector_ctor_fingerprint_valid(loop: Any) -> None:
valid = aiohttp.Fingerprint(hashlib.sha256(b"foo").digest())
conn = aiohttp.TCPConnector(ssl=valid)
assert conn._ssl is valid
async def test_insecure_fingerprint_md5(loop: Any) -> None:
with pytest.raises(ValueError):
aiohttp.TCPConnector(ssl=aiohttp.Fingerprint(hashlib.md5(b"foo").digest()))
async def test_insecure_fingerprint_sha1(loop: Any) -> None:
with pytest.raises(ValueError):
aiohttp.TCPConnector(ssl=aiohttp.Fingerprint(hashlib.sha1(b"foo").digest()))
async def test_tcp_connector_clear_dns_cache(loop: Any) -> None:
conn = aiohttp.TCPConnector()
hosts = ["a", "b"]
conn._cached_hosts.add(("localhost", 123), hosts)
conn._cached_hosts.add(("localhost", 124), hosts)
conn.clear_dns_cache("localhost", 123)
with pytest.raises(KeyError):
conn._cached_hosts.next_addrs(("localhost", 123))
assert conn._cached_hosts.next_addrs(("localhost", 124)) == hosts
    # Removing an already-removed element is OK
conn.clear_dns_cache("localhost", 123)
with pytest.raises(KeyError):
conn._cached_hosts.next_addrs(("localhost", 123))
conn.clear_dns_cache()
with pytest.raises(KeyError):
conn._cached_hosts.next_addrs(("localhost", 124))
async def test_tcp_connector_clear_dns_cache_bad_args(loop: Any) -> None:
conn = aiohttp.TCPConnector()
with pytest.raises(ValueError):
conn.clear_dns_cache("localhost")
async def test_dont_recreate_ssl_context(loop: Any) -> None:
conn = aiohttp.TCPConnector()
ctx = conn._make_ssl_context(True)
assert ctx is conn._make_ssl_context(True)
async def test_dont_recreate_ssl_context2(loop: Any) -> None:
conn = aiohttp.TCPConnector()
ctx = conn._make_ssl_context(False)
assert ctx is conn._make_ssl_context(False)
async def test___get_ssl_context1(loop: Any) -> None:
conn = aiohttp.TCPConnector()
req = mock.Mock()
req.is_ssl.return_value = False
assert conn._get_ssl_context(req) is None
async def test___get_ssl_context2(loop: Any) -> None:
ctx = ssl.SSLContext()
conn = aiohttp.TCPConnector()
req = mock.Mock()
req.is_ssl.return_value = True
req.ssl = ctx
assert conn._get_ssl_context(req) is ctx
async def test___get_ssl_context3(loop: Any) -> None:
ctx = ssl.SSLContext()
conn = aiohttp.TCPConnector(ssl=ctx)
req = mock.Mock()
req.is_ssl.return_value = True
req.ssl = None
assert conn._get_ssl_context(req) is ctx
async def test___get_ssl_context4(loop: Any) -> None:
ctx = ssl.SSLContext()
conn = aiohttp.TCPConnector(ssl=ctx)
req = mock.Mock()
req.is_ssl.return_value = True
req.ssl = False
assert conn._get_ssl_context(req) is conn._make_ssl_context(False)
async def test___get_ssl_context5(loop: Any) -> None:
ctx = ssl.SSLContext()
conn = aiohttp.TCPConnector(ssl=ctx)
req = mock.Mock()
req.is_ssl.return_value = True
req.ssl = aiohttp.Fingerprint(hashlib.sha256(b"1").digest())
assert conn._get_ssl_context(req) is conn._make_ssl_context(False)
async def test___get_ssl_context6(loop: Any) -> None:
conn = aiohttp.TCPConnector()
req = mock.Mock()
req.is_ssl.return_value = True
req.ssl = None
assert conn._get_ssl_context(req) is conn._make_ssl_context(True)
async def test_close_twice(loop: Any) -> None:
proto = create_mocked_conn(loop)
conn = aiohttp.BaseConnector()
conn._conns[1] = [(proto, object())]
await conn.close()
assert not conn._conns
assert proto.close.called
assert conn.closed
conn._conns = "Invalid" # fill with garbage
await conn.close()
assert conn.closed
async def test_close_cancels_cleanup_handle(loop: Any) -> None:
conn = aiohttp.BaseConnector()
conn._release(1, create_mocked_conn(should_close=False))
assert conn._cleanup_handle is not None
await conn.close()
assert conn._cleanup_handle is None
async def test_close_abort_closed_transports(loop: Any) -> None:
tr = mock.Mock()
conn = aiohttp.BaseConnector()
conn._cleanup_closed_transports.append(tr)
await conn.close()
assert not conn._cleanup_closed_transports
assert tr.abort.called
assert conn.closed
async def test_close_cancels_cleanup_closed_handle(loop: Any) -> None:
conn = aiohttp.BaseConnector(enable_cleanup_closed=True)
assert conn._cleanup_closed_handle is not None
await conn.close()
assert conn._cleanup_closed_handle is None
async def test_ctor_with_default_loop(loop: Any) -> None:
conn = aiohttp.BaseConnector()
assert loop is conn._loop
async def test_connect_with_limit(loop: Any, key: Any) -> None:
proto = create_mocked_conn(loop)
proto.is_connected.return_value = True
req = ClientRequest(
"GET", URL("http://localhost:80"), loop=loop, response_class=mock.Mock()
)
conn = aiohttp.BaseConnector(limit=1)
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = mock.Mock()
conn._create_connection.return_value = loop.create_future()
conn._create_connection.return_value.set_result(proto)
connection1 = await conn.connect(req, None, ClientTimeout())
assert connection1._protocol == proto
assert 1 == len(conn._acquired)
assert proto in conn._acquired
assert key in conn._acquired_per_host
assert proto in conn._acquired_per_host[key]
acquired = False
async def f():
nonlocal acquired
connection2 = await conn.connect(req, None, ClientTimeout())
acquired = True
assert 1 == len(conn._acquired)
assert 1 == len(conn._acquired_per_host[key])
connection2.release()
task = loop.create_task(f())
await asyncio.sleep(0.01)
assert not acquired
connection1.release()
await asyncio.sleep(0)
assert acquired
await task
await conn.close()
async def test_connect_queued_operation_tracing(loop: Any, key: Any) -> None:
session = mock.Mock()
trace_config_ctx = mock.Mock()
on_connection_queued_start = mock.Mock(side_effect=make_mocked_coro(mock.Mock()))
on_connection_queued_end = mock.Mock(side_effect=make_mocked_coro(mock.Mock()))
trace_config = aiohttp.TraceConfig(
trace_config_ctx_factory=mock.Mock(return_value=trace_config_ctx)
)
trace_config.on_connection_queued_start.append(on_connection_queued_start)
trace_config.on_connection_queued_end.append(on_connection_queued_end)
trace_config.freeze()
traces = [Trace(session, trace_config, trace_config.trace_config_ctx())]
proto = create_mocked_conn(loop)
proto.is_connected.return_value = True
req = ClientRequest(
"GET", URL("http://localhost1:80"), loop=loop, response_class=mock.Mock()
)
conn = aiohttp.BaseConnector(limit=1)
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = mock.Mock()
conn._create_connection.return_value = loop.create_future()
conn._create_connection.return_value.set_result(proto)
connection1 = await conn.connect(req, traces, ClientTimeout())
async def f():
connection2 = await conn.connect(req, traces, ClientTimeout())
on_connection_queued_start.assert_called_with(
session, trace_config_ctx, aiohttp.TraceConnectionQueuedStartParams()
)
on_connection_queued_end.assert_called_with(
session, trace_config_ctx, aiohttp.TraceConnectionQueuedEndParams()
)
connection2.release()
task = asyncio.ensure_future(f())
await asyncio.sleep(0.01)
connection1.release()
await task
await conn.close()
async def test_connect_reuseconn_tracing(loop: Any, key: Any) -> None:
session = mock.Mock()
trace_config_ctx = mock.Mock()
on_connection_reuseconn = mock.Mock(side_effect=make_mocked_coro(mock.Mock()))
trace_config = aiohttp.TraceConfig(
trace_config_ctx_factory=mock.Mock(return_value=trace_config_ctx)
)
trace_config.on_connection_reuseconn.append(on_connection_reuseconn)
trace_config.freeze()
traces = [Trace(session, trace_config, trace_config.trace_config_ctx())]
proto = create_mocked_conn(loop)
proto.is_connected.return_value = True
req = ClientRequest(
"GET", URL("http://localhost:80"), loop=loop, response_class=mock.Mock()
)
conn = aiohttp.BaseConnector(limit=1)
conn._conns[key] = [(proto, loop.time())]
conn2 = await conn.connect(req, traces, ClientTimeout())
conn2.release()
on_connection_reuseconn.assert_called_with(
session, trace_config_ctx, aiohttp.TraceConnectionReuseconnParams()
)
await conn.close()
async def test_connect_with_limit_and_limit_per_host(loop: Any, key: Any) -> None:
proto = create_mocked_conn(loop)
proto.is_connected.return_value = True
req = ClientRequest("GET", URL("http://localhost:80"), loop=loop)
conn = aiohttp.BaseConnector(limit=1000, limit_per_host=1)
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = mock.Mock()
conn._create_connection.return_value = loop.create_future()
conn._create_connection.return_value.set_result(proto)
acquired = False
connection1 = await conn.connect(req, None, ClientTimeout())
async def f():
nonlocal acquired
connection2 = await conn.connect(req, None, ClientTimeout())
acquired = True
assert 1 == len(conn._acquired)
assert 1 == len(conn._acquired_per_host[key])
connection2.release()
task = loop.create_task(f())
await asyncio.sleep(0.01)
assert not acquired
connection1.release()
await asyncio.sleep(0)
assert acquired
await task
await conn.close()
async def test_connect_with_no_limit_and_limit_per_host(loop: Any, key: Any) -> None:
proto = create_mocked_conn(loop)
proto.is_connected.return_value = True
req = ClientRequest("GET", URL("http://localhost1:80"), loop=loop)
conn = aiohttp.BaseConnector(limit=0, limit_per_host=1)
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = mock.Mock()
conn._create_connection.return_value = loop.create_future()
conn._create_connection.return_value.set_result(proto)
acquired = False
connection1 = await conn.connect(req, None, ClientTimeout())
async def f():
nonlocal acquired
connection2 = await conn.connect(req, None, ClientTimeout())
acquired = True
connection2.release()
task = loop.create_task(f())
await asyncio.sleep(0.01)
assert not acquired
connection1.release()
await asyncio.sleep(0)
assert acquired
await task
await conn.close()
async def test_connect_with_no_limits(loop: Any, key: Any) -> None:
proto = create_mocked_conn(loop)
proto.is_connected.return_value = True
req = ClientRequest("GET", URL("http://localhost:80"), loop=loop)
conn = aiohttp.BaseConnector(limit=0, limit_per_host=0)
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = mock.Mock()
conn._create_connection.return_value = loop.create_future()
conn._create_connection.return_value.set_result(proto)
acquired = False
connection1 = await conn.connect(req, None, ClientTimeout())
async def f():
nonlocal acquired
connection2 = await conn.connect(req, None, ClientTimeout())
acquired = True
assert 1 == len(conn._acquired)
assert 1 == len(conn._acquired_per_host[key])
connection2.release()
task = loop.create_task(f())
await asyncio.sleep(0.01)
assert acquired
connection1.release()
await task
await conn.close()
async def test_connect_with_limit_cancelled(loop: Any) -> None:
proto = create_mocked_conn(loop)
proto.is_connected.return_value = True
req = ClientRequest("GET", URL("http://host:80"), loop=loop)
conn = aiohttp.BaseConnector(limit=1)
key = ("host", 80, False)
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = mock.Mock()
conn._create_connection.return_value = loop.create_future()
conn._create_connection.return_value.set_result(proto)
connection = await conn.connect(req, None, ClientTimeout())
assert connection._protocol == proto
assert connection.transport == proto.transport
assert 1 == len(conn._acquired)
with pytest.raises(asyncio.TimeoutError):
# limit exhausted
await asyncio.wait_for(conn.connect(req, None, ClientTimeout()), 0.01)
connection.close()
await conn.close()
async def test_connect_with_capacity_release_waiters(loop: Any) -> None:
    async def check_with_exc(err):
        conn = aiohttp.BaseConnector(limit=1)
        conn._create_connection = mock.Mock()
        conn._create_connection.return_value = loop.create_future()
        conn._create_connection.return_value.set_exception(err)
        with pytest.raises(Exception):
            req = mock.Mock()
            await conn.connect(req, None, ClientTimeout())
        assert not conn._waiters
    await check_with_exc(OSError(1, "permission error"))
    await check_with_exc(RuntimeError())
    await check_with_exc(asyncio.TimeoutError())
async def test_connect_with_limit_concurrent(loop: Any) -> None:
proto = create_mocked_conn(loop)
proto.should_close = False
proto.is_connected.return_value = True
req = ClientRequest("GET", URL("http://host:80"), loop=loop)
max_connections = 2
num_connections = 0
conn = aiohttp.BaseConnector(limit=max_connections)
# Use a real coroutine for _create_connection; a mock would mask
# problems that only happen when the method yields.
async def create_connection(req, traces, timeout):
nonlocal num_connections
num_connections += 1
await asyncio.sleep(0)
# Make a new transport mock each time because acquired
# transports are stored in a set. Reusing the same object
# messes with the count.
proto = create_mocked_conn(loop, should_close=False)
proto.is_connected.return_value = True
return proto
conn._create_connection = create_connection
# Simulate something like a crawler. It opens a connection, does
# something with it, closes it, then creates tasks that make more
# connections and waits for them to finish. The crawler is started
# with multiple concurrent requests and stops when it hits a
# predefined maximum number of requests.
max_requests = 50
num_requests = 0
start_requests = max_connections + 1
async def f(start=True):
nonlocal num_requests
if num_requests == max_requests:
return
num_requests += 1
if not start:
connection = await conn.connect(req, None, ClientTimeout())
await asyncio.sleep(0)
connection.release()
await asyncio.sleep(0)
tasks = [loop.create_task(f(start=False)) for i in range(start_requests)]
await asyncio.wait(tasks)
await f()
await conn.close()
assert max_connections == num_connections
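# A connect call that gets cancelled while waiting for a free slot must
# remove its waiter entry so the connector does not leak futures.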
async def test_connect_waiters_cleanup(loop: Any) -> None:
proto = create_mocked_conn(loop)
proto.is_connected.return_value = True
req = ClientRequest("GET", URL("http://host:80"), loop=loop)
conn = aiohttp.BaseConnector(limit=1)
conn._available_connections = mock.Mock(return_value=0)
t = loop.create_task(conn.connect(req, None, ClientTimeout()))
await asyncio.sleep(0)
assert conn._waiters.keys()
t.cancel()
await asyncio.sleep(0)
assert not conn._waiters.keys()
async def test_connect_waiters_cleanup_key_error(loop: Any) -> None:
proto = create_mocked_conn(loop)
proto.is_connected.return_value = True
req = ClientRequest("GET", URL("http://host:80"), loop=loop)
conn = aiohttp.BaseConnector(limit=1)
conn._available_connections = mock.Mock(return_value=0)
t = loop.create_task(conn.connect(req, None, ClientTimeout()))
await asyncio.sleep(0)
assert conn._waiters.keys()
    # Delete the waiter entries explicitly before the cancelled task is
    # scheduled again; cancellation must still terminate cleanly even
    # though its own waiter entry is already gone (no KeyError).
conn._waiters.clear()
t.cancel()
await asyncio.sleep(0)
    assert not conn._waiters.keys()
async def test_close_with_acquired_connection(loop: Any) -> None:
proto = create_mocked_conn(loop)
proto.is_connected.return_value = True
req = ClientRequest("GET", URL("http://host:80"), loop=loop)
conn = aiohttp.BaseConnector(limit=1)
key = ("host", 80, False)
conn._conns[key] = [(proto, loop.time())]
conn._create_connection = mock.Mock()
conn._create_connection.return_value = loop.create_future()
conn._create_connection.return_value.set_result(proto)
connection = await conn.connect(req, None, ClientTimeout())
assert 1 == len(conn._acquired)
await conn.close()
assert 0 == len(conn._acquired)
assert conn.closed
proto.close.assert_called_with()
assert not connection.closed
connection.close()
assert connection.closed
async def test_default_force_close(loop: Any) -> None:
connector = aiohttp.BaseConnector()
assert not connector.force_close
async def test_limit_property(loop: Any) -> None:
conn = aiohttp.BaseConnector(limit=15)
assert 15 == conn.limit
await conn.close()
async def test_limit_per_host_property(loop: Any) -> None:
conn = aiohttp.BaseConnector(limit_per_host=15)
assert 15 == conn.limit_per_host
await conn.close()
async def test_limit_property_default(loop: Any) -> None:
conn = aiohttp.BaseConnector()
assert conn.limit == 100
await conn.close()
async def test_limit_per_host_property_default(loop: Any) -> None:
conn = aiohttp.BaseConnector()
assert conn.limit_per_host == 0
await conn.close()
async def test_force_close_and_explicit_keep_alive(loop: Any) -> None:
with pytest.raises(ValueError):
aiohttp.BaseConnector(keepalive_timeout=30, force_close=True)
conn = aiohttp.BaseConnector(force_close=True, keepalive_timeout=None)
assert conn
conn = aiohttp.BaseConnector(force_close=True)
assert conn
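# When the first connection attempt fails, the queued second request must
# still get a working connection of its own.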
async def test_error_on_connection(loop: Any, key: Any) -> None:
conn = aiohttp.BaseConnector(limit=1)
req = mock.Mock()
req.connection_key = key
proto = create_mocked_conn(loop)
i = 0
fut = loop.create_future()
exc = OSError()
async def create_connection(req, traces, timeout):
nonlocal i
i += 1
if i == 1:
await fut
raise exc
elif i == 2:
return proto
conn._create_connection = create_connection
t1 = loop.create_task(conn.connect(req, None, ClientTimeout()))
t2 = loop.create_task(conn.connect(req, None, ClientTimeout()))
await asyncio.sleep(0)
assert not t1.done()
assert not t2.done()
assert len(conn._acquired_per_host[key]) == 1
fut.set_result(None)
with pytest.raises(OSError):
await t1
ret = await t2
assert len(conn._acquired_per_host[key]) == 1
assert ret._key == key
assert ret.protocol == proto
assert proto in conn._acquired
ret.release()
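# Cancelling a connect that is still queued behind the limit must surface
# CancelledError to the caller.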
async def test_cancelled_waiter(loop: Any) -> None:
conn = aiohttp.BaseConnector(limit=1)
req = mock.Mock()
req.connection_key = "key"
proto = create_mocked_conn(loop)
    async def create_connection(req, traces, timeout):
await asyncio.sleep(1)
return proto
conn._create_connection = create_connection
conn._acquired.add(proto)
conn2 = loop.create_task(conn.connect(req, None, ClientTimeout()))
await asyncio.sleep(0)
conn2.cancel()
with pytest.raises(asyncio.CancelledError):
await conn2
async def test_error_on_connection_with_cancelled_waiter(loop: Any, key: Any) -> None:
conn = aiohttp.BaseConnector(limit=1)
req = mock.Mock()
req.connection_key = key
    proto = create_mocked_conn(loop)
i = 0
fut1 = loop.create_future()
fut2 = loop.create_future()
exc = OSError()
async def create_connection(req, traces, timeout):
nonlocal i
i += 1
if i == 1:
await fut1
raise exc
if i == 2:
await fut2
elif i == 3:
return proto
conn._create_connection = create_connection
t1 = loop.create_task(conn.connect(req, None, ClientTimeout()))
t2 = loop.create_task(conn.connect(req, None, ClientTimeout()))
t3 = loop.create_task(conn.connect(req, None, ClientTimeout()))
await asyncio.sleep(0)
assert not t1.done()
assert not t2.done()
assert len(conn._acquired_per_host[key]) == 1
fut1.set_result(None)
fut2.cancel()
with pytest.raises(OSError):
await t1
with pytest.raises(asyncio.CancelledError):
await t2
ret = await t3
assert len(conn._acquired_per_host[key]) == 1
assert ret._key == key
assert ret.protocol == proto
assert proto in conn._acquired
ret.release()
async def test_tcp_connector(aiohttp_client: Any, loop: Any) -> None:
async def handler(request):
return web.Response()
app = web.Application()
app.router.add_get("/", handler)
client = await aiohttp_client(app)
r = await client.get("/")
assert r.status == 200
@needs_unix
async def test_unix_connector_not_found(loop: Any) -> None:
connector = aiohttp.UnixConnector("/" + uuid.uuid4().hex)
req = ClientRequest("GET", URL("http://www.python.org"), loop=loop)
with pytest.raises(aiohttp.ClientConnectorError):
await connector.connect(req, None, ClientTimeout())
@needs_unix
async def test_unix_connector_permission(loop: Any) -> None:
loop.create_unix_connection = make_mocked_coro(raise_exception=PermissionError())
connector = aiohttp.UnixConnector("/" + uuid.uuid4().hex)
req = ClientRequest("GET", URL("http://www.python.org"), loop=loop)
with pytest.raises(aiohttp.ClientConnectorError):
await connector.connect(req, None, ClientTimeout())
@pytest.mark.skipif(
platform.system() != "Windows", reason="Proactor Event loop present only in Windows"
)
async def test_named_pipe_connector_wrong_loop(
selector_loop: Any, pipe_name: Any
) -> None:
with pytest.raises(RuntimeError):
aiohttp.NamedPipeConnector(pipe_name)
@pytest.mark.skipif(
platform.system() != "Windows", reason="Proactor Event loop present only in Windows"
)
async def test_named_pipe_connector_not_found(
proactor_loop: Any, pipe_name: Any
) -> None:
asyncio.set_event_loop(proactor_loop)
connector = aiohttp.NamedPipeConnector(pipe_name)
req = ClientRequest("GET", URL("http://www.python.org"), loop=proactor_loop)
with pytest.raises(aiohttp.ClientConnectorError):
await connector.connect(req, None, ClientTimeout())
@pytest.mark.skipif(
platform.system() != "Windows", reason="Proactor Event loop present only in Windows"
)
async def test_named_pipe_connector_permission(
proactor_loop: Any, pipe_name: Any
) -> None:
proactor_loop.create_pipe_connection = make_mocked_coro(
raise_exception=PermissionError()
)
asyncio.set_event_loop(proactor_loop)
connector = aiohttp.NamedPipeConnector(pipe_name)
req = ClientRequest("GET", URL("http://www.python.org"), loop=proactor_loop)
with pytest.raises(aiohttp.ClientConnectorError):
await connector.connect(req, None, ClientTimeout())
async def test_default_use_dns_cache() -> None:
conn = aiohttp.TCPConnector()
assert conn.use_dns_cache
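# A literal IP address must be connected to directly, without ever
# consulting the configured resolver.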
async def test_resolver_not_called_with_address_is_ip(loop: Any) -> None:
resolver = mock.MagicMock()
connector = aiohttp.TCPConnector(resolver=resolver)
req = ClientRequest(
"GET",
URL(f"http://127.0.0.1:{unused_port()}"),
loop=loop,
response_class=mock.Mock(),
)
with pytest.raises(OSError):
await connector.connect(req, None, ClientTimeout())
resolver.resolve.assert_not_called()
async def test_tcp_connector_raise_connector_ssl_error(
aiohttp_server: Any, ssl_ctx: Any
) -> None:
async def handler(request):
return web.Response()
app = web.Application()
app.router.add_get("/", handler)
srv = await aiohttp_server(app, ssl=ssl_ctx)
port = unused_port()
conn = aiohttp.TCPConnector(local_addr=("127.0.0.1", port))
session = aiohttp.ClientSession(connector=conn)
url = srv.make_url("/")
err = aiohttp.ClientConnectorCertificateError
with pytest.raises(err) as ctx:
await session.get(url)
assert isinstance(ctx.value, aiohttp.ClientConnectorCertificateError)
assert isinstance(ctx.value.certificate_error, ssl.SSLError)
await session.close()
async def test_tcp_connector_do_not_raise_connector_ssl_error(
aiohttp_server: Any, ssl_ctx: Any, client_ssl_ctx: Any
) -> None:
async def handler(request):
return web.Response()
app = web.Application()
app.router.add_get("/", handler)
srv = await aiohttp_server(app, ssl=ssl_ctx)
port = unused_port()
conn = aiohttp.TCPConnector(local_addr=("127.0.0.1", port))
session = aiohttp.ClientSession(connector=conn)
url = srv.make_url("/")
r = await session.get(url, ssl=client_ssl_ctx)
r.release()
first_conn = next(iter(conn._conns.values()))[0][0]
try:
_sslcontext = first_conn.transport._ssl_protocol._sslcontext
except AttributeError:
_sslcontext = first_conn.transport._sslcontext
assert _sslcontext is client_ssl_ctx
r.close()
await session.close()
await conn.close()
async def test_tcp_connector_uses_provided_local_addr(aiohttp_server: Any) -> None:
async def handler(request):
return web.Response()
app = web.Application()
app.router.add_get("/", handler)
srv = await aiohttp_server(app)
port = unused_port()
conn = aiohttp.TCPConnector(local_addr=("127.0.0.1", port))
session = aiohttp.ClientSession(connector=conn)
url = srv.make_url("/")
r = await session.get(url)
r.release()
first_conn = next(iter(conn._conns.values()))[0][0]
assert first_conn.transport.get_extra_info("sockname") == ("127.0.0.1", port)
r.close()
await session.close()
await conn.close()
async def test_unix_connector(unix_server: Any, unix_sockname: Any) -> None:
async def handler(request):
return web.Response()
app = web.Application()
app.router.add_get("/", handler)
await unix_server(app)
url = "http://127.0.0.1/"
connector = aiohttp.UnixConnector(unix_sockname)
assert unix_sockname == connector.path
session = client.ClientSession(connector=connector)
r = await session.get(url)
assert r.status == 200
r.close()
await session.close()
@pytest.mark.skipif(
platform.system() != "Windows", reason="Proactor Event loop present only in Windows"
)
async def test_named_pipe_connector(
proactor_loop: Any, named_pipe_server: Any, pipe_name: Any
) -> None:
async def handler(request):
return web.Response()
app = web.Application()
app.router.add_get("/", handler)
await named_pipe_server(app)
url = "http://this-does-not-matter.com"
connector = aiohttp.NamedPipeConnector(pipe_name)
assert pipe_name == connector.path
session = client.ClientSession(connector=connector)
r = await session.get(url)
assert r.status == 200
r.close()
await session.close()
class TestDNSCacheTable:
@pytest.fixture
def dns_cache_table(self):
return DNSCacheTable()
def test_next_addrs_basic(self, dns_cache_table: Any) -> None:
dns_cache_table.add("localhost", ["127.0.0.1"])
dns_cache_table.add("foo", ["127.0.0.2"])
addrs = dns_cache_table.next_addrs("localhost")
assert addrs == ["127.0.0.1"]
addrs = dns_cache_table.next_addrs("foo")
assert addrs == ["127.0.0.2"]
with pytest.raises(KeyError):
dns_cache_table.next_addrs("no-such-host")
def test_remove(self, dns_cache_table: Any) -> None:
dns_cache_table.add("localhost", ["127.0.0.1"])
dns_cache_table.remove("localhost")
with pytest.raises(KeyError):
dns_cache_table.next_addrs("localhost")
def test_clear(self, dns_cache_table: Any) -> None:
dns_cache_table.add("localhost", ["127.0.0.1"])
dns_cache_table.clear()
with pytest.raises(KeyError):
dns_cache_table.next_addrs("localhost")
def test_not_expired_ttl_None(self, dns_cache_table: Any) -> None:
dns_cache_table.add("localhost", ["127.0.0.1"])
assert not dns_cache_table.expired("localhost")
def test_not_expired_ttl(self) -> None:
dns_cache_table = DNSCacheTable(ttl=0.1)
dns_cache_table.add("localhost", ["127.0.0.1"])
assert not dns_cache_table.expired("localhost")
async def test_expired_ttl(self, loop: Any) -> None:
dns_cache_table = DNSCacheTable(ttl=0.01)
dns_cache_table.add("localhost", ["127.0.0.1"])
await asyncio.sleep(0.02)
assert dns_cache_table.expired("localhost")
def test_next_addrs(self, dns_cache_table: Any) -> None:
dns_cache_table.add("foo", ["127.0.0.1", "127.0.0.2", "127.0.0.3"])
        # Each call to next_addrs returns the hosts using
        # a round-robin strategy.
addrs = dns_cache_table.next_addrs("foo")
assert addrs == ["127.0.0.1", "127.0.0.2", "127.0.0.3"]
addrs = dns_cache_table.next_addrs("foo")
assert addrs == ["127.0.0.2", "127.0.0.3", "127.0.0.1"]
addrs = dns_cache_table.next_addrs("foo")
assert addrs == ["127.0.0.3", "127.0.0.1", "127.0.0.2"]
addrs = dns_cache_table.next_addrs("foo")
assert addrs == ["127.0.0.1", "127.0.0.2", "127.0.0.3"]
def test_next_addrs_single(self, dns_cache_table: Any) -> None:
dns_cache_table.add("foo", ["127.0.0.1"])
addrs = dns_cache_table.next_addrs("foo")
assert addrs == ["127.0.0.1"]
addrs = dns_cache_table.next_addrs("foo")
assert addrs == ["127.0.0.1"]
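# A trace callback that removes the cached entry while _resolve_host is
# running must not break the lookup already in progress.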
async def test_connector_cache_trace_race() -> None:
class DummyTracer:
async def send_dns_cache_hit(self, *args, **kwargs):
connector._cached_hosts.remove(("", 0))
token = object()
connector = TCPConnector()
connector._cached_hosts.add(("", 0), [token])
traces = [DummyTracer()]
assert await connector._resolve_host("", 0, traces) == [token]
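# The same kind of race, but against the DNS throttle events: the trace
# callback resolves the pending event and fills the cache first.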
async def test_connector_throttle_trace_race(loop: Any) -> None:
key = ("", 0)
token = object()
class DummyTracer:
async def send_dns_cache_hit(self, *args, **kwargs):
event = connector._throttle_dns_events.pop(key)
event.set()
connector._cached_hosts.add(key, [token])
connector = TCPConnector()
connector._throttle_dns_events[key] = EventResultOrError(loop)
traces = [DummyTracer()]
assert await connector._resolve_host("", 0, traces) == [token]
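# Releasing one waiter must not discard other waiters registered under
# the same connection key.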
async def test_connector_does_not_remove_needed_waiters(loop: Any, key: Any) -> None:
proto = create_mocked_conn(loop)
proto.is_connected.return_value = True
req = ClientRequest("GET", URL("https://localhost:80"), loop=loop)
connection_key = req.connection_key
connector = aiohttp.BaseConnector()
connector._available_connections = mock.Mock(return_value=0)
connector._conns[key] = [(proto, loop.time())]
connector._create_connection = create_mocked_conn(loop)
connector._create_connection.return_value = loop.create_future()
connector._create_connection.return_value.set_result(proto)
dummy_waiter = loop.create_future()
async def await_connection_and_check_waiters() -> None:
connection = await connector.connect(req, [], ClientTimeout())
try:
assert connection_key in connector._waiters
assert dummy_waiter in connector._waiters[connection_key]
finally:
connection.close()
async def allow_connection_and_add_dummy_waiter() -> None:
        # `asyncio.gather` may not run the coroutines in order, so skip
        # one event loop cycle if the waiter is not registered yet.
if connection_key not in connector._waiters:
await asyncio.sleep(0)
connector._waiters[connection_key].popleft().set_result(None)
del connector._waiters[connection_key]
connector._waiters[connection_key].append(dummy_waiter)
await asyncio.gather(
await_connection_and_check_waiters(),
allow_connection_and_add_dummy_waiter(),
)
await connector.close()
avg_line_length: 30.888595 | max_line_length: 88 | alphanum_fraction: 0.682728
hexsha: 0f7086441fe4a2382b92f7346c8ec1d001bf2dd8 | size: 1,483 | ext: py | lang: Python
max_stars_repo_path: veros/diagnostics/io_tools/hdf5.py | max_stars_repo_name: madsbk/veros | max_stars_repo_head_hexsha: 00d2c33e28f0bd098a81bd6ac48436067e7eb8f5 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: veros/diagnostics/io_tools/hdf5.py | max_issues_repo_name: madsbk/veros | max_issues_repo_head_hexsha: 00d2c33e28f0bd098a81bd6ac48436067e7eb8f5 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: veros/diagnostics/io_tools/hdf5.py | max_forks_repo_name: madsbk/veros | max_forks_repo_head_hexsha: 00d2c33e28f0bd098a81bd6ac48436067e7eb8f5 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
import threading
import contextlib
import logging
@contextlib.contextmanager
def threaded_io(vs, filepath, mode):
"""
    If using IO threads, start a new thread to write the HDF5 data to disk.
"""
import h5py
if vs.use_io_threads:
_wait_for_disk(vs, filepath)
_io_locks[filepath].clear()
h5file = h5py.File(filepath, mode)
try:
yield h5file
finally:
if vs.use_io_threads:
io_thread = threading.Thread(target=_write_to_disk, args=(vs, h5file, filepath))
io_thread.start()
else:
_write_to_disk(vs, h5file, filepath)
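# One threading.Event per file path: a cleared event means a background
# write for that file is still in flight, a set event means it is safe
# to reopen the file.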
_io_locks = {}
def _add_to_locks(file_id):
"""
If there is no lock for file_id, create one
"""
if file_id not in _io_locks:
_io_locks[file_id] = threading.Event()
_io_locks[file_id].set()
def _wait_for_disk(vs, file_id):
"""
Wait for the lock of file_id to be released
"""
logging.debug("Waiting for lock {} to be released".format(file_id))
_add_to_locks(file_id)
lock_released = _io_locks[file_id].wait(vs.io_timeout)
if not lock_released:
raise RuntimeError("Timeout while waiting for disk IO to finish")
def _write_to_disk(vs, h5file, file_id):
"""
Sync HDF5 data to disk, close file handle, and release lock.
May run in a separate thread.
"""
h5file.flush()
h5file.close()
if vs.use_io_threads and file_id is not None:
_io_locks[file_id].set()
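# A minimal usage sketch (not part of the original module): how a caller
# might write a dataset through threaded_io. The `vs` settings object and
# the dataset name "restart_data" are assumptions for illustration; the
# module itself only relies on vs.use_io_threads and vs.io_timeout.
#
#     import numpy as np
#
#     def write_restart(vs, filepath, arr):
#         with threaded_io(vs, filepath, "w") as h5file:
#             h5file.create_dataset("restart_data", data=np.asarray(arr))
#         # On exit the file is flushed and closed, possibly in a
#         # background thread when vs.use_io_threads is enabled.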
avg_line_length: 25.568966 | max_line_length: 92 | alphanum_fraction: 0.65408