"""
Created on: 30 June 2019
Investigate stationarity of time series (example of security data), analytics on log-returns
Provide summary statistics on the data
Introduce tests (such as Augmented Dickey Fuller) to check stationarity of time series
Inspiration from: https://www.analyticsvidhya.com/blog/2018/09/non-stationary-time-series-python/
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
from scipy.stats import kurtosis, skew
from statsmodels.tsa.stattools import adfuller
from securityAnalysis.utils_finance import calculate_return_df
pd.set_option('display.max_columns', 10)
pd.set_option('display.width', 500)
plt.style.use('seaborn')
def test_stationarity_adf(time_series: np.ndarray) -> None:
"""
Wrapper on adfuller method from statsmodels package, to perform Dickey-Fuller test for
Stationarity
Parameter:
time_series: time series containing non-null values which to perform stationarity test on
Returns
None: Print statement of ['Test Statistic', 'p-value', '# lags', '# observations', and
critical values for alpha 1, 5 and 10%
NOTE:
Test statistic: t
Critical value, c
Null hypothesis, H_0
If t < c, reject H_0 --> time series is stationary
If t > c, fail to reject H_0 --> time series is non-stationary (has some drift with time)
"""
print('Results of Dickey-Fuller Test:')
df_test = adfuller(time_series, autolag='AIC')
df_output = pd.Series(df_test[0:4],
index=['Test Statistic', 'p-value', '#Lags Used',
'Number of Observations Used'])
for key, value in df_test[4].items():
df_output['Critical Value (%s)' % key] = value
print(df_output)
def get_aug_dickey_fuller_result(time_series: np.ndarray, alpha: int = 5) -> bool:
"""
Perform the Augmented Dickey-Fuller test for stationarity on time_series, at a
given level of significance alpha
Parameters:
time_series: 1-D array of time series data to be tested for stationarity
alpha: chosen level of significance, must be one of 1, 5 or 10 (%)
Returns:
bool: True if the data are stationary (test statistic less than the critical value at
significance level alpha, so H_0 is rejected), False for non-stationary data
"""
assert alpha in [1, 5, 10], "Choose appropriate alpha significance: [1, 5 or 10%]"
print(f"Performing augmented Dickey Fuller test at significance level alpha: {alpha}")
df_test = adfuller(time_series, autolag='AIC')
test_stats = {
'test_statistic': df_test[0],
'critical_values': df_test[4]  # dict of critical values keyed by '1%', '5%', '10%'
}
is_stationary = test_stats['test_statistic'] < test_stats['critical_values'][f"{alpha}%"]
return is_stationary
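# Illustrative sanity check of the decision rule documented above (a minimal sketch, not part
# of the original analysis; the helper name, seed and sample sizes are arbitrary): white noise
# is stationary by construction and should come back True, while a random walk has a unit root
# and should come back False.
def _demo_adf_decision_rule() -> None:
    rng = np.random.default_rng(0)
    white_noise = rng.normal(size=500)              # stationary series
    random_walk = np.cumsum(rng.normal(size=500))   # non-stationary (unit-root) series
    print("White noise stationary:", get_aug_dickey_fuller_result(white_noise, alpha=5))
    print("Random walk stationary:", get_aug_dickey_fuller_result(random_walk, alpha=5))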
def get_descriptive_stats(data: pd.DataFrame, alpha: float = 0.05) -> dict:
"""Compute descriptive, high level stats (p-values given for two tailed tests),
incuding skewness and kurtosis, specifying alpha (for tests of skewness and kurtosis)
Args:
data: Clean dataframe with no NaNs
alpha: level of significance for the two-tailed test. must lie between 0 and 1
Returns
dict of results for descriptive level statistics
"""
assert 0 < alpha < 1, f"Alpha level of {alpha} is not valid, must lie between 0 and 1"
print("Getting descriptive level stats for dataframe...")
result_df = pd.DataFrame(columns=['Size', 'Mean', 'Std Dev', 'Skewness', 'Excess Kurtosis'])
result_df['Size'] = data.count()
result_df['Mean'] = data.mean()
result_df['Std Dev'] = np.std(data)
result_df['Min'] = np.min(data)
result_df['Max'] = np.max(data)
result_df['Skewness'] = skew(data)
result_df['Skewness t-statistic'] = \
result_df['Skewness'].values / np.sqrt(6 / result_df['Size'].values)
result_df['Skewness p-value'] = \
2 * (1 - stats.t.cdf(np.abs(result_df['Skewness t-statistic']), df=1))
# so, one can reject H_0 (skewness of log returns = 0) for a p-value of less than alpha;
# the absolute t-statistic is used so the two-tailed p-value is also valid for negative skew
skew_h0_title = f"Skewness reject H_0 at {100 * alpha}% sig level"
result_df[skew_h0_title] = result_df['Skewness p-value'].values < alpha
result_df['Excess Kurtosis'] = kurtosis(data) # if high excess kurtosis --> thick tails
result_df['Excess Kurtosis t-statistic'] = \
result_df['Excess Kurtosis'].values / np.sqrt(24 / result_df['Size'].values)
result_df['Excess Kurtosis p-value'] = \
2 * (1 - stats.t.cdf(np.abs(result_df['Excess Kurtosis t-statistic']), df=1))
kurt_h0_title = f"Kurtosis reject H_0 at {100 * alpha}% sig level"
result_df[kurt_h0_title] = result_df['Excess Kurtosis p-value'].values < alpha
adf_results = []
for i in data.columns:
adf_results.append(get_aug_dickey_fuller_result(data.loc[:, i]))
result_df['Aug Dickey-Fuller Test'] = adf_results
result_dict = result_df.T.to_dict()
return result_dict
if __name__ == '__main__':
# real market data
import yfinance
price_series = yfinance.download(tickers='GOOGL', start="2010-01-01")['Adj Close'] # google data
price_df = pd.DataFrame(price_series)
# random data example (note: this overwrites the market data downloaded above, so the analysis below runs on the synthetic series)
import datetime
date_rng = pd.date_range(datetime.datetime.now().strftime("%Y-%m-%d"), periods=500).to_list()
random_returns = pd.Series(np.random.randn(500), index=date_rng)
price_series = random_returns.cumsum()
price_df = pd.DataFrame(price_series)
# run analysis
returns_df = calculate_return_df(data=price_df,
is_relative_return=True)
# # could also look at log returns of the data and see if the time series is stationary
# log_returns_df = calculate_return_df(data=price_df,
# is_log_return=True)
# test for stationarity (using Augmented Dickey Fuller test) for one timeseries
test_stationarity_adf(time_series=price_series)
# augmented dickey fuller result
get_aug_dickey_fuller_result(time_series=price_series)
# more descriptive statistics on skewness, kurtosis, as well as # observations, max, min, mean,
# standard deviation etc
get_descriptive_stats(data=returns_df,
alpha=0.05)
|
import scrapy
class ItcastSpider(scrapy.Spider):
name = 'itcast'
allowed_domains = ['itcast.cn']
start_urls = ['http://www.itcast.cn/channel/teacher.shtml']
def parse(self, response):
filename = "teacher.html"
open(filename, 'wb').write(response.body)  # response.body is bytes, so write in binary mode
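# Illustrative usage (assumes the standard Scrapy project layout created by `scrapy startproject`,
# which is not shown here): the spider is started from the project root with the Scrapy CLI:
#   scrapy crawl itcast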
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 28 16:14:58 2021
@author: marina
"""
# Set absolute package path
import sys, os
sys.path.append(os.path.abspath(".."))
import extra.aux_funcs as af # :)
import numpy as np
from evol_funcs.evol_mut_eval import initializeIndividual, evaluate, evaluateNSGA2, evaluate_PID
#from evol_funcs.evol_funcs_ANN import evaluate, evaluate_PID
config_file = "config.yaml"
# Load configuration "cf" dir
cf = af.set_config("../config/" + config_file)
network = initializeIndividual(cf)
network(5)
|
import pytest
from model_mommy import mommy
from usaspending_api.common.recipient_lookups import obtain_recipient_uri
@pytest.fixture
def recipient_lookup(db):
parent_recipient_lookup = {"duns": "123", "recipient_hash": "01c03484-d1bd-41cc-2aca-4b427a2d0611"}
recipient_lookup = {"duns": "456", "recipient_hash": "1c4e7c2a-efe3-1b7e-2190-6f4487f808ac"}
parent_recipient_profile = {"recipient_hash": "01c03484-d1bd-41cc-2aca-4b427a2d0611", "recipient_level": "P"}
recipient_profile = {"recipient_hash": "1c4e7c2a-efe3-1b7e-2190-6f4487f808ac", "recipient_level": "C"}
mommy.make("recipient.RecipientLookup", **parent_recipient_lookup)
mommy.make("recipient.RecipientLookup", **recipient_lookup)
mommy.make("recipient.RecipientProfile", **parent_recipient_profile)
mommy.make("recipient.RecipientProfile", **recipient_profile)
# This is required for test_child_recipient_with_name_and_no_id.
recipient_profile = {"recipient_hash": "b2c8fe8e-b520-c47f-31e3-3620a358ce48", "recipient_level": "C"}
mommy.make("recipient.RecipientProfile", **recipient_profile)
# Child Recipient Tests
@pytest.mark.django_db
def test_child_recipient_without_name_or_id(recipient_lookup):
child_recipient_parameters = {
"recipient_name": None,
"recipient_unique_id": None,
"parent_recipient_unique_id": "123",
}
expected_result = None
assert obtain_recipient_uri(**child_recipient_parameters) == expected_result
@pytest.mark.django_db
def test_child_recipient_with_name_and_no_id(recipient_lookup):
child_recipient_parameters = {
"recipient_name": "Child Recipient Test Without ID",
"recipient_unique_id": None,
"parent_recipient_unique_id": "123",
}
expected_result = "b2c8fe8e-b520-c47f-31e3-3620a358ce48-C"
assert obtain_recipient_uri(**child_recipient_parameters) == expected_result
@pytest.mark.django_db
def test_child_recipient_with_id_and_no_name(recipient_lookup):
child_recipient_parameters = {
"recipient_name": None,
"recipient_unique_id": "456",
"parent_recipient_unique_id": "123",
}
expected_result = "1c4e7c2a-efe3-1b7e-2190-6f4487f808ac-C"
assert obtain_recipient_uri(**child_recipient_parameters) == expected_result
@pytest.mark.django_db
def test_child_recipient_with_name_and_id(recipient_lookup):
child_recipient_parameters = {
"recipient_name": "Child Recipient Test",
"recipient_unique_id": "456",
"parent_recipient_unique_id": "123",
}
expected_result = "1c4e7c2a-efe3-1b7e-2190-6f4487f808ac-C"
assert obtain_recipient_uri(**child_recipient_parameters) == expected_result
# Parent Recipient Tests
@pytest.mark.django_db
def test_parent_recipient_without_name_or_id(recipient_lookup):
child_recipient_parameters = {
"recipient_name": None,
"recipient_unique_id": None,
"parent_recipient_unique_id": None,
"is_parent_recipient": True,
}
expected_result = None
assert obtain_recipient_uri(**child_recipient_parameters) == expected_result
@pytest.mark.django_db
def test_parent_recipient_with_id_and_no_name(recipient_lookup):
child_recipient_parameters = {
"recipient_name": None,
"recipient_unique_id": "123",
"parent_recipient_unique_id": None,
"is_parent_recipient": True,
}
expected_result = "01c03484-d1bd-41cc-2aca-4b427a2d0611-P"
assert obtain_recipient_uri(**child_recipient_parameters) == expected_result
@pytest.mark.django_db
def test_parent_recipient_with_id_and_name(recipient_lookup):
child_recipient_parameters = {
"recipient_name": "Parent Recipient Tester",
"recipient_unique_id": "123",
"parent_recipient_unique_id": None,
"is_parent_recipient": True,
}
expected_result = "01c03484-d1bd-41cc-2aca-4b427a2d0611-P"
assert obtain_recipient_uri(**child_recipient_parameters) == expected_result
|
"""This module implements the models for the Blog core system."""
from app.models.acl import AccessControlMixin
from app.models.patch import CommentMixin, StatusMixin, TagMixin
from app.models import db
class Post(AccessControlMixin, StatusMixin, CommentMixin, TagMixin, db.Model):
"""Implements the Post model.
"""
title = db.Column(db.String(32))
sub = db.Column(db.String(128))
user_id = db.Column(db.Integer, db.ForeignKey(u'user.id'))
user = db.relationship('User', backref='posts', lazy='select')
content = db.Column(db.Text())
def __init__(self, title, sub, user, content):
"""Initialize the Sketch object.
Args:
:param title: The title of the post
:param sub: The subtitle of the post
:param user: A user
:param content: Content db.String
"""
super(Post, self).__init__()
self.title = title
self.sub = sub
self.user = user
self.content = content
def __repr__(self):
return self.title
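# Illustrative usage (a sketch; assumes an active Flask application context and an existing
# User instance named `author`, neither of which is defined in this module):
#   post = Post(title="Hello", sub="First post", user=author, content="Body text")
#   db.session.add(post)
#   db.session.commit()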
|
# Time Limit Exceeded
# class Solution(object):
# def threeSum(self, nums):
# """
# :type nums: List[int]
# :rtype: List[List[int]]
# """
# nums.sort()
# triplates = []
# for i in range(len(nums) - 2):
# if i > 0 and nums[i] == nums[i - 1]:
# continue
# left = i + 1
# right = len(nums) - 1
# while left < right:
# currentSum = nums[i] + nums[left] + nums[right]
# if currentSum == 0:
# if [nums[i], nums[left], nums[right]] not in triplates:
# triplates.append([nums[i], nums[left], nums[right]])
# left += 1
# right -= 1
# elif currentSum < 0:
# left += 1
# elif currentSum > 0:
# right -= 1
# return triplates
class Solution(object):
def threeSum(self, nums):
result = []
nums.sort()
for i in range(len(nums) - 2):
if i > 0 and nums[i] == nums[i - 1]:
continue
left, right = i + 1, len(nums) - 1
while left < right:
currentSum = nums[i] + nums[left] + nums[right]
if currentSum < 0:
left += 1
elif currentSum > 0:
right -= 1
else:
result.append((nums[i], nums[left], nums[right]))
while left < right and nums[left] == nums[left + 1]:
left += 1
while left < right and nums[right] == nums[right - 1]:
right -= 1
left += 1
right -= 1
return result
# My solution during a mock interview
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if not nums or len(nums) < 3:
return []
sortedNums = sorted(nums)
results = []
resultMap = {}
for first in range(0, len(sortedNums) - 2):
firstNum = sortedNums[first]
second, third = first + 1, len(sortedNums) - 1
while second < third:
secondNum, thirdNum = sortedNums[second], sortedNums[third]
if (firstNum + secondNum + thirdNum) == 0:
key = str(firstNum) + "-" + str(secondNum) + "-" + str(thirdNum)
if key not in resultMap:
results.append([firstNum, secondNum, thirdNum])
resultMap[key] = 1
second += 1
third -= 1
elif (firstNum + secondNum + thirdNum) > 0:
third -= 1
else:
second += 1
return results
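# Quick illustrative check of the solution above (not part of the original submissions);
# the expected unique triplets for this classic input are [-1, -1, 2] and [-1, 0, 1].
if __name__ == "__main__":
    print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))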
|
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.path import Path
import json
import numpy as np
from pymongo import MongoClient
import requests
from io import BytesIO
from PIL import Image
zid = 'ARG0001n7u'
client = MongoClient('localhost', 27017)
db = client['radio']
subjects = db['radio_subjects'] # subjects = images
s = subjects.find_one({'zooniverse_id':zid})
r = requests.get(s['location']['contours'])
contours = r.json()
sf_x = 500./contours['width']
sf_y = 500./contours['height']
verts_all = []
codes_all = []
components = contours['contours']
for comp in components:
for idx,level in enumerate(comp):
verts = [((p['x'])*sf_x,(p['y']-1)*sf_y) for p in level['arr']]
#verts.append(verts[0]) # Place beginning contour point at end again to close path?
codes = np.ones(len(verts),int) * Path.LINETO
codes[0] = Path.MOVETO
#codes[-1] = Path.CLOSEPOLY
'''
for v,c in zip(verts,codes):
print idx,v,c
'''
verts_all.extend(verts)
codes_all.extend(codes)
path = Path(verts_all, codes_all)
patch_black = patches.PathPatch(path, facecolor = 'none', edgecolor='black', lw=1)
patch_orange = patches.PathPatch(path, facecolor = 'none', edgecolor='orange', lw=1)
fig = plt.figure()
url_standard = s['location']['standard']
r_standard = requests.get(url_standard)
im_standard = Image.open(BytesIO(r_standard.content))
ax1 = fig.add_subplot(121,aspect='equal')
ax1.imshow(im_standard,origin='upper')
ax1.add_patch(patch_black)
ax1.set_title('WISE')
url_radio = s['location']['radio']
r_radio = requests.get(url_radio)
im_radio = Image.open(BytesIO(r_radio.content))
ax2 = fig.add_subplot(122,aspect='equal')
ax2.imshow(im_radio,origin='upper')
ax2.add_patch(patch_orange)
ax2.set_title('FIRST')
'''
ax2.set_xlim(220,280)
ax2.set_ylim(280,220)
'''
plt.show()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Dell EMC OpenManage Ansible Modules
# Version 3.0.0
# Copyright (C) 2020-2021 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: ome_template_identity_pool
short_description: Attach or detach an identity pool to a requested template on OpenManage Enterprise
version_added: "2.0.0"
description:
- Attach an identity pool to a requested template on OpenManage Enterprise.
- Detach an identity pool from a requested template on OpenManage Enterprise.
extends_documentation_fragment:
- dellemc.openmanage.ome_auth_options
options:
template_name:
description: Name of the template to which an identity pool is attached or detached.
type: str
required: true
identity_pool_name:
description:
- Name of the identity pool.
- To attach an identity pool to a template, provide the name of the identity pool.
- This option is not applicable when detaching an identity pool from a template.
type: str
requirements:
- "python >= 2.7.5"
author: "Felix Stephen (@felixs88)"
notes:
- Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
- This module does not support C(check_mode).
'''
EXAMPLES = r'''
---
- name: Attach an identity pool to a template
dellemc.openmanage.ome_template_identity_pool:
hostname: "192.168.0.1"
username: "username"
password: "password"
template_name: template_name
identity_pool_name: identity_pool_name
- name: Detach an identity pool from a template
dellemc.openmanage.ome_template_identity_pool:
hostname: "192.168.0.1"
username: "username"
password: "password"
template_name: template_name
'''
RETURN = r'''
---
msg:
type: str
description: Overall identity pool status of the attach or detach operation.
returned: always
sample: Successfully attached identity pool to template.
error_info:
description: Details of the HTTP Error.
returned: on HTTP error
type: dict
sample: {
"error": {
"code": "Base.1.0.GeneralError",
"message": "A general error has occurred. See ExtendedInfo for more information.",
"@Message.ExtendedInfo": [
{
"MessageId": "GEN1234",
"RelatedProperties": [],
"Message": "Unable to process the request because an error occurred.",
"MessageArgs": [],
"Severity": "Critical",
"Resolution": "Retry the operation. If the issue persists, contact your system administrator."
}
]
}
}
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ssl import SSLError
CONFIG_URI = "TemplateService/Actions/TemplateService.UpdateNetworkConfig"
TEMPLATE_URI = "TemplateService/Templates"
IDENTITY_URI = "IdentityPoolService/IdentityPools"
TEMPLATE_ATTRIBUTE_VIEW = "TemplateService/Templates({template_id})/Views(4)/AttributeViewDetails"
KEY_ATTR_NAME = 'DisplayName'
def get_template_vlan_info(rest_obj, template_id):
nic_bonding_tech = ""
try:
resp = rest_obj.invoke_request('GET', TEMPLATE_ATTRIBUTE_VIEW.format(template_id=template_id))
if resp.success:
nic_model = resp.json_data.get('AttributeGroups', [])
for xnic in nic_model:
if xnic.get(KEY_ATTR_NAME) == "NicBondingTechnology":
nic_bonding_list = xnic.get("Attributes", [])
for xbnd in nic_bonding_list:
if xbnd.get(KEY_ATTR_NAME).lower() == "nic bonding technology":
nic_bonding_tech = xbnd.get('Value')
except Exception:
nic_bonding_tech = ""
return nic_bonding_tech
def get_template_id(rest_obj, module):
"""Get template id based on requested template name."""
template_name = module.params["template_name"]
query_param = {"$filter": "Name eq '{0}'".format(template_name)}
template_req = rest_obj.invoke_request("GET", TEMPLATE_URI, query_param=query_param)
for each in template_req.json_data.get('value'):
if each['Name'] == template_name:
template_id = each['Id']
break
else:
module.fail_json(msg="Unable to complete the operation because the requested template"
" with name '{0}' is not present.".format(template_name))
return template_id
def get_identity_id(rest_obj, module):
"""Get identity pool id based on requested identity pool name."""
identity_name = module.params["identity_pool_name"]
resp = rest_obj.get_all_report_details(IDENTITY_URI)
for each in resp["report_list"]:
if each['Name'] == identity_name:
identity_id = each['Id']
break
else:
module.fail_json(msg="Unable to complete the operation because the requested identity"
" pool with name '{0}' is not present.".format(identity_name))
return identity_id
def main():
module = AnsibleModule(
argument_spec={
"hostname": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": True, "type": "str", "no_log": True},
"port": {"required": False, "type": "int", "default": 443},
"template_name": {"required": True, "type": "str"},
"identity_pool_name": {"required": False, "type": "str"},
},
supports_check_mode=False
)
try:
with RestOME(module.params, req_session=True) as rest_obj:
template_id = get_template_id(rest_obj, module)
identity_id, message = 0, "Successfully detached identity pool from template."
if module.params["identity_pool_name"] is not None:
identity_id = get_identity_id(rest_obj, module)
message = "Successfully attached identity pool to template."
nic_bonding_tech = get_template_vlan_info(rest_obj, template_id)
payload = {"TemplateId": template_id, "IdentityPoolId": identity_id, "BondingTechnology": nic_bonding_tech}
resp = rest_obj.invoke_request("POST", CONFIG_URI, data=payload)
if resp.status_code == 200:
module.exit_json(msg=message, changed=True)
except HTTPError as err:
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except (ValueError, TypeError, ConnectionError, SSLError, SSLValidationError) as err:
module.fail_json(msg=str(err))
except Exception as err:
module.fail_json(msg=str(err))
if __name__ == "__main__":
main()
|
from django.db import IntegrityError
from Poem.api.views import NotFound
from Poem.helpers.history_helpers import create_history
from Poem.poem import models as poem_models
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.response import Response
from rest_framework.views import APIView
class ListMetricsInGroup(APIView):
authentication_classes = (SessionAuthentication,)
def get(self, request, group=None):
if group:
gr = poem_models.GroupOfMetrics.objects.filter(name__exact=group)
if len(gr) > 0:
metrics = poem_models.Metric.objects.filter(
group__name__exact=group
)
else:
raise NotFound(status=404, detail='Group not found')
else:
metrics = poem_models.Metric.objects.filter(
group=None
)
results = []
for item in metrics:
results.append({'id': item.id, 'name': item.name})
results = sorted(results, key=lambda k: k['name'])
return Response({'result': results})
def put(self, request):
group = poem_models.GroupOfMetrics.objects.get(
name=request.data['name']
)
for name in dict(request.data)['items']:
metric = poem_models.Metric.objects.get(name=name)
if metric.group != group:
metric.group = group
metric.save()
create_history(metric, request.user.username)
# remove the metrics that existed before, and now were removed
metrics = poem_models.Metric.objects.filter(group=group)
for metric in metrics:
if metric.name not in dict(request.data)['items']:
metric.group = None
metric.save()
create_history(metric, request.user.username)
return Response(status=status.HTTP_201_CREATED)
def post(self, request):
try:
group = poem_models.GroupOfMetrics.objects.create(
name=request.data['name']
)
if 'items' in dict(request.data):
for name in dict(request.data)['items']:
metric = poem_models.Metric.objects.get(name=name)
metric.group = group
metric.save()
create_history(metric, request.user.username)
except IntegrityError:
return Response(
{'detail': 'Group of metrics with this name already exists.'},
status=status.HTTP_400_BAD_REQUEST
)
else:
return Response(status=status.HTTP_201_CREATED)
def delete(self, request, group=None):
if group:
try:
group = poem_models.GroupOfMetrics.objects.get(name=group)
group.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except poem_models.GroupOfMetrics.DoesNotExist:
raise NotFound(status=404, detail='Group of metrics not found')
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class ListAggregationsInGroup(APIView):
authentication_classes = (SessionAuthentication,)
def get(self, request, group=None):
if group:
aggr = poem_models.Aggregation.objects.filter(
groupname=group
)
else:
aggr = poem_models.Aggregation.objects.filter(
groupname=''
)
results = []
for item in aggr:
results.append({'id': item.id, 'name': item.name})
results = sorted(results, key=lambda k: k['name'])
return Response({'result': results})
def put(self, request):
group = poem_models.GroupOfAggregations.objects.get(
name=request.data['name']
)
for aggr in dict(request.data)['items']:
ag = poem_models.Aggregation.objects.get(name=aggr)
group.aggregations.add(ag)
ag.groupname = group.name
ag.save()
# remove removed aggregations:
for aggr in group.aggregations.all():
if aggr.name not in dict(request.data)['items']:
group.aggregations.remove(aggr)
aggr.groupname = ''
aggr.save()
return Response(status=status.HTTP_201_CREATED)
def post(self, request):
try:
group = poem_models.GroupOfAggregations.objects.create(
name=request.data['name']
)
if 'items' in dict(request.data):
for aggr in dict(request.data)['items']:
ag = poem_models.Aggregation.objects.get(name=aggr)
group.aggregations.add(ag)
ag.groupname = group.name
ag.save()
except IntegrityError:
return Response(
{
'detail':
'Group of aggregations with this name already exists.'
},
status=status.HTTP_400_BAD_REQUEST
)
else:
return Response(status=status.HTTP_201_CREATED)
def delete(self, request, group=None):
if group:
try:
gr = poem_models.GroupOfAggregations.objects.get(name=group)
gr.delete()
for aggr in poem_models.Aggregation.objects.filter(
groupname=group
):
aggr.groupname = ''
aggr.save()
return Response(status=status.HTTP_204_NO_CONTENT)
except poem_models.GroupOfAggregations.DoesNotExist:
raise NotFound(status=404,
detail='Group of aggregations not found')
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class ListMetricProfilesInGroup(APIView):
authentication_classes = (SessionAuthentication,)
def get(self, request, group=None):
if group:
mp = poem_models.MetricProfiles.objects.filter(
groupname=group
)
else:
mp = poem_models.MetricProfiles.objects.filter(
groupname=''
)
results = []
for item in mp:
results.append({'id': item.id, 'name': item.name})
results = sorted(results, key=lambda k: k['name'])
return Response({'result': results})
def put(self, request):
group = poem_models.GroupOfMetricProfiles.objects.get(
name=request.data['name']
)
for item in dict(request.data)['items']:
mp = poem_models.MetricProfiles.objects.get(name=item)
group.metricprofiles.add(mp)
mp.groupname = group.name
mp.save()
# remove removed metric profiles
for mp in group.metricprofiles.all():
if mp.name not in dict(request.data)['items']:
group.metricprofiles.remove(mp)
mp.groupname = ''
mp.save()
return Response(status=status.HTTP_201_CREATED)
def post(self, request):
try:
group = poem_models.GroupOfMetricProfiles.objects.create(
name=request.data['name']
)
if 'items' in dict(request.data):
for item in dict(request.data)['items']:
mp = poem_models.MetricProfiles.objects.get(name=item)
group.metricprofiles.add(mp)
mp.groupname = group.name
mp.save()
except IntegrityError:
return Response(
{
'detail':
'Metric profiles group with this name already exists.'
},
status=status.HTTP_400_BAD_REQUEST
)
else:
return Response(status=status.HTTP_201_CREATED)
def delete(self, request, group=None):
if group:
try:
gr = poem_models.GroupOfMetricProfiles.objects.get(
name=group
)
gr.delete()
for mp in poem_models.MetricProfiles.objects.filter(
groupname=group
):
mp.groupname = ''
mp.save()
return Response(status=status.HTTP_204_NO_CONTENT)
except poem_models.GroupOfMetricProfiles.DoesNotExist:
raise NotFound(status=404,
detail='Group of metric profiles not found')
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class ListThresholdsProfilesInGroup(APIView):
authentication_classes = (SessionAuthentication,)
def get(self, request, group=None):
if group:
tp = poem_models.ThresholdsProfiles.objects.filter(
groupname=group
)
else:
tp = poem_models.ThresholdsProfiles.objects.filter(
groupname=''
)
results = []
for item in tp:
results.append({'id': item.id, 'name': item.name})
results = sorted(results, key=lambda k: k['name'])
return Response({'result': results})
def put(self, request):
group = poem_models.GroupOfThresholdsProfiles.objects.get(
name=request.data['name']
)
for item in dict(request.data)['items']:
tp = poem_models.ThresholdsProfiles.objects.get(name=item)
group.thresholdsprofiles.add(tp)
tp.groupname = group.name
tp.save()
# remove removed thresholds profiles
for tp in group.thresholdsprofiles.all():
if tp.name not in dict(request.data)['items']:
group.thresholdsprofiles.remove(tp)
tp.groupname = ''
tp.save()
return Response(status=status.HTTP_201_CREATED)
def post(self, request):
try:
group = poem_models.GroupOfThresholdsProfiles.objects.create(
name=request.data['name']
)
if 'items' in dict(request.data):
for item in dict(request.data)['items']:
tp = poem_models.ThresholdsProfiles.objects.get(name=item)
group.thresholdsprofiles.add(tp)
tp.groupname = group.name
tp.save()
except IntegrityError:
return Response(
{
'detail':
'Thresholds profiles group with this name already '
'exists.'
},
status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_201_CREATED)
def delete(self, request, group=None):
if group:
try:
gr = poem_models.GroupOfThresholdsProfiles.objects.get(
name=group
)
gr.delete()
for tp in poem_models.ThresholdsProfiles.objects.filter(
groupname=group
):
tp.groupname = ''
tp.save()
return Response(status=status.HTTP_204_NO_CONTENT)
except poem_models.GroupOfThresholdsProfiles.DoesNotExist:
raise NotFound(status=404,
detail='Group of threshold profiles not found')
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
|
from operator import mul as _mul
from functools import reduce as _reduce
def product(iterable):
"""
Returns the product of the elements of an iterable.
If the iterable is empty, returns 1.
"""
return _reduce(_mul, iterable, 1)
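# Illustrative usage (not part of the original module):
#   product([2, 3, 4])  ->  24
#   product([])         ->  1   (the multiplicative identity for an empty iterable)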
|
#!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser(description="Go through the chunks.dat file and segment the dataset into chunks.")
parser.add_argument("--plot", action="store_true", help="Make plots of the partitioned chunks.")
args = parser.parse_args()
import os
import psoap.constants as C
from psoap.data import Spectrum, Chunk
from astropy.io import ascii
import matplotlib.pyplot as plt
import multiprocessing as mp
import yaml
try:
f = open("config.yaml")
config = yaml.safe_load(f)
f.close()
except FileNotFoundError as e:
print("You need to copy a config.yaml file to this directory, and then edit the values to your particular case.")
raise
# read in the chunks.dat file
chunks = ascii.read(config["chunk_file"])
print("Processing the following chunks of data")
print(chunks)
# read in the actual dataset
dataset = Spectrum(config["data_file"])
# sort by signal-to-noise
dataset.sort_by_SN(config.get("snr_order", C.snr_default))
# limit?
limit = config["epoch_limit"]
if limit > dataset.n_epochs:
limit = dataset.n_epochs
def process_chunk(chunk):
order, wl0, wl1 = chunk
print("Processing order {}, wl0: {:.1f}, wl1: {:.1f} and limiting to {:} highest S/N epochs.".format(order, wl0, wl1, limit))
# highest S/N epoch
wl = dataset.wl[0, order, :]
ind = (wl > wl0) & (wl < wl1)
# limit these to the epochs we want, for computational purposes
wl = dataset.wl[:limit, order, ind]
fl = dataset.fl[:limit, order, ind]
sigma = dataset.sigma[:limit, order, ind]
date = dataset.date[:limit, order, ind]
date1D = dataset.date1D[:limit]
# Stuff this into a chunk object, and save it
chunkSpec = Chunk(wl, fl, sigma, date)
chunkSpec.save(order, wl0, wl1)
if args.plot:
fig, ax = plt.subplots(nrows=1, sharex=True)
# Plot in reverse order so that highest S/N spectra are on top
for i in range(limit)[::-1]:
ax.plot(wl[i], fl[i])
ax.set_xlabel(r"$\lambda\quad[\AA]$")
fig.savefig(C.chunk_fmt.format(order, wl0, wl1) + ".png", dpi=300)
# Now make a separate directory with plots labeled by their date so we can mask problem regions
plots_dir = "plots_" + C.chunk_fmt.format(order, wl0, wl1)
if not os.path.exists(plots_dir):
os.makedirs(plots_dir)
# plot these relative to the highest S/N flux, so we know what looks suspicious, and what to mask.
for i in range(limit):
fig, ax = plt.subplots(nrows=1)
ax.plot(wl[0], fl[0], color="0.5")
ax.plot(wl[i], fl[i])
ax.set_xlabel(r"$\lambda\quad[\AA]$")
fig.savefig(plots_dir + "/{:.1f}.png".format(date1D[i]))
plt.close('all')
pool = mp.Pool(mp.cpu_count())
pool.map(process_chunk, chunks)
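# Illustrative invocation (a sketch; the filename below is a placeholder for however this
# script is saved, and it assumes config.yaml and the chunk file it references exist):
#   python segment_chunks.py --plot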
|
from django import forms
from transport.models import Transport
from django.utils.translation import gettext_lazy as _
from units.models import Goods
class TransportForm(forms.ModelForm):
address = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'id':"addressAutocomplete"}))
class Meta:
model = Transport
fields= ['address','email','phone_number']
labels = {
'address': 'ADDRESS',
}
help_texts = {
}
widgets = {
'email': forms.EmailInput(attrs={'placeholder': 'Input your email'}),
'phone_number': forms.TextInput(attrs={'placeholder': 'Input phone number'})
}
error_messages = {
'phone_number': {'invalid': _('Use the required format +254712345678')}
}
|
import numpy as np
import matplotlib.pyplot as plt
from jump_reward_inference.state_space_1D import beat_state_space_1D, downbeat_state_space_1D
class BDObservationModel:
"""
Observation model for beat and downbeat tracking with particle filtering.
Based on the first character of the observation_lambda parameter, each (down-)beat period gets split into (down-)beat states:
"B" stands for the border model, which classifies a 1/(observation_lambda) fraction of states as downbeat states and
the rest as beat states (if it is used for the downbeat tracking state space), or the same fraction of states
as beat states and the rest as non-beat states (if it is used for the beat tracking state space).
The "N" model assigns a constant number of the beginning states as downbeat states and the rest as beat states,
or the beginning states as beat states and the rest as non-beat states.
The "G" model is a smooth Gaussian transition (soft border) between downbeat/beat or beat/non-beat states.
Parameters
----------
state_space : :class:`BarStateSpace` instance
BarStateSpace instance.
observation_lambda : str
Model selector and parameter in one string, e.g. "B56": the first character chooses the
model type ("B", "N" or "G") and the remainder is its numeric parameter.
References:
----------
M. Heydari and Z. Duan, “Don’t look back: An online beat tracking method using RNN and enhanced
particle filtering,” in Proc. IEEE Int. Conf. Acoust. Speech Signal Process. (ICASSP), 2021.
M. Heydari, F. Cwitkowitz, and Z. Duan, “BeatNet: CRNN and particle filtering for online joint beat, downbeat
and meter tracking,” in Proc. of the 22nd Intl. Conf. on Music Information Retrieval (ISMIR), 2021.
"""
def __init__(self, state_space, observation_lambda):
if observation_lambda[0] == 'B': # Observation model for the rigid boundaries
observation_lambda = int(observation_lambda[1:])
# compute observation pointers
# always point to the non-beat densities
pointers = np.zeros(state_space.num_states, dtype=np.uint32)
# unless they are in the beat range of the state space
border = 1. / observation_lambda
pointers[state_space.state_positions % 1 < border] = 1
# the downbeat (i.e. the first beat range) points to density column 2
pointers[state_space.state_positions < border] = 2
# instantiate a ObservationModel with the pointers
self.pointers = pointers
elif observation_lambda[0] == 'N': # Observation model for the fixed number of states as beats boundaries
observation_lambda = int(observation_lambda[1:])
# computing observation pointers
# always point to the non-beat densities
pointers = np.zeros(state_space.num_states, dtype=np.uint32)
# unless they are in the beat range of the state space
for i in range(observation_lambda):
border = np.asarray(state_space.first_states) + i
pointers[border[1:]] = 1
# the downbeat (i.e. the first beat range) points to density column 2
pointers[border[0]] = 2
# instantiate a ObservationModel with the pointers
self.pointers = pointers
elif observation_lambda[0] == 'G': # Observation model for gaussian boundaries
observation_lambda = float(observation_lambda[1:])
pointers = np.zeros((state_space.num_beats + 1, state_space.num_states))
for i in range(state_space.num_beats + 1):
pointers[i] = gaussian(state_space.state_positions, i, observation_lambda)
pointers[0] = pointers[0] + pointers[-1]
pointers[1] = np.sum(pointers[1:-1], axis=0)
pointers = pointers[:2]
self.pointers = pointers
def gaussian(x, mu, sig):
return np.exp(-np.power((x - mu) / sig, 2.) / 2) # /(np.sqrt(2.*np.pi)*sig)
# Beat state space observations
def densities(observations, observation_model, state_model):
new_obs = np.zeros(state_model.num_states, float)
if len(np.shape(observation_model.pointers)) != 2: # For Border line and fixed number observation distribution
new_obs[np.argwhere(
observation_model.pointers == 2)] = observations
new_obs[np.argwhere(
observation_model.pointers == 0)] = 0.03
elif len(np.shape(observation_model.pointers)) == 2: # For Gaussian observation distribution
new_obs = observation_model.pointers[
0] * observations
new_obs[new_obs < 0.005] = 0.03
return new_obs
# Downbeat state space observations
def densities2(observations, observation_model, state_model):
new_obs = np.zeros(state_model.num_states, float)
if len(np.shape(observation_model.pointers)) != 2: # For Border line and fixed number observation distribution
new_obs[np.argwhere(
observation_model.pointers == 2)] = observations[1]
new_obs[np.argwhere(
observation_model.pointers == 0)] = 0.00002
elif len(np.shape(observation_model.pointers)) == 2: # For Gaussian observation distribution
new_obs = observation_model.pointers[0] * observations
new_obs[new_obs < 0.005] = 0.03
# return the observations
return new_obs
# def densities_down(observations, beats_per_bar):
# new_obs = np.zeros(beats_per_bar, float)
# new_obs[0] = observations[1] # downbeat
# new_obs[1:] = observations[0] # beat
# return new_obs
class inference_1D:
'''
Runs the jump-reward inference over the given activation functions, using the 1D state spaces, to jointly infer beats, downbeats, local tempo and local meter.
Parameters
----------
activations : numpy array, shape (num_frames, 2)
Activation function with probabilities corresponding to beats
and downbeats given in the first and second column, respectively.
Returns
----------
beats, downbeats, local tempo, local meter : numpy array, shape (num_beats, 4)
"1" at the second column indicates the downbeats.
References:
----------
'''
MIN_BPM = 55.
MAX_BPM = 215.
LAMBDA_B = 0.01 # beat transition lambda
Lambda_D = 0.01 # downbeat transition lambda
OBSERVATION_LAMBDA = "B56"
fps = 50
T = 1 / fps
MIN_BEAT_PER_BAR = 1
MAX_BEAT_PER_BAR = 4
OFFSET = 4 # The point of time after which the inference model starts to work. Can be zero!
IG_THRESHOLD = 0.4 # Information Gate threshold
def __init__(self, beats_per_bar=[], min_bpm=MIN_BPM, max_bpm=MAX_BPM, offset=OFFSET,
min_bpb=MIN_BEAT_PER_BAR, max_bpb=MAX_BEAT_PER_BAR, ig_threshold=IG_THRESHOLD,
lambda_b=LAMBDA_B, lambda_d=Lambda_D, observation_lambda=OBSERVATION_LAMBDA,
fps=None, plot=False, **kwargs):
self.beats_per_bar = beats_per_bar
self.fps = fps
self.observation_lambda = observation_lambda
self.offset = offset
self.ig_threshold = ig_threshold
self.plot = plot
beats_per_bar = np.array(beats_per_bar, ndmin=1)
# convert timing information to construct a beat state space
min_interval = round(60. * fps / max_bpm)
max_interval = round(60. * fps / min_bpm)
# State spaces and observation models initialization
self.st = beat_state_space_1D(alpha=lambda_b, tempo=None, fps=None, min_interval=min_interval,
max_interval=max_interval, ) # beat tracking state space
self.st2 = downbeat_state_space_1D(alpha=lambda_d, meter=self.beats_per_bar, min_beats_per_bar=min_bpb,
max_beats_per_bar=max_bpb) # downbeat tracking state space
self.om = BDObservationModel(self.st, observation_lambda) # beat tracking observation model
self.om2 = BDObservationModel(self.st2, "B60") # downbeat tracking observation model
pass
def process(self, activations):
T = 1 / self.fps
font = {'color': 'green', 'weight': 'normal', 'size': 12}
counter = 0
if self.plot:
fig = plt.figure(figsize=(1800 / 96, 900 / 96), dpi=96)
subplot1 = fig.add_subplot(411)
subplot2 = fig.add_subplot(412)
subplot3 = fig.add_subplot(413)
subplot4 = fig.add_subplot(414)
subplot1.title.set_text('Beat tracking inference diagram')
subplot2.title.set_text('Beat states jumping back weights')
subplot3.title.set_text('Downbeat tracking inference diagram')
subplot4.title.set_text('Downbeat states jumping back weights')
fig.tight_layout()
# output vector initialization for beat, downbeat, tempo and meter
output = np.zeros((1, 4), dtype=float)
# Beat and downbeat belief state initialization
beat_distribution = np.ones(self.st.num_states) * 0.8
beat_distribution[5] = 1 # this is just a flag to show the initial transitions
down_distribution = np.ones(self.st2.num_states) * 0.8
# local tempo and meter initialization
local_tempo = 0
meter = 0
activations = activations[int(self.offset / T):]
both_activations = activations.copy()
activations = np.max(activations, axis=1)
activations[activations < self.ig_threshold] = 0.03
for i in range(len(activations)): # loop through all frames to infer beats/downbeats
counter += 1
# beat detection
# beat transition (motion)
local_beat = ''
if np.max(self.st.jump_weights) > 1:
self.st.jump_weights = 0.7 * self.st.jump_weights / np.max(self.st.jump_weights)
b_weight = self.st.jump_weights.copy()
beat_jump_rewards1 = -beat_distribution * b_weight # calculating the transition rewards
b_weight[b_weight < 0.7] = 0 # Thresholding the jump backs
beat_distribution1 = sum(beat_distribution * b_weight) # jump back
beat_distribution2 = np.roll((beat_distribution * (1 - b_weight)), 1) # move forward
beat_distribution2[0] += beat_distribution1
beat_distribution = beat_distribution2
# Beat correction
if activations[i] > self.ig_threshold: # beat correction is done only when there is a meaningful activation
obs = densities(activations[i], self.om, self.st)
beat_distribution_old = beat_distribution.copy()
beat_distribution = beat_distribution_old * obs
if np.min(beat_distribution) < 1e-5: # normalize beat distribution if its minimum is below a threshold
beat_distribution = 0.8 * beat_distribution / np.max(beat_distribution)
beat_max = np.argmax(beat_distribution)
beat_jump_rewards2 = beat_distribution - beat_distribution_old # beat correction rewards calculation
beat_jump_rewards = beat_jump_rewards2
beat_jump_rewards[:self.st.min_interval - 1] = 0
if np.max(-beat_jump_rewards) != 0:
beat_jump_rewards = -4 * beat_jump_rewards / np.max(-beat_jump_rewards)
self.st.jump_weights += beat_jump_rewards
local_tempo = round(self.fps * 60 / (np.argmax(self.st.jump_weights) + 1))
else:
beat_jump_rewards1[:self.st.min_interval - 1] = 0
self.st.jump_weights += 2 * beat_jump_rewards1
self.st.jump_weights[:self.st.min_interval - 1] = 0
beat_max = np.argmax(beat_distribution)
# downbeat detection
if (beat_max < (
int(.07 / T)) + 1) and (counter * T + self.offset) - output[-1][
0] > .45 * T * self.st.min_interval: # here the local tempo (:np.argmax(self.st.jump_weights)+1): can be used as the criteria rather than the minimum tempo
local_beat = 'NoooOOoooW!'
# downbeat transition (motion)
if np.max(self.st2.jump_weights) > 1:
self.st2.jump_weights = 0.2 * self.st2.jump_weights / np.max(self.st2.jump_weights)
d_weight = self.st2.jump_weights.copy()
down_jump_rewards1 = - down_distribution * d_weight
d_weight[d_weight < 0.2] = 0
down_distribution1 = sum(down_distribution * d_weight) # jump back
down_distribution2 = np.roll((down_distribution * (1 - d_weight)), 1) # move forward
down_distribution2[0] += down_distribution1
down_distribution = down_distribution2
# Downbeat correction
if both_activations[i][1] > 0.00002:
obs2 = densities2(both_activations[i], self.om2, self.st2)
down_distribution_old = down_distribution.copy()
down_distribution = down_distribution_old * obs2
if np.min(down_distribution) < 1e-5: # normalize downbeat distribution if its minimum is below a threshold
down_distribution = 0.8 * down_distribution/np.max(down_distribution)
down_max = np.argmax(down_distribution)
down_jump_rewards2 = down_distribution - down_distribution_old # downbeat correction rewards calculation
down_jump_rewards = down_jump_rewards2
down_jump_rewards[:self.st2.max_interval - 1] = 0
if np.max(-down_jump_rewards) != 0:
down_jump_rewards = -0.3 * down_jump_rewards / np.max(-down_jump_rewards)
self.st2.jump_weights = self.st2.jump_weights + down_jump_rewards
meter = np.argmax(self.st2.jump_weights) + 1
else:
down_jump_rewards1[:self.st2.min_interval - 1] = 0
self.st2.jump_weights += 2 * down_jump_rewards1
self.st2.jump_weights[:self.st2.min_interval - 1] = 0
down_max = np.argmax(down_distribution)
# Beat vs Downbeat mark off
if down_max == self.st2.first_states[0]:
output = np.append(output, [[counter * T + self.offset, 1, local_tempo, meter]], axis=0)
last_detected = "Downbeat"
else:
output = np.append(output, [[counter * T + self.offset, 2, local_tempo, meter]], axis=0)
last_detected = "Beat"
# Downbeat probability mass and weights
if self.plot:
subplot3.cla()
subplot4.cla()
down_distribution = down_distribution / np.max(down_distribution)
subplot3.bar(np.arange(self.st2.num_states), down_distribution, color='maroon', width=0.4,
alpha=0.2)
subplot3.bar(0, both_activations[i][1], color='green', width=0.4, alpha=0.3)
# subplot3.bar(np.arange(1, self.st2.num_states), both_activations[i][0], color='yellow', width=0.4, alpha=0.3)
subplot4.bar(np.arange(self.st2.num_states), self.st2.jump_weights, color='maroon', width=0.4,
alpha=0.2)
subplot4.set_ylim([0, 1])
subplot3.title.set_text('Downbeat tracking 1D inference model')
subplot4.title.set_text('Downbeat states jumping back weights')
subplot4.text(1, -0.26, f'The type of the last detected event: {last_detected}',
horizontalalignment='right', verticalalignment='center', transform=subplot2.transAxes,
fontdict=font)
subplot4.text(1, -1.63, f'Local time signature = {meter}/4 ', horizontalalignment='right',
verticalalignment='center', transform=subplot2.transAxes, fontdict=font)
position2 = down_max
subplot3.axvline(x=position2)
# Downbeat probability mass and weights
if self.plot: # activates this when you want to plot the performance
if counter % 1 == 0: # Choosing how often to plot
print(counter)
subplot1.cla()
subplot2.cla()
beat_distribution = beat_distribution / np.max(beat_distribution)
subplot1.bar(np.arange(self.st.num_states), beat_distribution, color='maroon', width=0.4, alpha=0.2)
subplot1.bar(0, activations[i], color='green', width=0.4, alpha=0.3)
# subplot2.bar(np.arange(self.st.num_states), np.concatenate((np.zeros(self.st.min_interval),self.st.jump_weights)), color='maroon', width=0.4, alpha=0.2)
subplot2.bar(np.arange(self.st.num_states), self.st.jump_weights, color='maroon', width=0.4,
alpha=0.2)
subplot2.set_ylim([0, 1])
subplot1.title.set_text('Beat tracking 1D inference model')
subplot2.title.set_text("Beat states jumping back weigths")
subplot1.text(1, 2.48, f'Beat moment: {local_beat} ', horizontalalignment='right',
verticalalignment='top', transform=subplot2.transAxes, fontdict=font)
subplot2.text(1, 1.12, f'Local tempo: {local_tempo} (BPM)', horizontalalignment='right',
verticalalignment='top', transform=subplot2.transAxes, fontdict=font)
position = beat_max
subplot1.axvline(x=position)
plt.pause(0.05)
subplot1.clear()
return output[1:]
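# Illustrative usage (a sketch, not part of the original module): `activations` is assumed to be
# a (num_frames, 2) array of beat/downbeat activations at `fps` frames per second, e.g. produced
# by a beat-activation neural network upstream of this inference stage.
#   estimator = inference_1D(beats_per_bar=[], fps=50, plot=False)
#   output = estimator.process(activations)
#   # output columns: [time (s), 1 for downbeat / 2 for beat, local tempo (BPM), local meter]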
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'download_new.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_SingleFileDownload(object):
def setupUi(self, SingleFileDownload):
SingleFileDownload.setObjectName(_fromUtf8("SingleFileDownload"))
SingleFileDownload.resize(622, 557)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
SingleFileDownload.setPalette(palette)
SingleFileDownload.setAutoFillBackground(False)
SingleFileDownload.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.start_download_bt = QtGui.QPushButton(SingleFileDownload)
self.start_download_bt.setGeometry(QtCore.QRect(490, 515, 121, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.start_download_bt.setFont(font)
self.start_download_bt.setStyleSheet(_fromUtf8("QPushButton:hover{\n"
" background-color: #83bf20;\n"
" border-color: #83bf20;\n"
"}\n"
"QPushButton:active {\n"
" background-color: #93cc36;\n"
" border-color: #93cc36;\n"
"}\n"
"QPushButton{\n"
" background-color: #88c425;\n"
" border: 1px solid #88c425;\n"
" color: #fff;\n"
" border-radius: 7px;\n"
"}"))
self.start_download_bt.setObjectName(_fromUtf8("start_download_bt"))
self.cancel_bt = QtGui.QPushButton(SingleFileDownload)
self.cancel_bt.setGeometry(QtCore.QRect(10, 515, 91, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.cancel_bt.setFont(font)
self.cancel_bt.setStyleSheet(_fromUtf8("QPushButton {\n"
" background-color: #CC0000;\n"
" border: 1px solid #CC0000;\n"
" color: #fff;\n"
" border-radius: 7px;\n"
"}\n"
"QPushButton:hover{\n"
" background-color: #bb0a0a;\n"
" border-color: #bb0a0a;\n"
"}\n"
"QPushButton:active {\n"
" background-color: #ce0e0e;\n"
" border-color: #ce0e0e;\n"
"}"))
self.cancel_bt.setObjectName(_fromUtf8("cancel_bt"))
self.shard_queue_table = QtGui.QTableWidget(SingleFileDownload)
self.shard_queue_table.setGeometry(QtCore.QRect(10, 235, 601, 231))
self.shard_queue_table.setObjectName(_fromUtf8("shard_queue_table"))
self.shard_queue_table.setColumnCount(0)
self.shard_queue_table.setRowCount(0)
self.overall_progress = QtGui.QProgressBar(SingleFileDownload)
self.overall_progress.setGeometry(QtCore.QRect(10, 475, 601, 31))
self.overall_progress.setProperty("value", 0)
self.overall_progress.setObjectName(_fromUtf8("overall_progress"))
self.file_save_path = QtGui.QLineEdit(SingleFileDownload)
self.file_save_path.setGeometry(QtCore.QRect(170, 82, 401, 31))
self.file_save_path.setObjectName(_fromUtf8("file_save_path"))
self.file_path_select_bt = QtGui.QPushButton(SingleFileDownload)
self.file_path_select_bt.setGeometry(QtCore.QRect(580, 82, 31, 31))
self.file_path_select_bt.setStyleSheet(_fromUtf8("QPushButton {\n"
" background-color: #555555;\n"
" border: 1px solid #555555;\n"
" color: #fff;\n"
" border-radius: 7px;\n"
" padding: 100px;\n"
"}\n"
"QPushButton:hover{\n"
" background-color: #403f3f;\n"
" border-color: #403f3f;\n"
"}\n"
"QPushButton:active {\n"
" background-color: #505050;\n"
" border-color: #505050;\n"
"}"))
self.file_path_select_bt.setObjectName(_fromUtf8("file_path_select_bt"))
self.label_6 = QtGui.QLabel(SingleFileDownload)
self.label_6.setGeometry(QtCore.QRect(20, 20, 141, 16))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_6.setFont(font)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.file_name = QtGui.QLabel(SingleFileDownload)
self.file_name.setGeometry(QtCore.QRect(170, 20, 441, 21))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.file_name.setFont(font)
self.file_name.setStyleSheet(_fromUtf8("QLabel{\n"
"color: #2683ff;\n"
"}\n"
""))
self.file_name.setObjectName(_fromUtf8("file_name"))
self.label_5 = QtGui.QLabel(SingleFileDownload)
self.label_5.setGeometry(QtCore.QRect(20, 89, 91, 16))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_5.setFont(font)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.label_8 = QtGui.QLabel(SingleFileDownload)
self.label_8.setGeometry(QtCore.QRect(20, 127, 111, 16))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_8.setFont(font)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.tmp_dir_bt = QtGui.QPushButton(SingleFileDownload)
self.tmp_dir_bt.setGeometry(QtCore.QRect(580, 120, 31, 31))
self.tmp_dir_bt.setStyleSheet(_fromUtf8("QPushButton {\n"
" background-color: #555555;\n"
" border: 1px solid #555555;\n"
" color: #fff;\n"
" border-radius: 7px;\n"
" padding: 100px;\n"
"}\n"
"QPushButton:hover{\n"
" background-color: #403f3f;\n"
" border-color: #403f3f;\n"
"}\n"
"QPushButton:active {\n"
" background-color: #505050;\n"
" border-color: #505050;\n"
"}"))
self.tmp_dir_bt.setObjectName(_fromUtf8("tmp_dir_bt"))
self.tmp_dir = QtGui.QLineEdit(SingleFileDownload)
self.tmp_dir.setGeometry(QtCore.QRect(170, 120, 401, 31))
self.tmp_dir.setObjectName(_fromUtf8("tmp_dir"))
self.label_15 = QtGui.QLabel(SingleFileDownload)
self.label_15.setGeometry(QtCore.QRect(20, 210, 211, 16))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_15.setFont(font)
self.label_15.setObjectName(_fromUtf8("label_15"))
self.downloaded_shards = QtGui.QLabel(SingleFileDownload)
self.downloaded_shards.setGeometry(QtCore.QRect(230, 210, 381, 21))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.downloaded_shards.setFont(font)
self.downloaded_shards.setStyleSheet(_fromUtf8("QLabel{\n"
"color: #2683ff;\n"
"}\n"
""))
self.downloaded_shards.setObjectName(_fromUtf8("downloaded_shards"))
self.connections_onetime = QtGui.QSpinBox(SingleFileDownload)
self.connections_onetime.setGeometry(QtCore.QRect(170, 160, 61, 32))
self.connections_onetime.setObjectName(_fromUtf8("connections_onetime"))
self.label_19 = QtGui.QLabel(SingleFileDownload)
self.label_19.setGeometry(QtCore.QRect(20, 170, 121, 16))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_19.setFont(font)
self.label_19.setObjectName(_fromUtf8("label_19"))
self.label_9 = QtGui.QLabel(SingleFileDownload)
self.label_9.setGeometry(QtCore.QRect(20, 50, 141, 16))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_9.setFont(font)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.file_id = QtGui.QLabel(SingleFileDownload)
self.file_id.setGeometry(QtCore.QRect(170, 50, 441, 21))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.file_id.setFont(font)
self.file_id.setStyleSheet(_fromUtf8("QLabel{\n"
"color: #2683ff;\n"
"}\n"
""))
self.file_id.setObjectName(_fromUtf8("file_id"))
self.current_state = QtGui.QLabel(SingleFileDownload)
self.current_state.setGeometry(QtCore.QRect(120, 520, 351, 21))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.current_state.setFont(font)
self.current_state.setStyleSheet(_fromUtf8("QLabel {\n"
"text-align: center;\n"
"}"))
self.current_state.setObjectName(_fromUtf8("current_state"))
self.label_16 = QtGui.QLabel(SingleFileDownload)
self.label_16.setGeometry(QtCore.QRect(250, 169, 281, 21))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_16.setFont(font)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.current_active_connections = QtGui.QLabel(SingleFileDownload)
self.current_active_connections.setGeometry(QtCore.QRect(540, 170, 71, 21))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.current_active_connections.setFont(font)
self.current_active_connections.setStyleSheet(_fromUtf8("QLabel{\n"
"color: #2683ff;\n"
"}\n"
""))
self.current_active_connections.setObjectName(_fromUtf8("current_active_connections"))
self.retranslateUi(SingleFileDownload)
QtCore.QMetaObject.connectSlotsByName(SingleFileDownload)
SingleFileDownload.setTabOrder(self.file_save_path, self.file_path_select_bt)
SingleFileDownload.setTabOrder(self.file_path_select_bt, self.tmp_dir)
SingleFileDownload.setTabOrder(self.tmp_dir, self.tmp_dir_bt)
SingleFileDownload.setTabOrder(self.tmp_dir_bt, self.connections_onetime)
SingleFileDownload.setTabOrder(self.connections_onetime, self.shard_queue_table)
SingleFileDownload.setTabOrder(self.shard_queue_table, self.cancel_bt)
SingleFileDownload.setTabOrder(self.cancel_bt, self.start_download_bt)
def retranslateUi(self, SingleFileDownload):
SingleFileDownload.setWindowTitle(_translate("SingleFileDownload", "Download file - Storj GUI", None))
self.start_download_bt.setText(_translate("SingleFileDownload", "DOWNLOAD", None))
self.cancel_bt.setText(_translate("SingleFileDownload", "CANCEL", None))
self.file_path_select_bt.setText(_translate("SingleFileDownload", "...", None))
self.label_6.setText(_translate("SingleFileDownload", "<html><head/><body><p><span style=\" color:#555555;\">DOWNLOAD FILE:</span></p></body></html>", None))
self.file_name.setText(_translate("SingleFileDownload", "<html><head/><body><p><span style=\" color:#2683ff;\">SOME_FILE.MP4</span></p></body></html>", None))
self.label_5.setText(_translate("SingleFileDownload", "<html><head/><body><p><span style=\" color:#555555;\">FILE:</span></p></body></html>", None))
self.label_8.setText(_translate("SingleFileDownload", "<html><head/><body><p><span style=\" color:#555555;\">TEMP PATH:</span></p></body></html>", None))
self.tmp_dir_bt.setText(_translate("SingleFileDownload", "...", None))
self.label_15.setText(_translate("SingleFileDownload", "<html><head/><body><p><span style=\" color:#555555;\">DOWNLOADED SHARDS:</span></p></body></html>", None))
self.downloaded_shards.setText(_translate("SingleFileDownload", "<html><head/><body><p><span style=\" color:#2683ff;\">256/2015594</span></p></body></html>", None))
self.label_19.setText(_translate("SingleFileDownload", "<html><head/><body><p><span style=\" color:#555555;\">CONNECTIONS:</span></p></body></html>", None))
self.label_9.setText(_translate("SingleFileDownload", "<html><head/><body><p><span style=\" color:#555555;\">FILE ID:</span></p></body></html>", None))
self.file_id.setText(_translate("SingleFileDownload", "<html><head/><body><p><span style=\" color:#2683ff;\">SOME_FILE_ID</span></p></body></html>", None))
self.current_state.setText(_translate("SingleFileDownload", "<html><head/><body><p><span style=\" color:#555555;\">WAITING TO START DOWNLOAD</span></p></body></html>", None))
self.label_16.setText(_translate("SingleFileDownload", "<html><head/><body><p>CURRENT ACTIVE CONNECTIONS:</p></body></html>", None))
self.current_active_connections.setText(_translate("SingleFileDownload", "<html><head/><body><p><span style=\" color:#2683ff;\">0</span></p></body></html>", None))
|
""" Definitions of sections to be used in definitions of input parameters
(input files for SPR-KKR tasks) """
from ...common.grammar_types import DefKeyword, SetOf, Flag, energy
from ..input_parameters_definitions import \
SectionDefinition as Section, \
ValueDefinition as V
def CONTROL(ADSI):
""" Create the definition of the CONTROL section of the task input file.
Parameters
----------
ADSI: string
the default value for the ADSI parameter of the resulting section
Return
------
CONTROL: SectionDefinition
"""
return Section('CONTROL',[
V('DATASET', str, 'case', required = True, help="Meaning of the parameter"),
V('ADSI', DefKeyword(ADSI), required = True, help="Type of the computation -- do DFT selfconsistent cycle"),
V('POTFIL', str, required=True, help="Potential file (see SPRKKR documentation for its format). It is not necessary to set it, it will be set by the calculator."),
V('KRWS', int, required=False)
])
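# Illustrative sketch (not part of the original module; the task name below is an assumption):
# calling the factory above as CONTROL('SCF') would yield a Section named 'CONTROL' whose
# ADSI value defaults to the keyword 'SCF'; the concrete call sites live in the task
# definitions that import this module.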
TAU = Section('TAU',[
V('BZINT', DefKeyword('POINTS', 'ANOTHER_OPTION'), required=True),
V('NKTAB', 250)
])
"""The definition of the TAU section of the task input file """
ENERGY = Section('ENERGY',[
V('GRID', [5], required=True),
V('NE', [32], required=True),
V('ImE', energy, 0.0),
V('EMIN', -0.2),
])
"""The definition of the ENERGY section of the task input file """
SCF = Section('SCF', [
V('NITER', 200),
V('MIX', 0.2),
V('VXC', DefKeyword('VWN')),
V('EFGUESS', 0.7),
V('TOL', 0.00001),
V('ISTBRY', 1)
])
"""The definition of the SCF section of the task input file """
SITES = Section('SITES', [
V('NL', [3])
])
"""The definition of the SITES section of the task input file """
def TASK(TASK):
""" Create the definition of the CONTROL section of the task input file.
Parameters
----------
TASK: string
the default value for the TASK parameter of the resulting section
Return
------
TASK: SectionDefinition
"""
return Section('TASK', [
V('TASK', DefKeyword(TASK), name_in_grammar=False)
])
__all__ = [
'CONTROL', 'TAU', 'ENERGY', 'SCF', 'SITES', 'TASK'
]
|
"""
isinstance() 函数来判断一个对象是否是一个已知的类型,返回值是True or False
语法格式:isinstance(object, tuple)
参数:
object -- 实例对象。
tuple -- 基本类型或者由它们组成的元组,也可以是直接或间接类名。
"""
print(isinstance('hello', (int, str))) #注意不是 string
# Parent class
class Parent(object):
pass
# Subclass
class Sub(Parent):
pass
# Instantiate
A = Parent()
B = Sub()
# isinstance() treats an instance of a subclass as an instance of the parent type, i.e. it takes inheritance into account.
print(isinstance(Sub, Parent))
print(isinstance(A, Parent))
print(isinstance(A, Sub))
print(isinstance(B, Parent))
print(isinstance(B, Sub))
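# Follow-up example: isinstance(Sub, Parent) above prints False because Sub is a class
# object, not an instance; issubclass is the check for class relationships.
print(issubclass(Sub, Parent))   # True
print(issubclass(Parent, Sub))   # False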
|
# -*- coding: utf-8 -*-
"""
Author-related RESTful API module.
"""
from flask import request
from flask_restful import Resource
from flask_sqlalchemy import BaseQuery
from marshmallow import ValidationError
from .. import auth, db
from ..models import Author, author_schema, authors_schema
from ..utils import paginate
class AuthorList(Resource):
"""
Resource for a collection of authors.
"""
decorators = [auth.login_required]
@paginate(authors_schema)
def get(self) -> BaseQuery:
"""
Returns all the authors in the specified page.
:return: BaseQuery
"""
# For pagination, we need to return a query that hasn't run yet.
return Author.query
def post(self):
"""
Adds a new author.
:return:
"""
try:
new_author_data = author_schema.load(request.get_json())
except ValidationError as e:
return {
'message': e.messages
}, 400
        found_author = Author.query.filter_by(name=new_author_data['name']).first()
if found_author: # Found existing author
return {
'status': 'Found existing author',
'data': author_schema.dump(found_author)
}, 200
new_author = Author(**new_author_data)
db.session.add(new_author)
db.session.commit()
return {
'status': 'success',
'data': author_schema.dump(new_author)
}, 201
class AuthorItem(Resource):
"""
Resource for a single author.
"""
decorators = [auth.login_required]
def get(self, id: int):
"""
Returns the author with the given ID.
:param id: int
:return:
"""
author = Author.query.get_or_404(id, description='Author not found')
return {
'status': 'success',
'data': author_schema.dump(author)
}
def put(self, id: int):
"""
Updates the author with the given ID.
:param id: int
:return:
"""
author = Author.query.get_or_404(id, description='Author not found')
try:
author_data_updates = author_schema.load(
request.get_json(), partial=True
)
except ValidationError as e:
return {
'message': e.messages
}, 400
if 'name' in author_data_updates:
author.name = author_data_updates['name']
if 'email' in author_data_updates:
author.email = author_data_updates['email']
db.session.commit()
return {
'status': 'success',
'data': author_schema.dump(author)
}
def delete(self, id: int):
"""
Deletes the author with the given ID.
:param id: int
:return:
"""
author = Author.query.get_or_404(id, description='Author not found')
db.session.delete(author)
db.session.commit()
return '', 204
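# Usage sketch (assumed wiring, not part of this module): the resources above would
# typically be registered on a flask_restful Api instance, e.g.
#   api.add_resource(AuthorList, '/authors')
#   api.add_resource(AuthorItem, '/authors/<int:id>')
# where `api` and the URL rules are illustrative assumptions.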
|
from datetime import datetime
import re
import_file_path = "/Users/hewro/Desktop/test.txt"
export_file_path = "/Users/hewro/Desktop/dayone_out.txt"
# If the previous line is a date and the next date line has the same timestamp, the duplicate date line is not used.
last_timestamp = 0.0
repeat_time = 0
day_cout = 0
def parse2time(title):
global last_timestamp, repeat_time, day_cout
y = datetime.strptime(title, '%Y年%m月%d日')
curre_timestam = y.timestamp()
if curre_timestam == last_timestamp:
repeat_time = repeat_time + 1
# print("repart time" + title)
return ""
else:
last_timestamp = curre_timestam
day_cout = day_cout + 1
str = datetime.strftime(y, ' Date: %Y年%m月%d日 GMT+8 23:59:59')
return str
def checkIsDate(content):
content = content.strip()
ret = re.match(r'(\d{4}年\d{1,2}月\d{1,2}日)', content)
if ret is not None:
content = parse2time(ret.group(1))
if content is "":
return "----------分割线-------\n"
else:
return "\n"+content + "\n\n\n"
else:
return content + "\n"
if __name__ == '__main__':
f_open = open(import_file_path)
f_write = open(export_file_path, 'a')
    # Scan the file line by line
line = f_open.readline()
while line:
        # Regex-match the date; if it matches, rewrite it as a Day One timestamp
content = checkIsDate(line)
# print(content)
f_write.write(content)
line = f_open.readline()
f_open.close()
f_write.close()
print("repeat_time" + str(repeat_time))
print("day_count" + str(day_cout))
|
from typing import List
import stim
from ._zx_graph_solver import zx_graph_to_external_stabilizers, text_diagram_to_zx_graph, ExternalStabilizer
def test_disconnected():
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---X X---out
""")) == [
ExternalStabilizer(input=stim.PauliString("Z"), output=stim.PauliString("_")),
ExternalStabilizer(input=stim.PauliString("_"), output=stim.PauliString("Z")),
]
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---Z---out
|
X
""")) == [
ExternalStabilizer(input=stim.PauliString("Z"), output=stim.PauliString("_")),
ExternalStabilizer(input=stim.PauliString("_"), output=stim.PauliString("Z")),
]
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---Z---X---out
| |
*---*
""")) == [
ExternalStabilizer(input=stim.PauliString("X"), output=stim.PauliString("_")),
ExternalStabilizer(input=stim.PauliString("_"), output=stim.PauliString("Z")),
]
def test_cnot():
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---X---out
|
in---Z---out
""")) == external_stabilizers_of_circuit(stim.Circuit("CNOT 1 0"))
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---Z---out
|
in---X---out
""")) == external_stabilizers_of_circuit(stim.Circuit("CNOT 0 1"))
def test_cz():
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---Z---out
|
H
|
in---Z---out
""")) == external_stabilizers_of_circuit(stim.Circuit("CZ 0 1"))
def test_s():
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---Z(pi/2)---out
""")) == external_stabilizers_of_circuit(stim.Circuit("S 0"))
def test_s_dag():
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---Z(-pi/2)---out
""")) == external_stabilizers_of_circuit(stim.Circuit("S_DAG 0"))
def test_sqrt_x():
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---X(pi/2)---out
""")) == external_stabilizers_of_circuit(stim.Circuit("SQRT_X 0"))
def test_sqrt_x_sqrt_x():
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---X(pi/2)---X(pi/2)---out
""")) == external_stabilizers_of_circuit(stim.Circuit("X 0"))
def test_sqrt_z_sqrt_z():
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---Z(pi/2)---Z(pi/2)---out
""")) == external_stabilizers_of_circuit(stim.Circuit("Z 0"))
def test_sqrt_x_dag():
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---X(-pi/2)---out
""")) == external_stabilizers_of_circuit(stim.Circuit("SQRT_X_DAG 0"))
def test_x():
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---X(pi)---out
""")) == external_stabilizers_of_circuit(stim.Circuit("X 0"))
def test_z():
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---Z(pi)---out
""")) == external_stabilizers_of_circuit(stim.Circuit("Z 0"))
def test_id():
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph("""
in---X---Z---out
""")) == external_stabilizers_of_circuit(stim.Circuit("I 0"))
def test_s_state_distill():
assert zx_graph_to_external_stabilizers(text_diagram_to_zx_graph(r"""
* *---------------Z--------------------Z-------Z(pi/2)
/ \ | | |
*-----* *------------Z---+---------------+---Z----------------+-------Z(pi/2)
| | | | | |
X---X---Z(pi/2) X---X---Z(pi/2) X---X---Z(pi/2) X---X---Z(pi/2)
| | | | | |
*---+------------------Z-------------------+--------------------+---Z---Z(pi/2)
| | |
in-------Z--------------------------------------Z-------------------Z(pi)--------out
""")) == external_stabilizers_of_circuit(stim.Circuit("S 0"))
def external_stabilizers_of_circuit(circuit: stim.Circuit) -> List[ExternalStabilizer]:
n = circuit.num_qubits
s = stim.TableauSimulator()
s.do(circuit)
t = s.current_inverse_tableau()**-1
stabilizers = []
for k in range(n):
p = [0] * n
p[k] = 1
stabilizers.append(stim.PauliString(p) + t.x_output(k))
p[k] = 3
stabilizers.append(stim.PauliString(p) + t.z_output(k))
return [ExternalStabilizer.from_dual(e, circuit.num_qubits) for e in stabilizers]
|
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import IsolationForest
from sklearn.pipeline import make_pipeline
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import OneHotEncoder
from sklearn.svm import LinearSVC
from sklearn2pmml import sklearn2pmml
from sklearn2pmml.ensemble import SelectFirstClassifier
from sklearn2pmml.pipeline import PMMLPipeline
from sklego.meta import EstimatorTransformer
from sklego.preprocessing import IdentityTransformer
import pandas
df = pandas.read_csv("audit.csv")
cat_columns = ["Education", "Employment", "Gender", "Marital", "Occupation"]
cont_columns = ["Age", "Hours", "Income"]
audit_X = df[cat_columns + cont_columns]
audit_y = df["Adjusted"]
#
# Data pre-processing
#
transformer = ColumnTransformer([
("cont", "passthrough", cont_columns),
# Use dense encoding for improved Scikit-Lego compatibility
("cat", OneHotEncoder(sparse = False), cat_columns)
])
#
# Data enrichment with the anomaly score
#
outlier_detector = IsolationForest(random_state = 13)
enricher = FeatureUnion([
("identity", IdentityTransformer()),
#("outlier_detector", make_pipeline(EstimatorTransformer(outlier_detector, predict_func = "predict"), OneHotEncoder()))
("outlier_detector", EstimatorTransformer(outlier_detector, predict_func = "decision_function"))
])
#
# Anomaly score-aware classification
#
def make_column_dropper(drop_cols):
return ColumnTransformer([
("drop", "drop", drop_cols)
], remainder = "passthrough")
classifier = SelectFirstClassifier([
("outlier", LinearSVC(), "X[-1] <= 0"),
("inlier", make_pipeline(make_column_dropper([-1]), LogisticRegression()), str(True))
])
pipeline = PMMLPipeline([
("transformer", transformer),
("enricher", enricher),
("classifier", classifier)
])
pipeline.fit(audit_X, audit_y)
sklearn2pmml(pipeline, "SelectFirstAudit.pmml")
|
import logging
from telegram import Update
from telegram.ext import Updater
from telegram.ext import CallbackContext
from telegram.ext import CommandHandler
from telegram.ext import MessageHandler
from telegram.ext import Filters
from Caller import Caller
class TelegramInterface:
def __init__(self, token, admin_id):
self.token = token
self.admin_id = admin_id
self.updater = Updater(token=self.token, use_context=True)
self.dispatcher = self.updater.dispatcher
self.start_handler = CommandHandler('start', self.start)
self.spam_handler = MessageHandler(Filters.regex('^[0-9]{9}$'), self.spam)
self.incorrect_handler = MessageHandler(Filters.regex('.*'), self.incorrect)
self.dispatcher.add_handler(self.start_handler)
self.dispatcher.add_handler(self.spam_handler)
self.dispatcher.add_handler(self.incorrect_handler)
self.updater.start_polling()
self.caller = Caller()
def check_permissions(self, update):
if update.message.from_user.id == self.admin_id:
return True
else:
logging.error("non admin user trying to log in:" + update.message.text)
return False
def start(self, update: Update, context: CallbackContext):
if self.check_permissions(update):
context.bot.send_message(chat_id=update.effective_chat.id,
text="I'm a bot, please talk to me!")
def incorrect(self, update: Update, context: CallbackContext):
if self.check_permissions(update):
context.bot.send_message(chat_id=update.effective_chat.id, text='Wrong number')
def spam(self, update: Update, context: CallbackContext):
if self.check_permissions(update):
message = "Calling to "+ update.message.text + " Using Génesis"
logging.info(message)
context.bot.send_message(chat_id=update.effective_chat.id, text=message)
self.caller.call_genesis(update.message.text)
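# Minimal usage sketch (the token and admin id below are placeholders, not real values):
#   interface = TelegramInterface(token="<BOT_TOKEN>", admin_id=123456789)
# Instantiating the class is enough to start the bot, since the constructor itself
# calls updater.start_polling().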
|
import DecisionTree
def main():
#Insert input file
"""
IMPORTANT: Change this file path to change training data
"""
file = open('SoybeanTraining.csv')
"""
IMPORTANT: Change this variable too change target attribute
"""
target = "class"
data = [[]]
for line in file:
line = line.strip("\r\n")
data.append(line.split(','))
data.remove([])
attributes = data[0]
data.remove(attributes)
#Run ID3
tree = DecisionTree.makeTree(data, attributes, target, 0)
print "generated decision tree"
#Generate program
file = open('program.py', 'w')
file.write("import Node\n\n")
#open input file
file.write("data = [[]]\n")
"""
IMPORTANT: Change this file path to change testing data
"""
file.write("f = open('Soybean.csv')\n")
#gather data
file.write("for line in f:\n\tline = line.strip(\"\\r\\n\")\n\tdata.append(line.split(','))\n")
file.write("data.remove([])\n")
#input dictionary tree
file.write("tree = %s\n" % str(tree))
file.write("attributes = %s\n" % str(attributes))
file.write("count = 0\n")
file.write("for entry in data:\n")
file.write("\tcount += 1\n")
#copy dictionary
file.write("\ttempDict = tree.copy()\n")
file.write("\tresult = \"\"\n")
#generate actual tree
file.write("\twhile(isinstance(tempDict, dict)):\n")
file.write("\t\troot = Node.Node(tempDict.keys()[0], tempDict[tempDict.keys()[0]])\n")
file.write("\t\ttempDict = tempDict[tempDict.keys()[0]]\n")
#this must be attribute
file.write("\t\tindex = attributes.index(root.value)\n")
file.write("\t\tvalue = entry[index]\n")
#ensure that key exists
file.write("\t\tif(value in tempDict.keys()):\n")
file.write("\t\t\tchild = Node.Node(value, tempDict[value])\n")
file.write("\t\t\tresult = tempDict[value]\n")
file.write("\t\t\ttempDict = tempDict[value]\n")
#otherwise, break
file.write("\t\telse:\n")
file.write("\t\t\tprint \"can't process input %s\" % count\n")
file.write("\t\t\tresult = \"?\"\n")
file.write("\t\t\tbreak\n")
#print solutions
file.write("\tprint (\"entry%s = %s\" % (count, result))\n")
print "written program"
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, seethersan and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from ovenube_peru.ple_peru.utils import Utils, to_file
class LibroElectronicoDiarioSimplificado(Utils):
def get_account(self, company, year, periodo, primer=None):
account_list = []
from_date, to_date = self.get_dates(year, periodo)
if primer == "1":
account = frappe.db.sql("""select
DATE_FORMAT(NOW(),'%Y%m%d') as periodo,
SUBSTRING(name,1,POSITION('-' in name)-2) as codigo_asiento,
SUBSTRING(name,POSITION('-' in name)+2) as descripcion_asiento,
'01' as codigo_plan,
'PLAN CONTABLE GENERAL EMPRESARIAL' as descripcion_plan,
"" as codigo_cuenta,
"" as descripcion_cuenta,
'1' as indicador_cuenta
from
`tabAccount`
where SUBSTRING(name,1,POSITION('-' in name)-1) > 100
and company = '"""+company+"'", as_dict=True)
for d in account:
account_list.append({
'periodo': d.periodo,
'codigo_asiento': d.codigo_asiento,
'descripcion_asiento': d.descripcion_asiento,
'codigo_plan': d.codigo_plan,
'descripcion_plan': d.descripcion_plan,
'codigo_cuenta': d.codigo_cuenta,
'descripcion_cuenta': d.descripcion_cuenta,
'indicador_cuenta': d.indicador_cuenta
})
else:
account = frappe.db.sql("""select
CONCAT(DATE_FORMAT(gl.posting_date,'%Y%m'),'00') as periodo,
REPLACE(voucher_no, '-', '') as cuo,
CONCAT('M', IF(voucher_type = 'Sales Invoice',
(SELECT
COUNT(name)
FROM
`tabGL Entry` as gl_1
WHERE gl_1.voucher_no = gl.voucher_no
AND SUBSTRING(gl_1.account, 1, 2) <= SUBSTRING(gl.account, 1, 2)),
(SELECT
COUNT(name)
FROM
`tabGL Entry` as gl_1
WHERE gl_1.voucher_no = gl.voucher_no
AND SUBSTRING(gl_1.account, 1, 2) >= SUBSTRING(gl.account, 1, 2)))) as correlativo_asiento,
SUBSTRING(gl.account,1,POSITION('-' in gl.account)-2) as codigo_asiento,
"" as cuo_ue,
"" as centro_costo,
IF(gl.account_currency = 'SOL', 'PEN', gl.account_currency) as tipo_moneda,
IF(voucher_type = 'Purchase Invoice',
(select
`codigo_tipo_documento`
from
`tabPurchase Invoice`where name=voucher_no),
(select
`codigo_tipo_documento`
from
`tabSales Invoice`
where name=voucher_no)) as codigo_documento,
IF(voucher_type = 'Purchase Invoice',
(select
`tax_id`
from
`tabPurchase Invoice`where name=voucher_no),
(select
`tax_id`
from
`tabSales Invoice`
where name=voucher_no)) as tax_id,
IF(voucher_type = 'Purchase Invoice',
(select
IF(LENGTH(codigo_comprobante) = 1, CONCAT('0', codigo_comprobante), codigo_comprobante)
from
`tabPurchase Invoice`where name=voucher_no),
(select
IF(LENGTH(codigo_comprobante) = 1, CONCAT('0', codigo_comprobante), codigo_comprobante)
from
`tabSales Invoice`
where name=voucher_no)) as codigo_comprobante,
IF(voucher_type = 'Purchase Invoice',IFNULL(
(select
bill_series
from
`tabPurchase Invoice`
where name=voucher_no),''),
SUBSTRING_INDEX(SUBSTRING_INDEX(voucher_no,'-',-2),'-',1)) as serie_comprobante,
IF(voucher_type = 'Purchase Invoice',
(select
bill_no
from
`tabPurchase Invoice`
where name=voucher_no), SUBSTRING_INDEX(SUBSTRING_INDEX(voucher_no,'-',-2),'-',-1)) as numero_comprobante,
DATE_FORMAT(gl.posting_date,'%d/%m/%Y') as fecha_contable,
DATE_FORMAT(gl.posting_date,'%d/%m/%Y') as fecha_vencimiento,
DATE_FORMAT(gl.posting_date,'%d/%m/%Y') as fecha_emision,
gl.remarks as glosa,
'' as glosa_referencial,
IF(gl.debit_in_account_currency = 0, '0.00', ROUND(gl.debit_in_account_currency, 2)) as debe,
IF(gl.credit_in_account_currency = 0, '0.00', ROUND(gl.credit_in_account_currency, 2)) as haber,
IF(voucher_type = 'Purchase Invoice',
CONCAT('080100&',
(select
CONCAT(DATE_FORMAT(IFNULL(bill_expiration_date,bill_date),'%Y%m'),'00&', REPLACE(voucher_no, '-', ''), '&','M2')
from
`tabPurchase Invoice` purchase_invoice
where purchase_invoice.name=voucher_no)),
(IF(voucher_type = 'Sales Invoice', CONCAT('140100&',
(select
CONCAT(DATE_FORMAT(due_date,'%Y%m'),'00&', REPLACE(voucher_no, '-', ''),'&', 'M1')
from
`tabSales Invoice` sales_invoice
where sales_invoice.name=voucher_no)),''))) as estructurado,
'1' as estado
from
`tabGL Entry` gl
where SUBSTRING(account,1,POSITION('-' in account)-1) > 100
and posting_date >= '""" + str(from_date) + """'
and posting_date <= '""" + str(to_date) + """'
and company = '"""+company+"""'
order by posting_date""", as_dict=True)
for d in account:
account_list.append({
'periodo': d.periodo,
'cuo': d.cuo,
'correlativo_asiento': d.correlativo_asiento,
'codigo_asiento': d.codigo_asiento,
'cuo_ue': d.cuo_ue,
'centro_costo': d.centro_costo,
'tipo_moneda': d.tipo_moneda,
'tipo_documento': d.codigo_documento,
'tax_id': d.tax_id,
'codigo_comprobante': d.codigo_comprobante,
'serie_comprobante': d.serie_comprobante,
'numero_comprobante': d.numero_comprobante,
'fecha_contable': d.fecha_contable,
'fecha_vencimiento': d.fecha_vencimiento,
'fecha_emision': d.fecha_emision,
'glosa': d.glosa,
'glosa_referencial': d.glosa_referencial,
'debe': d.debe,
'haber': d.haber,
'estructurado': d.estructurado,
'estado': d.estado
})
return account_list
def export_libro_diario_simplificado(self, company, year, periodo, ruc, primer):
tipo = "diario_simplificado"
data = self.get_account(company, year, periodo, primer)
codigo_periodo = self.ple_name(year, periodo)
if primer == "1":
nombre = "LE" + str(ruc) + codigo_periodo + '00050400' + '00' + '1' + '1' + '1' + '1'
else:
nombre = "LE" + str(ruc) + codigo_periodo + '00050200' + '00' + '1' + '1' + '1' + '1'
nombre = nombre + ".txt"
return to_file(data, tipo, nombre, primer)
|
value = float(input())
print('NOTAS:')
for cell in [100, 50, 20, 10, 5, 2]:
print('{} nota(s) de R$ {},00'.format(int(value/cell), cell))
value = value % cell
print('MOEDAS:')
for coin in [1, 0.5, 0.25, 0.10, 0.05, 0.01]:
print('{} moeda(s) de R$ {:.2f}'.format(int(value/coin), coin).replace('.',','))
value = round(value % coin, 2)
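# Note on float precision: the modulo operations on floats above can accumulate rounding
# error (e.g. a leftover of 0.03 may yield only 2 coins of R$ 0,01, since int(0.03/0.01)
# evaluates to 2 in binary floating point); computing in integer cents would avoid this,
# but the original float-based approach is kept here.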
|
from conans import ConanFile, tools
import os
class GreatestConan(ConanFile):
name = "greatest"
version = "1.4.1"
url = "https://github.com/bincrafters/conan-greatest"
homepage = "https://github.com/silentbicycle/greatest"
description = "A C testing library in 1 file. No dependencies, no dynamic allocation. "
license = "ISC"
_source_subfolder = "source_subfolder"
no_copy_source = True
def source(self):
tools.get("{0}/archive/v{1}.tar.gz".format(self.homepage, self.version))
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
self.copy(pattern="greatest.h", dst="include", src=self._source_subfolder)
def package_id(self):
self.info.header_only()
|
from pymongo import MongoClient
import pandas as pd
import json
class MongoUtil:
def __init__(self, host='localhost', port=27017):
        # The only fields that are searchable
self.searchableFields = ['website','username']
try:
self.client = MongoClient(host, port)
self.db = self.client["PasswordDatabase"]
self.coll = self.db["PasswordCollection"]
# print('Connected to mongodb')
except Exception as e:
print('Failed to connect to mongodb')
# Creates new database for new users
    # TODO: Encrypt the database using the user's unique key.
def newDatabase(self, user_id):
try:
new_database = self.client[user_id]
password_coll = new_database['PasswordCollection']
# You have to insert a record in order to create database/collections.
            password_coll.insert_one({'website': 'DELETE', 'username': 'DELETE', 'password': 'DELETE'})
            # Delete the unused placeholder record
            password_coll.delete_one({'website': 'DELETE', 'username': 'DELETE', 'password': 'DELETE'})
return True
except Exception as e:
return False, {'Error': e}
# Returns all passwords
def getCollection(self, user_id):
try:
return self.client[user_id]['PasswordCollection'].find({}, {'_id':0, 'website':1, 'username':1, 'password':1})
except Exception as e:
return {'Error' : e}
    # Record has 3 fields: record[0] is the website, record[1] is the username/email, and record[2] is the salted password
def addRecordRemote(self, user_id, record):
try:
assert len(record) == 3
#Check if user_id database exists
#assert user_id in self.client.list_database_names()
self.client[user_id]['PasswordCollection'].insert_one(record)
return True
except Exception as e:
return False, e
    # Record has 3 fields: record[0] is the website, record[1] is the username/email, and record[2] is the salted password
def addRecord(self, record):
try:
assert len(record) == 3
self.coll.insert_one(record)
return True
except Exception as e:
return False
    # Import a LastPass CSV export into the database
def importLastPass(self, fileLocation):
try:
df = pd.read_csv(fileLocation)
df = df[['name', 'username', 'password']]
df.columns = ['website', 'username', 'password']
records = json.loads(df.T.to_json()).values()
self.coll.insert_many(records)
except Exception as e:
print('Import Failed.')
    # Searches whether a website already has a password; searchField is one of [username/email, website]
    def searchRecordRemote(self, user_id, searchField, searchText):
assert searchField in self.searchableFields
return self.client[user_id]['PasswordCollection'].find_one(({searchField: searchText}), {'_id':0, 'website':1, 'username':1, 'password':1})
    # Searches whether a website already has a password; searchField is one of [username/email, website]
def searchRecord(self, searchField, searchText):
assert searchField in self.searchableFields
return self.coll.find_one(({searchField: searchText}), {'_id':0, 'website':1, 'username':1, 'password':1})
def printCollection(self):
        for row in self.coll.find():
print(row)
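# Minimal usage sketch (assumes a local mongod on the default port; values are placeholders):
#   util = MongoUtil()
#   util.addRecord({'website': 'example.com', 'username': 'alice', 'password': '<hashed>'})
#   print(util.searchRecord('website', 'example.com'))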
|
from __future__ import unicode_literals
import frappe
from frappe.model.utils.rename_field import *
def execute():
for doctype in ("Purchase Receipt Item", "Delivery Note Item"):
frappe.reload_doctype(doctype)
table_columns = frappe.db.get_table_columns(doctype)
if "qa_no" in table_columns:
rename_field(doctype, "qa_no", "quality_inspection")
frappe.reload_doctype("Item")
rename_field("Item", "inspection_required", "inspection_required_before_purchase")
frappe.reload_doc('stock', 'doctype', 'quality_inspection')
frappe.db.sql("""
update
`tabQuality Inspection`
set
reference_type = 'Purchase Receipt', reference_name = purchase_receipt_no
where
ifnull(purchase_receipt_no, '') != '' and inspection_type = 'Incoming'
""")
frappe.db.sql("""
update
`tabQuality Inspection`
set
reference_type = 'Delivery Note', reference_name = delivery_note_no
where
ifnull(delivery_note_no, '') != '' and inspection_type = 'Outgoing'
""")
for old_fieldname in ["purchase_receipt_no", "delivery_note_no"]:
update_reports("Quality Inspection", old_fieldname, "reference_name")
update_users_report_view_settings("Quality Inspection", old_fieldname, "reference_name")
update_property_setters("Quality Inspection", old_fieldname, "reference_name")
|
import tensorflow as tf
input_image_size = 28
output_image_size = 24
input_image_channels = 1
num_labels = 10
valid_records = 5000
test_records = 10000
train_records = 55000
batch_size = 100
def read_path_file(image_file):
f = open(image_file, 'r')
paths = []
labels = []
for line in f:
label, path = line[:-1].split(',')
paths.append(path)
labels.append(int(label))
return paths, labels
def distorted_inputs(image_file, num_threads):
image_list, label_list = read_path_file(image_file)
images = tf.convert_to_tensor(image_list, dtype=tf.string)
labels = tf.convert_to_tensor(label_list, dtype=tf.int32)
filename_queue = tf.train.slice_input_producer([images, labels], shuffle=False)
result = read_image(filename_queue)
distorted_image = tf.image.resize_image_with_crop_or_pad(result.image, output_image_size,
output_image_size)
distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)
white_image = tf.image.per_image_whitening(distorted_image)
return generate_batches(white_image, result.label, num_threads)
def inputs(image_file, num_threads):
image_list, label_list = read_path_file(image_file)
images = tf.convert_to_tensor(image_list, dtype=tf.string)
labels = tf.convert_to_tensor(label_list, dtype=tf.int32)
filename_queue = tf.train.slice_input_producer([images, labels], shuffle=False)
result = read_image(filename_queue)
distorted_image = tf.image.resize_image_with_crop_or_pad(result.image, output_image_size,
output_image_size)
white_image = tf.image.per_image_whitening(distorted_image)
return generate_batches(white_image, result.label, num_threads)
def read_image(filename_queue):
class image_object(object):
pass
result = image_object()
file_contents = tf.read_file(filename_queue[0])
image = tf.image.decode_png(file_contents, channels=input_image_channels)
image = tf.cast(image, tf.float32)
result.image = tf.reshape(image, [input_image_size, input_image_size, input_image_channels])
label = tf.cast(filename_queue[1], tf.int32)
result.label = tf.sparse_to_dense(label, [num_labels], 1.0, 0.0)
return result
def generate_batches(image, label, num_threads):
images, labels = tf.train.batch(
[image, label], batch_size=batch_size,
num_threads=num_threads,
)
return images, labels
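# Usage sketch for the legacy (pre-tf.data) queue-based pipeline above; the path-file
# name and thread count are assumptions:
#   images, labels = distorted_inputs('train_paths.csv', num_threads=4)
#   with tf.Session() as sess:
#       coord = tf.train.Coordinator()
#       threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#       batch_images, batch_labels = sess.run([images, labels])
#       coord.request_stop()
#       coord.join(threads)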
|
'''
Script to graph various statistics
'''
import os
from os import path
import sys
import itertools
from statistics import geometric_mean
import argparse
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
apps = ["bt", "cg", "ep", "ft", "lu", "mg", "sp", "ua", "dc"]
'''
Graph data as a bar chart
arr is the array of data
length is how many bars to plot: either 5 or 6 (with or without geo mean)
title is the title of the graph
outfile is the file to write the final image to
'''
def graph_runtime(arr, length, title, outfile):
ind = np.arange(length)
width = 0.3
penelope='#a22a2b'
slurm='#357e99'
fig, ax = plt.subplots()
rects2 = ax.bar(ind, arr[:,1], width, color=slurm, label='Slurm')
rects3 = ax.bar(ind + width, arr[:,2], width, color=penelope, label='Penelope')
# add some text for labels, title and axes ticks
ax.set_ylabel('Performance')
ax.set_xticks(ind + width/2)
ax.set_title(title)
ax.set_xlabel('Powercap per socket per Node')
if length == 5:
ax.set_xticklabels(('60W', '70W', '80W', '90W', '100W'))
elif length == 6:
ax.set_xticklabels(('60W', '70W', '80W', '90W', '100W', 'Geo. Mean'))
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
def autolabel(rects):
"""
Attach a text label above each bar displaying its height
"""
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.01*height,
'{:.2f}'.format(height),
ha='center', va='bottom', fontsize=6)
autolabel(rects2)
autolabel(rects3)
plt.tight_layout()
plt.savefig(outfile, facecolor=fig.get_facecolor(),
edgecolor='#d5d4c2')
plt.close()
'''
Normalize data and plot barchart
filename is the input file
outfilename is the filename to write the final plot to
'''
def process_runtime(filename, outfilename):
final_data = np.genfromtxt(filename, delimiter=',')
normalized = np.reciprocal(final_data) / np.reciprocal(final_data[:,0][:,np.newaxis])
graph_runtime(normalized, 5, 'Total', outfilename)
def batch_runtime(dirname):
home = os.path.expanduser("~")
basedir = home + "/" + dirname
baseoutdir = basedir + "graphs/"
os.makedirs(baseoutdir, exist_ok=True)
for pair in itertools.combinations(apps, 2):
app1, app2 = pair
app_dir = app1 + "_" + app2 + "/"
filename = basedir + "final_data/" + app_dir + "final_data.csv"
baseoutdir_app = baseoutdir + app_dir
os.makedirs(baseoutdir_app, exist_ok=True)
outfilename = baseoutdir_app + app1 + "_" + app2 + "_runtime.png"
process_runtime(filename, outfilename)
def geometric_mean_graph(dirname):
home = os.path.expanduser("~")
basedir = home + "/" + dirname
baseoutdir = basedir + "graphs/"
os.makedirs(baseoutdir, exist_ok=True)
slurm_perfs = {x:[] for x in range(5)}
penelope_perfs = {x:[] for x in range(5)}
for pair in itertools.combinations(apps, 2):
app1, app2 = pair
app_dir = app1 + "_" + app2 + "/"
filename = basedir + "final_data/" + app_dir + "final_data.csv"
baseoutdir_app = baseoutdir + app_dir
os.makedirs(baseoutdir_app, exist_ok=True)
outfilename = baseoutdir_app + app1 + "_" + app2 + "_runtime.png"
final_data = np.genfromtxt(filename, delimiter=',')
normalized = np.reciprocal(final_data) / np.reciprocal(final_data[:,0][:,np.newaxis])
for i in range(5):
slurm_perfs[i].append(normalized[i][1])
penelope_perfs[i].append(normalized[i][2])
geo_means = []
for i in range(5):
geo_slurm = geometric_mean(slurm_perfs[i])
geo_penelope = geometric_mean(penelope_perfs[i])
geo_means.append([1, geo_slurm, geo_penelope])
data_to_graph = np.array(geo_means)
slurm_mean = np.mean(data_to_graph[:,1])
penelope_mean = np.mean(data_to_graph[:,2])
means = np.array([1,slurm_mean, penelope_mean])
data_to_graph = np.vstack([data_to_graph, means])
outfilename = baseoutdir + "geo_mean_runtime.png"
graph_runtime(data_to_graph, 6, 'Performance Under Faulty Conditions' +
' Normalized to Fair', outfilename)
# pass in dircnt/corecnt for graph file naming
def graph_stat(dirname, dir_, core, corecnt, index, powercap, statname):
penelope_color = '#a22a2b'
slurm_color = '#357e99'
colors = [slurm_color, penelope_color]
for f in core:
filename = dirname + dir_ + f
if not path.exists(filename):
continue
data = np.genfromtxt(filename, delimiter=',')
if data.size != 0:
if "penelope" in f:
plt.plot(data[:,0], data[:,index], color=colors[1], label = "Penelope")
else:
plt.plot(data[:,0], data[:,index], color=colors[0], label = "Slurm")
plt.title(statname.capitalize() + " versus Time")
plt.legend()
outfile = dirname + dir_ + statname + "_core" + str(corecnt) + "_" + powercap +".png"
plt.savefig(outfile)
print(outfile)
plt.close()
# pass in array of systems, and powercap interested in
def generate_files(systems, powercap):
files = []
for i in range(1,3):
core_files = []
for system in systems:
add_str = system + "_core" + str(i) + "_" + powercap + ".csv"
core_files.append(add_str)
files.append(core_files)
return files
# needs to read files, graph proper data, and write to outfile
# pass in systems list, directory, column of csv, and stat name
def process_stat(systems, dirname, index, statname):
powercaps = ["60", "70", "80", "90", "100"]
sub_clusters = ["frontend/", "backend/"]
for sc in sub_clusters:
data_path = dirname + sc
for data_folder in os.listdir(data_path):
for pair in itertools.combinations(apps, 2):
app1, app2 = pair
app_dir = app1 + "_" + app2 + "/"
sub_folder = data_folder + "/" + app_dir
for powercap in powercaps:
files = generate_files(systems, powercap)
corecnt = 1
for core in files:
graph_stat(data_path, sub_folder, core, corecnt, index, powercap, statname)
corecnt += 1
def main():
# arg parsing
parser = argparse.ArgumentParser()
parser.add_argument("-P", "--presentation", help="Set background to that for presentation", action="store_true")
parser.add_argument("-p", "--penelope", help="Use penelope files", action="store_true")
parser.add_argument("-s", "--slurm", help="Use slurm files", action="store_true")
parser.add_argument("stat", help="What graph to build: options are IPC," +
"power, powercap, runtime, geo, or all. IPC is likely not supported," +
"and will not be run with all")
parser.add_argument("path", help="path to data")
args = parser.parse_args()
# ensure that dirname ends with / so we can append to it
    dirname = args.path
if dirname[-1] != '/':
dirname = dirname + '/'
# define system constants so user(me) doesn't need to write the whole path
home = os.path.expanduser("~")
basedir = home + "/" + dirname
baseoutdir = basedir + "graphs/"
systems = []
if args.penelope:
systems.append("penelope")
if args.slurm and args.stat != "ipc":
systems.append("slurm")
# need to be negative since we need to count backwards
# because Tapan is bad at planning the ordering of columns
if args.stat == "all":
geometric_mean_graph(dirname)
batch_runtime(dirname)
stat_map = {"powercap":-1, "power":-2}
    for stat in stat_map:
statoutdir = baseoutdir + stat + "/"
os.makedirs(baseoutdir, exist_ok=True)
os.makedirs(statoutdir, exist_ok=True)
process_stat(systems, basedir, stat_map[stat], stat)
else:
index = 0
if args.stat == "ipc":
index = -3
elif args.stat == "power":
index = -2
elif args.stat == "powercap":
index = -1
elif args.stat == "runtime":
batch_runtime(dirname)
return
elif args.stat == "geo":
geometric_mean_graph(dirname)
return
else:
exit("invalid stat name: options are ipc, power, powercap, runtime")
baseoutdir = basedir + "graphs/"
statoutdir = baseoutdir + args.stat + "/"
os.makedirs(baseoutdir, exist_ok=True)
os.makedirs(statoutdir, exist_ok=True)
process_stat(systems, basedir, index, args.stat)
if __name__ == '__main__':
main()
|
from selenium import webdriver
import unittest
class TablesCheck(unittest.TestCase):
def setUp(self):
# Define driver
self.driver = webdriver.Chrome()
# Navigate to the url
url = 'http://www.w3schools.com/html/html_tables.asp'
self.driver.get(url)
def test_get_number_of_tables(self):
"""
Checks whether the total number of rows equals the expected number.
"""
driver = self.driver
all_rows = driver.find_elements_by_tag_name('tr')
number_of_rows = len(all_rows)
expected_num_of_rows = 26
if expected_num_of_rows != number_of_rows:
raise AssertionError('The number of rows did not match. The actual number is: \
{} and the expected number is {}'
.format(str(number_of_rows), str(expected_num_of_rows)))
else: print('Verified: The number of rows is: {}, and the expected number is {}.'
.format(str(number_of_rows), str(expected_num_of_rows)))
def test_row_contains_text(self):
"""
Checks whether a row contains a specific text.
"""
text_to_check = 'Eve'
row_number = 1
all_rows = self.driver.find_elements_by_tag_name('tr')
my_row = all_rows[row_number]
row_text = my_row.text
if text_to_check not in row_text:
raise AssertionError('The text "{}" is not in row {} ["{}"]'.format(text_to_check, row_number, row_text))
else:
print('The text "{}" is in row {} ["{}"]'.format(text_to_check, row_number, row_text))
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main()
|
# Send model regression test results to Segment with a summary
# of all test results.
import analytics
import datetime
import json
import os
from datadog_api_client.v1 import ApiClient, Configuration
from datadog_api_client.v1.api.metrics_api import MetricsApi
from datadog_api_client.v1.model.metrics_payload import MetricsPayload
from datadog_api_client.v1.model.point import Point
from datadog_api_client.v1.model.series import Series
DD_ENV = "rasa-regression-tests"
DD_SERVICE = "rasa"
METRIC_PREFIX = "rasa.perf.benchmark."
IS_EXTERNAL = os.environ["IS_EXTERNAL"]
DATASET_REPOSITORY_BRANCH = os.environ["DATASET_REPOSITORY_BRANCH"]
EXTERNAL_DATASET_REPOSITORY_BRANCH = None
CONFIG_REPOSITORY = "training-data"
CONFIG_REPOSITORY_BRANCH = os.environ["DATASET_REPOSITORY_BRANCH"]
analytics.write_key = os.environ["SEGMENT_TOKEN"]
task_mapping = {
"intent_report.json": "Intent Classification",
"CRFEntityExtractor_report.json": "Entity Prediction",
"DIETClassifier_report.json": "Entity Prediction",
"response_selection_report.json": "Response Selection",
"story_report.json": "Story Prediction",
}
def transform_to_seconds(duration: str) -> float:
"""Transform string (with hours, minutes, and seconds) to seconds.
Args:
duration: Examples: '1m27s', '1m27.3s', '27s', '1h27s', '1h1m27s'
Raises:
Exception: If the input is not supported.
Returns:
Duration converted in seconds.
"""
h_split = duration.split("h")
if len(h_split) == 1:
rest = h_split[0]
hours = 0
else:
hours = int(h_split[0])
rest = h_split[1]
m_split = rest.split("m")
if len(m_split) == 2:
minutes = int(m_split[0])
seconds = float(m_split[1].rstrip("s"))
elif len(m_split) == 1:
minutes = 0
seconds = float(m_split[0].rstrip("s"))
else:
raise Exception(f"Unsupported duration: {duration}")
overall_seconds = hours * 60 * 60 + minutes * 60 + seconds
return overall_seconds
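# Worked example of the conversion above: "1h1m27s" -> 1*3600 + 1*60 + 27 = 3687.0 seconds.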
def send_to_datadog(context):
# Initialize
tags = {
"dataset": os.environ["DATASET_NAME"],
"dataset_repository_branch": DATASET_REPOSITORY_BRANCH,
"external_dataset_repository": IS_EXTERNAL,
"config_repository": CONFIG_REPOSITORY,
"config_repository_branch": CONFIG_REPOSITORY_BRANCH,
"dataset_repository_branch": os.environ["DATASET_REPOSITORY_BRANCH"],
"dataset_commit": os.environ["DATASET_COMMIT"],
"workflow": os.environ["GITHUB_WORKFLOW"],
"config": os.environ["CONFIG"],
"pr_url": os.environ["PR_URL"],
"accelerator_type": os.environ["ACCELERATOR_TYPE"],
"github_run_id": os.environ["GITHUB_RUN_ID"],
"github_sha": os.environ["GITHUB_SHA"],
"github_event": os.environ["GITHUB_EVENT_NAME"],
"type": os.environ["TYPE"],
"branch": os.environ["BRANCH"],
"env": DD_ENV,
"service": DD_SERVICE,
}
tags_list = [f"{k}:{v}" for k, v in tags.items()]
# Send metrics
metrics = {
"test_run_time": os.environ["TEST_RUN_TIME"],
"train_run_time": os.environ["TRAIN_RUN_TIME"],
"total_run_time": os.environ["TOTAL_RUN_TIME"],
}
timestamp = datetime.datetime.now().timestamp()
series = []
for metric_name, metric_value in metrics.items():
overall_seconds = transform_to_seconds(metric_value)
series.append(
Series(
metric=f"{METRIC_PREFIX}{metric_name}.gauge",
type="gauge",
points=[Point([timestamp, overall_seconds])],
tags=tags_list,
)
)
body = MetricsPayload(series=series)
with ApiClient(Configuration()) as api_client:
api_instance = MetricsApi(api_client)
response = api_instance.submit_metrics(body=body)
if response.get('status') != 'ok':
print(response)
def send_to_segment(context):
global IS_EXTERNAL
global DATASET_REPOSITORY_BRANCH
jobID = os.environ["GITHUB_RUN_ID"]
analytics.identify(
jobID, {"name": "model-regression-tests", "created_at": datetime.datetime.now()}
)
if str(IS_EXTERNAL).lower() in ("yes", "true", "t", "1"):
IS_EXTERNAL = True
DATASET_REPOSITORY_BRANCH = os.environ["EXTERNAL_DATASET_REPOSITORY_BRANCH"]
else:
IS_EXTERNAL = False
analytics.track(
jobID,
"results",
{
"dataset": os.environ["DATASET_NAME"],
"dataset_repository_branch": DATASET_REPOSITORY_BRANCH,
"external_dataset_repository": IS_EXTERNAL,
"config_repository": CONFIG_REPOSITORY,
"config_repository_branch": CONFIG_REPOSITORY_BRANCH,
"dataset_repository_branch": os.environ["DATASET_REPOSITORY_BRANCH"],
"dataset_commit": os.environ["DATASET_COMMIT"],
"workflow": os.environ["GITHUB_WORKFLOW"],
"config": os.environ["CONFIG"],
"pr_url": os.environ["PR_URL"],
"accelerator_type": os.environ["ACCELERATOR_TYPE"],
"test_run_time": os.environ["TEST_RUN_TIME"],
"train_run_time": os.environ["TRAIN_RUN_TIME"],
"total_run_time": os.environ["TOTAL_RUN_TIME"],
"github_run_id": os.environ["GITHUB_RUN_ID"],
"github_sha": os.environ["GITHUB_SHA"],
"github_event": os.environ["GITHUB_EVENT_NAME"],
"type": os.environ["TYPE"],
**context,
},
)
def read_results(file):
with open(file) as json_file:
data = json.load(json_file)
keys = [
"accuracy",
"weighted avg",
"macro avg",
"micro avg",
"conversation_accuracy",
]
result = {key: data[key] for key in keys if key in data}
return result
def push_results(file_name, file):
result = read_results(file)
result["file_name"] = file_name
result["task"] = task_mapping[file_name]
send_to_segment(result)
if __name__ == "__main__":
send_to_datadog(None)
for dirpath, dirnames, files in os.walk(os.environ["RESULT_DIR"]):
for f in files:
if any(f.endswith(valid_name) for valid_name in task_mapping.keys()):
push_results(f, os.path.join(dirpath, f))
analytics.flush()
|
from lpd8.programs import Programs
from lpd8.pads import Pad, Pads
from lpd8.knobs import Knobs
class Actions:
"""
Class used to implement main logic related to LPD8 <> SuperCollider interactions
"""
_pad_to_bank = { # pads to banks translation dict
Pads.PAD_6: 0,
Pads.PAD_2: 1,
Pads.PAD_7: 2,
Pads.PAD_3: 3,
Pads.PAD_8: 4
}
_on = [False, False] # Synths state in SuperCollider - both off when starting program
_active_bank = 0 # Active virtual bank for knobs
_banks = [ # Banks to save knobs value
{
Knobs.KNOB_1: 48, # osc1 - note
Knobs.KNOB_2: 1, # osc1 - harmonics
Knobs.KNOB_3: 0.05, # osc1 - attack
Knobs.KNOB_4: 0, # osc1 - sweep
Knobs.KNOB_5: 0, # osc1 - detune
Knobs.KNOB_6: 0.5, # osc1 - duty
Knobs.KNOB_7: 1, # osc1 - release
Knobs.KNOB_8: 0 # osc1 - sweep_time
},
{
            Knobs.KNOB_1: 60, # osc2 - note
            Knobs.KNOB_2: 1, # osc2 - harmonics
            Knobs.KNOB_3: 0.05, # osc2 - attack
            Knobs.KNOB_4: 0, # osc2 - sweep
            Knobs.KNOB_5: 0, # osc2 - detune
            Knobs.KNOB_6: 0.5, # osc2 - duty
            Knobs.KNOB_7: 1, # osc2 - release
            Knobs.KNOB_8: 0 # osc2 - sweep_time
},
{
Knobs.KNOB_1: 0, # osc1 - feedback
Knobs.KNOB_2: 0.5, # osc1 - volume
Knobs.KNOB_3: 0.5, # master volume
Knobs.KNOB_4: 1, # master tempo
Knobs.KNOB_5: 0, # osc2 - feedback
Knobs.KNOB_6: 0.5, # osc2 - volume
Knobs.KNOB_7: 0, # master balance
Knobs.KNOB_8: 0 # ?
},
{
Knobs.KNOB_1: 0, # delay - delay
Knobs.KNOB_2: 0, # delay - decay
Knobs.KNOB_3: 10, # delay - low pass
Knobs.KNOB_4: 10000, # delay - high pass
Knobs.KNOB_5: 0, # ?
Knobs.KNOB_6: 0, # ?
Knobs.KNOB_7: 0, # ?
Knobs.KNOB_8: 0 # ?
}
]
def __init__(self, lpd8, osc):
"""
Class constructor
:param lpd8: an instance of LPD8 controller
:param osc: an instance of an OSC client / server implementation
"""
self._lpd8 = lpd8
self._osc = osc
self._running = True # This is the running flag checked in the main loop
def switch_bank(self, data):
"""
Switch from a bank to another
:param data: the data received from LPD8 midi message (position 1 is pad index, translated to oscillator index)
"""
# Retrieve bank index associated to PAD
index = self._pad_to_bank[data[1]]
# Only switch if needed
if index != self._active_bank:
# Unlit previous pad and switch banks
pad_off = list(self._pad_to_bank.keys())[list(self._pad_to_bank.values()).index(self._active_bank)]
self._lpd8.set_pad_switch_state(Programs.PGM_4, pad_off, Pad.OFF)
self._lpd8.pad_update()
self._active_bank = index
self.load_bank(self._active_bank)
# Otherwise just make sure this pad is still lit
else:
self._lpd8.set_pad_switch_state(Programs.PGM_4, data[1], Pad.ON)
def load_bank(self, bank):
"""
Loads a parameter bank. Each bank may contain up to 8 parameters
:param bank: bank number (from 0 to 4)
"""
# Set limits for bank 0 or bank 1 (oscillators parameters)
if bank == 0 or bank == 1:
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_1, 36, 84)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_2, 1, 50)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_3, 0.05, 1, is_int=False)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_4, 0, 0.5, is_int=False)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_5, -0.05, 0.05, is_int=False)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_6, 0.01, 0.5, is_int=False)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_7, 0.1, 5, is_int=False)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_8, 0, 1, is_int=False)
# Set limits for bank 2
elif bank == 2:
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_1, 0, 100, is_int=False)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_2, 0, 2, is_int=False)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_3, 0, 1, is_int=False)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_4, 0.5, 5, is_int=False)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_5, 0, 100, is_int=False)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_6, 0, 2, is_int=False)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_7, -1, 1, is_int=False)
# Set limits for bank 3
elif bank == 3:
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_1, 0, 0.5, is_int=False)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_2, 1, 5, is_int=False)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_3, 10, 10000, is_exp=True)
self._lpd8.set_knob_limits(Programs.PGM_4, Knobs.KNOB_4, 10, 10000, is_exp=True)
# Load saved values for current bank
for knob in Knobs.ALL_KNOBS:
self._lpd8.set_knob_value(Programs.PGM_4, knob, self._banks[bank][knob])
def send_init(self):
"""
Send all banks data to SuperCollider so it can initialize the oscillators with bank's default values
Also lit PAD 6 as this is the default one
:return:
"""
self._lpd8.set_pad_switch_state(Programs.PGM_4, Pads.PAD_6, Pad.ON)
self._lpd8.pad_update()
for index, bank in enumerate(self._banks):
for knob, value in bank.items():
self._osc.send('control', [index, knob, value])
def exit_running(self, *args):
"""
Exit running state and by extension main loop / program
"""
self._running = False
def check_running(self):
"""
Allow to check running state in the main loop
"""
return self._running
def control_osc(self, data):
"""
Send a parameter change to SuperCollider
:param data: the data received from LPD8 midi message (position 1 is knob number and position 2 the value)
"""
self._banks[self._active_bank][data[1]] = data[2]
self._osc.send('control', [self._active_bank, data[1], data[2]])
def beats(self, *args):
"""
This is a trick to allow PAD 5 controlled by oscillator 1 and PAD 1 controlled by oscillator 2
to blink at different BPMs (actually the beat of each oscillator)
The beat is looped back through OSC messages from SuperCollider. When value of beat event is 1,
PAD 5 is blinked, when it is 2, PAD 1 is blinked. Blink mode is added to the related pad only during
the update and then immediately reset to avoid both pads to blink at oscillator 1 or oscillator 2 loop back
"""
if args[1] == 1:
self._lpd8.set_pad_mode(Programs.PGM_4, Pads.PAD_5, Pad.SWITCH_MODE + Pad.BLINK_MODE)
elif args[1] == 2:
self._lpd8.set_pad_mode(Programs.PGM_4, Pads.PAD_1, Pad.SWITCH_MODE + Pad.BLINK_MODE)
self._lpd8.pad_update()
self._lpd8.set_pad_mode(Programs.PGM_4, Pads.PAD_1, Pad.SWITCH_MODE)
self._lpd8.set_pad_mode(Programs.PGM_4, Pads.PAD_5, Pad.SWITCH_MODE)
def on_off(self, data):
"""
Starts or stops an oscillator in SuperCollider. It toggles the oscillator status based on class flags
:param data: the data received from LPD8 midi message (position 1 is the oscillator index)
:return:
"""
if data[1] == Pads.PAD_5:
index = 0
else:
index = 1
if self._on[index]:
self._osc.send('off', index)
else:
self._osc.send('on', index)
self._on[index] = not self._on[index]
|
# -*- coding:utf-8 -*-
"""
Created on 8/20/2018
Author: wbq813 (wbq813@foxmail.com)
"""
# String preprocessing module: provides string preprocessing services for the TimeNormalizer parser
import re
NUM_MAP = {"零": 0, "0": 0,
"一": 1, "1": 1,
"二": 2, "2": 2, "两": 2,
"三": 3, "3": 3,
"四": 4, "4": 4,
"五": 5, "5": 5,
"六": 6, "6": 6,
"七": 7, "7": 7, "天": 7, "日": 7, "末": 7,
"八": 8, "8": 8,
"九": 9, "9": 9, }
class StrPreProcess:
    # Delete every substring matching a given pattern; useful for stripping whitespace and modal particles from a string
@staticmethod
def del_keyword(str_in, rules):
pattern = re.compile(rules)
return pattern.sub('', str_in)
    # Abbreviated Chinese numbers, e.g. "五万二" (i.e. 52000)
@staticmethod
def abbreviation_match(pattern, str_target, unit, unit_num):
def split_replace(match):
str_in = match.group()
s = str_in.split(unit)
num = 0
if len(s) == 2:
num += StrPreProcess.word_to_num(s[0]) * unit_num * 10 + StrPreProcess.word_to_num(s[1]) * unit_num
return str(num)
return pattern.sub(split_replace, str_target)
    # Replace the units 百 (hundred), 千 (thousand), 万 (ten thousand)
@staticmethod
def replace_unit(pattern, str_target, unit, unit_num):
def replace_unit(match):
str_in = match.group()
s = str_in.split(unit)
num = 0
len_s = len(s)
if len_s == 1:
hundred = int(s[0])
num += hundred * unit_num
elif len_s == 2:
hundred = int(s[0])
num += hundred * unit_num
num += int(s[1])
return str(num)
return pattern.sub(replace_unit, str_target)
    # Convert every number written in Chinese characters in the string into Arabic numerals
@staticmethod
def num_translate(str_target):
        # Abbreviated Chinese numbers, e.g. "五万二"
pattern = re.compile(r"[一二两三四五六七八九123456789]万[一二两三四五六七八九123456789](?!(千|百|十))")
str_target = StrPreProcess.abbreviation_match(pattern, str_target, "万", 1000)
pattern = re.compile(r"[一二两三四五六七八九123456789]千[一二两三四五六七八九123456789](?!(百|十))")
str_target = StrPreProcess.abbreviation_match(pattern, str_target, "千", 100)
pattern = re.compile(r"[一二两三四五六七八九123456789]百[一二两三四五六七八九123456789](?!十)")
str_target = StrPreProcess.abbreviation_match(pattern, str_target, "百", 10)
        # Fully written Chinese digits, and weekday characters after 周/星期
def replace_num(match):
res = StrPreProcess.word_to_num(match.group())
return str(res)
pattern = re.compile(r"[零一二两三四五六七八九]")
str_target = pattern.sub(replace_num, str_target)
pattern = re.compile(r"(?<=[周|星期])[末天日]")
str_target = pattern.sub(replace_num, str_target)
        # Replace the unit 十 (ten)
def replace_unit_ten(match):
str_in = match.group()
s = str_in.split("十")
num = 0
len_s = len(s)
if len_s == 0:
num += 10
elif len_s == 1:
ten = int(s[0])
if ten == 0:
num += 10
else:
num += ten * 10
elif len_s == 2:
if s[0] == "":
num += 10
else:
ten = int(s[0])
if ten == 0:
num += 10
else:
num += ten * 10
if len(s[1]) > 0:
num += int(s[1])
return str(num)
pattern = re.compile(r"(?<![周|星期])0?[0-9]?十[0-9]?")
str_target = pattern.sub(replace_unit_ten, str_target)
        # Replace the units 百 (hundred), 千 (thousand), 万 (ten thousand)
pattern = re.compile(r"0?[1-9]百[0-9]?[0-9]?")
str_target = StrPreProcess.replace_unit(pattern, str_target, "百", 100)
pattern = re.compile(r"0?[1-9]千[0-9]?[0-9]?[0-9]?")
str_target = StrPreProcess.replace_unit(pattern, str_target, "千", 1000)
pattern = re.compile(r"[0-9]+万[0-9]?[0-9]?[0-9]?[0-9]?")
str_target = StrPreProcess.replace_unit(pattern, str_target, "万", 10000)
return str_target
@staticmethod
def word_to_num(str_in):
res = NUM_MAP.get(str_in)
if res is None:
return -1
return res
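# Illustrative examples of num_translate, traced against the rules above:
#   StrPreProcess.num_translate("五万二") -> "52000"
#   StrPreProcess.num_translate("周日")   -> "周7"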
|
from manim import *
class Amedee(Scene):
def construct(self):
name=Tex("Amedee", tex_template=TexTemplateLibrary.ctex, font_size=30).to_edge(UL,buff=1)
sq=Square(
side_length=2,
fill_color=GREEN,
fill_opacity=0.75
).shift(
LEFT*3
)
tri =Triangle().scale(0.6).to_edge(DR)
self.play(Write(name))
self.play(DrawBorderThenFill(sq))
self.play(Create(tri))
self.wait()
self.play(name.animate.to_edge(UR),run_time=2)
self.play(sq.animate.scale(2),tri.animate.to_edge(DL),run_time=3)
self.wait()
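# Rendering note: with the Manim Community CLI this scene can be rendered with something
# like `manim -pql <file>.py Amedee`; the actual file name is not specified here.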
|
*d,s="""k= I-~)1 H-~S( 3!4-~( C*~U!0-~L3 :&~luos& L+~laets ot evah yam I dna revo gnuh( +,~a m'I
laed a & 9+~c em tel( I-~ereht yeH
hsu& M,~sekam epat tcud sih dnA
hsur a ni slived eht haeYP#V&~H0 [&~esraeh a dna evarg a naht retteb s'taht sseug I
krow fo tuo lla er'ew tub ainrofilaC gni& P$~eW
tsao& ]#~ot me' evord uoy dna ,tfiws os nwod emac uoY
stsoh s'ti& W ~sil kcalb ehT
& > ~fo dnik thgin yadrutaS A
knip dna thgirb os nrub ot desu noen& <!~nehw * , ~ ,rebmemer I
tolp' $!~hs uoy ,tpirc' P"~eppilf ' -!~g on s'tiderc r( G ~hsac eht tog uoY
togrof ton& 0 ~y tub enog er'uoY
doowylloH elttil yeh lleW
' ' ~ho ho ,nips a rof uoy ekat annog ybab hcaocegats eht dnA
ni no poh uoy t'now dias staob noJ ehT
allebarA anodeS fo pot no kcams thgiR
llef ti enotsdnas der ehT"""
z="int(f'%s',2)"%(2*'{ord(d.pop())-32:06b}')
while d:*d,c=d;s+=eval('c#'*(c<'~')+f's[-{z}:][:{z}]')
print(s)
|
from django_celery_beat.admin import PeriodicTaskForm, TaskChoiceField, CrontabSchedule
from django.utils.translation import ugettext_lazy as _
from main.fields import SourceChoiceField, DbTypeChoiceField
from main.models import BruteConfig, Dictionary, Database
from django import forms
from typing import Any
import json
class BasePeriodicTaskForm(PeriodicTaskForm):
def __init__(self, *args: list, **kwargs: Any):
super().__init__(*args, **kwargs)
self.fields["name"].help_text = "If you leave Name empty it will be generated"
self.fields["regtask"].help_text = "Do not change this value"
if kwargs.get("instance"):
for key, value in json.loads(kwargs["instance"].kwargs).items():
try:
self.fields[key].initial = value
except Exception as err:
print(err)
name = forms.CharField(label=_("Name"), required=False)
regtask = TaskChoiceField(label=_("Task (registered)"), required=True)
enabled = forms.CheckboxInput()
database = forms.ModelChoiceField(queryset=Database.objects.all().order_by("host"))
def save(self, commit: bool = True) -> object:
data = self.data.dict()
data.pop("csrfmiddlewaretoken")
data["task"] = data.pop("regtask") or data.pop("task")
if data.get("name", "").strip():
data["name"] = data.pop("name").strip()
else:
db = Database.objects.get(id=data.get("database"))
s = "d" if "dictionary" in data else "b"
i = (
data.get("dictionary")
if "dictionary" in data
else data.get("bruteforce_config")
)
self.instance.name = f"{db.host} {s}-{i}"
self.instance.kwargs = json.dumps(
{k: data[k] for k in self.fields if k in data}
)
self.instance.task = data["task"]
return super().save(commit=commit)
class DictionaryPeriodicTaskForm(BasePeriodicTaskForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["regtask"].initial = "Dictionary"
dictionary = forms.ModelChoiceField(
queryset=Dictionary.objects.all(), label=_("Dictionary"), required=True
)
class BruteforcePeriodicTaskForm(BasePeriodicTaskForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["regtask"].initial = "Bruteforce"
bruteforce_config = forms.ModelChoiceField(
queryset=BruteConfig.objects.all(), label=_("Config"), required=True
)
class DatabaseForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if kwargs.get("instance"):
self.initial["ip"] = (
",".join(self.initial["ip"]) if self.initial["ip"] else ""
)
self.fields["ip"].help_text = "Comma separated value"
host = forms.CharField(max_length=256)
ip = forms.CharField(label=_("IP list"), required=False)
port = forms.IntegerField(required=False)
db_type = DbTypeChoiceField()
def clean(self):
data = self.cleaned_data
data["ip"] = data["ip"].split(",") if data.get("ip", None) else []
return data
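# Illustrative note on the auto-generated task names above (host and primary key are made up):
# for a Database whose host is "db1.example.com" and a selected Dictionary with pk 3,
# BasePeriodicTaskForm.save() sets the name to "db1.example.com d-3"
# ("b-<pk>" when a bruteforce config is selected instead).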
|
from cinto import contas
num = int(input('Digite um valor'))
fat = contas.fatorial(num)
print(f'O fatorial de {num} é {fat}')
print(f'O dobro de {num} é {contas.dobro(num)}')
print(f'O triplo de {num} é {contas.triplo(num)}')
|
# Generated by Django 3.2.4 on 2021-06-22 09:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("testapp", "0002_alter_blogpage_channel_id"),
]
operations = [
migrations.RemoveField(
model_name="blogpage",
name="last_update_at",
),
]
|
import pandas as pd
import numpy as np
from scipy.io import loadmat
from tqdm import tqdm
from noiseceiling.utils import _find_repeats
mat = loadmat('data/raw/AU_data_for_Lukas.mat')
au_names = [n[0] for n in mat['AUnames'][0]]
rename_au = {'AU10Open': 'AU10', 'AU10LOpen': 'AU10L', 'AU10ROpen': 'AU10R', 'AU16Open': 'AU16', 'AU27i': 'AU27'}
au_names = [rename_au[name] if name in rename_au.keys() else name for name in au_names]
emo_names = [e[0] for e in mat['expnames'][0]]
# Extract amplitudes per AU
au_data = mat['data_AUamp']
au_data[np.isnan(au_data)] = 0
au_data_onoff = mat['data_AUon']
# Check whether the on/off data is indeed amplitude > 0
au_data_bin = (au_data > 0).astype(int)
np.testing.assert_array_equal(au_data_bin, au_data_onoff)
# Make sure dimensions match
au_data = np.moveaxis(au_data, -1, 0) # 60 x 42 x 2400
au_model = np.moveaxis(mat['models_AUon'], -1, 1) # 60 x 42 x 6
emo_rating = np.moveaxis(mat['data_cat'], -1, 0) # 60 x 2400 x 7
intensity = mat['data_rat'].T # 60 x 2400
# load face identities
mat = loadmat('data/raw/cluster_data_ID.mat')['id'].squeeze()
f_ids = np.stack([mat[i].T for i in range(len(mat))]) # 60 x 2400 x 8
f_ids = f_ids.round(1) # round to one decimal to reduce precision
# Last 45 participants saw one of 8 faces [face ID code: 0-7]
f_ids_45 = f_ids[15:, :, :].argmax(axis=-1)
# First 15 participants saw a weighted face [face ID code: 8-9xx]
f_ids_df = pd.DataFrame(f_ids[:15, :, :].reshape((15*2400, 8)), columns=[f'fp_{i}' for i in range(8)])
uniq_face_ids, _ = _find_repeats(f_ids_df, progress_bar=False)
uniq_face_ids = np.vstack((uniq_face_ids.reshape((15, 2400)) + 7, f_ids_45)) # 60 x 2400
# Last four columns represent male faces
gender = (f_ids.argmax(axis=2) > 3).astype(int) # 0 = female, 1 = male
gender = gender.reshape((60, 2400))
for i in tqdm(range(au_data.shape[0])):
idx = []
for ii in range(au_data.shape[2]):
au_on = np.where(au_data[i, :, ii] > 0)[0]
this_idx= '_'.join(
[f'{au_names[iii]}-{int(100 * au_data[i, iii, ii])}'
for iii in au_on]
)
if not this_idx:
this_idx = 'empty'
idx.append(this_idx)
this_dat = np.c_[au_data[i, :, :].T, uniq_face_ids[i, :], gender[i, :]]
df = pd.DataFrame(this_dat, columns=au_names + ['face_id', 'face_gender'], index=idx)
# Let's do some cleaning. First, remove the bilateral AUs that *also*
# are activated unilaterally
for au in ['2', '6', '7', '10', '12', '14', '20']:
L = 'AU' + au + 'L'
R = 'AU' + au + 'R'
act = df['AU' + au].values
df[L] = np.c_[act, df[L].values].max(axis=1)
if au != '2':
df[R] = np.c_[act, df[R].values].max(axis=1)
else:
df[R] = act
# Remove the bilateral one
df = df.drop('AU' + au, axis=1)
# Now, let's "remove" (recode) compound AUs
for aus in ['1-2', '25-12', '12-6']:
act = df['AU' + aus].values
for au in aus.split('-'):
if au in ['1', '25']:
df['AU' + au] = np.c_[act, df['AU' + au]].max(axis=1)
else:
df['AU' + au + 'L'] = np.c_[act, df['AU' + au + 'L']].max(axis=1)
df['AU' + au + 'R'] = np.c_[act, df['AU' + au + 'R']].max(axis=1)
df = df.drop('AU' + aus, axis=1)
new_cols = []
for col in df.columns:
if col in ['face_id', 'face_gender']:
new_col = col
elif 'L' in col or 'R' in col:
new_col = col.replace('L', '').replace('R', '')
new_col = 'AU' + new_col[2:].zfill(2) + col[-1]
else:
new_col = 'AU' + col[2:].zfill(2)
new_cols.append(new_col)
df.columns = new_cols
df = df.loc[:, sorted(df.columns)]
au_cols = [col for col in df.columns if 'AU' in col]
if i == 0:
np.savetxt('data/au_names_new.txt', au_cols, fmt='%s')
# Merge activation 0.0666 and 0.1333
vals = df.to_numpy()
vals = np.round(vals, 1)
df.loc[:] = vals
new_idx = []
for _, row in df.iterrows():
au_on = sorted(np.where(row.iloc[:33] > 0)[0])
this_idx = '_'.join(
[f'{df.columns[i]}-{int(100 * row[i])}'
for i in au_on]
)
if not this_idx:
this_idx = 'empty'
new_idx.append(this_idx)
df.loc[:, :] = np.round(vals, 2)
df.index = new_idx
df['emotion'] = [emo_names[idx] for idx in emo_rating[i, :, :].argmax(axis=1)]
df['intensity'] = intensity[i, :]
sub_id = f'{str(i+1).zfill(2)}WC'
df['sub'] = sub_id
df['sub_ethnicity'] = 'WC'
df['face_gender'] = [{0: 'F', 1: 'M'}[g] for g in df['face_gender']]
f_out = f'data/ratings/WC/sub-{sub_id}_ratings.tsv'
df.to_csv(f_out, sep='\t')
|
# app/main.py
# FastAPI start app
# uvicorn app.main:app --reload
from typing import Dict
from fastapi import FastAPI, Request, status
from ai import utils
from app.core import auth, nlp, user
from app.database import mongodb
# Define app
app = FastAPI(
title="Zero-Shot Classification API",
description="Classification and evaluation of texts in English through\
Zero-Shot learning. This task allows classifying other data\
types different from those used to train the model with custom\
labels.",
version="0.0.1",
)
# Add routes of endpoints
app.include_router(mongodb.router)
app.include_router(auth.router)
app.include_router(nlp.router)
app.include_router(user.router)
# URL root
@app.get("/", tags=["General"])
@utils.construct_response
def index(request: Request) -> Dict:
"""Health check
Args:
        request (Request): The incoming HTTP request.
Returns:
Dict: Response message.
"""
response = {
"message": "Successful operation",
"status-code": status.HTTP_200_OK,
"data": {},
}
return response
|
import enum
class logmessage_types(enum.Enum):
sent, received, internal = range(3)
class internal_submessage_types(enum.Enum):
quit, error = range(2)
class controlmessage_types(enum.Enum):
quit, reconnect, send_line, ping, ping_timeout = range(5)
class cronmessage_types(enum.Enum):
quit, schedule, delete, reschedule = range(4)
|
import argparse
import os
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report
from pytorch_transformers import BertTokenizer
import nemo
import nemo_nlp
from nemo_nlp.callbacks.joint_intent_slot import \
eval_iter_callback, eval_epochs_done_callback
from nemo_nlp.text_data_utils import \
process_snips, process_atis, merge
from nemo_nlp import read_intent_slot_outputs
# Parsing arguments
parser = argparse.ArgumentParser(description='Joint-intent BERT')
parser.add_argument("--local_rank", default=None, type=int)
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--max_seq_length", default=50, type=int)
parser.add_argument("--num_gpus", default=1, type=int)
parser.add_argument("--fc_dropout", default=0.1, type=float)
parser.add_argument("--pretrained_bert_model",
default="bert-base-uncased",
type=str)
parser.add_argument("--dataset_name", default='snips-atis', type=str)
parser.add_argument("--data_dir",
default='data/nlu',
type=str)
parser.add_argument("--work_dir",
# default='outputs/ATIS/20190814-152523/checkpoints',
# default='outputs/SNIPS-ALL/20190821-154734/checkpoints',
default='outputs/SNIPS-ATIS/20190829-140622/checkpoints',
type=str)
parser.add_argument("--amp_opt_level", default="O0",
type=str, choices=["O0", "O1", "O2"])
parser.add_argument("--do_lower_case", action='store_false')
args = parser.parse_args()
if not os.path.exists(args.data_dir):
raise ValueError(f'Data not found at {args.data_dir}')
nf = nemo.core.NeuralModuleFactory(backend=nemo.core.Backend.PyTorch,
local_rank=args.local_rank,
optimization_level=args.amp_opt_level,
log_dir=None)
logger = nf.logger
# Load the pretrained BERT parameters
# pretrained_model can be one of:
# bert-base-uncased, bert-large-uncased, bert-base-cased,
# bert-large-cased, bert-base-multilingual-uncased,
# bert-base-multilingual-cased, bert-base-chinese.
pretrained_bert_model = nf.get_module(
name="huggingface.BERT",
params={"pretrained_model_name": args.pretrained_bert_model,
"local_rank": args.local_rank},
collection="nemo_nlp",
pretrained=True)
if args.dataset_name == 'atis':
num_intents = 26
num_slots = 129
data_dir = process_atis(args.data_dir, args.do_lower_case)
pad_label = num_slots - 1
elif args.dataset_name == 'snips-atis':
data_dir, pad_label = merge(args.data_dir,
['ATIS/nemo-processed-uncased',
'snips/nemo-processed-uncased/all'],
args.dataset_name)
num_intents = 41
num_slots = 140
elif args.dataset_name.startswith('snips'):
data_dir = process_snips(args.data_dir, args.do_lower_case)
if args.dataset_name.endswith('light'):
data_dir = f'{data_dir}/light'
num_intents = 6
num_slots = 4
elif args.dataset_name.endswith('speak'):
data_dir = f'{data_dir}/speak'
num_intents = 9
num_slots = 9
elif args.dataset_name.endswith('all'):
data_dir = f'{data_dir}/all'
num_intents = 15
num_slots = 12
pad_label = num_slots - 1
else:
nf.logger.info("Looks like you pass in the name of dataset that isn't "
"already supported by NeMo. Please make sure that you "
"build the preprocessing method for it.")
tokenizer = BertTokenizer.from_pretrained(args.pretrained_bert_model)
hidden_size = pretrained_bert_model.local_parameters["hidden_size"]
classifier = nemo_nlp.JointIntentSlotClassifier(hidden_size=hidden_size,
num_intents=num_intents,
num_slots=num_slots,
dropout=args.fc_dropout)
loss_fn = nemo_nlp.JointIntentSlotLoss(num_slots=num_slots)
# Evaluation pipeline
logger.info("Loading eval data...")
data_layer = nemo_nlp.BertJointIntentSlotDataLayer(
path_to_data=data_dir + '/test.tsv',
path_to_slot=data_dir + '/test_slots.tsv',
pad_label=num_slots-1,
tokenizer=tokenizer,
mode='eval',
max_seq_length=args.max_seq_length,
batch_size=args.batch_size,
shuffle=False,
num_workers=0,
local_rank=args.local_rank
)
ids, type_ids, input_mask, slot_mask, intents, slots = data_layer()
hidden_states = pretrained_bert_model(input_ids=ids,
token_type_ids=type_ids,
attention_mask=input_mask)
intent_logits, slot_logits = classifier(hidden_states=hidden_states)
###########################################################################
# Run inference over the evaluation pipeline, restoring the trained checkpoints from work_dir
evaluated_tensors = nf.infer(
tensors=[intent_logits, slot_logits, slot_mask, intents, slots],
checkpoint_dir=args.work_dir,
)
def concatenate(lists):
return np.concatenate([t.cpu() for t in lists])
def get_preds(logits):
return np.argmax(logits, 1)
intent_logits = concatenate(evaluated_tensors[0])
slot_logits = concatenate(evaluated_tensors[1])
slot_masks = concatenate(evaluated_tensors[2])
intents = concatenate(evaluated_tensors[3])
slots = concatenate(evaluated_tensors[4])
pred_intents = np.argmax(intent_logits, 1)
logger.info('Intent prediction results')
logger.info(classification_report(intents, pred_intents))
pred_slots = np.argmax(slot_logits, axis=2)
pred_slot_list, slot_list = [], []
for i, pred_slot in enumerate(pred_slots):
pred_slot_list.extend(list(pred_slot[slot_masks[i]][1:-1]))
slot_list.extend(list(slots[i][slot_masks[i]][1:-1]))
logger.info('Slot prediction results')
logger.info(classification_report(slot_list, pred_slot_list))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import hashlib
import copy
from random import randint
from random import choice
possible_names = ['Carolina', 'Guilherme', 'Daniel', 'Luna', 'Natalie', 'Elaini', 'Eduarda', 'Isabella', 'Júlia', 'André', 'Thiago', 'Sandra', 'Edson', 'Ossian', 'Laura', 'Dante', 'Fernando', 'Antonio', 'Denise', 'Eloisa', 'Eduardo', 'Sílvio', 'Pedro', 'José', 'Jorge', 'Alfredo', 'Amadeus', 'Marcos', 'Enzo', 'Mariana', 'Aline', 'Marcus', 'Silvana', 'Ada', 'Renan', 'Roberto', 'Caio', 'Maria', 'Livia', 'Emerson', 'Luiz', 'Luis', 'Alan']
possible_surnames = ['Andrade', 'Krum', 'Kawasaki', 'Gallice', 'Pedri', 'Harres', 'Queiroz', 'Rodrigues', 'Vicente', 'Kniss', 'Yamada', 'Cremonezi', 'Schumacher', 'Campos', 'Kovalski', 'Strozzi', 'Iboshi', 'Handa', 'Megumi', 'Shinohata', 'Padovani', 'Olini', 'Martins', 'Pereira', 'Moreira', 'Lopes', 'Souza', 'Sousa', 'Gomes', 'Ameixa', 'Pera', 'Maruffa', 'Krajuska', 'Dudeque', 'Starosta', 'Franco', 'Dencker', 'Gil', 'Barreto', 'Kruger', 'Calopsita', 'Meneghel']
possible_emails = ['gmail.com', 'outlook.com', 'bol.com.br', 'outlook.com']
possible_street_types = ['Av.', 'Rua']
possible_street_prefixes = ['Marechal', 'Presidente', 'Coronel']
possible_street_separator = [' de ', ' da ', ' ', ' ', ' ']
possible_states = ['Amapá', 'Paraná', 'Amazonas', 'Rio Grande do Sul', 'Santa Catarina', 'São Paulo', 'Rio de Janeiro', 'Espírito Santo', 'Alagoas', 'Sergipe', 'Bahia', 'Mato Grosso', 'Mato Grosso do Sul', 'Pará', 'Minas Gerais']
possible_cities = ['Curitiba', 'Manaus', 'Porto Alegre', 'Florianópolis', 'São Paulo', 'Rio de Janeiro', 'Vitória', 'Maceió', 'Aracaju', 'Salvador', 'Cuiabá', 'Campo Grande', 'Belém', 'Belo Horizonte', 'Palotina', 'Divinópolis', 'Nova Serrana', 'Monte Alegre do Sul', 'Santa Maria', 'Porto Seguro', 'Campo Largo', 'Curitibanos', 'Itapoá']
possible_ddds = [67, 66, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 24, 27, 28, 31, 32, 33, 34, 35, 37, 38, 41]
visaPrefixList = [
['4', '5', '3', '9'],
['4', '5', '5', '6'],
['4', '9', '1', '6'],
['4', '5', '3', '2'],
['4', '9', '2', '9'],
['4', '0', '2', '4', '0', '0', '7', '1'],
['4', '4', '8', '6'],
['4', '7', '1', '6'],
['4']]
mastercardPrefixList = [
['5', '1'], ['5', '2'], ['5', '3'], ['5', '4'], ['5', '5']]
class Person:
def __init__(self):
self.name, self.surname = self.generate_random_name()
self.email = self.generate_random_email()
self.password = self.generate_random_password()
self.cpf = self.generate_random_cpf()
self.street, self.number, self.complement, self.city, self.cep = self.generate_random_address()
self.telephone = self.generate_random_phone(True)
self.cellphone = self.generate_random_phone(False)
self.cardNickname, self.pan, self.expDate, self.cvv = self.generate_random_card()
def generate_random_name(self):
name = random.choice(possible_names)
surname = random.choice(possible_surnames)
return name, surname
def generate_random_email(self):
email = self.name.lower() + "." + self.surname.lower() + "@" + random.choice(possible_emails)
return email
def generate_random_password(self):
h = hashlib.md5()
        h.update((self.name + self.surname).encode('utf-8'))  # hashlib requires bytes, not str
return (h.hexdigest())[0:6]
def generate_random_cpf(self):
cpf = ""
for i in range(0, 11):
digit = random.randrange(0, 9)
cpf += str(digit)
return cpf
def generate_random_phone(self, telephone=True):
if telephone:
typePhone = ' '
else:
typePhone = ' 9'
number = '+55 {0}{1}'.format(random.choice(possible_ddds), typePhone)
number += str(randint(1,9))
for n in range(7):
number += str(randint(0,9))
return number
def generate_random_address(self):
def generate_random_street():
prefix = random.choice(possible_street_prefixes)
i = random.randint(1, 100)
            # Generate an ordinary street name
if i <= 85:
first_name = random.choice(possible_names)
separator = random.choice(possible_street_separator)
second_name = random.choice(possible_surnames)
last_name = random.choice(possible_surnames)
street = prefix + ' ' + first_name + separator + second_name + ' ' + last_name
            # Generate a street named after a state
else:
state_name = random.choice(possible_states)
street = prefix + ' ' + state_name
return street
street = generate_random_street()
number = str(random.randint(1, 2000))
i = random.randint(1, 100)
if i <= 50:
complement = 'Casa'
else:
complement = 'Ap. ' + str(random.randint(1, 200))
city = random.choice(possible_cities)
cep = ""
for i in range(0, 9):
digit = random.randrange(0, 9)
cep += str(digit)
return street, number, complement, city, cep
def generate_random_card(self):
def completed_number(prefix, length):
"""
'prefix' is the start of the CC number as a string, any number of digits.
'length' is the length of the CC number to generate. Typically 13 or 16
"""
ccnumber = prefix
# generate digits
while len(ccnumber) < (length - 1):
digit = str(random.choice(range(0, 10)))
ccnumber.append(digit)
# Calculate sum
sum = 0
pos = 0
reversedCCnumber = []
reversedCCnumber.extend(ccnumber)
reversedCCnumber.reverse()
while pos < length - 1:
odd = int(reversedCCnumber[pos]) * 2
if odd > 9:
odd -= 9
sum += odd
if pos != (length - 2):
sum += int(reversedCCnumber[pos + 1])
pos += 2
# Calculate check digit
            checkdigit = ((sum // 10 + 1) * 10 - sum) % 10  # integer division keeps the Luhn check digit an int (e.g. sum 67 -> (70 - 67) % 10 = 3)
ccnumber.append(str(checkdigit))
return ''.join(ccnumber)
def credit_card_number(prefixList, length):
ccnumber = copy.copy(random.choice(prefixList))
return (completed_number(ccnumber, length))
visa16 = credit_card_number(visaPrefixList, 16)[:-2]
#cardNickname = self.name.upper() + ' ' + self.surname.upper()
cardNickname = "VISA teste"
        month = str(random.randint(1, 12))  # expiry months run 1-12
if int(month) < 10:
month = "0" + month
expDate = month + str(2) + str(random.randint(1, 9))
cvv = str(random.randint(0, 999))
if int(cvv) < 10:
cvv = "0" + cvv
if int(cvv) < 100:
cvv = "0" + cvv
return cardNickname, visa16, expDate, cvv
people_list = []
for i in range(0, 10000):
person = Person()
numbers = [person.pan for person in people_list]
    while person.pan in numbers:  # regenerate if the generated card number already exists
person = Person()
people_list.append(person)
file = open('pessoas.csv', 'w+')
file.write('email,senha,nome,sobrenome,cep,endereco,numero,complemento,cidade, telefone fixo, celular, apelido cartao,pan,dataexp,cvv,cpf\n')
for person in people_list:
file.write(person.email + ',' + person.password + ',' + person.name + ',' + person.surname + ',' + person.cep + ',' + person.street + ',' + person.number + ',' + person.complement + ',' + person.city + ',' + person.telephone + ',' + person.cellphone + ',' + person.cardNickname + ',' + person.pan + ',' + person.expDate + ',' + person.cvv + ',' + person.cpf + '\n')
print(person.name + ' ' + person.surname + ' / ' + person.email + ' / ' + person.password + ' / ' + person.cpf + ' / ' + person.street + ', ' + person.number + ', ' + person.complement + '. ' + person.city + ', ' + person.telephone + ', ' + person.cellphone + ', ' + person.cep + ' / ' + person.cardNickname + ', ' + person.pan + ', ' + person.expDate + ', ' + person.cvv)
file.close()
|
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
from . import algorithm
from . import declaration_based
from . import registration_based
from pygccxml import declarations
templates = declarations.templates
class held_type_t(object):
"""
Helper class that can hold smart pointer name and create identifier for the
held type from that given a creator.
"""
def __init__( self, smart_ptr ):
"""
:param smart_ptr: smart pointer type as string
"""
object.__init__( self )
self._smart_ptr = smart_ptr
def _get_smart_ptr( self ):
return self._smart_ptr
def _set_smart_ptr( self, ptr ):
self._smart_ptr = ptr
smart_ptr = property( _get_smart_ptr, _set_smart_ptr )
def create( self, creator):
""" Return string of type to use for held type.
Ex: `boost::shared_ptr` class
"""
smart_ptr = algorithm.create_identifier( creator, self.smart_ptr )
arg = algorithm.create_identifier( creator, creator.declaration.decl_string )
return templates.join( smart_ptr, [ arg ] )
class smart_pointer_registrator_t( registration_based.registration_based_t
, declaration_based.declaration_based_t ):
"""
Converter for `boost::python::register_ptr_to_python`.
    Lets boost python know that it can use `smart_ptr` to hold an object.
See: http://www.boost.org/libs/python/doc/v2/register_ptr_to_python.html
"""
def __init__( self, smart_ptr, class_creator ):
"""`smart_ptr`: string of pointer type. Ex: `boost::shared_ptr`"""
registration_based.registration_based_t.__init__( self )
declaration_based.declaration_based_t.__init__( self, class_creator.declaration )
self._smart_ptr = smart_ptr
self._class_creator = class_creator
self.works_on_instance = False
def _get_smart_ptr( self ):
return self._smart_ptr
def _set_smart_ptr( self, ptr ):
self._smart_ptr = ptr
smart_ptr = property( _get_smart_ptr, _set_smart_ptr )
def _get_class_creator( self ):
return self._class_creator
def _set_class_creator( self, cc ):
self._class_creator = cc
class_creator = property( _get_class_creator, _set_class_creator )
def _create_impl(self):
if self.declaration.already_exposed:
return ''
if self.class_creator \
and self.class_creator.held_type \
and isinstance( self.class_creator.held_type, held_type_t ) \
and self.class_creator.held_type.smart_ptr == self.smart_ptr \
and self.target_configuration.boost_python_has_wrapper_held_type \
and not self.class_creator.declaration.require_self_reference:
            return '' # boost.python does it automatically
rptp = algorithm.create_identifier( self, '::boost::python::register_ptr_to_python' )
held_type = held_type_t(self.smart_ptr).create( self )
return templates.join( rptp, [ held_type ] ) + '();'
def _get_system_files_impl( self ):
return []
class smart_pointers_converter_t( registration_based.registration_based_t
, declaration_based.declaration_based_t ):
""" creator for boost::python::implicitly_convertible.
This creates a statement that allows the usage of C++ implicit
conversion from source to target.
See: http://www.boost.org/libs/python/doc/v2/implicit.html
"""
def __init__( self, smart_ptr, source, target ):
registration_based.registration_based_t.__init__( self )
declaration_based.declaration_based_t.__init__( self, source )
self._target = target
self._smart_ptr = smart_ptr
self.works_on_instance = False
def _get_target(self):
return self._target
target = property( _get_target )
def _get_source(self):
return self.declaration
source = property( _get_source )
def _get_smart_ptr( self ):
return self._smart_ptr
def _set_smart_ptr( self, ptr ):
self._smart_ptr = ptr
smart_ptr = property( _get_smart_ptr, _set_smart_ptr )
def _instantiate_smart_ptr( self, decl ):
identifier = algorithm.create_identifier( self, decl.partial_decl_string )
return templates.join( self.smart_ptr, [identifier] )
def _create_impl(self):
implicitly_convertible = algorithm.create_identifier( self, '::boost::python::implicitly_convertible' )
from_arg = self._instantiate_smart_ptr( self.source )
to_arg = self._instantiate_smart_ptr( self.target )
return templates.join(implicitly_convertible, [ from_arg, to_arg ] ) + '();'
def _get_system_files_impl( self ):
return []
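# Illustrative output of the creators above (class names are hypothetical, smart_ptr is whatever
# pointer string was configured, e.g. boost::shared_ptr): smart_pointer_registrator_t emits a
# line such as
#   ::boost::python::register_ptr_to_python< boost::shared_ptr< Foo > >();
# while smart_pointers_converter_t emits
#   ::boost::python::implicitly_convertible< boost::shared_ptr< Derived >, boost::shared_ptr< Base > >();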
|
'''The National Swimming Confederation needs a program that reads an athlete's year of birth and
shows their category according to age:
- Up to 9 years old: Mirim
- Up to 14 years old: Infantil
- Up to 18 years old: Junior
- Up to 20 years old: Sênior
- Older than that: Master'''
from datetime import date
ano = int(input('\033[33mDigite o seu ano de nascimento:\033[m '))
if date.today().year - ano <= 9:
print('\033[31mSua categoria é Mirim!\033[m')
elif 9 < date.today().year - ano <= 14:
print('\033[32mSua categoria é Infantil!\033[m')
elif 14 < date.today().year - ano <=18:
print('\033[34mSua categoria é Junior!\033[m')
elif 18 < date.today().year - ano <= 25:
print('\033[35mSua categoria é Sênior!\033[m')
else:
print('\033[36mSua categoria é Master!\033[m')
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Custom operations
~~~~~~~~~~~~~~~~~
"""
from tensorflow.compiler.plugin.poplar.ops import gen_poputil_ops
from tensorflow.python.ipu.vertex_edsl import PlaceholderVertexExpr
from tensorflow.python.ipu.vertex_edsl import DefaultNameSource
def codelet_expression_op(vertex_expression, *args):
"""Add a custom fused elementwise expression operation to the graph.
Note that no autograd is done on this fused operation because the autograd
code does not understand the internal structure of the fused codelet.
Args:
vertex_expression: A Python function that defines the codelet expression.
args: Tensor inputs to the expression.
Returns:
The Tensor which is a result of applying the elementwise operation
"""
dtype = args[0].dtype
placeholders = map(lambda x: PlaceholderVertexExpr("in" + str(x), None),
range(0, len(args)))
concrete_expression = vertex_expression(*placeholders)
expr = concrete_expression.lower(DefaultNameSource())
return gen_poputil_ops.codelet_expression_op(input=args,
dtype=dtype,
source=expr)
def _validate_inputs_with_gradients(inputs_with_gradients, inputs):
if inputs_with_gradients is None:
return list(range(0, len(inputs)))
if isinstance(inputs_with_gradients, list):
return inputs_with_gradients
return list(inputs_with_gradients)
def precompiled_user_op(inputs,
library_path,
gp_path="",
outs=None,
name="UserOp",
op_name="Build",
separate_gradients=False,
inputs_with_gradients=None):
"""Call the poplar function located in the shared library at `library_path`
as part of the normal TensorFlow execution with the given `inputs`.
The shape and type of the output should be specified by `outs`. If it is
`None` it will default to no output. `outs` should be a dictionary with two
elements like this:
.. code-block:: python
outs = {
"output_types": [my_types_as_a_list],
"output_shapes": [my_shapes_as_a_list],
}
Args:
inputs: The tensor inputs to the operation.
library_path: The path to the shared object that contains the functions
to build the Poplar operation in the graph.
gp_path: The path to the precompiled codelet file.
outs: A dictionary describing the output tensor shapes and types.
name: The name of the operation.
    op_name: The prefix of the functions inside the shared object file. This
defaults to 'Build'.
separate_gradients: When set to true, multiple gradient ops will be
generated, one for each input. When false, a single gradient op will be
generated, which should produce the partial derivatives for all inputs.
inputs_with_gradients: When set, produce derivatives only for specified
inputs. List of input indices expected.
Returns:
The array of tensor outputs.
"""
if outs is None:
outs = {
"output_types": [],
"output_shapes": [],
}
inputs_with_gradients = _validate_inputs_with_gradients(
inputs_with_gradients, inputs)
return gen_poputil_ops.ipu_user_op(
inputs,
library_path=library_path,
gp_path=gp_path,
op_name=op_name,
name=name,
separate_gradients=separate_gradients,
gradient_size=0,
partial_derivative_index=0,
inputs_with_gradients=inputs_with_gradients,
**outs)
def cpu_user_operation(inputs,
library_path,
outs=None,
name="UserOp",
op_name="Callback",
separate_gradients=False,
inputs_with_gradients=None):
"""
Call the CPU function located in the shared library at `library_path`
as part of the normal TensorFlow execution with the given `inputs`
copied from the IPU to the CPU, and the outputs are copied back to the
IPU afterwards.
The shape and type of the outputs should be specified by `outs`. If it is
`None` it will default to no output. `outs` should be a dictionary with
two elements like so:
.. code-block:: python
outs = {
"output_types": [my_types_as_a_list],
"output_shapes": [my_shapes_as_a_list],
}
Args:
inputs: The tensor inputs to the operation.
library_path: The path to the shared object that contains the functions
to execute the operation.
outs: A dictionary describing the output tensor shapes and types.
name: The name of the operation.
    op_name: The prefix of the functions inside the shared object file. This
defaults to 'Callback'.
separate_gradients: When set to `True`, multiple gradient ops will be
generated, one for each input. When `False`, a single gradient op will be
generated, which should produce the partial derivatives for all inputs.
inputs_with_gradients: When set, produce derivatives only for specified
inputs. List of input indices expected.
Returns:
The array of tensor outputs.
"""
if outs is None:
outs = {
"output_types": [],
"output_shapes": [],
}
inputs_with_gradients = _validate_inputs_with_gradients(
inputs_with_gradients, inputs)
return gen_poputil_ops.ipu_user_read_write_op(
inputs,
library_path=library_path,
op_name=op_name,
name=name,
separate_gradients=separate_gradients,
gradient_size=0,
partial_derivative_index=0,
inputs_with_gradients=inputs_with_gradients,
**outs)
|
import torch
import shutil
import unittest
from lstm_dna.loader import load_genbank, divide_sequence
from .setup_dirs import setup_dirs
class TestLoader(unittest.TestCase):
def setUp(self):
self.indir, self.workdir, self.outdir = setup_dirs(fpath=__file__)
def tearDown(self):
shutil.rmtree(self.workdir)
shutil.rmtree(self.outdir)
def test_load_genbank(self):
X, Y = load_genbank(
gbks=f'{self.indir}/GCF_000008525.1_ASM852v1_genomic.gbff',
label_length=90)
self.assertTupleEqual((3335734, 4), X.size())
self.assertTupleEqual((3335734, ), Y.size())
self.assertEqual(torch.float, X.dtype)
self.assertEqual(torch.long, Y.dtype)
def test_divide_sequence(self):
x = torch.randn(100, 4)
for pad, expected in [
(True, (34, 3, 4)),
(False, (33, 3, 4))
]:
size = divide_sequence(x, seq_len=3, pad=pad).size()
self.assertTupleEqual(expected, size)
|
from request import Request
class Event:
def __init__(self, data):
        self.data = data
def request(self):
return Request(self.data['Records'][0]['cf']['request'])
|
from utility import *
class Snake():
def __init__(self, snake):
#print("~~~~~~Snake Object Created~~~~~~~")
self.id = snake['id']
#print(self.id)
self.name = snake['name']
#print(self.name)
self.health = snake['health']
#print(self.health)
self.coordinates = snake['body']
#print(self.coordinates)
self.head = snake['head']
#print(self.head)
self.length = snake['length']
#print(self.length)
# only attack when our snake is longer than enemySnake and distance between two snakes is less than 3
def shouldAttack(self, enemySnake):
if self.length <= enemySnake.length:
return False
        if distanceBetweenTwoPoints(self.head, enemySnake.head) < 3:
            return True
        return False
def attackDirection(self, enemySnake):
#print("EnemySnake ID: ", enemySnake.id, "=",enemySnake.head)
head = enemySnake.head
vertDiff = head['y'] - self.head['y']
horDiff = head['x'] - self.head['x']
if vertDiff > 0: return 'north'
elif vertDiff < 0: return 'south'
if horDiff > 0: return 'east'
elif horDiff < 0: return 'west'
def attack(self, directions, snakes):
for snake in snakes:
if snake.id == self.id: continue
direction = self.attackDirection(snake)
if self.shouldAttack(snake):
if (direction == 'north'): directions.north *= 2
elif (direction == 'south'): directions.south *= 2
elif (direction == 'east'): directions.east *= 2
elif (direction == 'west'): directions.west *= 2
else:
if self.length <= snake.length and distanceBetweenTwoPoints(self.head, snake.head) < 3:
if (direction == 'north'): directions.north *= .1
elif (direction == 'south'): directions.south *= .1
elif (direction == 'east'): directions.east *= .1
elif (direction == 'west'): directions.west *= .1
return directions
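# Example of the logic above: with our head at {'x': 5, 'y': 4} and an enemy head at
# {'x': 5, 'y': 7}, vertDiff is 3, so attackDirection returns 'north'; attack() then doubles
# directions.north only if we are strictly longer than that snake and closer than distance 3.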
|
from typing import Any, Tuple
from Base.bp2DBin import Bin
from Base.bp2DBox import Box
from Base.bp2DPnt import Point
def face_intersection(b1: Box, b2: Box) -> int:
    '''Compute how much overlap the faces of two rectangles have.
    Return the total length of the overlapping face segments, or 0 if the faces do not touch.'''
max = 0
for f1 in b1.get_face_list():
for f2 in b2.get_face_list():
intersection = f1.intersect(f2)
if intersection is not None:
max += intersection.length()
return max
def most_enclosed_position_in_bin(bin: Bin, box: Box) -> Tuple[int, Any]:
'''Finds the most enclosed feasible position for box in bin.
it returns the first open point with this property, i.e.
it also tries to place as bottom left as possible'''
pmax = None
max = -1
for p in bin.get_pnts_open():
if bin.can_place_box_at_pnt(box, p):
box.move(p)
enclosure = face_intersection(box,
bin.bounding_box) # check for edge overlap with bounding box, ie if box is placed on side or in corner of bin
# TODO extremely slow and assumes that remove_box does not fiddle with box.lr and keeps the place position
for box_stored in bin.boxes_stored:
enclosure += face_intersection(box, box_stored)
if enclosure > max:
max = enclosure
pmax = p
return max, pmax
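# Hedged usage sketch (the Bin/Box construction and placement calls below are assumptions, not
# shown in this module):
#   score, pnt = most_enclosed_position_in_bin(some_bin, some_box)
#   if pnt is not None:
#       some_bin.place_box_at_pnt(some_box, pnt)   # hypothetical placement call
# The returned score is the total face contact with the bin walls and the boxes already stored.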
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
# Define KNN as the model, choosing K in particular
knn = KNeighborsClassifier(n_neighbors = 3)
# Read the dataset (the file has no header row, so header=None keeps the first record)
df = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data", header=None)
# Name the columns
df.columns = ['sepal_length', 'sepal_width', 'petal_length' , 'petal_width', 'class' ]
print(df.head(2))
# Extract the feature columns
X = df.iloc[:, 0:4]
# Scale the values
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
scaled_X = pd.DataFrame(scaler.fit_transform(X))
print(scaled_X.head(2))
# Create the train and test sets
x_train, x_test, y_train, y_test = train_test_split(scaled_X, df['class'], test_size=0.3, random_state=12345)
# Create the model, training it on the training data
knn.fit(x_train, y_train)
# Make a prediction with the newly created model
pred = knn.predict(x_test)
# Get the classification report for the prediction
print(classification_report(y_test, pred))
# Use the elbow method to look for the best number of neighbors, for k from 1 to 5
err = []
for i in range (1, 6):
knn = KNeighborsClassifier(n_neighbors = i)
knn.fit(x_train, y_train)
pred_i = knn.predict(x_test)
err.append(np.mean(pred_i != y_test))
# Plot the elbow-method results to see which k gives the lowest error
plt.figure(figsize = (10,6))
plt.plot(range(1, 6), err, color='green', linestyle='dotted', marker= 'o', markerfacecolor='red', markersize=8)
plt.xlabel("Numero di k")
plt.ylabel('Tasso di errore')
plt.show()
# Run a prediction with the HIGHEST error rate
knn_max_error = KNeighborsClassifier(n_neighbors = 2)
knn_max_error.fit(x_train, y_train)
pred_max_error = knn_max_error.predict(x_test)
print(classification_report(y_test, pred_max_error))
# Run a prediction with the LOWEST error rate
knn_min_error = KNeighborsClassifier(n_neighbors = 5)
knn_min_error.fit(x_train, y_train)
pred_min_error = knn_min_error.predict(x_test)
print(classification_report(y_test, pred_min_error))
|
#!/usr/bin/env python
#
# Copyright (c) 2015, 2016, 2017, 2018, 2019, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
from __future__ import division
import os
import unittest
from test.analysis_helper import *
from test import mock_report
@unittest.skipIf(g_skip_analysis_test, g_skip_analysis_ex)
class TestBalancerAnalysis(unittest.TestCase):
def setUp(self):
self._name_prefix = 'prof'
self._use_agent = True
self._min_power = 160
self._max_power = 200
self._step_power = 10
self._powers = list(range(self._min_power, self._max_power+self._step_power, self._step_power))
self._node_names = ['mynode']
self._config = {'profile_prefix': self._name_prefix,
'output_dir': '.',
'verbose': True,
'iterations': 1,
'min_power': self._min_power, 'max_power': self._max_power,
'step_power': self._step_power}
self._tmp_files = []
# default mocked data for each column given power budget and agent
self._gen_val_governor = {
'count': (lambda node, region, pow: 1),
'energy_pkg': (lambda node, region, pow: (14000.0 + pow)),
'energy_dram': (lambda node, region, pow: 2000.0),
'frequency': (lambda node, region, pow: 1.0e9 + (self._max_power/float(pow))*1.0e9),
'network_time': (lambda node, region, pow: 10),
'runtime': (lambda node, region, pow: (500.0 * (1.0/pow))),
'id': (lambda node, region, pow: 'bad'),
'power': (lambda node, region, pow:
self._gen_val_governor['energy_pkg'](node, region, pow) /
self._gen_val_governor['runtime'](node, region, pow)),
}
self._gen_val_balancer = self._gen_val_governor.copy()
for metric in ['energy_pkg', 'runtime']:
self._gen_val_balancer[metric] = lambda node, region, power: (
self._gen_val_governor[metric](node, region, power) * 0.9)
self._gen_val_balancer['power'] = lambda node, region, power: (
self._gen_val_balancer['energy_pkg'](node, region, power) /
self._gen_val_balancer['runtime'](node, region, power) )
self._agent_params = {
'power_governor': (self._gen_val_governor, self._powers),
'power_balancer': (self._gen_val_balancer, self._powers) }
def tearDown(self):
for ff in self._tmp_files:
try:
os.remove(ff)
except OSError:
pass
def make_expected_summary_df(self, metric):
ref_val_cols = ['reference_mean', 'reference_max', 'reference_min']
tar_val_cols = ['target_mean', 'target_max', 'target_min']
delta_cols = ['reference_max_delta', 'reference_min_delta', 'target_max_delta', 'target_min_delta']
cols = ref_val_cols + tar_val_cols + delta_cols
expected_data = []
for pp in self._powers:
row = []
# Reference metrics
row += [self._gen_val_governor[metric](None, None, pp) for col in ref_val_cols]
# Target metrics
row += [self._gen_val_balancer[metric](None, None, pp) for col in tar_val_cols]
row += [0.0 for col in delta_cols]
expected_data.append(row)
index = pandas.Index(self._powers, name='name')
return pandas.DataFrame(expected_data, index=index, columns=cols)
def test_balancer_plot_process_runtime(self):
metric = 'runtime'
report_df = mock_report.make_mock_report_df(
self._name_prefix, self._node_names, self._agent_params)
mock_parse_data = MockAppOutput(report_df)
analysis = geopmpy.analysis.BalancerAnalysis(metric=metric, normalize=False, speedup=False,
**self._config)
result = analysis.plot_process(mock_parse_data)
expected_df = self.make_expected_summary_df(metric)
compare_dataframe(self, expected_df, result)
def test_balancer_plot_process_energy(self):
report_df = mock_report.make_mock_report_df(
self._name_prefix, self._node_names, self._agent_params)
mock_parse_data = MockAppOutput(report_df)
analysis = geopmpy.analysis.BalancerAnalysis(metric='energy', normalize=False, speedup=False,
**self._config)
result = analysis.plot_process(mock_parse_data)
expected_df = self.make_expected_summary_df('energy_pkg')
compare_dataframe(self, expected_df, result)
def test_balancer_plot_process_power(self):
metric = 'power'
report_df = mock_report.make_mock_report_df(
self._name_prefix, self._node_names, self._agent_params)
mock_parse_data = MockAppOutput(report_df)
analysis = geopmpy.analysis.BalancerAnalysis(metric=metric, normalize=False, speedup=False,
**self._config)
result = analysis.plot_process(mock_parse_data)
expected_df = self.make_expected_summary_df(metric)
compare_dataframe(self, expected_df, result)
if __name__ == '__main__':
unittest.main()
|
import socket
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind((socket.gethostname(), 25000))
serversocket.listen()
print("Server is listening....")
connection, address = serversocket.accept()
print("Connection has been established")
msg = connection.recv(1024)
received=msg.decode()
number=int(received)
sum=0
for x in range(1,int(number/2)+1):
if(number%x==0):
sum+=x
if(number<sum):
message=received+" is an abundant number"
else:
message=received+" is not an abundant number"
connection.send(message.encode())
connection.close()
serversocket.close()
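# A minimal client sketch for this server (assumes it is run on the same host while the server
# above is listening):
#   import socket
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect((socket.gethostname(), 25000))
#   client.send(b"12")   # proper divisors of 12 sum to 1 + 2 + 3 + 4 + 6 = 16 > 12, so abundant
#   print(client.recv(1024).decode())
#   client.close()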
|
import datetime
import random
from datetime import datetime
from discord.ext import commands
class ProgrammCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.programm = ("Hier steht ihr programm",)
self.password = "5mal4plus5ergibt25"
@commands.command(name='new', help="Setzt neues Programm (passwort geschützt)")
async def sun_command(self, ctx, password, *programm ):
if password == self.password and password:
await ctx.send("programm ist gesetzt.")
self.programm = " ".join(programm).split(";")
else:
await ctx.send("password wrong")
@commands.command(name='programm', help="Zeigt programm")
async def get_programm_command(self, ctx ):
await ctx.send("\n".join(self.programm))
def setup(bot):
bot.add_cog(ProgrammCog(bot))
|
class Song():
def __init__(self, title, artist):
self.title=title
self.artist=artist
self.arr=set()
def how_many(self, arr):
length=len(self.arr)
for i in arr:
self.arr.add(i.lower())
return len(self.arr)-length
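# A small illustrative demo of the class above, not part of the original module (names made up).
if __name__ == '__main__':
    song = Song("Some Title", "Some Artist")
    print(song.how_many(["John", "JOHN", "Ringo"]))  # 2: two new unique (case-insensitive) names
    print(song.how_many(["ringo", "George"]))        # 1: only "George" is new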
|
def solution(A):
hashMap = {}
# Loop for each element of the original array
for i in A:
# Check if hashMap has the element and invert the value of true, else add it with initial value of true
if i in hashMap:
hashMap[i] = not hashMap[i]
else:
hashMap[i] = True
hashMapKeys = list(hashMap.keys())
hashMapValues = list(hashMap.values())
# Loop for each element on the object keys array
for i in range(len(hashMapValues)):
# Check if the count is odd for the element and return its key
if hashMapValues[i] is True:
return int(hashMapKeys[i])
pass
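# Illustrative check of the helper above: 7 is the only value occurring an odd number of times.
if __name__ == '__main__':
    print(solution([9, 3, 9, 3, 7]))  # 7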
|
'''OpenGL extension ARB.instanced_arrays
This module customises the behaviour of the
OpenGL.raw.GL.ARB.instanced_arrays to provide a more
Python-friendly API
Overview (from the spec)
A common use case in GL for some applications is to be able to
draw the same object, or groups of similar objects that share
vertex data, primitive count and type, multiple times. This
extension provides a means of accelerating such use cases while
restricting the number of API calls, and keeping the amount of
duplicate data to a minimum.
In particular, this extension specifies an alternative to the
read-only shader variable introduced by ARB_draw_instanced. It
uses the same draw calls introduced by that extension, but
redefines them so that a vertex shader can instead use vertex
array attributes as a source of instance data.
This extension introduces an array "divisor" for generic
vertex array attributes, which when non-zero specifies that the
attribute is "instanced." An instanced attribute does not
advance per-vertex as usual, but rather after every <divisor>
conceptual draw calls.
(Attributes which aren't instanced are repeated in their entirety
for every conceptual draw call.)
By specifying transform data in an instanced attribute or series
of instanced attributes, vertex shaders can, in concert with the
instancing draw calls, draw multiple instances of an object with
one draw call.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/instanced_arrays.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.instanced_arrays import *
from OpenGL.raw.GL.ARB.instanced_arrays import _EXTENSION_NAME
def glInitInstancedArraysARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
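# Illustrative usage sketch (assumes a current GL context and an already configured vertex
# attribute; the attribute index below is hypothetical):
#   if glInitInstancedArraysARB():
#       glVertexAttribDivisorARB(3, 1)  # advance attribute 3 once per instance, not per vertex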
|
# Standard library imports
import sys
import warnings
# Local imports
from uplink import decorators
from uplink.converters import keys, interfaces
__all__ = ["json", "from_json", "schema"]
class _ReturnsBase(decorators.MethodAnnotation):
def _get_return_type(self, return_type): # pragma: no cover
return return_type
def _make_strategy(self, converter): # pragma: no cover
pass
def modify_request(self, request_builder):
return_type = self._get_return_type(request_builder.return_type)
if isinstance(return_type, _StrategyWrapper):
converter = return_type.unwrap()
else:
converter = request_builder.get_converter(
keys.CONVERT_FROM_RESPONSE_BODY, return_type
)
if converter is not None:
# Found a converter that can handle the return type.
request_builder.return_type = _StrategyWrapper(
converter, self._make_strategy(converter)
)
class _StrategyWrapper(object):
def __init__(self, converter, strategy):
self._converter = converter
self._strategy = strategy
def __call__(self, *args, **kwargs):
return self._strategy(*args, **kwargs)
def unwrap(self): # pragma: no cover
return self._converter
class JsonStrategy(object):
# TODO: Consider moving this under json decorator
# TODO: Support JSON Pointer (https://tools.ietf.org/html/rfc6901)
def __init__(self, converter, key=()):
self._converter = converter
if not isinstance(key, (list, tuple)):
key = (key,)
self._key = key
def __call__(self, response):
content = response.json()
for name in self._key:
content = content[name]
content = self._converter(content)
return content
def unwrap(self):
return self._converter
# noinspection PyPep8Naming
class json(_ReturnsBase):
"""
Specifies that the decorated consumer method should return a JSON
object.
.. code-block:: python
# This method will return a JSON object (e.g., a dict or list)
@returns.json
@get("/users/{username}")
def get_user(self, username):
\"""Get a specific user.\"""
Returning a Specific JSON Field:
The :py:attr:`key` argument accepts a string or tuple that
specifies the path of an internal field in the JSON document.
For instance, consider an API that returns JSON responses that,
at the root of the document, contains both the server-retrieved
data and a list of relevant API errors:
.. code-block:: json
:emphasize-lines: 2
{
"data": { "user": "prkumar", "id": 140232 },
"errors": []
}
If returning the list of errors is unnecessary, we can use the
:py:attr:`key` argument to strictly return the inner field
:py:attr:`data`:
.. code-block:: python
@returns.json(key="data")
@get("/users/{username}")
def get_user(self, username):
\"""Get a specific user.\"""
.. versionadded:: v0.5.0
"""
_can_be_static = True
class _DummyConverter(interfaces.Converter):
def convert(self, response):
return response
__dummy_converter = _DummyConverter()
def __init__(self, type=None, key=(), model=None, member=()):
if model: # pragma: no cover
warnings.warn(
"The `model` argument of @returns.json is deprecated and will "
"be removed in v1.0.0. Use `type` instead.",
DeprecationWarning,
)
if member: # pragma: no cover
warnings.warn(
"The `member` argument of @returns.json is deprecated and will "
"be removed in v1.0.0. Use `key` instead.",
DeprecationWarning,
)
self._type = type or model
self._key = key or member
def _get_return_type(self, return_type):
# If self._type and return_type are None, the strategy should
# directly return the JSON body of the HTTP response, instead of
# trying to deserialize it into a certain type. In this case, by
# defaulting the return type to the dummy converter, which
# implements this pass-through behavior, we ensure that
# _make_strategy is called.
default = self.__dummy_converter if self._type is None else self._type
return default if return_type is None else return_type
def _make_strategy(self, converter):
return JsonStrategy(converter, self._key)
from_json = json
"""
Specifies that the decorated consumer method should produce
instances of a :py:obj:`type` class using a registered
deserialization strategy (see :py:meth:`uplink.loads.from_json`)
This decorator accepts the same arguments as
:py:class:`uplink.returns.json`.
Often, a JSON response body represents a schema in your application.
If an existing Python object encapsulates this schema, use the
:py:attr:`type` argument to specify it as the return type:
.. code-block:: python
@returns.from_json(type=User)
@get("/users/{username}")
def get_user(self, username):
\"""Get a specific user.\"""
For Python 3 users, you can alternatively provide a return value
annotation. Hence, the previous code is equivalent to the following
in Python 3:
.. code-block:: python
@returns.from_json
@get("/users/{username}")
def get_user(self, username) -> User:
\"""Get a specific user.\"""
Both usages typically require also registering a converter that
knows how to deserialize the JSON into the specified :py:attr:`type`
(see :py:meth:`uplink.loads.from_json`). This step is unnecessary if
the :py:attr:`type` is defined using a library for which Uplink has
built-in support, such as :py:mod:`marshmallow`.
.. versionadded:: v0.6.0
"""
# noinspection PyPep8Naming
class schema(_ReturnsBase):
"""
Specifies that the function returns a specific type of response.
In Python 3, to provide a consumer method's return type, you can
set it as the method's return annotation:
.. code-block:: python
@get("/users/{username}")
def get_user(self, username) -> UserSchema:
\"""Get a specific user.\"""
For Python 2.7 compatibility, you can use this decorator instead:
.. code-block:: python
@returns.schema(UserSchema)
@get("/users/{username}")
def get_user(self, username):
\"""Get a specific user.\"""
To have Uplink convert response bodies into the desired type, you
will need to define an appropriate converter (e.g., using
:py:class:`uplink.loads`).
.. versionadded:: v0.5.1
"""
def __init__(self, type):
self._schema = type
def _get_return_type(self, return_type):
return self._schema if return_type is None else return_type
def _make_strategy(self, converter):
return converter
class _ModuleProxy(object):
__module = sys.modules[__name__]
schema = model = schema
json = json
from_json = from_json
__all__ = __module.__all__
def __getattr__(self, item):
return getattr(self.__module, item)
def __call__(self, *args, **kwargs):
return schema(*args, **kwargs)
sys.modules[__name__] = _ModuleProxy()
|
from __future__ import print_function
from setuptools import setup, find_packages
packages = find_packages(include = 'fastml_engine.**', exclude=['fastml_engine.egg-info'])
packages.append('fastml_engine.docs')
print(packages)
setup(
name='fastml_engine',
version='1.0.6',
author="HaiTao Hou",
author_email="hou610433155@163.com",
description='A web server for deploy ml/dl model',
long_description=open("README.rst",encoding='utf-8').read(),
long_description_content_type="text/x-rst",
license='MIT',
packages=packages,
package_data={'fastml_engine.docs': ['*.yaml'], },
install_requires=[
'Flask',
'flasgger',
'numpy',
'six',
'gevent',
'Werkzeug',
'concurrent_log_handler==0.9.16',
'portalocker==1.7.0',
"click>=7.0"
"gunicorn; platform_system != 'Windows'",
"waitress; platform_system == 'Windows'",
],
python_requires='>=3.6',
entry_points="""
[console_scripts]
fastml=fastml_engine.cli:cli
""",
    keywords="ml ai model inference",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
project_urls={
"Source Code": "https://github.com/fast-mlops/fastml-engine",
}
)
|
class Rect:
def __init__(self, x, y, w, h):
self.x1 = x
self.y1 = y
self.x2 = x + w
self.y2 = y + h
def center(self):
center_x = int((self.x1 + self.x2) / 2)
center_y = int((self.y1 + self.y2) / 2)
return (center_x, center_y)
def intersect(self, other):
# returns true if this rectangle intersects with another one
return (
self.x1 <= other.x2
and self.x2 >= other.x1
and self.y1 <= other.y2
and self.y2 >= other.y1
)
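# A small illustrative demo of the Rect API above, not part of the original module.
if __name__ == '__main__':
    a = Rect(0, 0, 10, 10)
    b = Rect(5, 5, 10, 10)
    print(a.center())                       # (5, 5)
    print(a.intersect(b))                   # True: the rectangles share the region (5, 5)-(10, 10)
    print(a.intersect(Rect(20, 20, 2, 2)))  # False: no overlap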
|
from cs251tk.common import chdir
from cs251tk.common import run
def reset(student):
with chdir(student):
run(['git', 'checkout', 'master', '--quiet', '--force'])
|
import spacy
from spacy.language import Language
from spacy.lang.tokenizer_exceptions import URL_MATCH
#from thinc.api import Config
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
from .lex_attrs import LEX_ATTRS
from .tag_map import TAG_MAP
from .syntax_iterators import SYNTAX_ITERATORS
from spacy.tokens import Doc
from typing import Optional
from thinc.api import Model
import srsly
from .lemmatizer import Dostoevsky_russianLemmatizer
# https://nightly.spacy.io/api/language#defaults
class Dostoevsky_russianDefaults(Language.Defaults):
stop_words = STOP_WORDS
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
prefixes = TOKENIZER_PREFIXES
suffixes = TOKENIZER_SUFFIXES
infixes = TOKENIZER_INFIXES
token_match = None
url_match = URL_MATCH
tag_map = TAG_MAP
writing_system = {"direction": "ltr", "has_case": True, "has_letters": True}
@spacy.registry.languages("rus") #https://nightly.spacy.io/api/top-level#registry
class Dostoevsky_russian(Language):
lang = "rus"
Defaults = Dostoevsky_russianDefaults
#custom on init
@Dostoevsky_russian.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={"model": None, "mode": "lookup", "overwrite": False},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language, model: Optional[Model], name: str, mode: str, overwrite: bool
):
return Dostoevsky_russianLemmatizer(nlp.vocab, model, name, mode=mode, overwrite=overwrite)
#Add locations of lookups data to the registry
@spacy.registry.lookups("rus")
def do_registration():
from pathlib import Path
cadet_path = Path.cwd()
lookups_path = cadet_path / "new_lang" / "dostoevsky_russian" / "lookups"
result = {}
for lookup in lookups_path.iterdir():
key = lookup.stem[lookup.stem.find('_') + 1:]
result[key] = str(lookup)
return result
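# Added note: the lookup key above is everything after the first underscore in the
# file stem, e.g. a (hypothetical) lookups file named "rus_lemma_lookup.json" would
# be registered under the key "lemma_lookup".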
__all__ = ["Dostoevsky_russian"]
|
import unittest
import trafaret as t
import trafaret_schema
class TestConstEnumType(unittest.TestCase):
def test_const(self):
check = trafaret_schema.json_schema({
'const': 'blabla',
})
self.assertEqual(check('blabla'), 'blabla')
check = trafaret_schema.json_schema({
'const': 100,
})
self.assertEqual(check(100), 100)
def test_enum(self):
check = trafaret_schema.json_schema({
'enum': ['blabla', 200],
})
self.assertEqual(check('blabla'), 'blabla')
self.assertEqual(check(200), 200)
with self.assertRaises(t.DataError):
check(300)
def test_type(self):
check = trafaret_schema.json_schema({'type': 'null'})
self.assertEqual(check(None), None)
check = trafaret_schema.json_schema({'type': 'boolean'})
self.assertEqual(check(True), True)
check = trafaret_schema.json_schema({'type': 'object'})
self.assertEqual(check({}), {})
check = trafaret_schema.json_schema({'type': 'array'})
self.assertEqual(check([]), [])
check = trafaret_schema.json_schema({'type': 'number'})
self.assertEqual(check(2.4), 2.4)
check = trafaret_schema.json_schema({'type': 'integer'})
self.assertEqual(check(200), 200)
with self.assertRaises(t.DataError):
check('blabla')
check = trafaret_schema.json_schema({'type': 'string'})
self.assertEqual(check('a'), 'a')
class TestPredicates(unittest.TestCase):
def test_all_of(self):
check = trafaret_schema.json_schema({
'allOf': [
{'minLength': 5},
{'maxLength': 10},
],
})
self.assertEqual(check('blabla'), 'blabla')
with self.assertRaises(t.DataError):
check('bla')
def test_any_of(self):
check = trafaret_schema.json_schema({
'anyOf': [
{'minLength': 5},
{'maxLength': 3},
],
})
self.assertEqual(check('blabla'), 'blabla')
self.assertEqual(check('bla'), 'bla')
with self.assertRaises(t.DataError):
check('blab')
def test_not(self):
check = trafaret_schema.json_schema({
'not': {'minLength': 5},
})
self.assertEqual(check('bla'), 'bla')
with self.assertRaises(t.DataError):
check('blabla')
class TestStringValidation(unittest.TestCase):
def test_string(self):
check = trafaret_schema.json_schema({
'type': 'string',
})
self.assertEqual(check('blabla'), 'blabla')
def test_min_length(self):
check = trafaret_schema.json_schema({
'type': 'string',
'minLength': 60,
})
with self.assertRaises(t.DataError):
check('blabla')
def test_max_length(self):
check = trafaret_schema.json_schema({
'type': 'string',
'maxLength': 5,
})
with self.assertRaises(t.DataError):
check('blabla')
def test_pattern(self):
check = trafaret_schema.json_schema({
'type': 'string',
'pattern': 'bla+',
'maxLength': 10,
'minLength': 5,
})
self.assertEqual(check('blablabla'), 'blablabla')
class TestNumberValidation(unittest.TestCase):
def test_number(self):
check = trafaret_schema.json_schema({
'type': 'integer',
})
self.assertEqual(check(100), 100)
with self.assertRaises(t.DataError):
check(100.4)
def test_minimum(self):
check = trafaret_schema.json_schema({
'type': 'number',
'minimum': 5,
})
with self.assertRaises(t.DataError):
check(1)
check = trafaret_schema.json_schema({
'type': 'number',
'exclusiveMinimum': 5,
})
with self.assertRaises(t.DataError):
check(5)
def test_maximum(self):
check = trafaret_schema.json_schema({
'type': 'number',
'maximum': 5,
})
with self.assertRaises(t.DataError):
check(10)
check = trafaret_schema.json_schema({
'type': 'number',
'exclusiveMaximum': 5,
})
with self.assertRaises(t.DataError):
check(5)
def test_multiple_of(self):
check = trafaret_schema.json_schema({
'type': 'number',
'multipleOf': 5,
})
self.assertEqual(check(10), 10)
with self.assertRaises(t.DataError):
check(11)
class TestArrays(unittest.TestCase):
def test_min_items(self):
check = trafaret_schema.json_schema({
'type': 'array',
'minItems': 5,
})
with self.assertRaises(t.DataError):
check([1,2,3,4])
self.assertEqual(check([1,2,3,4,5]), [1,2,3,4,5])
def test_max_items(self):
check = trafaret_schema.json_schema({
'type': 'array',
'maxItems': 5,
})
with self.assertRaises(t.DataError):
check([1,2,3,4,5,6])
self.assertEqual(check([1,2,3,4,5]), [1,2,3,4,5])
def test_uniq(self):
check = trafaret_schema.json_schema({
'type': 'array',
'uniqueItems': True,
})
with self.assertRaises(t.DataError):
check([1,2,3,4,5,5])
self.assertEqual(check([1,2,3,4,5]), [1,2,3,4,5])
def test_contains(self):
check = trafaret_schema.json_schema({
'type': 'array',
'contains': {'type': 'number'},
})
with self.assertRaises(t.DataError):
check(['a','b','c'])
self.assertEqual(check(['a','b','c',5]), ['a','b','c',5])
def test_simple_items(self):
check = trafaret_schema.json_schema({
'type': 'array',
'items': {'type': 'number'},
})
with self.assertRaises(t.DataError):
check([1,2,'a',4,5,5])
self.assertEqual(check([1,2,3,4,5]), [1,2,3,4,5])
def test_positional_items(self):
check = trafaret_schema.json_schema({
'type': 'array',
'items': [{'type': 'number'}, {'type': 'string'}],
})
with self.assertRaises(t.DataError):
# bad 2nd position
check([1,None])
with self.assertRaises(t.DataError):
# too long array
check([1,'a',4,5,5])
self.assertEqual(check([1,'a']), [1,'a'])
def test_additional_items(self):
check = trafaret_schema.json_schema({
'type': 'array',
'items': [{'type': 'number'}, {'type': 'string'}],
'additionalItems': {'type': 'number'},
})
with self.assertRaises(t.DataError):
check([1,None,4,5,5])
with self.assertRaises(t.DataError):
check([1,'a','a',5,5])
self.assertEqual(check([1,'a',5,5,5]), [1,'a',5,5,5])
class TestObjects(unittest.TestCase):
def test_max_props(self):
check = trafaret_schema.json_schema({
'type': 'object',
'maxProperties': 1,
})
with self.assertRaises(t.DataError):
check({'a': 1, 'b': 2})
self.assertEqual(check({'a': 1}), {'a': 1})
def test_min_props(self):
check = trafaret_schema.json_schema({
'type': 'object',
'minProperties': 2,
})
with self.assertRaises(t.DataError):
check({'a': 1})
self.assertEqual(check({'a': 1, 'b': 2}), {'a': 1, 'b': 2})
def test_required(self):
check = trafaret_schema.json_schema({
'type': 'object',
'required': ['a', 'b'],
})
with self.assertRaises(t.DataError):
check({'a': 1})
self.assertEqual(check({'a': 1, 'b': 2}), {'a': 1, 'b': 2})
def test_properties(self):
check = trafaret_schema.json_schema({
'type': 'object',
'properties': {'a': {'type': 'number'}},
})
with self.assertRaises(t.DataError):
check({'a': 'b'})
self.assertEqual(check({'a': 1, 'b': 2}), {'a': 1, 'b': 2})
def test_pattern_properties(self):
check = trafaret_schema.json_schema({
'type': 'object',
'patternProperties': {'a+': {'type': 'number'}},
})
with self.assertRaises(t.DataError):
check({'a': 'b'})
with self.assertRaises(t.DataError):
check({'a': 3, 'aaa': 'b'})
self.assertEqual(check({'a': 1, 'aaa': 3}), {'a': 1, 'aaa': 3})
def test_additional_properties(self):
check = trafaret_schema.json_schema({
'type': 'object',
'properties': {'a': {'type': 'number'}},
'additionalProperties': {'type': 'boolean'},
})
with self.assertRaises(t.DataError):
check({'a':1, 'b': 2})
self.assertEqual(check({'a': 1, 'b': True}), {'a': 1, 'b': True})
def test_property_names(self):
check = trafaret_schema.json_schema({
'type': 'object',
'propertyNames': {
'type': 'string',
'pattern': 'bla+',
},
})
with self.assertRaises(t.DataError):
check({'a': 'b'})
self.assertEqual(check({'bla': 1, 'blabla': 3}), {'bla': 1, 'blabla': 3})
def test_dependencies(self):
check = trafaret_schema.json_schema({
'type': 'object',
'properties': {'a': {'type': 'number'}},
'dependencies': {'a': ['b', 'c']},
})
self.assertEqual(check({'bla': 1, 'blabla': 3}), {'bla': 1, 'blabla': 3})
with self.assertRaises(t.DataError):
check({'a': 'b'})
self.assertEqual(check({'a': 1, 'b': 3, 'c': 4}), {'a': 1, 'b': 3, 'c': 4})
class TestReferences(unittest.TestCase):
def test_local_reference(self):
check = trafaret_schema.json_schema({
'type': 'object',
"properties": {
"billing_address": { "$ref": "#/definitions/address" },
"shipping_address": { "$ref": "#/definitions/address" },
},
"definitions": {
"address": {
"type": "object",
"properties": {
"street_address": { "type": "string" },
"city": { "type": "string" },
"state": { "type": "string" },
},
"required": ["city"],
},
},
})
data = {
'billing_address': {'city': 'Samara'},
'shipping_address': {'city': 'Samara'},
}
assert check(data) == data
def test_adjacent_reference(self):
register = trafaret_schema.Register()
addresses = trafaret_schema.json_schema({
"$id": "http://yuhuhu.com/address",
"type": "object",
"properties": {
"billing_address": { "$ref": "#/definitions/address" },
"shipping_address": { "$ref": "#/definitions/address" },
},
"definitions": {
"address": {
"type": "object",
"properties": {
"street_address": { "type": "string" },
"city": { "type": "string" },
"state": { "type": "string" },
},
"required": ["city"],
},
},
},
context=register,
)
person = trafaret_schema.json_schema({
"type": "object",
"properties": {
"name": {"type": "string"},
"address": {"$ref": "http://yuhuhu.com/address#/definitions/address"},
},
},
context=register,
)
data = {
'name': 'Peotr',
'address': {'city': 'Moscow'},
}
assert person.check(data) == data
register.validate_references()
|
# Knapsack algorithm
# prints out the value of the optimal solution
a = []
with open('knapsack1.txt', 'r') as doc:
for line in doc:
a.append(list(map(int, line.split())))
kn_size = a[0][0] + 1
n = a[0][1] + 1
del a[0]
A = [[]]*n
A[0] = [0]*kn_size
for i in range(1, n):
A[i] = [0]*kn_size
for x in range(kn_size):
if a[i-1][1] <= x:
A[i][x] = max(A[i-1][x], A[i-1][x-a[i-1][1]]+a[i-1][0])
else:
A[i][x] = A[i-1][x]
print(A[-1][-1])
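# Hedged worked example (added for illustration; 'knapsack1.txt' is assumed to hold
# "capacity num_items" on its first line and "value weight" on each following line).
# The same recurrence in a standard 1-D, space-optimized form, so the DP above can
# be sanity-checked without the input file:
def knapsack(capacity, items):
    # items: list of (value, weight); dp[x] = best value achievable with capacity x
    dp = [0] * (capacity + 1)
    for value, weight in items:
        for x in range(capacity, weight - 1, -1):
            dp[x] = max(dp[x], dp[x - weight] + value)
    return dp[capacity]
# e.g. capacity 6 with items (value, weight): taking weights 2 and 3 gives 3 + 4 = 7
assert knapsack(6, [(3, 2), (4, 3), (5, 6)]) == 7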
|
import pickle
from nose.tools import eq_
from ..row_type import RowGenerator
def test_row_type():
MyRow = RowGenerator(["foo", "bar", "baz"])
r = MyRow("15\t16\tNULL")
eq_(r.foo, "15")
eq_(r['foo'], "15")
eq_(r[0], "15")
eq_(r.bar, "16")
eq_(r['bar'], "16")
eq_(r[1], "16")
eq_(r.baz, None)
eq_(r['baz'], None)
eq_(r[2], None)
MyRow = RowGenerator(["foo", "bar", "baz"], types=[int, int, int])
r = MyRow("15\t16\tNULL")
eq_(r.foo, 15)
eq_(r['foo'], 15)
eq_(r[0], 15)
eq_(r.bar, 16)
eq_(r['bar'], 16)
eq_(r[1], 16)
eq_(r.baz, None)
eq_(r['baz'], None)
eq_(r[2], None)
eq_(pickle.loads(pickle.dumps(r)).baz, None)
eq_(pickle.loads(pickle.dumps(r))['baz'], None)
eq_(pickle.loads(pickle.dumps(r))[2], None)
|
# Copyright The IETF Trust 2007, All Rights Reserved
import os
from django.shortcuts import get_object_or_404, render
import debug # pyflakes:ignore
from ietf.doc.models import State, StateType
from ietf.name.models import StreamName
def state_index(request):
types = StateType.objects.all()
names = [ type.slug for type in types ]
for type in types:
if "-" in type.slug and type.slug.split('-',1)[0] in names:
type.stategroups = None
else:
groups = StateType.objects.filter(slug__startswith=type.slug)
type.stategroups = [ g.slug[len(type.slug)+1:] for g in groups if not g == type ] or ""
return render(request, 'help/state_index.html', {"types": types})
def state(request, doc, type=None):
if type:
streams = [ s.slug for s in StreamName.objects.all() ]
if type in streams:
type = "stream-%s" % type
slug = "%s-%s" % (doc,type) if type else doc
statetype = get_object_or_404(StateType, slug=slug)
states = State.objects.filter(used=True, type=statetype).order_by('order')
return render(request, 'help/states.html', {"doc": doc, "type": statetype, "states":states} )
def environment(request):
if request.is_secure():
os.environ['SCHEME'] = "https"
else:
os.environ['SCHEME'] = "http"
os.environ["URL"] = request.build_absolute_uri(".")
return render(request, 'help/environment.html', {"env": os.environ} )
|
# global
import abc
from typing import Optional, Union
# local
import ivy
class ArrayWithLosses(abc.ABC):
def cross_entropy(
self: ivy.Array,
pred: Union[ivy.Array, ivy.NativeArray],
axis: Optional[int] = -1,
epsilon: Optional[float] = 1e-7,
*,
out: Optional[ivy.Array] = None
) -> ivy.Array:
return ivy.cross_entropy(self._data, pred, axis=axis, epsilon=epsilon, out=out)
def binary_cross_entropy(
self: ivy.Array,
pred: Union[ivy.Array, ivy.NativeArray],
epsilon: Optional[float] = 1e-7,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
return ivy.binary_cross_entropy(self._data, pred, epsilon=epsilon, out=out)
def sparse_cross_entropy(
self: ivy.Array,
pred: Union[ivy.Array, ivy.NativeArray],
axis: Optional[int] = -1,
epsilon: Optional[float] = 1e-7,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
return ivy.sparse_cross_entropy(
self._data, pred, axis=axis, epsilon=epsilon, out=out
)
|
from dmae import dissimilarities, layers, losses, metrics, initializers, normalizers
__all__ = [
"dissimilarities", "layers",
"losses", "metrics",
"initializers", "normalizers"
]
|
"""
Inspired by https://github.com/tkarras/progressive_growing_of_gans/blob/master/tfutil.py
"""
import tensorflow as tf
import numpy as np
seed = 1337
np.random.seed(seed)
tf.set_random_seed(seed)
# ---------------------------------------------------------------------------------------------
# For convenience :)
def run(*args, **kwargs):
return tf.get_default_session().run(*args, **kwargs)
def is_tf_expression(x):
return isinstance(x, tf.Tensor) or isinstance(x, tf.Variable) or isinstance(x, tf.Operation)
def safe_log(x, eps=1e-12):
with tf.name_scope("safe_log"):
return tf.log(x + eps)
def safe_log2(x, eps=1e-12):
with tf.name_scope("safe_log2"):
return tf.log(x + eps) * np.float32(1. / np.log(2.))
def lerp(a, b, t):
with tf.name_scope("lerp"):
return a + (b - a) * t
def lerp_clip(a, b, t):
with tf.name_scope("lerp_clip"):
return a + (b - a) * tf.clip_by_value(t, 0., 1.)
def gaussian_noise(x, std=5e-2):
noise = tf.random_normal(x.get_shape(), mean=0., stddev=std, dtype=tf.float32)
return x + noise
# ---------------------------------------------------------------------------------------------
# Image Sampling with TF
def down_sampling(img, interp=tf.image.ResizeMethod.BILINEAR):
shape = img.get_shape() # [batch, height, width, channels]
h2 = int(shape[1] // 2)
w2 = int(shape[2] // 2)
return tf.image.resize_images(img, [h2, w2], interp)
def up_sampling(img, interp=tf.image.ResizeMethod.BILINEAR):
shape = img.get_shape() # [batch, height, width, channels]
h2 = int(shape[1] * 2)
w2 = int(shape[2] * 2)
return tf.image.resize_images(img, [h2, w2], interp)
def resize_nn(x, size):
return tf.image.resize_nearest_neighbor(x, size=(int(size), int(size)))
# ---------------------------------------------------------------------------------------------
# Optimizer
class Optimizer(object):
def __init__(self,
name='train',
optimizer='tf.train.AdamOptimizer',
learning_rate=1e-3,
use_loss_scaling=False,
loss_scaling_init=64.,
loss_scaling_inc=5e-4,
loss_scaling_dec=1.,
use_grad_scaling=False,
grad_scaling=7.,
**kwargs):
self.name = name
self.optimizer = optimizer
self.learning_rate = learning_rate
self.use_loss_scaling = use_loss_scaling
self.loss_scaling_init = loss_scaling_init
self.loss_scaling_inc = loss_scaling_inc
self.loss_scaling_dec = loss_scaling_dec
self.use_grad_scaling = use_grad_scaling
self.grad_scaling = grad_scaling
# ---------------------------------------------------------------------------------------------
# Network
class Network:
def __init__(self):
pass
# ---------------------------------------------------------------------------------------------
# Functions
w_init = tf.contrib.layers.variance_scaling_initializer(factor=1., mode='FAN_AVG', uniform=True)
b_init = tf.zeros_initializer()
reg = 5e-4
w_reg = tf.contrib.layers.l2_regularizer(reg)
eps = 1e-5
# Layers
def conv2d(x, f=64, k=3, s=1, pad='SAME', reuse=None, name='conv2d'):
"""
:param x: input
:param f: filters
:param k: kernel size
:param s: strides
:param pad: padding
:param reuse: reusable
:param name: scope name
:return: net
"""
return tf.layers.conv2d(inputs=x,
filters=f, kernel_size=k, strides=s,
kernel_initializer=w_init,
kernel_regularizer=w_reg,
bias_initializer=b_init,
padding=pad,
reuse=reuse,
name=name)
def sub_pixel_conv2d(x, f, s=2):
"""
# ref : https://github.com/tensorlayer/SRGAN/blob/master/tensorlayer/layers.py
"""
if f is None:
f = int(int(x.get_shape()[-1]) / (s ** 2))
bsize, a, b, c = x.get_shape().as_list()
bsize = tf.shape(x)[0]
x_s = tf.split(x, s, 3)
x_r = tf.concat(x_s, 2)
return tf.reshape(x_r, (bsize, s * a, s * b, f))
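# Shape walkthrough (added comment): for input x of shape [N, a, b, c] with
# c = f * s**2, tf.split along the channel axis gives s tensors of shape
# [N, a, b, c/s]; tf.concat along axis 2 yields [N, a, s*b, c/s]; the final reshape
# produces [N, s*a, s*b, f], i.e. an s-times spatial upsampling (pixel shuffle).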
def deconv2d(x, f=64, k=3, s=1, pad='SAME', reuse=None, name='deconv2d'):
"""
:param x: input
:param f: filters
:param k: kernel size
:param s: strides
:param pad: padding
:param reuse: reusable
:param name: scope name
:return: net
"""
return tf.layers.conv2d_transpose(inputs=x,
filters=f, kernel_size=k, strides=s,
kernel_initializer=w_init,
kernel_regularizer=w_reg,
bias_initializer=b_init,
padding=pad,
reuse=reuse,
name=name)
def dense(x, f=1024, reuse=None, name='fc'):
"""
:param x: input
:param f: fully connected units
:param reuse: reusable
:param name: scope name
:return: net
"""
return tf.layers.dense(inputs=x,
units=f,
kernel_initializer=w_init,
kernel_regularizer=w_reg,
bias_initializer=b_init,
reuse=reuse,
name=name)
# Normalize
def batch_norm(x, momentum=0.9, scaling=True, is_train=True, reuse=None, name="bn"):
return tf.layers.batch_normalization(inputs=x,
momentum=momentum,
epsilon=eps,
scale=scaling,
training=is_train,
reuse=reuse,
name=name)
def instance_norm(x, affine=True, reuse=None, name=""):
with tf.variable_scope('instance_normalize-%s' % name, reuse=reuse):
mean, variance = tf.nn.moments(x, [1, 2], keep_dims=True)
normalized = tf.div(x - mean, tf.sqrt(variance + eps))
if not affine:
return normalized
else:
depth = x.get_shape()[3] # input channel
scale = tf.get_variable('scale', [depth],
initializer=tf.random_normal_initializer(mean=1., stddev=.02, dtype=tf.float32))
offset = tf.get_variable('offset', [depth],
initializer=tf.zeros_initializer())
return scale * normalized + offset
def pixel_norm(x):
return x / tf.sqrt(tf.reduce_mean(tf.square(x), axis=[1, 2, 3], keep_dims=True) + eps)
# Activations
def prelu(x, stddev=1e-2, reuse=False, name='prelu'):
with tf.variable_scope(name):
if reuse:
tf.get_variable_scope().reuse_variables()
_alpha = tf.get_variable('_alpha',
shape=x.get_shape(),
initializer=tf.constant_initializer(stddev),
dtype=x.dtype)
return tf.maximum(_alpha * x, x)
# Losses
def l1_loss(x, y):
return tf.reduce_mean(tf.abs(x - y))
def mse_loss(x, y): # l2_loss
return tf.reduce_mean(tf.squared_difference(x=x, y=y))
def rmse_loss(x, y):
return tf.sqrt(mse_loss(x, y))
def psnr_loss(x, y):
return 20. * tf.log(tf.reduce_max(x) / mse_loss(x, y))
def sce_loss(data, label):
return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=data, labels=label))
def softce_loss(data, label):
return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=data, labels=label))
def pullaway_loss(x):
n = x.get_shape()[0]
# PullAway Loss # 2.4 Repelling Regularizer in 1609.03126.pdf
normalized = x / tf.sqrt(tf.reduce_sum(tf.square(x), 1, keep_dims=True))
similarity = tf.matmul(normalized, normalized, transpose_b=True)
return (tf.reduce_sum(similarity) - n) / (n * (n - 1))
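# Hedged NumPy sketch (added for illustration, not part of the original module):
# the same quantity as pullaway_loss above. Rows of x are L2-normalized, the Gram
# matrix gives pairwise cosine similarities, the diagonal (which sums to n) is
# removed, and the remainder is averaged over the n * (n - 1) off-diagonal pairs.
# A small eps is added here for numerical safety; the TF version above omits it.
def pullaway_loss_np(x, eps=1e-12):
    normalized = x / np.sqrt(np.sum(np.square(x), axis=1, keepdims=True) + eps)
    similarity = normalized @ normalized.T
    n = x.shape[0]
    return (np.sum(similarity) - n) / (n * (n - 1))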
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 18 20:55:41 2017
@author: virilo
"""
import numpy as np
COLOR_DEPTH=255.0
z=[]
eps = - 2.0 * 16.0 / COLOR_DEPTH
for n in range(256):
normalized=(np.float32(n)/ COLOR_DEPTH) * 2.0 - 1.0
# we add eps
normalized=normalized + eps
# de_normalized=(((np.float32(normalized) + 1.0) * 0.5) * COLOR_DEPTH).astype(np.int16)
de_normalized=np.round(COLOR_DEPTH * (np.float32(normalized) + 1.0) * 0.5).astype(np.int16)
print (n, " -> ", normalized, " -> ",np.int16(de_normalized))
z+=[de_normalized]
for i in range(256):
if i-16 not in z:
print("ERROR: ", i-16)
|
class Direction:
LEFT = 0
RIGHT = 1
UP = 2
DOWN = 3
|
from django.contrib.auth.decorators import permission_required
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import JsonResponse, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.generic import View, DetailView
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from pinax.announcements import signals
from pinax.announcements.forms import AnnouncementForm
from pinax.announcements.models import Announcement
class AnnouncementDetailView(DetailView):
template_name = "pinax/announcements/announcement_detail.html"
model = Announcement
context_object_name = 'announcement'
class AnnouncementDismissView(SingleObjectMixin, View):
model = Announcement
def post(self, request, *args, **kwargs):
self.object = self.get_object()
if self.object.dismissal_type == Announcement.DISMISSAL_SESSION:
# get list from session and type it to set()
excluded = set(request.session.get("excluded_announcements", []))
excluded.add(self.object.pk)
# force to list to avoid TypeError on set() json serialization
request.session["excluded_announcements"] = list(excluded)
status = 200
elif self.object.dismissal_type == Announcement.DISMISSAL_PERMANENT and \
request.user.is_authenticated():
self.object.dismissals.create(user=request.user)
status = 200
else:
status = 409
if request.is_ajax():
return JsonResponse({}, status=status)
return HttpResponseRedirect(request.META.get("HTTP_REFERER", "/"))
class ProtectedView(View):
@method_decorator(permission_required("announcements.can_manage"))
def dispatch(self, *args, **kwargs):
return super(ProtectedView, self).dispatch(*args, **kwargs)
class AnnouncementCreateView(ProtectedView, CreateView):
template_name = "pinax/announcements/announcement_form.html"
model = Announcement
form_class = AnnouncementForm
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.creator = self.request.user
self.object.save()
signals.announcement_created.send(
sender=self.object,
announcement=self.object,
request=self.request
)
return super(AnnouncementCreateView, self).form_valid(form)
def get_success_url(self):
return reverse("pinax_announcements:announcement_list")
class AnnouncementUpdateView(ProtectedView, UpdateView):
template_name = "pinax/announcements/announcement_form.html"
model = Announcement
form_class = AnnouncementForm
def form_valid(self, form):
response = super(AnnouncementUpdateView, self).form_valid(form)
signals.announcement_updated.send(
sender=self.object,
announcement=self.object,
request=self.request
)
return response
def get_success_url(self):
return reverse("pinax_announcements:announcement_list")
class AnnouncementDeleteView(ProtectedView, DeleteView):
template_name = "pinax/announcements/announcement_confirm_delete.html"
model = Announcement
success_url = reverse_lazy("pinax_announcements:announcement_list")
def delete(self, request, *args, **kwargs):
response = super(AnnouncementDeleteView, self).delete(request, *args, **kwargs)
# hookset.announcement_deleted_message(self.request, self.object)
signals.announcement_deleted.send(
sender=None,
announcement=self.object,
request=self.request
)
return response
class AnnouncementListView(ProtectedView, ListView):
template_name = "pinax/announcements/announcement_list.html"
model = Announcement
queryset = Announcement.objects.all().order_by("-creation_date")
paginate_by = 50
|
from django.db import models
from django.conf import settings
from django.contrib.auth.models import (AbstractBaseUser,
PermissionsMixin, BaseUserManager
)
class UserProfilesManager(BaseUserManager):
def create_user(self, email,first_name,last_name, password=None):
if not email:
raise ValueError('Users must provide an email address')
email = self.normalize_email(email)
user = self.model(
email=email,
first_name=first_name,
last_name=last_name
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password, **extra_fields):
user = self.create_user(email=email,password=password,**extra_fields)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser,PermissionsMixin):
first_name = models.CharField(max_length=255, blank=True)
last_name = models.CharField(max_length=255, blank=True)
email = models.EmailField(max_length=255, unique=True)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfilesManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['first_name', 'last_name']
def get_full_name(self):
return self.first_name + '\t' + self.last_name
def get_short_name(self):
return self.first_name
def __str__(self):
return self.email
class Book(models.Model):
PROGRAMMING = 'prog'
MATHEMATICS = 'math'
FICTION = 'fic'
COMIC = 'com'
MYTHOLOGY = 'myth'
CATEGORY_CHOICES = [
(PROGRAMMING, 'Programming'),
(MATHEMATICS, 'Mathematics'),
(FICTION, 'Fiction'),
(COMIC, 'Comic Book'),
(MYTHOLOGY, 'Mythology'),
]
owner = models.ForeignKey(settings.AUTH_USER_MODEL,related_name='my_books',on_delete=models.CASCADE)
title = models.CharField(max_length=255)
sub_title = models.CharField(max_length=255,blank=True)
category = models.CharField(
max_length=20,
choices=CATEGORY_CHOICES
)
pages = models.PositiveIntegerField()
def __str__(self):
return self.title
class Author(models.Model):
book_id = models.ForeignKey(Book,on_delete=models.CASCADE,related_name='books')
first_name = models.CharField(max_length=80)
last_name = models.CharField(max_length=80)
initials = models.CharField(max_length=5, blank=True)
email = models.EmailField()
def __str__(self):
return self.email
|
__author__ = "Max Dippel, Michael Burkart and Matthias Urban"
__version__ = "0.0.1"
__license__ = "BSD"
import numpy as np
class LossWeightStrategyWeighted():
def __call__(self, pipeline_config, X, Y):
counts = np.sum(Y, axis=0)
total_weight = Y.shape[0]
if len(Y.shape) > 1:
weight_per_class = total_weight / Y.shape[1]
weights = (np.ones(Y.shape[1]) * weight_per_class) / np.maximum(counts, 1)
else:
classes, counts = np.unique(Y, axis=0, return_counts=True)
classes, counts = classes[::-1], counts[::-1]
weight_per_class = total_weight / classes.shape[0]
weights = (np.ones(classes.shape[0]) * weight_per_class) / counts
return weights
class LossWeightStrategyWeightedBinary():
def __call__(self, pipeline_config, X, Y):
counts_one = np.sum(Y, axis=0)
counts_zero = Y.shape[0] - counts_one
weights = counts_zero / np.maximum(counts_one, 1)
return weights
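# Hedged worked example (added for illustration, assuming one-hot / multi-label Y):
# for Y with 4 rows and 2 columns whose column sums are [3, 1], total_weight = 4 and
# weight_per_class = 4 / 2 = 2, so LossWeightStrategyWeighted yields [2/3, 2]; the
# rarer class receives the larger weight.
if __name__ == "__main__":
    Y = np.array([[1, 0], [1, 0], [1, 0], [0, 1]])
    weights = LossWeightStrategyWeighted()(None, None, Y)
    assert np.allclose(weights, [2.0 / 3.0, 2.0])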
|
#! /usr/bin/python
from setuptools import setup
import os.path
setup(name='passencode',
version='2.0.0',
description='passencode',
author='Nicolas Vanhoren',
author_email='nicolas.vanhoren@unknown.com',
url='https://github.com/nicolas-van/passencodeu',
py_modules = [],
packages=[],
scripts=["passencode"],
long_description="Simple program to generate a password hash using passlib",
keywords="",
license="MIT",
classifiers=[
],
install_requires=[
"passlib",
"click >= 3.3",
],
)
|
#!/usr/bin/python
import sys
import json
import os
from collections import OrderedDict
poolInitialSize = sys.argv[1]
poolMaxSize=sys.argv[2]
appSettingsFilePath = "/usr/lib64/microsoft-r/rserver/o16n/9.1.0/Microsoft.RServer.ComputeNode/appsettings.json"
f = open(appSettingsFilePath, "r")
jsondata = f.read().decode("utf-8-sig").encode("utf-8").replace("\r\n","")
data = json.loads(jsondata, object_pairs_hook=OrderedDict)
data["Pool"]["InitialSize"] = int(poolInitialSize)
data["Pool"]["MaxSize"] = int(poolMaxSize)
f = open(appSettingsFilePath, "w")
json.dump(data, f, indent=4, sort_keys=False)
f.close()
os.system("/usr/local/bin/dotnet /usr/lib64/microsoft-r/rserver/o16n/9.1.0/Microsoft.RServer.Utils.AdminUtil/Microsoft.RServer.Utils.AdminUtil.dll -silentcomputenodeinstall")
os.system("iptables --flush")
|
"""Unittests for crypto challenges set 1."""
import unittest
import encode_decode as set_1
class TestChallenges(unittest.TestCase):
"""Test if the challenges were correctly implemented."""
def test_decode_hex(self):
"""Test decoding of hex string."""
self.assertEqual(255, list(set_1.decode_hex('ff'))[0])
self.assertEqual(171, list(set_1.decode_hex('ab'))[0])
self.assertEqual(5, list(set_1.decode_hex('5'))[0])
self.assertEqual(15, list(set_1.decode_hex('f'))[0])
self.assertEqual([73, 39], list(set_1.decode_hex('4927')))
self.assertEqual([15, 59, 172], list(set_1.decode_hex('0F3BAC')))
self.assertEqual([202, 254, 186, 190],
list(set_1.decode_hex('CAFEBABE')))
def test_bytes_to_int(self):
"""Test bytes -> int."""
self.assertEqual(0, set_1.bytes_to_int(b'\x00'))
self.assertEqual(10, set_1.bytes_to_int(b'\x0a'))
self.assertEqual(255, set_1.bytes_to_int(b'\xff'))
self.assertEqual(256, set_1.bytes_to_int(b'\x01\x00'))
self.assertEqual(265, set_1.bytes_to_int(b'\x01\x09'))
self.assertEqual(1376010, set_1.bytes_to_int(b'\x14\xff\x0a'))
self.assertEqual(2335, set_1.bytes_to_int(b'\x00\x09\x1f'))
def test_base64_map(self):
"""Test mapping of values to base64."""
self.assertEqual('A', set_1.base64_map(0))
self.assertEqual('J', set_1.base64_map(9))
self.assertEqual('f', set_1.base64_map(31))
self.assertEqual('3', set_1.base64_map(55))
self.assertEqual('+', set_1.base64_map(62))
self.assertEqual('/', set_1.base64_map(63))
def test_int_to_base64(self):
"""Test mapping of values from int to base64."""
self.assertEqual('AAAA', set_1.int_to_base64(0))
self.assertEqual('AAAJ', set_1.int_to_base64(9))
self.assertEqual('AAKf', set_1.int_to_base64(64 * 10 + 31))
self.assertEqual('A/b/', set_1.int_to_base64((64 * 63 + 27) * 64 + 63))
def test_encode_base64(self):
"""Test encoding of byte array to base64."""
self.assertEqual('AJc=', set_1.encode_base64(b'\x00\x97'))
def test_challenge_1(self):
"""Test `hex2base64()`."""
self.assertEqual('SSc=', set_1.hex2base64('4927'))
self.assertEqual('Dzus', set_1.hex2base64('0F3BAC'))
self.assertEqual('DzusE0Y=', set_1.hex2base64('0F3BAC1346'))
self.assertEqual('yv66vg==', set_1.hex2base64('CAFEBABE'))
expected = ('SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsa'
+ 'WtlIGEgcG9pc29ub3VzIG11c2hyb29t')
result = set_1.hex2base64('49276d206b696c6c696e67207'
+ '96f757220627261696e206c6'
+ '96b65206120706f69736f6e6f7573'
+ '206d757368726f6f6d')
self.assertEqual(expected, result, 'Challenge 1 failed')
if __name__ == '__main__':
unittest.main()
|
import json
import logging
from pathlib import Path
from typing import Dict, Text, Union
import requests
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
from src.coins.base import Coin
from src.lib.config import Config
MODULE_LOGGER = logging.getLogger(Path(__file__).name)
def get_current_price(config: Config, coin: Coin):
""" """
headers = {
"Accepts": "application/json",
"X-CMC_PRO_API_KEY": config.api_key,
}
params = {"id": coin.id}
response = send_request(url=config.cryptocurrency_quotes_latest_url, headers=headers, params=params)
if response:
data = json.loads(response)["data"]
price = data[coin.id]["quote"]["USD"]["price"]
return price
else:
return None
def send_request(
url: Text,
headers: Union[None, Dict] = None,
params: Union[None, Dict] = None,
):
with requests.Session() as session:
if headers:
MODULE_LOGGER.debug(f"headers: {headers}")
session.headers.update(headers)
if params:
MODULE_LOGGER.debug(f"params: {params}")
session.params.update(params)
try:
MODULE_LOGGER.debug(f"Sending a request to: {url}")
response = session.get(url)
return response.text
except (ConnectionError, Timeout, TooManyRedirects):
MODULE_LOGGER.error("An error has occurred during request send.", exc_info=True)
return None
|
def divisibleSumPairs(n, k, ar):
numPairs = 0
ar = sorted(ar)
for i in range(n-1):
for j in range(i,n):
if ((ar[i]+ar[j]) % k == 0) and i < j:
numPairs +=1
return numPairs
if __name__ =="__main__":
#ar = [43 ,95 ,51 ,55 ,40 ,86 ,65 ,81 ,51 ,20 ,47 ,50 ,65 ,53 ,23 ,78 ,75 ,75 ,47 ,73 ,25 ,27 ,14 ,8 ,26 ,58 ,95 ,28 ,3 ,23 ,48 ,69 ,26 ,3 ,73 ,52 ,34 ,7 ,40 ,33 ,56 ,98 ,71 ,29 ,70 ,71 ,28 ,12 ,18 ,49 ,19 ,25 ,2 ,18 ,15 ,41 ,51 ,42 ,46 ,19 ,98 ,56 ,54 ,98 ,72 ,25 ,16 ,49 ,34 ,99 ,48 ,93 ,64 ,44 ,50 ,91 ,44 ,17 ,63 ,27 ,3 ,65 ,75 ,19 ,68 ,30 ,43 ,37 ,72 ,54 ,82 ,92 ,37 ,52 ,72 ,62 ,3 ,88 ,82 ,71]
#k = 22
#n = 100
n = 6
k = 3
ar = [1, 3, 2, 6, 1, 2]
print(divisibleSumPairs(n,k,ar))
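# Hedged alternative (added for comparison, not part of the original solution):
# counting elements by remainder mod k answers the same question in O(n + k) rather
# than O(n^2). Remainder r pairs with remainder (k - r) % k; remainder 0 (and k/2
# when k is even) pairs within its own bucket.
def divisibleSumPairsFast(n, k, ar):
    counts = [0] * k
    for value in ar:
        counts[value % k] += 1
    pairs = counts[0] * (counts[0] - 1) // 2
    for r in range(1, k // 2 + 1):
        if r == k - r:
            pairs += counts[r] * (counts[r] - 1) // 2
        else:
            pairs += counts[r] * counts[k - r]
    return pairs
assert divisibleSumPairsFast(6, 3, [1, 3, 2, 6, 1, 2]) == divisibleSumPairs(6, 3, [1, 3, 2, 6, 1, 2])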
|
import torch
from matplotlib import pyplot as plt
import math
π = math.pi
torch.manual_seed(4)
X = torch.randn(2, 2)
Y = torch.zeros(2)
Y[0] = 1
s1 = 320
s2 = 26000
s1 = 160
s2 = 120
def R(θ):
θ = torch.tensor(θ)
return torch.tensor([[torch.cos(θ), -torch.sin(θ)],
[torch.sin(θ), torch.cos(θ)]])
# %% Figure 1a
n = 4
θs = torch.arange(0, 2*π, 2*π/n)
RX = torch.stack([X@R(θ).T for θ in θs])
fig, ax = plt.subplots(figsize=(4,4))
ax.scatter(*RX[0,0].T, c='orange', marker='*', s=s1)
ax.scatter(*RX[1:,0].T, c='orange', s=s2)
ax.scatter(*RX[0,1].T, c='g', marker='*', s=s1)
ax.scatter(*RX[1:,1].T, c='g', s=s2)
ax.scatter([0], [0], s=s1, facecolors='none', edgecolors='b')
fig.show()
fig.savefig('rot_mat_orbits.pdf')
# %% Figure 1b
torch.manual_seed(5)
X = torch.randn(2, 3)
Y = torch.zeros(2)
Y[0] = 1
def R(θ):
θ = torch.tensor(θ)
return torch.tensor([[torch.cos(θ), -torch.sin(θ), 0],
[torch.sin(θ), torch.cos(θ), 0],
[0,0,1]])
RX = torch.stack([X@R(θ).T for θ in θs])
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter(*RX[0,0].T, c='orange', marker='*', s=s1)
ax.scatter(*RX[1:,0].T, c='orange', s=s2)
ax.scatter(*RX[0,1].T, c='g', marker='*', s=s1)
ax.scatter(*RX[1:,1].T, c='g', s=s2)
ax.plot([0, 0], [0, 0], [-1.8, 2])
ax.set_xlim([-1, 1])
ax.set_ylim([-1, 1])
ax.set_zlim([-2, 2])
ax.view_init(azim=-38, elev=19)
ax.set_xticks([-1, -.5, 0, .5, 1])
ax.set_yticks([-1, -.5, 0, .5, 1])
ax.set_zticks([-2, -1, 0, 1, 2])
# ax.set_azim(-38)
# ax.set_elev(19)
ax.grid(False)
fig.savefig('rot_mat_orbits_3d.pdf')
fig.show()
# %% Figure 1c
torch.manual_seed(1)
X = torch.randn(2, 3)
def Ck(x, k):
return torch.roll(x, k, dims=[-1])
RX = torch.stack([Ck(X,k) for k in range(3)])
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter(*RX[0,0].T, c='orange', marker='*', s=s1)
ax.scatter(*RX[1:,0].T, c='orange', s=s2)
ax.scatter(*RX[0,1].T, c='g', marker='*', s=s1)
ax.scatter(*RX[1:,1].T, c='g', s=s2)
ax.plot([-.3, .5], [-.3, .5], [-.3, .5])
ax.set_xlim([-1, 1])
ax.set_ylim([-1, 1])
ax.set_zlim([-2, 2])
ax.view_init(azim=-38, elev=19)
ax.set_xticks([-1, -.5, 0, .5, 1])
ax.set_yticks([-1, -.5, 0, .5, 1])
ax.set_zticks([-1, -.5, 0, .5, 1])
# ax.set_azim(-38)
# ax.set_elev(19)
ax.grid(False)
ax.axis('auto')
fig.savefig('cyc_mat_orbits_3d.pdf')
fig.show()
|
# coding: utf-8
from __future__ import unicode_literals
import unittest
import responses
from admitad.items import LinksValidator
from admitad.tests.base import BaseTestCase
class LinksValidationTestCase(BaseTestCase):
def test_link_validation_request(self):
with responses.RequestsMock() as resp:
resp.add(
resp.GET,
self.prepare_url(LinksValidator.URL, params={
'link': 'https://google.com/'
}),
match_querystring=True,
json={
'message': 'Link tested.',
'success': 'Accepted'
},
status=200
)
result = self.client.LinksValidator.get('https://google.com/')
self.assertIn('message', result)
self.assertIn('success', result)
if __name__ == '__main__':
unittest.main()
|
from datetime import datetime
from vaccineAvailabilityNotifier.client.actionsImpl import ActionsImpl
from vaccineAvailabilityNotifier.util import response_processor_utils
def get_url(params={}):
return 'https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id=' + params[
"district_id"] + '&date=' + \
params["date"]
class ProcessByDistrictId:
__action_processor = ActionsImpl()
def __init__(self, state_name, district_name, sender_email_id, sender_email_password, receiver_email, include_45,
district_id):
self.sender_email_id = sender_email_id
self.sender_email_password = sender_email_password
self.receiver_email = receiver_email
self.include_45 = include_45
self.state_name = state_name
self.district_name = district_name
self.district_id = district_id
def process(self):
print('sender\'s email : ' + self.sender_email_id)
print(self.receiver_email)
print("\n\n\n")
response = self.__action_processor.get(
url=get_url({
'district_id': self.district_id,
'date': datetime.today().strftime('%d-%m-%Y')
})
)
response_processor_utils.process(response=response, include_45=self.include_45,
receiver_email=self.receiver_email,
sender_email_id=self.sender_email_id,
sender_email_password=self.sender_email_password)
|
import sys
if sys.version_info < (3, 0):
sys.stdout.write("Sorry, requires Python 3.x, not Python 2.x\n")
sys.exit(1)
import random
import itertools
import copy
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from time import localtime, strftime
import signal
import sys
import time
import argparse
import os
# Parse arguments to fill constants
# This is to enable SPMD processing in the future
parser = argparse.ArgumentParser(description='Run a genetic algorithm to group participants into groups')
parser.add_argument('-n', '--numparticipants', type=int, help="Number of participants for grouping exercise")
parser.add_argument('-s', '--groupsize', type=int, help="Number of participants per group")
parser.add_argument('-p', '--populationsize', type=int, help="Size of the population")
parser.add_argument('-g', '--generations', type=int, help="Number of generations")
parser.add_argument('-el', '--numelitism', type=int, help="Number of individuals in population that are from the previous elite")
parser.add_argument('-rest', '--numrest', type=int, help="Number of randomly chosen non-elite parents")
parser.add_argument('-pos', '--positiveweight', type=int, help="(Testing) Weight assigned to a link between two willing group members")
parser.add_argument('-neg', '--negativeweight', type=int, help="(Testing) Weight assigned to a link between two unwilling group members")
parser.add_argument('-mchance', '--mutationchance', type=float, help="Chance of mutation for the next generation")
parser.add_argument('-mswaps', '--mutationswaps', type=int, help="Number of group member swaps to do during each mutation (mutation aggressiveness)")
parser.add_argument('-hof', '--numhalloffame', type=int, help="Number of individuals preserved in the hall of fame")
parser.add_argument('-d', '--debug', action="store_true", help="Turns on debug printing")
parser.add_argument('-nt', '--notest', action="store_true", help="Forces this out of test mode")
parser.add_argument('-gh', '--graphhide', action="store_true", help="Do not show a summary graph at the end")
parser.add_argument('-gd', '--graphdir', help="Indicates the directory to place graphs in")
parser.add_argument('-r', '--rankings', help="CSV file for rankings information")
parser.add_argument('-b', '--bruteforce', action="store_true", help="Disable genetic algorithm and use bruteforce random search instead")
args_dict = vars(parser.parse_args())
# Parameters for the problem
NUM_PARTICIPANTS = args_dict['numparticipants'] or 30
PARTICIPANTS_PER_GROUP = args_dict['groupsize'] or 3
assert(NUM_PARTICIPANTS % PARTICIPANTS_PER_GROUP == 0)
NUM_GROUPS = int(NUM_PARTICIPANTS / PARTICIPANTS_PER_GROUP)
# Bruteforce random or genetic
BRUTEFORCE = args_dict['bruteforce'] or False
# Parameters for the genetic algorithm
POPULATION_SIZE = args_dict['populationsize'] or 1000
NUM_GENERATIONS = args_dict['generations'] or 100
NUM_ELITISM = args_dict['numelitism'] or int(POPULATION_SIZE / 4)
NUM_REST_PARENTS = args_dict['numrest'] or int(POPULATION_SIZE / 4)
NUM_CHILDREN = POPULATION_SIZE - NUM_ELITISM - NUM_REST_PARENTS
POSITIVE_WEIGHT = args_dict['positiveweight'] or 100
NEGATIVE_WEIGHT = args_dict['negativeweight'] or -1000
MUTATION_CHANCE = args_dict['mutationchance'] or 0.1
MUTATION_NUM_SWAPS = args_dict['mutationswaps'] or 1
HALL_OF_FAME_SIZE = args_dict['numhalloffame'] or 5
# Printing params
DEBUG = args_dict['debug'] or False
NOTEST = args_dict['notest'] or False
GRAPHHIDE = args_dict['graphhide'] or False
GRAPHDIR = args_dict['graphdir'] or 'graphs/'
# Non-constants
# Plotting params
xs = []
ys = []
hall_of_fame = []
ranking = [[NEGATIVE_WEIGHT for x in range(NUM_PARTICIPANTS)]
for y in range(NUM_PARTICIPANTS)]
if PARTICIPANTS_PER_GROUP == 2 and (not NOTEST):
# For groups of size 2
for i in range(NUM_PARTICIPANTS):
ranking[i][i] = 0
if i % 2 == 0:
# really want the person 'next' to you (only one "correct answer")
ranking[i][i + 1] = POSITIVE_WEIGHT
else:
# really want the person 'next' to you (only one "correct answer")
ranking[i][i - 1] = POSITIVE_WEIGHT
elif PARTICIPANTS_PER_GROUP == 3 and (not NOTEST):
# For groups of size 3
for i in range(NUM_PARTICIPANTS):
ranking[i][i] = 0 # cannot rank yourself (changing this from 0 is NOT ALLOWED)
if i % 3 == 0:
ranking[i][i + 1] = POSITIVE_WEIGHT
ranking[i][i + 2] = POSITIVE_WEIGHT
elif i % 3 == 1:
ranking[i][i + 1] = POSITIVE_WEIGHT
ranking[i][i - 1] = POSITIVE_WEIGHT
elif i % 3 == 2:
ranking[i][i - 1] = POSITIVE_WEIGHT
ranking[i][i - 2] = POSITIVE_WEIGHT
else:
assert(False)
elif NOTEST:
import csv
file_location = args_dict["rankings"] or "rankings.csv"
with open(file_location) as csvfile:
ranking = list(csv.reader(csvfile, delimiter=','))
for rowidx, row in enumerate(ranking):
for colidx, cell in enumerate(row):
ranking[rowidx][colidx] = int(ranking[rowidx][colidx])
print(cell, end=' ')
print("\n")
import time
# time.sleep(10)
# Seed random?
def is_valid_grouping(grouping):
# number of groups must be correct
groups_cor = len(grouping) == NUM_GROUPS
# number of individuals per group must be correct
num_groupmem_cor = len(list(filter(lambda g: len(g) != PARTICIPANTS_PER_GROUP, grouping))) == 0
# All individuals should be included
all_included = set(list(itertools.chain.from_iterable(grouping))) == set(range(NUM_PARTICIPANTS))
return (groups_cor and num_groupmem_cor and all_included)
# Gets the list of participant numbers and randomizes splitting them up
# into groups
def generateRandomGrouping():
participants = [i for i in range(NUM_PARTICIPANTS)]
random.shuffle(participants)
idx = 0
grouping = []
for g in range(NUM_GROUPS):
group = []
for p in range(PARTICIPANTS_PER_GROUP):
group.append(participants[idx])
idx += 1
grouping.append(group)
return grouping
# Generate an initial list of groupings by randomly creating them
def generateInitialPopulation(population_size):
population = []
for i in range(population_size):
population.append(generateRandomGrouping())
return population
def print_population(population):
for p in population:
print(p)
def print_ranking(ranking):
rank_source = 0
rank_target = 0
for row in ranking:
rank_target = 0
for col in row:
print(str(rank_source) + " -> " +
str(rank_target) + ": " + str(col))
rank_target += 1
print("------------")
rank_source += 1
# Given a single group in a grouping, evaluate that group's fitness
def group_fitness(group):
group_fitness_score = 0
# All-pairs sum of rankings
for participant in group:
for other_participant in group:
group_fitness_score += ranking[participant][other_participant]
# print(str(participant) + "-> " + str(other_participant) + " - new grp fit: " + str(group_fitness_score))
return group_fitness_score
# Given a single grouping, evaluate its overall fitness
def fitness(grouping):
fitness_score = 0
for group in grouping:
fitness_score += group_fitness(group)
return fitness_score
# We will select some number of parents to produce the next generation of offspring
# Modifies the population_with_fitness param, cannot be used after
def select_parents_to_breed(sorted_population_with_fitness):
selected_parents = []
# Sort the incoming population by their fitness with the best individuals being at the end
# population_with_fitness.sort(key=lambda x: x[1])
# Select the most elite individuals to breed and carry on to next gen as well
for i in range(NUM_ELITISM):
selected_parents.append(sorted_population_with_fitness.pop())
# Select the rest of the mating pool by random chance
# TODO: This needs to be a weighted sample!
selected_parents.extend(random.sample(sorted_population_with_fitness, NUM_REST_PARENTS))
#print("Selected parents")
#print_population(selected_parents)
# Don't return the weights
return list(map(lambda x: x[0], selected_parents))
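# Hedged sketch for the TODO above (added for illustration, not wired into the
# algorithm): a fitness-proportional ("roulette wheel") sample of the remaining
# parents. Fitness values can be negative here, so they are shifted to be strictly
# positive before weighting. random.choices (Python 3.6+) samples with replacement,
# which is usually acceptable for a mating pool.
def weighted_sample_of_parents(population_with_fitness, k):
    fitnesses = [fit for (_, fit) in population_with_fitness]
    offset = 1 - min(fitnesses)
    weights = [fit + offset for fit in fitnesses]
    chosen = random.choices(population_with_fitness, weights=weights, k=k)
    return [grouping for (grouping, _) in chosen]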
# Potentially the most important function
# Given two sets of groupings - we need to produce a new valid grouping
def breed_two_parents(p1, p2):
child = []
# Custom copy and append (deepcopy profiling says it takes up the majority of runtime)
group_pool = [list(x) for x in p1] + [list(x) for x in p2]
child = random.sample(group_pool, NUM_GROUPS)
#print("Initial child of " + str(p1) + " and " + str(p2) + ": \n" + str(child))
# We need to "correct" the child so that it can be a valid group
# This also introduces a form of mutation
# We first need to find out where the repeats are in every group, and which participants are left out
missing_participants = set(range(NUM_PARTICIPANTS))
repeat_locations = {} # mapping between (participant num) -> [(groupidx, memberidx)]
for groupidx, group in enumerate(child):
for memberidx, participant in enumerate(group):
if participant in repeat_locations:
repeat_locations[participant].append((groupidx, memberidx))
else:
repeat_locations[participant] = [(groupidx, memberidx)]
missing_participants = missing_participants - set([participant])
# For each set of repeats, save one repeat location each for each repeated participant, but the rest need to be overwritten (therefore we're taking a sample of len(v) - 1)
repeat_locations = [random.sample(v, len(v) - 1) for (k,v) in repeat_locations.items() if len(v) > 1]
# Flatten list
repeat_locations = list(itertools.chain.from_iterable(repeat_locations))
#print("Missing participants: " + str(missing_participants))
#print("Repeat locations to replace: " + str(repeat_locations))
# Now we insert the missing participants into a random repeat location
random_locations_to_replace = random.sample(repeat_locations, len(missing_participants))
for idx, missing_participant in enumerate(missing_participants):
groupidx, memberidx = random_locations_to_replace[idx]
#print("Replacing val at : " + str(random_locations_to_replace[idx]) + " with " + str(missing_participant))
child[groupidx][memberidx] = missing_participant
#print("Final child: " + str(child))
return child
def breed(parents):
children = []
# Randomize the order of parents to allow for random breeding
randomized_parents = random.sample(parents, len(parents))
# We need to generate NUM_CHILDREN children, so breed parents until we get that
for i in range(NUM_CHILDREN):
child = breed_two_parents(randomized_parents[i % len(randomized_parents)], randomized_parents[(i + 1) % len(randomized_parents)])
children.append(child)
#print("Got child: " + str(child))
#print("Children: " + str(children))
#print()
return children
def mutate(population):
for grouping in population:
if random.random() < MUTATION_CHANCE:
# Mutate this grouping
for i in range(MUTATION_NUM_SWAPS):
# Swap random group members this many times
# Pick two random groups
groups_to_swap = random.sample(range(NUM_GROUPS), 2)
group_idx1 = groups_to_swap[0]
group_idx2 = groups_to_swap[1]
participant_idx1 = random.choice(range(PARTICIPANTS_PER_GROUP))
participant_idx2 = random.choice(range(PARTICIPANTS_PER_GROUP))
# Make the swap
temp = grouping[group_idx1][participant_idx1]
grouping[group_idx1][participant_idx1] = grouping[group_idx2][participant_idx2]
grouping[group_idx2][participant_idx2] = temp
def create_new_halloffame(old_hof, sorted_population_with_fitness):
old_hof.extend(sorted_population_with_fitness[-HALL_OF_FAME_SIZE:])
old_hof.sort(key=lambda x: x[1])
return old_hof[-HALL_OF_FAME_SIZE:]
def exit_handler(sig, frame):
print("\nEvolution complete or interrupted. \n")
#print("\n----- Final Hall Of Fame ----- ")
#print_population(hall_of_fame)
# Draw final results
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(1,1,1)
fig.suptitle('Fitness vs number of generations', fontsize=20)
ax1.set_title("N = " + str(NUM_PARTICIPANTS) + ", G = " + str(NUM_GROUPS) + ", NGEN = " + str(NUM_GENERATIONS) + ", POPSIZE = " + str(POPULATION_SIZE))
plt.xlabel('Number of Generations', fontsize=16)
plt.ylabel('Best Fitness Achieved', fontsize=16)
plt.plot(xs, ys)
os.makedirs(GRAPHDIR, exist_ok=True)
fig.savefig(GRAPHDIR + str(NUM_PARTICIPANTS) + 'p-' + str(NUM_GROUPS) + 'g-' + str(NUM_GENERATIONS) + 'gen-' + strftime("%Y-%m-%d--%H:%M:%S", localtime()) + '.png')
if not GRAPHHIDE:
plt.show()
sys.exit(0)
if __name__ == "__main__":
signal.signal(signal.SIGINT, exit_handler)
population = generateInitialPopulation(POPULATION_SIZE)
print()
print("Initial population:")
# print_population(population)
print("Ranking:")
print(ranking)
# Set up initial state for generations
generation = 0
best_match = ([], -sys.maxsize - 1)
start_time = time.time()
while generation < NUM_GENERATIONS:
print("\n---------- GENERATION " + str(generation) + " ----------")
population_with_fitness = list(map(lambda gs: (gs, fitness(gs)), population))
if DEBUG:
print("Population with fitness: ")
print_population(population_with_fitness)
# Everyone after this wants this list to be sorted, so immediately sort
population_with_fitness.sort(key=lambda x: x[1])
# Update the "Hall of Fame"
hall_of_fame = create_new_halloffame(hall_of_fame, population_with_fitness)
if DEBUG:
print("Hall of Fame: ")
print_population(hall_of_fame)
# Note a "best grouping"
best_grouping = max(population_with_fitness, key=lambda x: x[1])
if best_grouping[1] > best_match[1]:
best_match = copy.deepcopy(best_grouping)
if not BRUTEFORCE:
parents = select_parents_to_breed(population_with_fitness)
if DEBUG:
print("Parents: " + str(parents))
children = breed(parents)
if DEBUG:
print("Children: ")
print_population(children)
# Create new population, append parents and children together
new_population = []
new_population.extend(parents)
new_population.extend(children)
if DEBUG:
print("Pre-mutation: ")
print_population(new_population)
mutate(new_population)
if DEBUG:
print("Post-mutation: ")
print_population(new_population)
population = new_population
assert(all(map(is_valid_grouping, new_population)))
else:
# Bruteforce - no mutation
population = generateInitialPopulation(POPULATION_SIZE)
# Just a check to make sure all of the new generation are valid groups
#best_fitness_so_far = hall_of_fame[-1][1]
best_fitness_so_far = best_match[1]#hall_of_fame[-1][1]
print("Best fitness at generation " + str(generation) + " = " + str(best_fitness_so_far))
xs.append(generation)
ys.append(best_fitness_so_far)
# Measure time
iter_time = time.time()
time_per_generation = (iter_time - start_time) / (generation + 1)
time_remaining_seconds = time_per_generation * (NUM_GENERATIONS - generation - 1)
print("Time remaining: " + str(round(time_remaining_seconds, 2)) + " s |or| " + str(round(time_remaining_seconds/60, 2)) + " min |or| " + str(round(time_remaining_seconds / 3600, 2)) + " hours")
# Move on to next generation
generation += 1
print("best optimal group")
print_population(best_match)
# Common exit point for signals and at the end of the algorithm
exit_handler(None, None)
|
from clients import installed_games, steamapi
if __name__ == '__main__':
api = steamapi.SteamApiClient()
games = api.get_player_owned_games()
for game in games:
game['installed'] = installed_games.is_game_in_installed_games_list(game)
for game in sorted(games, key=lambda game: game['name']):
installed = 'Yes' if game['installed'] else 'No'
print(game['name'])
print(' Installed: ' + installed)
print(' Playtime in Minutes: ' + str(game['playtime_forever']))
print('')
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import re
import numpy as np
def test_vector_comparison():
if not tvm.runtime.enabled("vulkan"):
print("Skipping due to no Vulkan module")
return
target = 'vulkan'
def check_correct_assembly(dtype):
n = (1024,)
A = tvm.placeholder(n, dtype=dtype, name='A')
B = tvm.compute(
A.shape,
lambda i: tvm.expr.Select(
A[i] >= 0, A[i] + tvm.const(1, dtype),
tvm.const(0, dtype)), name='B')
s = tvm.create_schedule(B.op)
(bx, tx) = s[B].split(s[B].op.axis[0], factor=128)
(tx, vx) = s[B].split(tx, factor=4)
s[B].bind(bx, tvm.thread_axis("blockIdx.x"))
s[B].bind(tx, tvm.thread_axis("threadIdx.x"))
s[B].vectorize(vx)
f = tvm.build(s, [A, B], target)
# Verify we generate the boolx4 type declaration and the OpSelect
# v4{float,half,int} instruction
assembly = f.imported_modules[0].get_source()
matches = re.findall("%v4bool = OpTypeVector %bool 4", assembly)
assert len(matches) == 1
matches = re.findall("OpSelect %v4.*", assembly)
assert len(matches) == 1
check_correct_assembly('float32')
check_correct_assembly('int32')
check_correct_assembly('float16')
tx = tvm.thread_axis("threadIdx.x")
bx = tvm.thread_axis("blockIdx.x")
def test_vulkan_copy():
def check_vulkan(dtype, n):
if not tvm.vulkan(0).exist or not tvm.runtime.enabled("vulkan"):
print("skip because vulkan is not enabled..")
return
A = tvm.placeholder((n,), name='A', dtype=dtype)
ctx = tvm.vulkan(0)
a_np = np.random.uniform(size=(n,)).astype(A.dtype)
a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(a_np)
b_np = a.asnumpy()
tvm.testing.assert_allclose(a_np, b_np)
tvm.testing.assert_allclose(a_np, a.asnumpy())
for _ in range(100):
dtype = np.random.choice(["float32", "float16", "int8", "int32"])
logN = np.random.randint(1, 15)
perturb = np.random.uniform(low=0.5, high=1.5)
check_vulkan(dtype, int(perturb * (2 ** logN)))
def test_vulkan_vectorize_add():
num_thread = 8
def check_vulkan(dtype, n, lanes):
if not tvm.vulkan(0).exist or not tvm.runtime.enabled("vulkan"):
print("skip because vulkan is not enabled..")
return
A = tvm.placeholder((n,), name='A', dtype="%sx%d" % (dtype, lanes))
B = tvm.compute((n,), lambda i: A[i]+tvm.const(1, A.dtype), name='B')
s = tvm.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xo, bx)
s[B].bind(xi, tx)
fun = tvm.build(s, [A, B], "vulkan")
ctx = tvm.vulkan(0)
a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(
np.random.uniform(size=(n, lanes)))
c = tvm.nd.empty((n,), B.dtype, ctx)
fun(a, c)
tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1)
check_vulkan("float32", 64, 2)
check_vulkan("float16", 64, 2)
def test_vulkan_stress():
"""
Launch a randomized test with multiple kernels per stream, multiple uses of
kernels per stream, over multiple threads.
"""
import random
import threading
n = 1024
num_thread = 64
def run_stress():
def worker():
if not tvm.vulkan(0).exist or not tvm.runtime.enabled("vulkan"):
print("skip because vulkan is not enabled..")
return
A = tvm.placeholder((n,), name='A', dtype="float32")
B = tvm.placeholder((n,), name='B', dtype="float32")
functions = [
(lambda: tvm.compute((n,), lambda i: 2 * A[i] + 3 * B[i]),
lambda a, b: 2 * a + 3 * b),
(lambda: tvm.compute((n,), lambda i: A[i]+B[i]),
lambda a, b: a + b),
(lambda: tvm.compute((n,), lambda i: A[i]+2 * B[i]),
lambda a, b: a + 2 * b),
]
def build_f(f_ref):
(C_f, ref) = f_ref
C = C_f()
s = tvm.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
s[C].bind(xo, bx)
s[C].bind(xi, tx)
fun = tvm.build(s, [A, B, C], "vulkan")
return (fun, ref)
fs = [build_f(random.choice(functions))
for _ in range(np.random.randint(low=1, high=10))]
ctx = tvm.vulkan(0)
a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(
np.random.uniform(size=(n,)))
b = tvm.nd.empty((n,), B.dtype, ctx).copyfrom(
np.random.uniform(size=(n,)))
cs = [tvm.nd.empty((n,), A.dtype, ctx) for _ in fs]
for ((f, _), c) in zip(fs, cs):
f(a, b, c)
for ((_, ref), c) in zip(fs, cs):
tvm.testing.assert_allclose(
c.asnumpy(), ref(a.asnumpy(), b.asnumpy()))
ts = [threading.Thread(target=worker)
for _ in range(np.random.randint(1, 10))]
for t in ts:
t.start()
for t in ts:
t.join()
run_stress()
if __name__ == "__main__":
test_vector_comparison()
test_vulkan_copy()
test_vulkan_vectorize_add()
test_vulkan_stress()
|
import sys
import time
import json
import requests
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5 import QtCore
from PyQt5.QtCore import QThread, pyqtSignal, pyqtSlot
import design
class getPostsThread(QThread):
add_post_signal = pyqtSignal(str)
def __init__(self, subreddits):
"""
Make a new thread instance with the specified
subreddits as the first argument. The subreddits argument
will be stored in an instance variable called subreddits,
which can then be accessed by all other class instance functions
:param subreddits: A list of subreddit names
:type subreddits: list
"""
super().__init__()
self.subreddits = subreddits
def __del__(self):
self.wait()
def _get_top_post(self, subreddit):
"""
Return a pre-formed string with top post title, author,
and subreddit name from the subreddit passed as the only required
argument
:param subreddit: A valid subreddit name
:type subreddit: str
:return: A string with top post title, author, and
subreddit name from that subreddit
:rtype: str
"""
url = f'https://www.reddit.com/r/{subreddit}.json?limit=1'
headers = {'User-Agent': 'imdff0803@gmai.com for tutorial'}
res = requests.get(url, headers=headers)
data = json.loads(res.text)
top_post = data['data']['children'][0]['data']
return "'{title}' by {author} in {subreddit}".format(**top_post)
def run(self):
"""
Go over every item in the self.subreddits list
(which was supplied during __init__)
and for every item assume it's a string with valid subreddit
name and fetch the top post using the _get_top_post method
from reddit. Store the result in a local variable named
top_post and then emit a SIGNAL add_post(QString) where
QString is equal to the top_post variable that was set by the
_get_top_post function
"""
for subreddit in self.subreddits:
top_post = self._get_top_post(subreddit)
self.add_post_signal.emit(top_post)
time.sleep(2)
class ThreadingTutorial(QtWidgets.QMainWindow, design.Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
self.btn_start.clicked.connect(self.start_getting_top_posts)
def _get_top_post(self, subreddit):
        # Get the subreddits the user entered into a QLineEdit field
        # this will be equal to '' if there is no text entered
url = f'https://www.reddit.com/r/{subreddit}.json?limit=1'
headers = {'User-Agent': 'imdff0803@gmai.com for tutorial'}
res = requests.get(url, headers=headers)
data = json.loads(res.text)
top_post = data['data']['children'][0]['data']
return "'{title}' by {author} in {subreddit}".format(**top_post)
def _get_top_from_subreddits(self, subreddits):
for subreddit in subreddits:
yield self._get_top_post(subreddit)
time.sleep(2)
def start_getting_top_posts(self):
subreddit_list = str(self.edit_subreddits.text()).split(',')
if subreddit_list == ['']:
QtWidgets.QMessageBox.critical(self, "No subreddits",
"You didn't enter any subreddits.",
QtWidgets.QMessageBox.Ok)
return
# Set the maximum value of progress bar, can be any int and it will
# be automatically converted to x/100% values
# e.g. max_value = 3, current_value = 1, the progress bar will show 33%
self.progress_bar.setMaximum(len(subreddit_list))
# Setting the value on every run to 0
self.progress_bar.setValue(0)
# We have a list of subreddits which we use to create a new getPostsThread
# instance and we pass that list to the thread
self.get_thread = getPostsThread(subreddit_list)
# Next we need to connect the events from that thread to functions we want
# to be run when those signals get fired
# Adding post will be handled in the add_post method and the signal that
# the thread will emit is SIGNAL("add_post(QString)")
        # the rest is the same as connecting any other signal
self.get_thread.add_post_signal.connect(self.add_post)
# This is pretty self explanatory
        # regardless of whether the thread finishes or the user terminates it
        # we want to show the notification to the user that adding is done
        # and regardless of whether it was terminated or finished by itself
        # the finished signal will go off. So we don't need to catch the
# terminated one specifically, but we could if we wanted
self.get_thread.finished.connect(self.done)
        # Now that all the events we need are connected, we can start the thread
self.get_thread.start()
# At this point we want to allow user to stop/terminate the thread
# so we enable the button
self.btn_stop.setEnabled(True)
# And we connect the click of that button to the built in
# terminate method that all QThread instances have
self.btn_stop.clicked.connect(self.get_thread.terminate)
# We don't want to enable user to start another thread while this one is
# running so we disable the start button
self.btn_start.setEnabled(False)
def add_post(self, post_text):
"""
Add the text that's given to this function to the
list_submissions QListWidget we have in our GUI and
        increase the current value of the progress bar by 1
        :param post_text: text of the item to add to the list
        :type post_text: str
"""
self.list_submissions.addItem(post_text)
self.progress_bar.setValue(self.progress_bar.value() + 1)
def done(self):
"""
Show the message that fetching posts is done.
Disable Stop button, enable the Start one and reset progress bar to 0
"""
self.btn_stop.setEnabled(False)
self.btn_start.setEnabled(True)
self.progress_bar.setValue(0)
QtWidgets.QMessageBox.information(self, "Done!", "Done fetching posts")
def main():
app = QtWidgets.QApplication(sys.argv)
form = ThreadingTutorial()
form.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base and Combinators; these classes and functions allow you to compose
reporters together into compound reporters.
"""
from abc import ABCMeta
from typing import Any, Callable, Dict, Optional
import uv.reader.base as rb
import uv.types as t
import uv.util.attachment as a
class AbstractReporter(metaclass=ABCMeta):
"""Base class for all reporters. A reporter is a type that is able to log
timeseries of values for different t.MetricKey instances, one item at a time.
NOTE - by default, report_all and report are implemented in terms of one
another. This means that you can choose which method you'd like to override,
or override both... but if you don't override any you'll see infinite
recursion.
Be careful not to abuse the kindness!
"""
def report_param(self, k: str, v: str) -> None:
"""Accepts a key and value parameter and logs these as parameters alongside the
reported metrics.
"""
return None
def report_params(self, m: Dict[str, str]) -> None:
"""Accepts a dict of parameter name -> value, and logs these as parameters
alongside the reported metrics.
"""
return None
def report_all(self, step: int, m: Dict[t.MetricKey, t.Metric]) -> None:
"""Accepts a step (an ordered int referencing some timestep) and a dictionary
of metric key => metric value, and persists the metric into some underlying
store.
Extending classes are expected to perform some side effect that's either
visually useful, as in a live-plot, or recoverable via some matching
extension of AbstractReader.
"""
for k, v in m.items():
self.report(step, k, v)
def report(self, step: int, k: t.MetricKey, v: t.Metric) -> None:
"""Accepts a step (an ordered int referencing some timestep), a metric key and
a value, and persists the metric into some underlying store.
"""
return self.report_all(step, {k: v})
def reader(self) -> Optional[rb.AbstractReader]:
"""Returns an implementation of AbstractReader that can access the data in this
store.
Returns None by default; extending classes are encouraged, but not
required, to override.
"""
return None
def close(self) -> None:
"""Release any resources held open by this reporter instance."""
return None
# Combinators.
def with_prefix(self, prefix: t.Prefix):
"""Returns an instance of PrefixedReporter wrapping the current instance. This
reporter attaches the supplied prefix to every metric key it sees before
passing metrics on.
"""
return PrefixedReporter(self, prefix)
def with_suffix(self, suffix: t.Suffix):
"""Returns an instance of SuffixedReporter wrapping the current instance. This
reporter attaches the supplied suffix to every metric key it sees before
passing metrics on.
"""
return SuffixedReporter(self, suffix)
def plus(self, *others: "AbstractReporter"):
"""Returns an instance of MultiReporter wrapping the current instance. This
reporter broadcasts its inputs to this instance, plus any other reporters
supplied to this method, every time it sees a metric passed in via report
or report_all.
"""
return MultiReporter(self, *others)
def filter_step(self,
pred: Callable[[int], bool],
on_false: Optional["AbstractReporter"] = None):
"""Accepts a predicate function from step to boolean, and returns a reporter
that tests every step against the supplied function. If the function
returns true, metrics get passed on to this reporter; else, they get
filtered out.
If a reporter is supplied to on_false, any time the predicate returns false
    items are routed to that store instead of the base reporter.
"""
def step_pred(step, _):
return pred(step)
return FilterValuesReporter(self, step_pred, on_false_reporter=on_false)
def filter_values(self,
pred: Callable[[int, t.Metric], bool],
on_false: Optional["AbstractReporter"] = None):
""""Accepts a function from (step, metric) to boolean; every (step, metric)
pair passed to report and report_all are passed into this function. If the
predicate returns true, the metric is passed on; else, it's filtered.
"""
return FilterValuesReporter(self, pred, on_false_reporter=on_false)
def map_values(self, fn: Callable[[int, t.Metric], t.Metric]):
""""Accepts a function from (step, metric) to some new metric; every (step,
metric) pair passed to report and report_all are passed into this function,
and the result is passed down the chain to this, the calling reporter.
"""
return MapValuesReporter(self, fn)
def stepped(self, step_key: Optional[str] = None):
"""Returns a new reporter that modifies incoming metrics by wrapping them in a
dict of this form before passing them down to this instance of reporter:
{step_key: step, "value": metric_value}
where step_key is the supplied argument, and equal to "step" by default.
This is useful for keeping track of each metric's timestamp.
"""
return stepped_reporter(self, step_key=step_key)
def report_each_n(self, n: int):
"""Returns a new reporter that only reports every n steps; specifically, the
new reporter will only accept metrics where step % n == 0.
If n <= 1, this reporter, untouched, is returned directly.
"""
n = max(1, n)
if n > 1:
return self.filter_step(lambda step: step % n == 0)
else:
return self
def from_thunk(self, thunk: Callable[[], Dict[t.MetricKey, t.Metric]]):
"""Returns a new Reporter that passes all AbstractReporter methods through, but
adds a new method called "thunk()" that, when called, will pass the emitted
map of metric key to metric down to the underlying store.
thunk() returns the value emitted by the no-arg function passed here via
`thunk`.
"""
return ThunkReporter(self, thunk)
# Combinators
def stepped_reporter(base: AbstractReporter,
step_key: Optional[str] = None) -> AbstractReporter:
"""Returns a new reporter that modifies incoming metric by wrapping them in a
dict of this form before passing them down to base:
{step_key: step, "value": metric_value}
where step_key is the supplied argument, and equal to "step" by default. This
is useful for keeping track of each metric's timestamp.
"""
if step_key is None:
step_key = "step"
def _augment(step: int, v: Any) -> Dict[str, Any]:
return {step_key: step, "value": v}
return MapValuesReporter(base, _augment)
class FilterValuesReporter(AbstractReporter):
"""Reporter that filters incoming metrics by applying a predicate from (step,
t.Metric). If true, the reporter passes the result on to the underlying
reporter. Else, nothing.
Args:
base: Backing reporter. All report and report_all calls proxy here.
    predicate: function from (step, metric) to bool.
"""
def __init__(self,
base: AbstractReporter,
predicate: Callable[[int, t.Metric], bool],
on_false_reporter: Optional[AbstractReporter] = None):
self._base = base
self._pred = predicate
self._on_false_reporter = on_false_reporter
def report_param(self, k: str, v: str) -> None:
return self._base.report_param(k, v)
def report_params(self, m: Dict[str, str]) -> None:
return self._base.report_params(m)
def report_all(self, step: int, m: Dict[t.MetricKey, t.Metric]) -> None:
good = {k: v for k, v in m.items() if self._pred(step, v)}
bad = {k: v for k, v in m.items() if not self._pred(step, v)}
if good:
self._base.report_all(step, good)
if self._on_false_reporter and bad:
self._on_false_reporter.report_all(step, bad)
def report(self, step: int, k: t.MetricKey, v: t.Metric) -> None:
if self._pred(step, v):
self._base.report(step, k, v)
elif self._on_false_reporter:
self._on_false_reporter.report(step, k, v)
def reader(self) -> Optional[rb.AbstractReader]:
return self._base.reader()
def close(self) -> None:
self._base.close()
if self._on_false_reporter:
self._on_false_reporter.close()
class MapValuesReporter(AbstractReporter):
"""Reporter that modifies incoming metrics by applying a function from (step,
t.Metric) to a new metric before passing the result on to the underlying
reporter.
Args:
base: Backing reporter. All report and report_all calls proxy here.
fn: function from (step, metric) to metric.
"""
def __init__(self, base: AbstractReporter, fn: Callable[[int, t.Metric],
t.Metric]):
self._base = base
self._fn = fn
def report_param(self, k: str, v: str) -> None:
return self._base.report_param(k, v)
def report_params(self, m: Dict[str, str]) -> None:
return self._base.report_params(m)
def report_all(self, step: int, m: Dict[t.MetricKey, t.Metric]) -> None:
self._base.report_all(step, {k: self._fn(step, v) for k, v in m.items()})
def report(self, step: int, k: t.MetricKey, v: t.Metric) -> None:
self._base.report(step, k, self._fn(step, v))
def reader(self) -> Optional[rb.AbstractReader]:
return self._base.reader()
def close(self) -> None:
self._base.close()
class MultiReporter(AbstractReporter):
"""Reporter that broadcasts out metrics to all N reporters supplied to its
constructor.
Args:
reporters: instances of t.AbstractReporter that will receive all calls to
this instance's methods.
"""
def __init__(self, *reporters: AbstractReporter):
self._reporters = reporters
def report_all(self, step: int, m: Dict[t.MetricKey, t.Metric]) -> None:
for r in self._reporters:
r.report_all(step, m)
def report(self, step: int, k: t.MetricKey, v: t.Metric) -> None:
for r in self._reporters:
r.report(step, k, v)
def close(self) -> None:
for r in self._reporters:
r.close()
class PrefixedReporter(AbstractReporter):
"""Reporter that prepends a prefix to all keys before passing requests on to
the supplied backing reporter.
Args:
base: Backing reporter. All report and report_all calls proxy here.
prefix: the prefix to attach to all keys supplied to any method.
"""
def __init__(self, base: AbstractReporter, prefix: t.Prefix):
self._base = base
self._prefix = prefix
def report_param(self, k: str, v: str) -> None:
return self._base.report_param(k, v)
def report_params(self, m: Dict[str, str]) -> None:
return self._base.report_params(m)
def report_all(self, step: int, m: Dict[t.MetricKey, t.Metric]) -> None:
self._base.report_all(step, a.attach(m, self._prefix, prefix=True))
def report(self, step: int, k: t.MetricKey, v: t.Metric) -> None:
newk = a.attach_s(k, self._prefix, prefix=True)
self._base.report(step, newk, v)
def reader(self) -> Optional[rb.AbstractReader]:
return rb.PrefixedReader(self._base.reader(), self._prefix)
def close(self) -> None:
self._base.close()
class SuffixedReporter(AbstractReporter):
"""Reporter that prepends a prefix to all keys before passing requests on to
the supplied backing reporter.
Args:
base: Backing reporter. All report and report_all calls proxy here.
suffix: the suffix to attach to all keys supplied to any method.
"""
def __init__(self, base: AbstractReporter, suffix: t.Suffix):
self._base = base
self._suffix = suffix
def report_param(self, k: str, v: str) -> None:
return self._base.report_param(k, v)
def report_params(self, m: Dict[str, str]) -> None:
return self._base.report_params(m)
def report_all(self, step: int, m: Dict[t.MetricKey, t.Metric]) -> None:
self._base.report_all(step, a.attach(m, self._suffix, prefix=False))
def report(self, step: int, k: t.MetricKey, v: t.Metric) -> None:
newk = a.attach_s(k, self._suffix, prefix=False)
self._base.report(step, newk, v)
def reader(self) -> Optional[rb.AbstractReader]:
return rb.SuffixedReader(self._base.reader(), self._suffix)
def close(self) -> None:
self._base.close()
class ThunkReporter(AbstractReporter):
"""Reporter that passes all AbstractReporter methods through, but adds a new
method called "thunk()" that, when called, will pass the emitted map of
metric key to metric down to the underlying store.
Args:
base: Backing reporter. All report and report_all calls proxy here.
thunk: no-arg lambda that returns a metric dictionary.
"""
def __init__(self, base: AbstractReporter, thunk):
self._base = base
self._thunk = thunk
def thunk(self, step: int) -> None:
self.report_all(step, self._thunk())
def report_param(self, k: str, v: str) -> None:
return self._base.report_param(k, v)
def report_params(self, m: Dict[str, str]) -> None:
return self._base.report_params(m)
def report_all(self, step: int, m: Dict[t.MetricKey, t.Metric]) -> None:
self._base.report_all(step, m)
def report(self, step: int, k: t.MetricKey, v: t.Metric) -> None:
self._base.report(step, k, v)
def reader(self) -> Optional[rb.AbstractReader]:
return self._base.reader()
def close(self) -> None:
self._base.close()
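

# ------------------------------------------------------------------------------
# Hedged usage sketch (not part of the library API above): a minimal in-memory
# reporter composed with the combinators. It assumes t.MetricKey / t.Metric
# behave like plain strings and floats, which may not hold for every backend.
class _MemoryReporter(AbstractReporter):
  """Toy reporter that stores every reported (step, key, value) tuple."""

  def __init__(self):
    self.logged = []

  def report(self, step: int, k: t.MetricKey, v: t.Metric) -> None:
    self.logged.append((step, k, v))


if __name__ == "__main__":
  mem = _MemoryReporter()
  # Double every value on the way in, and only keep even steps.
  reporter = mem.map_values(lambda step, v: v * 2).filter_step(
      lambda step: step % 2 == 0)
  for step in range(4):
    reporter.report_all(step, {"loss": float(step)})
  print(mem.logged)  # expected: [(0, 'loss', 0.0), (2, 'loss', 4.0)]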
|
import FWCore.ParameterSet.Config as cms
import math
muonEfficiencyThresholds = [16, 20, 25]
# define binning for efficiency plots
# pt
effVsPtBins = list(range(0, 50, 2))  # explicit list so the += below also works in Python 3
effVsPtBins += range(50, 70, 5)
effVsPtBins += range(70, 100, 10)
effVsPtBins += range(100, 200, 25)
effVsPtBins += range(200, 300, 50)
effVsPtBins += range(300, 500, 100)
effVsPtBins.append(500)
# phi
nPhiBins = 24
phiMin = -math.pi
phiMax = math.pi
effVsPhiBins = [i*(phiMax-phiMin)/nPhiBins + phiMin for i in range(nPhiBins+1)]
# eta
nEtaBins = 50
etaMin = -2.5
etaMax = 2.5
effVsEtaBins = [i*(etaMax-etaMin)/nEtaBins + etaMin for i in range(nEtaBins+1)]
l1tMuonDQMOffline = cms.EDAnalyzer("L1TMuonDQMOffline",
histFolder = cms.untracked.string('L1T/L1TMuon'),
gmtPtCuts = cms.untracked.vint32(muonEfficiencyThresholds),
muonInputTag = cms.untracked.InputTag("muons"),
gmtInputTag = cms.untracked.InputTag("gmtStage2Digis","Muon"),
vtxInputTag = cms.untracked.InputTag("offlinePrimaryVertices"),
bsInputTag = cms.untracked.InputTag("offlineBeamSpot"),
triggerNames = cms.untracked.vstring(
"HLT_IsoMu18_v*",
"HLT_IsoMu20_v*",
"HLT_IsoMu22_v*",
"HLT_IsoMu24_v*",
"HLT_IsoMu27_v*",
"HLT_Mu30_v*",
"HLT_Mu40_v*"
),
trigInputTag = cms.untracked.InputTag("hltTriggerSummaryAOD", "", "HLT"),
trigProcess = cms.untracked.string("HLT"),
trigProcess_token = cms.untracked.InputTag("TriggerResults","","HLT"),
efficiencyVsPtBins = cms.untracked.vdouble(effVsPtBins),
efficiencyVsPhiBins = cms.untracked.vdouble(effVsPhiBins),
efficiencyVsEtaBins = cms.untracked.vdouble(effVsEtaBins),
verbose = cms.untracked.bool(False)
)
|
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: zeng_bin8888@163.com
create_dt: 2021/10/30 20:18
describe: several A-share market sensors, mainly intended as examples of how to write a sensor
strong individual-stock sensor
strong concept-sector sensor
strong industry sensor
market index sensor
"""
import os.path
import traceback
from datetime import timedelta, datetime
from collections import OrderedDict
import pandas as pd
from tqdm import tqdm
from ..utils import WordWriter
from ..data.ts_cache import TsDataCache
from ..signals import get_selector_signals
from ..objects import Operate, Signal, Factor, Event, Freq
from ..utils.kline_generator import KlineGeneratorD
from ..utils import io
from ..traders.daily import CzscDailyTrader
class StrongStocksSensor:
"""强势个股传感器
输入:市场个股全部行情、概念板块成分信息
输出:强势个股列表以及概念板块分布
"""
def __init__(self, dc: TsDataCache):
self.name = self.__class__.__name__
self.data = OrderedDict()
self.dc = dc
self.strong_event = Event(name="选股", operate=Operate.LO, factors=[
Factor(name="月线KDJ金叉_日线MACD强势", signals_all=[
Signal("月线_KDJ状态_任意_金叉_任意_任意_0"),
Signal('日线_MACD状态_任意_DIFF大于0_DEA大于0_柱子增大_0'),
Signal('日线_MA5状态_任意_收盘价在MA5上方_任意_任意_0'),
]),
Factor(name="月线KDJ金叉_日线潜在三买", signals_all=[
Signal("月线_KDJ状态_任意_金叉_任意_任意_0"),
Signal('日线_倒0笔_潜在三买_构成中枢_近3K在中枢上沿附近_近7K突破中枢GG_0'),
Signal('日线_MA5状态_任意_收盘价在MA5上方_任意_任意_0'),
]),
Factor(
name="月线KDJ金叉_周线三笔强势",
signals_all=[
Signal("月线_KDJ状态_任意_金叉_任意_任意_0"),
Signal('日线_MA5状态_任意_收盘价在MA5上方_任意_任意_0'),
],
signals_any=[
Signal('周线_倒1笔_三笔形态_向下不重合_任意_任意_0'),
Signal('周线_倒1笔_三笔形态_向下奔走型_任意_任意_0'),
Signal('周线_倒1笔_三笔形态_向下盘背_任意_任意_0'),
]
),
Factor(name="月线KDJ金叉_周线MACD强势", signals_all=[
Signal("月线_KDJ状态_任意_金叉_任意_任意_0"),
Signal('周线_MACD状态_任意_DIFF大于0_DEA大于0_柱子增大_0'),
Signal('日线_MA5状态_任意_收盘价在MA5上方_任意_任意_0'),
]),
])
def get_share_czsc_signals(self, ts_code: str, trade_date: datetime):
"""获取 ts_code 在 trade_date 的信号字典"""
start_date = trade_date - timedelta(days=5000)
bars = self.dc.pro_bar(ts_code=ts_code, start_date=start_date, end_date=trade_date,
freq='D', asset="E", raw_bar=True)
assert bars[-1].dt.date() == trade_date.date()
kgd = KlineGeneratorD(freqs=[Freq.D.value, Freq.W.value, Freq.M.value])
for bar in bars:
kgd.update(bar)
ct = CzscDailyTrader(kgd, get_selector_signals)
return dict(ct.s)
def process_one_day(self, trade_date: [datetime, str]):
if isinstance(trade_date, str):
trade_date = pd.to_datetime(trade_date)
dc = self.dc
stocks = dc.stock_basic()
stocks = stocks[stocks.list_date <= (trade_date - timedelta(days=365)).strftime('%Y%m%d')]
records = stocks.to_dict('records')
event = self.strong_event
results = []
for row in tqdm(records, desc=f"{trade_date} selector"):
symbol = row['ts_code']
try:
s = self.get_share_czsc_signals(symbol, trade_date)
m, f = event.is_match(s)
if m:
dt_fmt = "%Y%m%d"
res = {
'symbol': symbol,
'name': row['name'],
'reason': f,
'end_dt': trade_date.strftime(dt_fmt),
'F10': f"http://basic.10jqka.com.cn/{symbol.split('.')[0]}",
'Kline': f"https://finance.sina.com.cn/realstock/company/{symbol[-2:].lower()}{symbol[:6]}/nc.shtml"
}
results.append(res)
print(res)
except:
print("fail on {}".format(symbol))
traceback.print_exc()
return results
def get_share_hist_signals(self, ts_code: str, trade_date: datetime):
"""获取单个标的全部历史信号"""
file_pkl = os.path.join(self.dc.cache_path, f"{ts_code}_all_hist_signals.pkl")
if os.path.exists(file_pkl):
all_hist_signals = io.read_pkl(file_pkl)
else:
start_date = pd.to_datetime(self.dc.sdt) - timedelta(days=1000)
bars = self.dc.pro_bar(ts_code=ts_code, start_date=start_date, end_date=self.dc.edt,
freq='D', asset="E", raw_bar=True)
kgd = KlineGeneratorD(freqs=[Freq.D.value, Freq.W.value, Freq.M.value])
for bar in bars[:250]:
kgd.update(bar)
ct = CzscDailyTrader(kgd, get_selector_signals)
all_hist_signals = {}
for bar in tqdm(bars[250:], desc=f"{ts_code} all hist signals"):
ct.update(bar)
all_hist_signals[bar.dt.strftime('%Y%m%d')] = dict(ct.s)
io.save_pkl(all_hist_signals, file_pkl)
return all_hist_signals.get(trade_date.strftime("%Y%m%d"), None)
def get_share_hist_returns(self, ts_code: str, trade_date: datetime):
"""获取单个标 trade_date 后的 n bar returns"""
df = self.dc.pro_bar(ts_code=ts_code, start_date=trade_date, end_date=trade_date,
freq='D', asset="E", raw_bar=False)
if df.empty:
return None
else:
assert len(df) == 1
return df.iloc[0].to_dict()
def validate(self, sdt='20200101', edt='20201231'):
"""验证传感器在一段时间内的表现
:param sdt: 开始时间
:param edt: 结束时间
:return:
"""
stocks = self.dc.stock_basic()
trade_cal = self.dc.trade_cal()
trade_cal = trade_cal[(trade_cal.cal_date >= sdt) & (trade_cal.cal_date <= edt) & trade_cal.is_open]
trade_dates = trade_cal.cal_date.to_list()
event = self.strong_event
results = []
for trade_date in trade_dates:
trade_date = pd.to_datetime(trade_date)
min_list_date = (trade_date - timedelta(days=365)).strftime('%Y%m%d')
rows = stocks[stocks.list_date <= min_list_date].to_dict('records')
for row in tqdm(rows, desc=trade_date.strftime('%Y%m%d')):
ts_code = row['ts_code']
try:
s = self.get_share_hist_signals(ts_code, trade_date)
if not s:
continue
n = self.get_share_hist_returns(ts_code, trade_date)
m, f = event.is_match(s)
if m:
res = {
'symbol': ts_code,
'name': row['name'],
'reason': f,
'trade_date': trade_date.strftime("%Y%m%d"),
}
res.update(n)
results.append(res)
print(res)
except:
traceback.print_exc()
df = pd.DataFrame(results)
df_m = df.groupby('trade_date').apply(lambda x: x[['n1b', 'n2b', 'n3b', 'n5b', 'n10b', 'n20b']].mean())
return df, df_m
def report(self, writer: WordWriter):
"""撰写报告"""
raise NotImplementedError
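

# ------------------------------------------------------------------------------
# Hedged usage sketch (not part of the module): run the strong-stock selection
# for a single trading day. The TsDataCache constructor arguments shown here
# (data_path, sdt, edt) are assumptions; check czsc.data.ts_cache for the real
# signature and make sure a Tushare token is configured before running.
if __name__ == '__main__':
    dc = TsDataCache(data_path='.', sdt='20200101', edt='20211231')  # assumed args
    sensor = StrongStocksSensor(dc)
    picks = sensor.process_one_day('20211105')  # hypothetical trading day
    print(pd.DataFrame(picks))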
|
import numpy as np
import math
import random
import matplotlib.pyplot as plt
from scipy import stats
# declare number of particles used for object track estimation
particles = 100
# NOTE: observations, sigmax, ax, m and x were defined elsewhere in the original
# script; the values below are assumed placeholders so this snippet runs standalone
# (latent path x, observed through m = ax * exp(x) + noise).
observations = 50
sigmax = 0.1
ax = 1.0
x = np.cumsum(np.random.normal(0, sigmax, observations))  # assumed latent path
m = ax * np.exp(x) + np.random.normal(0, 0.1, observations)  # assumed noisy observations
# declare arrays
likelihood = np.empty(particles)  # likelihood of the estimate provided by each particle position
estimated = np.empty(observations)  # stores the estimated path of the particle
# initial particle position
particle_estimate = np.random.uniform(-0.5, 1, (particles))
# particle filter
i = 0
while i <observations:
particle_estimate = particle_estimate + np.random.normal(0,5*sigmax,(particles)) # perturb previous particle position for fresh estimate
j = 0
while j < np.shape(particle_estimate)[0]:
likelihood[j] = math.exp(-5*((m[i]-ax*math.exp(particle_estimate[j]))**2)) # calculate likelihood based on estimated particle position and observation
j = j+1
likelihood = likelihood/np.sum(likelihood) # normalize likelihood
custm = stats.rv_discrete(name='custm', values=(particle_estimate*10000000, likelihood)) # generate distribution from likelihood
particle_estimate = custm.rvs(size=particles)/10000000 # resample particles using generated likelihood
estimated[i] = np.mean(particle_estimate) # estimate particle location
i= i+1
# plotting
plt.plot(x) # original position
plt.plot(estimated) # estimated position
plt.show()
|
#!/usr/bin/python
import math
import numpy as np
import pandas as pd
# random generators
import random
from numpy.random import default_rng
from scipy.stats import t # student distribution
# matplotlib
import matplotlib.pyplot as plt
#-------------------------------------------------------------------------------
def print_ascii_histograms(selected_atoms, atoms, positives, negatives):
pos_histogram = [0 for _ in range(len(selected_atoms)+1)]
neg_histogram = [0 for _ in range(len(selected_atoms)+1)]
for pos in positives:
score = 0
for ind_atom in selected_atoms:
atom = atoms[ind_atom]
if pos[atom[0]] != atom[1]:
score += 1
pos_histogram[score] += 1
for neg in negatives:
score = 0
for ind_atom in selected_atoms:
atom = atoms[ind_atom]
if neg[atom[0]] != atom[1]:
score += 1
neg_histogram[score] += 1
for score in range(len(pos_histogram)):
pos_histogram[score] /= float(len(positives))
for score in range(len(neg_histogram)):
neg_histogram[score] /= float(len(negatives))
# print('positive histogram: ')
# print(pos_histogram)
#
# print('negative histogram:')
# print(neg_histogram)
height = 10
output = ''
for y in range(height, 0, -1):
for score in pos_histogram:
if score*height*2 >= y:
output += '*'
else:
output += ' '
output += '\n'
for score in pos_histogram:
output += '.'
print(output)
output = ''
for y in range(height, 0, -1):
for score in neg_histogram:
if score*height*2 >= y:
output += '*'
else:
output += ' '
output += '\n'
for score in neg_histogram:
output += '.'
print(output)
return
#-------------------------------------------------------------------------------
def compute_atom_scores(nVar, sample_score, sample_uniform, nPos, nNeg):
a = nNeg
b = -nPos
atoms = [(ind_var, 0) for ind_var in range(nVar)]
atoms += [(ind_var, 1) for ind_var in range(nVar)]
atom_scores = []
for ind_atom in range(len(sample_score)):
c = sample_score[ind_atom]*nPos*nNeg
if c < 0:
p1 = (-c/nPos, 0)
p2 = (nPos, nPos+c/nNeg)
else:
p1 = (0, c/nPos)
p2 = (nNeg-c/nPos, nNeg)
v = (p2[0]-p1[0], p2[1]-p1[1])
lateral = sample_uniform[ind_atom]
p_star = (p1[0]+v[0]*lateral, p1[1]+v[1]*lateral)
p_star = (math.floor(p_star[0]), math.floor(p_star[1]))
atom_scores.append(p_star)
return atoms, atom_scores
#-------------------------------------------------------------------------------
def generate_positives_negatives(atom_scores, nVar, nPos, nNeg):
positives = [ [0 for _ in range(nVar)] for _ in range(nPos) ]
negatives = [ [0 for _ in range(nVar)] for _ in range(nNeg) ]
for ind_atom in range(nVar):
score = atom_scores[ind_atom]
# positive examples
pos_indexes = [ind for ind in range(nPos)]
np.random.shuffle(pos_indexes)
for ind_pos in range(nPos):
if ind_pos < score[0]:
positives[pos_indexes[ind_pos]][ind_atom] = 1
else:
positives[pos_indexes[ind_pos]][ind_atom] = 0
# negative examples
neg_indexes = [ind for ind in range(nNeg)]
np.random.shuffle(neg_indexes)
for ind_neg in range(nNeg):
if ind_neg < score[1]:
negatives[neg_indexes[ind_neg]][ind_atom] = 1
else:
negatives[neg_indexes[ind_neg]][ind_atom] = 0
return positives, negatives
#-------------------------------------------------------------------------------
def generate_dataset(nVariables, nPositives, nNegatives, filename):
    nPos = nPositives
    nNeg = nNegatives
    nVar = nVariables
nAtoms = nVar*2
random_seed = 46
# random_seed = 52 # worst dataset
random.seed(random_seed)
np.random.seed(random_seed)
# sampling over the normal distribution
rng = default_rng(random_seed)
# mu, sigma = 0, 0.1 # mean and standard deviation
# sample_normal = rng.normal(mu, sigma, nAtoms)
sample_uniform = rng.uniform(0, 1, nVar*2)
# generate the atom scores between 1 and -1 (generate only the score of one of the two atoms)
# (the other one is symmetrical)
df = 10
sample_score = t.rvs(df, size=nVar)
# normal is normalized
max_normal = np.max(sample_score)
min_normal = np.min(sample_score)
# print(min_normal, max_normal)
max_abs = max(max_normal, -min_normal) + 0.2
for ind in range(len(sample_score)):
sample_score[ind] = sample_score[ind]/max_abs
sample_score[ind] *= 0.80
# plot the distributions of the generated values
# fig, axs = plt.subplots(2)
# axs[0].hist(sample_score, edgecolor='k', bins=100)
# axs[1].hist(sample_uniform, edgecolor='k', bins=100)
# generate the atoms and the positive and negative errors of the atoms based on their scores
atoms, atom_scores = compute_atom_scores(nVar, sample_score, sample_uniform, nPos, nNeg)
# generating positive and negative examples
positives, negatives = generate_positives_negatives(atom_scores, nVar, nPos, nNeg)
# create the symmetrical atoms
atom_scores_sym = []
for score in atom_scores:
atom_scores_sym.append( (nPos-score[0], nNeg-score[1]) )
atom_scores += atom_scores_sym
# plot the atom errors
fig,ax = plt.subplots()
ax.scatter([elt[0] for elt in atom_scores], [elt[1] for elt in atom_scores], marker='x')
ax.plot([0, nPos], [0, nNeg], color='k', label='score=0.0')
ax.set_xlim([0, nPos])
ax.set_ylim([0, nNeg])
ax.legend(loc='lower right')
ax.set_xlabel('atom positive error')
ax.set_ylabel('atom negative error')
ax.set_title('positive and negative errors for all the atoms')
ax.set_aspect('equal')
plt.show()
# compute the best 20 atoms to create a rule
ind_atoms_sorted = [ind for ind in range(len(atom_scores))]
score = [-nNeg*error[0] +nPos*error[1] for error in atom_scores]
ind_atoms_sorted.sort(key = lambda ind: score[ind], reverse=True)
selected_atoms = ind_atoms_sorted[:20]
# plot the scores of the atoms as colors
# fig, ax = plt.subplots()
# ax.scatter([atom_scores[ind][0] for ind in ind_atoms_sorted], [atom_scores[ind][1] for ind in ind_atoms_sorted], c=[score[ind] for ind in ind_atoms_sorted], marker='x')
# ax.set_xlim([0, nPos])
# ax.set_ylim([0, nNeg])
# ax.plot([0, nPos], [0, nNeg], color='red')
# plot histograms of the rule with the best 20 atoms
# print_ascii_histograms(selected_atoms, atoms, positives, negatives)
# create a csv file with the matrix (ordered positive and negative examples)
matrix = positives + negatives
variables = ['v_'+str(ind) for ind in range(nVar)]
examples = ['s_' + str(ind) for ind in range(nPos+nNeg)]
# exportation into a dataframe
# global dataframe
dataframe = pd.DataFrame(matrix, columns = variables, index = examples)
dataframe.to_csv(filename)
return
# generate_dataset(10000, 1000, 1000, '')
|
import cv2
import numpy as np
# Load image, create mask, grayscale, and Otsu's threshold
image = cv2.imread('2.png')
mask = np.zeros(image.shape, dtype=np.uint8)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Perform morph operations
open_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, open_kernel, iterations=1)
close_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9,9))
close = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, close_kernel, iterations=3)
# Find horizontal sections and draw rectangle on mask
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25,3))
detect_horizontal = cv2.morphologyEx(close, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
cnts = cv2.findContours(detect_horizontal, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
x,y,w,h = cv2.boundingRect(c)
cv2.rectangle(mask, (x, y), (x + w, y + h), (255,255,255), -1)
cv2.rectangle(mask, (x, y), (x + w, y + h), (255,255,255), 2)
# Find vertical sections and draw rectangle on mask
vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,25))
detect_vertical = cv2.morphologyEx(close, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
cnts = cv2.findContours(detect_vertical, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
x,y,w,h = cv2.boundingRect(c)
cv2.rectangle(mask, (x, y), (x + w, y + h), (255,255,255), -1)
cv2.rectangle(mask, (x, y), (x + w, y + h), (255,255,255), 2)
# Color mask onto original image
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
image[mask==255] = [0,0,0]
cv2.imshow('opening', opening)
cv2.imshow('close', close)
cv2.imshow('image', image)
cv2.imshow('thresh', thresh)
cv2.imshow('mask', mask)
cv2.waitKey()
|
import yaml
import pandas as pd
import numpy as np
import os
this_dir, this_filename = os.path.split(os.path.abspath(__file__))
DATA_PATH = os.path.join(this_dir, '..', "data")
def load_mismatch_scores(name_or_path):
""" Loads a formatted penalty matrix.
Parameters
----------
name_or_path : str
Specific named reference or path/to/yaml definition
Returns
-------
matrix : pd.DataFrame
Scores for specific binding pairs
pams : pd.Series
Encoding for the second 2 NT in the PAM
"""
if name_or_path == 'CFD':
path = os.path.join(DATA_PATH, 'models', 'CFD.yaml')
else:
path = name_or_path
with open(path) as handle:
        obj = next(yaml.load_all(handle, Loader=yaml.SafeLoader))
matrix = pd.DataFrame(obj['scores'])
matrix = matrix.reindex(columns=sorted(matrix.columns))
pams = pd.Series(obj['pams'])
pams = pams.reindex(sorted(pams.index))
return matrix, pams
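

# Hedged usage sketch: load the bundled CFD penalty matrix. This assumes the
# package data file data/models/CFD.yaml ships next to this module, as implied
# by DATA_PATH above.
if __name__ == '__main__':
    matrix, pams = load_mismatch_scores('CFD')
    print(matrix.shape)
    print(pams.head())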
|
# Copyright (C) 2015 SlimRoms Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
def get_routes(base_module_name, base_module_file, app_name=''):
"""get all the routes within the apps director, adding the required
base urls.
Each python file in the apps directory is assumed to require a route
adding. Rather than mapping each route out in one place, this will
allow an app to have their own area, and this routine will prepend
the apps route with a base route.
Example:
apps/example.py contains a request handler called simple.
A global variable must be created, called app_router.
To this, an iterable must be set:
app_router = (
('path', ExampleHandler),
)
add_routes will detect this, and add it to routers, which is then
returned after everything is done.
:param base_module_name: __name__ of the calling module
    :param base_module_file: __file__ of the calling module
    :param app_name: optional app name; when given, only routes for that app are returned
    """
app_dir = os.path.join(os.path.dirname(base_module_file), 'apps')
# Don't bother checking here. let it fall through
routers = []
for py_file in os.listdir(app_dir):
if (py_file.startswith('_') or not py_file.endswith('.py') or
os.path.isdir(os.path.join(app_dir, py_file))):
continue
name = py_file[:-3]
if app_name and name.lower() != app_name.lower():
continue
app = importlib.import_module('.apps.%s' % name, base_module_name)
if not hasattr(app, 'app_router'):
continue
app_router = getattr(app, 'app_router', [])
for router in app_router:
r = list(router)
r[0] = '/%s/%s' % (name.lower(), router[0].lstrip('/'))
routers.append(tuple(r))
        if app_name:  # app_name was given, so this was the match; stop looking
break
return routers
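

# ------------------------------------------------------------------------------
# Hedged usage sketch: assuming an apps/ directory sits next to the calling
# module and contains e.g. apps/example.py with
#     app_router = (('top', ExampleHandler),)
# the call below would return routes such as ('/example/top', ExampleHandler).
if __name__ == '__main__':
    print(get_routes(__name__, __file__))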
|
# version
VERSION_NAME = 'versionName'
VERSION_CODE = 'versionCode'
|
from abc import ABC
import gym
from gym import spaces
from gym.utils import seeding
from gym.envs.registration import register
import numpy as np
import heapq
import time
import random
import json
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from common import sender_obs, config
from common.simple_arg_parse import arg_or_default
from loguru import logger
MAX_RATE = 1000
MIN_RATE = 40
REWARD_SCALE = 0.001
MAX_STEPS = 400
EVENT_TYPE_SEND = 'S'
EVENT_TYPE_ACK = 'A'
BYTES_PER_PACKET = 1500
LATENCY_PENALTY = 1.0
LOSS_PENALTY = 1.0
class Link:
def __init__(self, bandwidth, delay, queue_size, loss_rate):
self.bw = float(bandwidth)
self.dl = delay
self.lr = loss_rate
self.queue_delay = 0.0
self.queue_delay_update_time = 0.0
self.max_queue_delay = queue_size / self.bw
# logger.info(f'bandwidth={bandwidth}|delay={delay}|max_queue_delay={self.max_queue_delay}|loss_rate={loss_rate}')
def get_cur_queue_delay(self, event_time):
return max(0.0, self.queue_delay - (event_time - self.queue_delay_update_time))
def get_cur_latency(self, event_time):
return self.dl + self.get_cur_queue_delay(event_time)
def packet_enters_link(self, event_time):
if (random.random() < self.lr):
return False
self.queue_delay = self.get_cur_queue_delay(event_time)
self.queue_delay_update_time = event_time
extra_delay = 1.0 / self.bw
# print("Extra delay: %f, Current delay: %f, Max delay: %f" % (extra_delay, self.queue_delay, self.max_queue_delay))
if extra_delay + self.queue_delay > self.max_queue_delay:
# print("\tDrop!")
return False
self.queue_delay += extra_delay
# print("\tNew delay = %f" % self.queue_delay)
return True
def print_debug(self):
print("Link:")
print("Bandwidth: %f" % self.bw)
print("Delay: %f" % self.dl)
print("Queue Delay: %f" % self.queue_delay)
print("Max Queue Delay: %f" % self.max_queue_delay)
print("One Packet Queue Delay: %f" % (1.0 / self.bw))
def reset(self):
self.queue_delay = 0.0
self.queue_delay_update_time = 0.0
class Network:
def __init__(self, senders, links, throughput_coef: float, latency_coef: float, loss_coef: float, special_loss):
self.q = []
self.cur_time = 0.0
self.senders = senders
self.links = links
self.throughput_coef = throughput_coef
self.latency_coef = latency_coef
self.loss_coef = loss_coef
self.special_loss = special_loss
self.queue_initial_packets()
def queue_initial_packets(self):
for sender in self.senders:
sender.register_network(self)
sender.reset_obs()
heapq.heappush(self.q, (1.0 / sender.rate, sender, EVENT_TYPE_SEND, 0, 0.0, False))
def run_for_dur(self, dur):
for sender in self.senders:
sender.reset_obs()
end_time = self.cur_time + dur
while self.cur_time < end_time:
event_time, sender, event_type, next_hop, cur_latency, dropped = heapq.heappop(self.q)
self.cur_time = event_time
new_event_time = event_time
new_event_type = event_type
new_next_hop = next_hop
new_latency = cur_latency
new_dropped = dropped
push_new_event = False
if event_type == EVENT_TYPE_ACK:
if next_hop == len(sender.path):
if dropped:
sender.on_packet_lost()
else:
sender.on_packet_acked(cur_latency)
else:
new_next_hop = next_hop + 1
link_latency = sender.path[next_hop].get_cur_latency(self.cur_time)
new_latency += link_latency
new_event_time += link_latency
push_new_event = True
if event_type == EVENT_TYPE_SEND:
if next_hop == 0:
# print("Packet sent at time %f" % self.cur_time)
sender.on_packet_sent()
push_new_event = True
heapq.heappush(self.q,
(self.cur_time + (1.0 / sender.rate), sender, EVENT_TYPE_SEND, 0, 0.0, False))
else:
push_new_event = True
if next_hop == sender.dest:
new_event_type = EVENT_TYPE_ACK
new_next_hop = next_hop + 1
link_latency = sender.path[next_hop].get_cur_latency(self.cur_time)
new_latency += link_latency
new_event_time += link_latency
new_dropped = not sender.path[next_hop].packet_enters_link(self.cur_time)
if push_new_event:
heapq.heappush(self.q, (new_event_time, sender, new_event_type, new_next_hop, new_latency, new_dropped))
throughputs = []
latencys = []
losses = []
for sender in self.senders:
sender_mi = sender.get_run_data()
throughputs.append(sender_mi.get("recv rate"))
latencys.append(sender_mi.get("avg latency"))
losses.append(sender_mi.get("loss ratio"))
if self.special_loss is None:
reward = self.throughput_coef * sum(throughputs) / (8 * BYTES_PER_PACKET) + \
self.latency_coef * sum(latencys) + \
self.loss_coef * sum(losses)
elif self.special_loss == 'fairness1':
higher_throughput = throughputs[0] if throughputs[0] > throughputs[1] else throughputs[1]
lower_throughput = throughputs[0] if throughputs[0] < throughputs[1] else throughputs[1]
reward = self.throughput_coef * (sum(throughputs) - (higher_throughput-lower_throughput)) / (8 * BYTES_PER_PACKET) + \
self.latency_coef * sum(latencys) + \
self.loss_coef * sum(losses)
elif self.special_loss == 'fairness2':
higher_throughput = throughputs[0] if throughputs[0] > throughputs[1] else throughputs[1]
lower_throughput = throughputs[0] if throughputs[0] < throughputs[1] else throughputs[1]
if higher_throughput == lower_throughput:
reward = self.throughput_coef * (higher_throughput + lower_throughput) / (8 * BYTES_PER_PACKET) + \
self.latency_coef * sum(latencys) + \
self.loss_coef * sum(losses)
else:
reward = self.throughput_coef * ((higher_throughput + lower_throughput) /
(higher_throughput - lower_throughput)) / (8 * BYTES_PER_PACKET) + \
self.latency_coef * sum(latencys) + \
self.loss_coef * sum(losses)
return reward * REWARD_SCALE
class Sender:
def __init__(self, rate, path, dest, features, history_len=10):
self.id = Sender._get_next_id()
self.starting_rate = rate
self.rate = rate
self.sent = 0
self.acked = 0
self.lost = 0
self.bytes_in_flight = 0
self.min_latency = None
self.rtt_samples = []
self.sample_time = []
self.net = None
self.path = path
self.dest = dest
self.history_len = history_len
self.features = features
self.history = sender_obs.SenderHistory(self.history_len,
self.features, self.id)
logger.info(f'Sender id={self.id}|starting_rate={self.starting_rate}|path={path}|dest={dest}')
_next_id = 0
@staticmethod
def _get_next_id():
id = Sender._next_id
Sender._next_id += 1
return id
def apply_rate_delta(self, delta):
delta *= config.DELTA_SCALE
if delta >= 0.0:
self.set_rate(self.rate * (1.0 + delta))
else:
self.set_rate(self.rate / (1.0 - delta))
def register_network(self, net):
self.net = net
def on_packet_sent(self):
self.sent += 1
self.bytes_in_flight += BYTES_PER_PACKET
def on_packet_acked(self, rtt):
self.acked += 1
self.rtt_samples.append(rtt)
if (self.min_latency is None) or (rtt < self.min_latency):
self.min_latency = rtt
self.bytes_in_flight -= BYTES_PER_PACKET
def on_packet_lost(self):
self.lost += 1
self.bytes_in_flight -= BYTES_PER_PACKET
def set_rate(self, new_rate):
self.rate = new_rate
if self.rate > MAX_RATE:
self.rate = MAX_RATE
if self.rate < MIN_RATE:
self.rate = MIN_RATE
def record_run(self):
smi = self.get_run_data()
self.history.step(smi)
def get_obs(self):
return self.history.as_array()
def get_run_data(self):
obs_end_time = self.net.cur_time
return sender_obs.SenderMonitorInterval(
self.id,
bytes_sent=self.sent * BYTES_PER_PACKET,
bytes_acked=self.acked * BYTES_PER_PACKET,
bytes_lost=self.lost * BYTES_PER_PACKET,
send_start=self.obs_start_time,
send_end=obs_end_time,
recv_start=self.obs_start_time,
recv_end=obs_end_time,
rtt_samples=self.rtt_samples,
packet_size=BYTES_PER_PACKET
)
def reset_obs(self):
self.sent = 0
self.acked = 0
self.lost = 0
self.rtt_samples = []
self.obs_start_time = self.net.cur_time
def print_debug(self):
print("Sender:")
print("Obs: %s" % str(self.get_obs()))
print("Rate: %f" % self.rate)
print("Sent: %d" % self.sent)
print("Acked: %d" % self.acked)
print("Lost: %d" % self.lost)
print("Min Latency: %s" % str(self.min_latency))
def reset(self):
print(f"Resetting sender {self.id}!")
self.rate = self.starting_rate
self.bytes_in_flight = 0
self.min_latency = None
self.reset_obs()
self.history = sender_obs.SenderHistory(self.history_len, self.features, self.id)
def __gt__(self, sender2):
return False
class SimulatedNetworkEnv(gym.Env, ABC):
def __init__(
self,
bw=None, min_bw=100, max_bw=500,
latency=None, min_latency=0.05, max_latency=0.5,
queue=None, min_queue=0, max_queue=8,
loss=None, min_loss=0.0, max_loss=0.05,
min_send_rate_factor=0.3, max_send_rate_factor=1.5,
throughput_coef=10.0, latency_coef= -1e3, loss_coef= -2e3,
mi_len=None, episode_len=MAX_STEPS,
special_loss=None,
history_len=arg_or_default(
"--history-len", default=10
),
features=arg_or_default(
"--input-features",
default="sent latency inflation,latency ratio,send ratio"
)
):
if bw is None:
self.min_bw, self.max_bw = min_bw, max_bw
else:
self.min_bw, self.max_bw = bw, bw
if latency is None:
self.min_lat, self.max_lat = min_latency, max_latency
else:
self.min_lat, self.max_lat = latency, latency
if queue is None:
self.min_queue, self.max_queue = min_queue, max_queue
else:
self.min_queue, self.max_queue = queue, queue
if loss is None:
self.min_loss, self.max_loss = min_loss, max_loss
else:
self.min_loss, self.max_loss = loss, loss
self.min_send_rate_factor = min_send_rate_factor
self.max_send_rate_factor = max_send_rate_factor
self.throughput_coef = throughput_coef
self.latency_coef = latency_coef
self.loss_coef = loss_coef
self.special_loss = special_loss
self.mi_len = mi_len
self.episode_len = episode_len
self.reward_sum = 0.0
self.reward_ewma = 0.0
self.episodes_run = -1
self.done = True
self.history_len = history_len
self.features = features.split(",")
self.action_space = spaces.Box(np.array([-1e12, -1e12]), np.array([1e12, 1e12]), dtype=np.float32)
single_obs_min_vec = sender_obs.get_min_obs_vector(self.features)
single_obs_max_vec = sender_obs.get_max_obs_vector(self.features)
self.observation_space = spaces.Box(
np.tile(single_obs_min_vec, self.history_len * 2), # 2 for two senders
np.tile(single_obs_max_vec, self.history_len * 2), # 2 for two senders
dtype=np.float32
)
def _get_all_sender_obs(self):
sender_obs = []
for i in range(0, 2):
sender_obs.append(self.senders[i].get_obs())
sender_obs = np.stack(sender_obs)
sender_obs = np.array(sender_obs).reshape(-1, )
return sender_obs
def step(self, action):
for i in range(0, 2):
self.senders[i].apply_rate_delta(action[i])
reward = self.net.run_for_dur(self.mi_len)
for sender in self.senders:
sender.record_run()
self.steps_taken += 1
event = {"Name": "Step", "Time": self.steps_taken, "Reward": reward}
for i, sender in enumerate(self.senders):
sender_mi = sender.get_run_data()
event[f"Send Rate {i}"] = sender_mi.get("send rate")
event[f"Throughput {i}"] = sender_mi.get("recv rate")
event[f"Latency {i}"] = sender_mi.get("avg latency")
event[f"Loss Rate {i}"] = sender_mi.get("loss ratio")
event[f"Latency Inflation {i}"] = sender_mi.get("sent latency inflation")
event[f"Latency Ratio {i}"] = sender_mi.get("latency ratio")
event[f"Send Ratio {i}"] = sender_mi.get("send ratio")
self.event_record["Events"].append(event)
self.reward_sum += reward
sender_obs = self._get_all_sender_obs()
self.done = self.steps_taken >= self.episode_len
return sender_obs, reward, self.done, {}
def print_debug(self):
print("---Link Debug---")
for link in self.links:
link.print_debug()
print("---Sender Debug---")
for sender in self.senders:
sender.print_debug()
def create_new_links_and_senders(self):
bw = random.uniform(self.min_bw, self.max_bw)
lat = random.uniform(self.min_lat, self.max_lat)
queue = 1 + int(np.exp(random.uniform(self.min_queue, self.max_queue)))
loss = random.uniform(self.min_loss, self.max_loss)
logger.info(f'bw={bw}|lat={lat}|queue={queue}|loss={loss}')
self.links = [
Link(bw, lat, queue, loss),
Link(bw, lat, queue, loss),
]
self.senders = [
Sender(random.uniform(self.min_send_rate_factor, self.max_send_rate_factor) * bw,
[self.links[0], self.links[1]], 0, self.features, history_len=self.history_len),
Sender(random.uniform(self.min_send_rate_factor, self.max_send_rate_factor) * bw,
[self.links[0], self.links[1]], 0, self.features, history_len=self.history_len)
]
if self.mi_len is None:
self.mi_len = 3 * lat
def reset(self):
# if not self.done:
        # raise ValueError('Agent called reset before the environment is done')  # TODO: due to the agent exiting a training iteration without finishing the episode
self.done = False
self.create_new_links_and_senders()
self.net = Network(self.senders, self.links, throughput_coef=self.throughput_coef, latency_coef=self.latency_coef, loss_coef=self.loss_coef, special_loss=self.special_loss)
self.steps_taken = 0
self.episodes_run += 1
if self.episodes_run > 0 and self.episodes_run % 100 == 0:
self.dump_events_to_file("pcc_env_log_run_%d.json" % self.episodes_run)
self.event_record = {"Events": []}
self.net.run_for_dur(self.mi_len)
self.reward_ewma *= 0.99
self.reward_ewma += 0.01 * self.reward_sum
logger.info("Reward: %0.2f, Ewma Reward: %0.2f" % (self.reward_sum, self.reward_ewma))
self.reward_sum = 0.0
return self._get_all_sender_obs()
def dump_events_to_file(self, filename):
logger.info(f'dump events to file: {filename}')
with open(filename, 'w') as f:
json.dump(self.event_record, f, indent=4)
# Default mode (legacy)
register(id='PccNs-v10', entry_point='network_sim_2_senders:SimulatedNetworkEnv')
# Zero random loss set-up
register(id='PccNs-v11', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0})
register(id='PccNs_eval-v11', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'mi_len': 10.0})
# Zero congestive loss set-up (very large queue)
register(id='PccNs-v12', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'queue': 8})
# Starting sending rate always equal to half the link bw
register(id='PccNs-v13', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'min_send_rate_factor': 0.5,
'max_send_rate_factor': 0.5})
# Same as PccNs-v13, but with no random loss at all
register(id='PccNs-v14', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'min_send_rate_factor': 0.5,
'max_send_rate_factor': 0.5})
# Same as PccNs-v14, but specifically with low Latency reward.
register(id='PccNs-v15', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'min_send_rate_factor': 0.5,
'max_send_rate_factor': 0.5,
'throughput_coef': 2.0,
'latency_coef': -1e3,
'loss_coef': -2e3})
# Same as PccNs-v14, but with long Monitor Intervals of 3[s] (regular mode is 3 * link latency)
register(id='PccNs-v16', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'min_send_rate_factor': 0.5,
'max_send_rate_factor': 0.5,
'mi_len': 3.0})
# Same as PccNs-v14, but with long episodes (800 MIs)
register(id='PccNs-v17', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'min_send_rate_factor': 0.5,
'max_send_rate_factor': 0.5,
'episode_len': 800})
# Same as PccNs-v11, but specifically with low Latency reward.
register(id='PccNs-v18', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'throughput_coef': 2.0,
'latency_coef': -1e3,
'loss_coef': -2e3})
register(id='PccNs_eval-v18', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'throughput_coef': 2.0,
'latency_coef': -1e3,
'loss_coef': -2e3,
'mi_len': 10.0})
# Same as PccNs-v11, but with a special loss to enforce fairness between agents. The idea is to penalize the reward by the difference between the throughputs.
register(id='PccNs-v19', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'special_loss': 'fairness1'})
register(id='PccNs_eval-v19', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'special_loss': 'fairness1',
'mi_len': 10.0})
# Same as PccNs-v18, but with special loss to enforce fairness between agents
register(id='PccNs-v20', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'throughput_coef': 2.0,
'latency_coef': -1e3,
'loss_coef': -2e3,
'special_loss': 'fairness1'})
register(id='PccNs_eval-v20', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'throughput_coef': 2.0,
'latency_coef': -1e3,
'loss_coef': -2e3,
'special_loss': 'fairness1',
'mi_len': 10.0})
# Same as PccNs-v19, but with another special loss, designed to keep x-y in scale with x+y
register(id='PccNs-v21', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'special_loss': 'fairness2'})
register(id='PccNs_eval-v21', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'special_loss': 'fairness2',
'mi_len': 10.0})
# Same as PccNs-v20, but with another special loss, designed to keep x-y in scale with x+y
register(id='PccNs-v22', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'throughput_coef': 2.0,
'latency_coef': -1e3,
'loss_coef': -2e3,
'special_loss': 'fairness2'})
register(id='PccNs_eval-v22', entry_point='network_sim_2_senders:SimulatedNetworkEnv', kwargs={'loss': 0.0,
'throughput_coef': 2.0,
'latency_coef': -1e3,
'loss_coef': -2e3,
'special_loss': 'fairness2',
'mi_len': 10.0})
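

# ------------------------------------------------------------------------------
# Hedged usage sketch: drive the two-sender environment directly with random
# rate deltas. This assumes the common.* helper modules imported above are on
# the path; normally one of the registered 'PccNs-*' ids would be used via gym.make.
if __name__ == '__main__':
    env = SimulatedNetworkEnv(loss=0.0)
    obs = env.reset()
    for _ in range(5):
        obs, reward, done, info = env.step(env.action_space.sample())
        print(f'reward={reward:.4f}')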
|
import socket
import struct
import threading
import platform
msgDecoded = b""
filenameDecoded = ""
def send(filepath:str, socket:socket.socket):
try:
fObj = open(filepath, "rb")
except Exception as err:
print(err)
return
f = fObj.read()
bLenFilecontents = struct.pack("!I", len(f))
if platform.system() == "Windows":
filename = filepath.split("\\")
else:
filename = filepath.split("/")
    bLenFilename = struct.pack("!I", len(filename[-1].encode("utf8")))
    bMessage = bLenFilename + filename[-1].encode("utf8") + bLenFilecontents + f
socket.send(bMessage)
def receive(socket:socket.socket) -> tuple:
def worker(socket):
global msgDecoded
global filenameDecoded
bMessage = socket.recv(1024)
filenameLen = struct.unpack("!I", bMessage[:4])[0]
filenameDecoded = bMessage[4:4 + filenameLen]
filenameDecoded = filenameDecoded.decode("utf8")
msgLen = struct.unpack("!I", bMessage[4 + filenameLen:4 + filenameLen + 4])[0]
msgDecoded = bMessage[4 + filenameLen + 4:4 + filenameLen + 4 + msgLen]
while True:
if len(msgDecoded) < msgLen:
bMessage = socket.recv(1024)
msgDecoded += bMessage
                print(str(len(msgDecoded)) + " / " + str(msgLen) + " bytes")
else:
break
WorkerThread = threading.Thread(target=worker, kwargs={"socket":socket})
WorkerThread.start()
return msgDecoded, filenameDecoded
|
'''
Created on Jun 24, 2017
@author: lawrencezeng
'''
def is_goal(position, goal):
for i in xrange(0, len(goal)):
for j in xrange(0, len(goal[0])):
if position[i + 1][j + 1] != goal[i][j]:
return False
return True
def mismatched_tiles(position, goal):
""" Return number of mismatched tiles"""
mismatched = 0
for i in xrange(0, len(goal)):
for j in xrange(0, len(goal[0])):
if position[i + 1][j + 1] != goal[i][j]:
mismatched = mismatched + 1
return mismatched
def legal_moves(position, current_space_cell = None):
space_cell = current_space_cell or _find_space_tile(position)
moves = list()
_append_move_if_valid(moves, _above(space_cell), space_cell)
_append_move_if_valid(moves, _left_of(space_cell), space_cell)
_append_move_if_valid(moves, _right_of(space_cell), space_cell)
_append_move_if_valid(moves, _below(space_cell), space_cell)
return moves
def _find_space_tile(position):
for i, row in enumerate(position):
for j, tile in enumerate(row):
if (tile == ' '):
return [i, j]
def _left_of(cell):
x, y = cell
return [x, y - 1]
def _right_of(cell):
x, y = cell
return [x, y + 1]
def _above(cell):
x, y = cell
return [x - 1, y]
def _below(cell):
x, y = cell
return [x + 1, y]
def _is_valid_cell(cell):
x, y = cell
return (( x >= 0 and x < 5) and ( y >= 0 and y < 5))
def _append_move_if_valid(moves, from_cell, to_cell):
if (_is_valid_cell(from_cell)):
move = [from_cell, to_cell]
moves.append(move)
def move(a_position, a_move):
    """Return a new position with a_move applied; the input position is not mutated."""
    from_xy, to_xy = a_move
    fr_x, fr_y = from_xy
    to_x, to_y = to_xy
    # Shallow-copy the outer list, then copy-on-write the rows that change.
    after_position = list(a_position)
    after_position[to_x] = _replace(after_position[to_x], to_y, a_position[fr_x][fr_y])
    if fr_x == to_x:
        after_position[fr_x][fr_y] = a_position[to_x][to_y]
    else:
        after_position[fr_x] = _replace(after_position[fr_x], fr_y, a_position[to_x][to_y])
    return after_position
def _replace(row, i, value):
copied_row = list(row)
copied_row[i] = value
return copied_row
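# Hedged usage sketch (assumption, not part of the original module): the i+1 / j+1
# offsets above suggest a 3x3 goal board stored inside a 5x5 position padded with a
# one-cell sentinel border ('#' is an arbitrary choice here).
if __name__ == "__main__":
    goal = [['1', '2', '3'],
            ['4', '5', '6'],
            ['7', '8', ' ']]
    position = [['#', '#', '#', '#', '#'],
                ['#', '1', '2', '3', '#'],
                ['#', '4', '5', '6', '#'],
                ['#', '7', ' ', '8', '#'],
                ['#', '#', '#', '#', '#']]
    print('mismatched tiles:', mismatched_tiles(position, goal))
    for a_move in legal_moves(position):
        print(a_move, '-> goal reached:', is_goal(move(position, a_move), goal))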
|
'''
Nutanix REST API Bootcamp.
Lesson05.
Complex sample.
Create Image(vDisk or ISO) from NFS Server.
'''
import time
import json
import requests
import urllib3
from urllib3.exceptions import InsecureRequestWarning
urllib3.disable_warnings(InsecureRequestWarning)
IP = '10.149.20.41'
USER = 'admin'
PASSWORD = 'Nutanix/4u123!'
NFS_IP = '10.149.245.50'
NFS_PORT = 2049
#IMAGE_URL = 'nfs://10.149.245.50/Public/bootcamp/centos7_min_raw'
IMAGE_URL = 'nfs://10.149.245.50/Public/bootcamp/centos7_min.iso'
#IMAGE_NAME = 'IMG_CENT7_REST'
IMAGE_NAME = 'ISO_CENT7_REST'
CONTAINER_NAME = 'container'
# (0) Check whether the NFS Server is reachable.
import socket
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((NFS_IP, NFS_PORT))
    s.shutdown(socket.SHUT_RDWR)
    s.close()
    print('Able to connect to NFS Server {}:{} from this PC.'.format(NFS_IP, NFS_PORT))
except OSError:
    print('Unable to connect to NFS Server {}:{} from this PC.'.format(NFS_IP, NFS_PORT))
    print('Abort')
    exit()
print('Make session to Nutanix Server')
session = requests.Session()
session.auth = (USER, PASSWORD)
session.verify = False
session.headers.update({'Content-Type': 'application/json; charset=utf-8'})
# (1) GET CONTAINER UUID
print('(1) Get container UUID')
url = 'https://{}:9440/PrismGateway/services/rest/v1/containers'.format(IP)
response = session.get(url)
if not response.ok:
    print('Abort. Response code is not 200.')
    print(response.text)
    exit(1)
#print(json.dumps(json.loads(response.text), indent=2))
d = json.loads(response.text)
#print(json.dumps(d, indent=2))
container_uuid = ''
for container in d['entities']:
if container['name'] == CONTAINER_NAME:
container_uuid = container['containerUuid']
if container_uuid == '':
print('Abort. Container "{}" doesn\'t exist'.format(CONTAINER_NAME))
exit(1)
print('uuid={}'.format(container_uuid))
print()
# (2) Create image and get Task UUID
print('(2) Create an image in the container from the NFS Server and get the task UUID')
is_iso = IMAGE_URL.lower().endswith('.iso')
image_type = 'ISO_IMAGE' if is_iso else 'DISK_IMAGE'
print('imageType={}'.format(image_type))
body_dict = {
"name": IMAGE_NAME,
"annotation": "",
"imageType": image_type,
"imageImportSpec": {
"containerUuid": container_uuid,
"url": IMAGE_URL,
}
}
body_text = json.dumps(body_dict)
url = 'https://{}:9440/api/nutanix/v0.8/images'.format(IP)
response = session.post(url, data=body_text)
if not response.ok:
    print('Abort. Image creation request failed.')
    print(response.text)
    exit(1)
print('image creation task was created.')
d = json.loads(response.text)
task_uuid = d['taskUuid']
print('task_uuid={}'.format(task_uuid))
print()
# (3) Poll the task status until the image creation task finishes
print('(3) Polling image creation task status until the image creation task finishes.')
url = 'https://{}:9440/api/nutanix/v0.8/tasks/{}'.format(IP, task_uuid)
while True:
response = session.get(url)
if not response.ok:
exit(1)
d = json.loads(response.text)
task_name = d['metaRequest']['methodName']
task_percent = d.get('percentageComplete', 0)
task_status = d['progressStatus']
print('Task name:{}, Status:{}, Percent:{}'.format(task_name, task_status, task_percent))
if task_percent == 100:
break
time.sleep(0.5)
print('finish.')
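# Hedged refactoring sketch (assumption, not part of the original lesson): the
# polling in step (3) could be wrapped in a small reusable helper. It uses only the
# same v0.8 task endpoint and 'percentageComplete' field queried above.
def wait_for_task(nutanix_session, ip, a_task_uuid, interval=0.5):
    """Poll the v0.8 task endpoint until percentageComplete reaches 100."""
    task_url = 'https://{}:9440/api/nutanix/v0.8/tasks/{}'.format(ip, a_task_uuid)
    while True:
        resp = nutanix_session.get(task_url)
        if not resp.ok:
            return False
        if json.loads(resp.text).get('percentageComplete', 0) == 100:
            return True
        time.sleep(interval)
# Example (hypothetical call, reusing the session and task created above):
#   wait_for_task(session, IP, task_uuid)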
|
"""Test evaluation base."""
# pylint: disable=import-error,protected-access
from unittest.mock import AsyncMock
from supervisor.coresys import CoreSys
from supervisor.resolution.const import ContextType, SuggestionType
from supervisor.resolution.data import Suggestion
from supervisor.resolution.fixups.create_full_snapshot import FixupCreateFullSnapshot
async def test_fixup(coresys: CoreSys):
"""Test fixup."""
create_full_snapshot = FixupCreateFullSnapshot(coresys)
assert not create_full_snapshot.auto
coresys.resolution.suggestions = Suggestion(
SuggestionType.CREATE_FULL_SNAPSHOT, ContextType.SYSTEM
)
mock_snapshots = AsyncMock()
coresys.snapshots.do_snapshot_full = mock_snapshots
await create_full_snapshot()
mock_snapshots.assert_called()
assert len(coresys.resolution.suggestions) == 0
|