import sys
import numpy as np
def main(argv):
matrix_file = argv[1]
output_filename = argv[2]
matrix = np.loadtxt(matrix_file)
def f(x): return 1 / float(x)
f = np.vectorize(f)
matrix = f(matrix)
transformed_matrix = [[0] * len(matrix[0]) for _ in range(len(matrix))]  # assumes a square input matrix (needed for matrix[j][i])
for i, row in enumerate(matrix):
for j, col in enumerate(row):
transformed_matrix[i][j] = matrix[i][j] + matrix[j][i]
np.savetxt(output_filename, np.array(transformed_matrix), fmt='%.10f')
if __name__ == '__main__':
main(sys.argv)
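# Usage sketch (hypothetical file names): the script reads a whitespace-separated
# square matrix, takes the element-wise reciprocal, adds its transpose, and writes
# the result:
#
#     python transform_matrix.py matrix.txt transformed.txt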
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from google.appengine.ext import ndb
from gallery.gallery_model import Gallery
from config.template_middleware import TemplateResponse
from gaecookie.decorator import no_csrf
from routes.gallerys import edit
from routes.gallerys.new import salvar
from tekton.gae.middleware.redirect import RedirectResponse
from tekton.router import to_path
from gaepermission.decorator import login_not_required
@login_not_required
@no_csrf
def index():
query = Gallery.query_order_by_name()
edit_path_base = to_path(edit)
deletar_path_base = to_path(deletar)
gallerys = query.fetch()
for cat in gallerys:
key = cat.key
key_id = key.id()
cat.edit_path = to_path(edit_path_base, key_id)
cat.deletar_path = to_path(deletar_path_base, key_id)
ctx = {'salvar_path': to_path(salvar),
'gallerys': gallerys}
return TemplateResponse(ctx, 'gallerys/gallery_home.html')
@login_not_required
def deletar(student_id):
key = ndb.Key(Gallery, int(student_id))
key.delete()
return RedirectResponse(index)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-03 18:18
from __future__ import unicode_literals
from django.db import migrations
def backwards_data(apps, schema_editor):
pass
def load_data(apps, schema_editor):
questionnaire_model = apps.get_model("experiments", "Questionnaire")
for questionnaire in questionnaire_model.objects.all():
if not questionnaire.code:
questionnaire.code = 'Q' + str(questionnaire.id)
questionnaire.save()
class Migration(migrations.Migration):
dependencies = [
('experiments', '0063_questionnaire_code'),
]
operations = [
migrations.RunPython(load_data, backwards_data)
]
|
#
import numpy as np
import itertools
import sys
import pdb
from itertools import product
import glob
import os
import math
path='./'
filename = os.path.join(path, '*.out')
for fname in glob.glob(filename):
print(fname)
P0 = []
P1 = []
P2 = []
with open(fname) as gout:
for line in gout:
if 'DIRECT LATTICE VECTORS CARTESIAN COMPONENTS (ANGSTROM)' in line:
final_optimized_geometry = True
done = next(gout)
done = next(gout)
p00 = done.split()[0]
P0.append(p00)
p01 = done.split()[1]
P0.append(p01)
p02 = done.split()[2]
P0.append(p02)
done = next(gout)
p10 = done.split()[0]
P1.append(p10)
p11 = done.split()[1]
P1.append(p11)
p12 = done.split()[2]
P1.append(p12)
done = next(gout)
p20 = done.split()[0]
P2.append(p20)
p21 = done.split()[1]
P2.append(p21)
p22 = done.split()[2]
P2.append(p22)
P0 = np.array(P0)
P1 = np.array(P1)
P2 = np.array(P2)
P0 = P0.astype(float)
P1 = P1.astype(float)
P2 = P2.astype(float)
A = np.vstack((P0, P1, P2))
print('A array = ', A)
# Alternatively, you can provide here the direct matrix lattice vectors (primitive cell):
# Aragonite:
#A =np.array([[0.496160000000e+01, 0.000000000000e+00 , 0.000000000000e+00],
# [0.000000000000e+00, 0.797050000000e+01, 0.000000000000e+00],
# [0.000000000000e+00, 0.000000000000e+00, 0.573940000000e+01]])
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'::
>>> angle_between((1, 0, 0), (0, 1, 0))
1.5707963267948966
>>> angle_between((1, 0, 0), (1, 0, 0))
0.0
>>> angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
vector = P0
print('unit_vector(vector) = ', unit_vector(vector))
#sys.exit()
# Supercell expansion matrix generator:
K = 3
N = 3
E = [np.reshape(np.array(i), (K, N)) for i in itertools.product([0, 1, -1, 2, -2], repeat = K*N)]
tol_1=10
tol_2=2
tol_3=50
print "tol_1 = ", tol_1
print "tol_2 = ", tol_2
print 'type(E) = ', type(E) # Each E candidate is saved in a list
print 'len(E) = ', len(E) # No. combinations = (#integers)**9
for indx_E in E:
A_SC = np.dot(indx_E,A)
a1_SC = np.linalg.norm(A_SC[0])
a2_SC = np.linalg.norm(A_SC[1])
a3_SC = np.linalg.norm(A_SC[2])
det_indx_E = np.linalg.det(indx_E)
# If you want to print each iteration, uncomment this block:
# print 'a1_SC = ', a1_SC
# print 'a2_SC = ', a2_SC
# print 'a3_SC = ', a3_SC
# print 'det_indx_E = ', det_indx_E
# print abs(a1_SC - a2_SC) == tol_2 # All False, thus we have to use <=
# print abs(a1_SC - a2_SC) <= tol_2
# Calculation of angles:
alpha = angle_between(A_SC[1], A_SC[2])
beta = angle_between(A_SC[0], A_SC[2])
gamma = angle_between(A_SC[0], A_SC[1])
alpha_deg = alpha*180/math.pi
beta_deg = beta*180/math.pi
gamma_deg = gamma*180/math.pi
if a1_SC > tol_1\
and a2_SC > tol_1\
and a3_SC > tol_1\
and abs(a1_SC - a2_SC) <= tol_2\
and abs(a1_SC - a3_SC) <= tol_2\
and abs(a2_SC - a3_SC) <= tol_2\
\
and abs(alpha_deg - beta_deg) <= tol_3\
and abs(alpha_deg - gamma_deg) <= tol_3\
and abs(beta_deg - gamma_deg) <= tol_3\
\
and det_indx_E > 0.0:
print('A_SC = ', A_SC)
print('a1_SC = ', a1_SC)
print('a2_SC = ', a2_SC)
print('a3_SC = ', a3_SC)
print('alpha_deg = ', alpha_deg)
print('beta_deg = ', beta_deg)
print('gamma_deg = ', gamma_deg)
print('det_indx_E = ', det_indx_E)
E_sol = np.dot(A_SC, np.linalg.inv(A))
E_sol_int = E_sol.astype(int)
print('Supercell Expansion Matrix = ')
print('\n'.join([''.join(['{:4}'.format(item) for item in row])
for row in E_sol_int]))
print('END ++++++++++')
#
# Redirect to an output, i.e python *py > *out
# Search for the candidate in the *out as:
# pcregrep -M "\[\[ 1 2 1].*\n.*\[ 0 2 1].*\n.*\[-1 0 1]]" calcite_14__tol2_2.out
# BETTER:
# pcregrep -n -M " 0 0 -1.*\n.* -1 1 0.*\n.* 2 -2 0" calcite_14__tol2_2.out
|
# -*- coding: utf-8 -*-
"""Utilities for high-level API."""
import json
from typing import Optional
import bioversions
from ..utils.path import prefix_directory_join
__all__ = [
"get_version",
]
def get_version(prefix: str) -> Optional[str]:
"""Get the version for the resource, if available.
:param prefix: the resource name
:return: The version if available else None
"""
try:
version = bioversions.get_version(prefix)
except IOError:
raise IOError(f"[{prefix}] could not get version from bioversions")
except KeyError:
pass # this prefix isn't available from bioversions
else:
if version:
return version
metadata_json_path = prefix_directory_join(prefix, name="metadata.json", ensure_exists=False)
if metadata_json_path.exists():
data = json.loads(metadata_json_path.read_text())
return data["version"]
return None
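# Minimal usage sketch (assumption: "chebi" is a prefix known to bioversions;
# any registered prefix works the same way):
#
#     version = get_version("chebi")
#     if version is None:
#         print("no version information available")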
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^listing', views.listing, name='listing'),
url(r'^jsonlisting', views.jsonlisting, name='jsonlisting'),
url(r'^timingListing', views.timingListing, name='timingListing'),
url(r'^iterlist', views.iterlist, name='iterlist'),
url(r'^subsetq', views.subsetq, name='subsetq'),
url(r'^postarg', views.postarg, name='postarg'),
url(r'^postbout', views.postBout, name='postBout'),
url(r'^topscores', views.topscores, name='topscores'),
url(r'^selgrid', views.selgrid, name='selgrid'),
]
|
import hashlib
def open_file(path, mode = 'r'):
with open(path, mode) as source:
return source.read()
def encode(string, typ = "utf8"):
return string.encode(typ)
def generate_hash(data):
if not isinstance(data, (str, bytes)):
data = str(data)
if not isinstance(data, bytes):
data = data.encode()
m = hashlib.md5() #sha256
m.update(data)
return m.hexdigest() #digest()
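# Usage sketch (hypothetical file name): hash a file's contents.
#
#     contents = open_file("example.txt")
#     digest = generate_hash(contents)  # 32-character hex MD5 digest
#
# generate_hash() also accepts non-string values; they are converted with str()
# and encoded to bytes before hashing.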
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ResourceResp:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'extra_info': 'ResourceExtraInfo',
'id': 'str',
'name': 'str',
'protect_status': 'str',
'size': 'int',
'type': 'str',
'backup_size': 'int',
'backup_count': 'int'
}
attribute_map = {
'extra_info': 'extra_info',
'id': 'id',
'name': 'name',
'protect_status': 'protect_status',
'size': 'size',
'type': 'type',
'backup_size': 'backup_size',
'backup_count': 'backup_count'
}
def __init__(self, extra_info=None, id=None, name=None, protect_status=None, size=None, type=None, backup_size=None, backup_count=None):
"""ResourceResp - a model defined in huaweicloud sdk"""
self._extra_info = None
self._id = None
self._name = None
self._protect_status = None
self._size = None
self._type = None
self._backup_size = None
self._backup_count = None
self.discriminator = None
if extra_info is not None:
self.extra_info = extra_info
self.id = id
self.name = name
if protect_status is not None:
self.protect_status = protect_status
if size is not None:
self.size = size
self.type = type
if backup_size is not None:
self.backup_size = backup_size
if backup_count is not None:
self.backup_count = backup_count
@property
def extra_info(self):
"""Gets the extra_info of this ResourceResp.
:return: The extra_info of this ResourceResp.
:rtype: ResourceExtraInfo
"""
return self._extra_info
@extra_info.setter
def extra_info(self, extra_info):
"""Sets the extra_info of this ResourceResp.
:param extra_info: The extra_info of this ResourceResp.
:type: ResourceExtraInfo
"""
self._extra_info = extra_info
@property
def id(self):
"""Gets the id of this ResourceResp.
ID of the resource to be backed up
:return: The id of this ResourceResp.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ResourceResp.
ID of the resource to be backed up
:param id: The id of this ResourceResp.
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this ResourceResp.
Name of the resource to be backed up
:return: The name of this ResourceResp.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ResourceResp.
Name of the resource to be backed up
:param name: The name of this ResourceResp.
:type: str
"""
self._name = name
@property
def protect_status(self):
"""Gets the protect_status of this ResourceResp.
Protection status
:return: The protect_status of this ResourceResp.
:rtype: str
"""
return self._protect_status
@protect_status.setter
def protect_status(self, protect_status):
"""Sets the protect_status of this ResourceResp.
Protection status
:param protect_status: The protect_status of this ResourceResp.
:type: str
"""
self._protect_status = protect_status
@property
def size(self):
"""Gets the size of this ResourceResp.
Allocated capacity of the resource, in GB
:return: The size of this ResourceResp.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this ResourceResp.
Allocated capacity of the resource, in GB
:param size: The size of this ResourceResp.
:type: int
"""
self._size = size
@property
def type(self):
"""Gets the type of this ResourceResp.
Type of the resource to be backed up. Cloud server: OS::Nova::Server; cloud disk: OS::Cinder::Volume; bare metal server: OS::Ironic::BareMetalServer; on-premises server: OS::Native::Server; elastic file system: OS::Sfs::Turbo
:return: The type of this ResourceResp.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ResourceResp.
Type of the resource to be backed up. Cloud server: OS::Nova::Server; cloud disk: OS::Cinder::Volume; bare metal server: OS::Ironic::BareMetalServer; on-premises server: OS::Native::Server; elastic file system: OS::Sfs::Turbo
:param type: The type of this ResourceResp.
:type: str
"""
self._type = type
@property
def backup_size(self):
"""Gets the backup_size of this ResourceResp.
Backup size
:return: The backup_size of this ResourceResp.
:rtype: int
"""
return self._backup_size
@backup_size.setter
def backup_size(self, backup_size):
"""Sets the backup_size of this ResourceResp.
Backup size
:param backup_size: The backup_size of this ResourceResp.
:type: int
"""
self._backup_size = backup_size
@property
def backup_count(self):
"""Gets the backup_count of this ResourceResp.
Number of backups
:return: The backup_count of this ResourceResp.
:rtype: int
"""
return self._backup_count
@backup_count.setter
def backup_count(self, backup_count):
"""Sets the backup_count of this ResourceResp.
Number of backups
:param backup_count: The backup_count of this ResourceResp.
:type: int
"""
self._backup_count = backup_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourceResp):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
from .crawling_py.main_crawling import run_crawling
from .views import save_articles
# Save the crawled articles to the DB
def article_crawling_job():
politic_article_list, economy_article_list, society_article_list = run_crawling()
print('run_crawling finished!!')
try:
save_articles(politic_article_list, economy_article_list, society_article_list)
except Exception as e:
print(e)
print("success!!")
# save_articles([{"title":"save_test","contents":"save_test","url":"https://velog.io/@magnoliarfsit/ReDjango-4.-장고-ORM을-사용해서-DB-CRUD-구현하기","category":"정치"}, {"title":"save_test4","contents":"save_test4","url":"https://velog.io/@magnoliarfsit/ReDjango-4.-장고-ORM을-사용해서-DB-CRUD-구현하기","category":"정치"}, {"title":"save_test5","contents":"save_test5","url":"https://velog.io/@magnoliarfsit/ReDjango-4.-장고-ORM을-사용해서-DB-CRUD-구현하기","category":"정치"}], [{"title":"save_test2","contents":"save_test2","url":"https://velog.io/@magnoliarfsit/ReDjango-4.-장고-ORM을-사용해서-DB-CRUD-구현하기","category":"경제"}], [{"title":"save_test3","contents":"save_test3","url":"https://velog.io/@magnoliarfsit/ReDjango-4.-장고-ORM을-사용해서-DB-CRUD-구현하기","category":"사회"}])
|
# This problem was asked by Facebook.
# Given a circular array, compute its maximum subarray sum in O(n) time. A subarray can be empty, and in this case the sum is 0.
# For example, given [8, -1, 3, 4], return 15 as we choose the numbers 3, 4, and 8 where the 8 is obtained from wrapping around.
# Given [-4, 5, 1, 0], return 6 as we choose the numbers 5 and 1.
# Solution:
# Basically, the algorithm in steps:
#1. Find maximum subarray sum using kadane's algorithm (max)
#2. Find minimum subarray sum using kadane's algorithm (min)
#3. Find total sum of the array (sum)
#4. Now, if sum == min (every element is negative), return max(0, max) since the empty subarray (sum 0) is allowed
#5. Otherwise, return maximum ( max, sum - min )
class Solution:
def maxSubarraySumCircular(self, A):
if len(A) == 0:  # empty input array
return 0
maxTotal,maxSoFar,minSoFar,minTotal,sum = A[0], A[0], A[0], A[0],A[0]
for i in range(1, len(A)):
maxSoFar = max(A[i], maxSoFar + A[i])
maxTotal = max(maxTotal, maxSoFar)
minSoFar = min(A[i], minSoFar + A[i])
minTotal = min(minTotal, minSoFar)
sum += A[i]
if(sum == minTotal):
return max(0, maxTotal)  # all elements negative: the empty subarray (sum 0) is the best allowed choice
return max(sum - minTotal, maxTotal)
KDEAlgo=Solution()
#A = [8, -1, 3, 4]#input case
#A = [-8, -3, -6, -2, -5, -4]#input case of all negatives
#A=[-4, 5, 1, 0]
A=[] #input case of empty
print("The maximum circular subarray sum is",KDEAlgo.maxSubarraySumCircular(A))
|
from __future__ import annotations
import typing
from ctc import spec
from . import erc20_metadata
async def async_normalize_erc20_quantity(
quantity: typing.SupportsFloat,
token: typing.Optional[spec.ERC20Address] = None,
provider: spec.ProviderSpec = None,
decimals: typing.Optional[typing.SupportsInt] = None,
block: typing.Optional[spec.BlockNumberReference] = None,
) -> float:
"""convert raw erc20 quantity by adjusting radix by (10 ** decimals)"""
if quantity == 0:
return 0
# get decimals
if decimals is None:
if token is None:
raise Exception('must specify token or decimals')
decimals = await erc20_metadata.async_get_erc20_decimals(
token,
provider=provider,
block=block,
)
else:
decimals = int(decimals)
# normalize
return float(quantity) / (10 ** decimals)
async def async_normalize_erc20_quantities(
quantities: typing.Sequence[typing.SupportsInt] | spec.Series,
token: spec.ERC20Address | None = None,
provider: spec.ProviderSpec = None,
decimals: typing.Optional[typing.SupportsInt] = None,
block: typing.Optional[spec.BlockNumberReference] = None,
) -> list[float]:
if all(quantity == 0 for quantity in quantities):
return [float(0) for quantity in quantities]
if decimals is None:
if token is None:
raise Exception('must specify token or decimals')
decimals = await erc20_metadata.async_get_erc20_decimals(
token=token,
block=block,
provider=provider,
)
else:
decimals = int(decimals)
return [quantity / (10 ** decimals) for quantity in quantities]
async def async_normalize_erc20s_quantities(
quantities: typing.Sequence[typing.SupportsInt] | spec.Series,
tokens: typing.Optional[typing.Sequence[spec.ERC20Address]] = None,
decimals: typing.Optional[typing.Sequence[typing.SupportsInt]] = None,
block: typing.Optional[spec.BlockNumberReference] = None,
provider: spec.ProviderSpec = None,
) -> list[float]:
# take subset of non zero values
mask = [quantity != 0 for quantity in quantities]
any_zero = not all(mask)
if any_zero:
old_quantities = quantities
quantities = [
quantity for quantity, nonzero in zip(quantities, mask) if nonzero
]
if tokens is not None:
tokens = [token for token, nonzero in zip(tokens, mask) if nonzero]
if decimals is not None:
decimals = [
decimal for decimal, nonzero in zip(decimals, mask) if nonzero
]
if decimals is None:
if tokens is None:
raise Exception('must specify tokens or decimals')
use_decimals = await erc20_metadata.async_get_erc20s_decimals(
tokens=tokens,
block=block,
provider=provider,
)
else:
use_decimals = [int(decimal) for decimal in decimals]
if len(use_decimals) != len(quantities):
raise Exception('number of quantities must match number of decimals')
# put back in zero values
if any_zero:
quantities = old_quantities
new_use_decimals = []
use_decimals_iterator = iter(use_decimals)
for nonzero in mask:
if nonzero:
new_use_decimals.append(next(use_decimals_iterator))
else:
new_use_decimals.append(1)
use_decimals = new_use_decimals
return [
quantity / (10 ** decimal)
for quantity, decimal in zip(quantities, use_decimals)
]
async def async_normalize_erc20_quantities_by_block(
quantities: typing.Sequence[typing.SupportsInt] | spec.Series,
blocks: typing.Sequence[spec.BlockNumberReference],
token: typing.Optional[spec.ERC20Address] = None,
decimals: typing.Optional[list[typing.SupportsInt]] = None,
provider: spec.ProviderSpec = None,
) -> list[float]:
# take subset of non zero values
mask = [quantity != 0 for quantity in quantities]
any_zero = not all(mask)
if any_zero:
old_quantities = quantities
quantities = [
quantity for quantity, nonzero in zip(quantities, mask) if nonzero
]
blocks = [
block for block, nonzero in zip(blocks, mask) if nonzero
]
if decimals is not None:
decimals = [
decimal for decimal, nonzero in zip(decimals, mask) if nonzero
]
if decimals is None:
if token is None:
raise Exception('must specify token or decimals')
use_decimals = await erc20_metadata.async_get_erc20_decimals_by_block(
token=token,
blocks=blocks,
provider=provider,
)
else:
use_decimals = [int(decimal) for decimal in decimals]
if len(use_decimals) != len(quantities):
raise Exception('number of quantities must match number of decimals')
if any_zero:
quantities = old_quantities
new_use_decimals = []
use_decimals_iterator = iter(use_decimals)
for nonzero in mask:
if nonzero:
new_use_decimals.append(next(use_decimals_iterator))
else:
new_use_decimals.append(1)
use_decimals = new_use_decimals
return [
quantity / (10 ** decimal)
for quantity, decimal in zip(quantities, use_decimals)
]
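# Minimal usage sketch (assumptions: run inside an asyncio event loop, and pass
# `decimals` explicitly so no token lookup or provider is needed):
#
#     normalized = await async_normalize_erc20_quantity(1500000000000000000, decimals=18)
#     # -> 1.5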
|
"""
Laplacian of a compressed-sparse graph
"""
import numpy as np
from scipy.sparse import isspmatrix
from scipy.sparse.linalg import LinearOperator
###############################################################################
# Graph laplacian
def laplacian(
csgraph,
normed=False,
return_diag=False,
use_out_degree=False,
*,
copy=True,
form="array",
dtype=None,
symmetrized=False,
):
"""
Return the Laplacian of a directed graph.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute symmetrically normalized Laplacian.
Default: False.
return_diag : bool, optional
If True, then also return an array related to vertex degrees.
Default: False.
use_out_degree : bool, optional
If True, then use out-degree instead of in-degree.
This distinction matters only if the graph is asymmetric.
Default: False.
copy: bool, optional
If False, then change `csgraph` in place if possible,
avoiding doubling the memory use.
Default: True, for backward compatibility.
form: 'array', or 'function', or 'lo'
Determines the format of the output Laplacian:
* 'array' is a numpy array;
* 'function' is a pointer to evaluating the Laplacian-vector
or Laplacian-matrix product;
* 'lo' results in the format of the `LinearOperator`.
Choosing 'function' or 'lo' always avoids doubling
the memory use, ignoring `copy` value.
Default: 'array', for backward compatibility.
dtype: None or one of numeric numpy dtypes, optional
The dtype of the output. If ``dtype=None``, the dtype of the
output matches the dtype of the input csgraph, except for
the case ``normed=True`` and integer-like csgraph, where
the output dtype is 'float' allowing accurate normalization,
but dramatically increasing the memory use.
Default: None, for backward compatibility.
symmetrized: bool, optional
If True, then the output Laplacian is symmetric/Hermitian.
The symmetrization is done by ``csgraph + csgraph.T.conj``
without dividing by 2 to preserve integer dtypes if possible
prior to the construction of the Laplacian.
The symmetrization will increase the memory footprint of
sparse matrices unless the sparsity pattern is symmetric or
`form` is 'function' or 'lo'.
Default: False, for backward compatibility.
Returns
-------
lap : ndarray, or sparse matrix, or `LinearOperator`
The N x N Laplacian of csgraph. It will be a NumPy array (dense)
if the input was dense, or a sparse matrix otherwise, or
the format of a function or `LinearOperator` if
`form` equals 'function' or 'lo', respectively.
diag : ndarray, optional
The length-N main diagonal of the Laplacian matrix.
For the normalized Laplacian, this is the array of square roots
of vertex degrees or 1 if the degree is zero.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchhoff matrix" or just the "Laplacian", and is useful in many
parts of spectral graph theory.
In particular, the eigen-decomposition of the Laplacian can give
insight into many properties of the graph, e.g., it
is commonly used for spectral data embedding and clustering.
The constructed Laplacian doubles the memory use if ``copy=True`` and
``form="array"`` which is the default.
Choosing ``copy=False`` has no effect unless ``form="array"``
or the matrix is sparse in the ``coo`` format, or dense array, except
for the integer input with ``normed=True`` that forces the float output.
Sparse input is reformatted into ``coo`` if ``form="array"``,
which is the default.
If the input adjacency matrix is not symmetric, the Laplacian is
also non-symmetric unless ``symmetrized=True`` is used.
Diagonal entries of the input adjacency matrix are ignored and
replaced with zeros for the purpose of normalization where ``normed=True``.
The normalization uses the inverse square roots of row-sums of the input
adjacency matrix, and thus may fail if the row-sums contain
values that are negative, or complex with a non-zero imaginary part.
The normalization is symmetric, making the normalized Laplacian also
symmetric if the input csgraph was symmetric.
References
----------
.. [1] Laplacian matrix. https://en.wikipedia.org/wiki/Laplacian_matrix
Examples
--------
>>> from scipy.sparse import csgraph
Our first illustration is the symmetric graph
>>> G = np.arange(4) * np.arange(4)[:, np.newaxis]
>>> G
array([[0, 0, 0, 0],
[0, 1, 2, 3],
[0, 2, 4, 6],
[0, 3, 6, 9]])
and its symmetric Laplacian matrix
>>> csgraph.laplacian(G)
array([[ 0, 0, 0, 0],
[ 0, 5, -2, -3],
[ 0, -2, 8, -6],
[ 0, -3, -6, 9]])
The non-symmetric graph
>>> G = np.arange(9).reshape(3, 3)
>>> G
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
has different row- and column sums, resulting in two varieties
of the Laplacian matrix, using an in-degree, which is the default
>>> L_in_degree = csgraph.laplacian(G)
>>> L_in_degree
array([[ 9, -1, -2],
[-3, 8, -5],
[-6, -7, 7]])
or alternatively an out-degree
>>> L_out_degree = csgraph.laplacian(G, use_out_degree=True)
>>> L_out_degree
array([[ 3, -1, -2],
[-3, 8, -5],
[-6, -7, 13]])
Constructing a symmetric Laplacian matrix, one can add the two as
>>> L_in_degree + L_out_degree.T
array([[ 12, -4, -8],
[ -4, 16, -12],
[ -8, -12, 20]])
or use the ``symmetrized=True`` option
>>> csgraph.laplacian(G, symmetrized=True)
array([[ 12, -4, -8],
[ -4, 16, -12],
[ -8, -12, 20]])
that is equivalent to symmetrizing the original graph
>>> csgraph.laplacian(G + G.T)
array([[ 12, -4, -8],
[ -4, 16, -12],
[ -8, -12, 20]])
The goal of normalization is to make the non-zero diagonal entries
of the Laplacian matrix to be all unit, also scaling off-diagonal
entries correspondingly. The normalization can be done manually, e.g.,
>>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
>>> L, d = csgraph.laplacian(G, return_diag=True)
>>> L
array([[ 2, -1, -1],
[-1, 2, -1],
[-1, -1, 2]])
>>> d
array([2, 2, 2])
>>> scaling = np.sqrt(d)
>>> scaling
array([1.41421356, 1.41421356, 1.41421356])
>>> (1/scaling)*L*(1/scaling)
array([[ 1. , -0.5, -0.5],
[-0.5, 1. , -0.5],
[-0.5, -0.5, 1. ]])
Or using ``normed=True`` option
>>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
>>> L
array([[ 1. , -0.5, -0.5],
[-0.5, 1. , -0.5],
[-0.5, -0.5, 1. ]])
which now instead of the diagonal returns the scaling coefficients
>>> d
array([1.41421356, 1.41421356, 1.41421356])
Zero scaling coefficients are substituted with 1s, where scaling
has thus no effect, e.g.,
>>> G = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0]])
>>> G
array([[0, 0, 0],
[0, 0, 1],
[0, 1, 0]])
>>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
>>> L
array([[ 0., -0., -0.],
[-0., 1., -1.],
[-0., -1., 1.]])
>>> d
array([1., 1., 1.])
Only the symmetric normalization is implemented, resulting
in a symmetric Laplacian matrix if and only if its graph is symmetric
and has all non-negative degrees, like in the examples above.
The output Laplacian matrix is by default a dense array or a sparse matrix
inferring its shape, format, and dtype from the input graph matrix:
>>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).astype(np.float32)
>>> G
array([[0., 1., 1.],
[1., 0., 1.],
[1., 1., 0.]], dtype=float32)
>>> csgraph.laplacian(G)
array([[ 2., -1., -1.],
[-1., 2., -1.],
[-1., -1., 2.]], dtype=float32)
but can alternatively be generated matrix-free as a LinearOperator:
>>> L = csgraph.laplacian(G, form="lo")
>>> L
<3x3 _CustomLinearOperator with dtype=float32>
>>> L(np.eye(3))
array([[ 2., -1., -1.],
[-1., 2., -1.],
[-1., -1., 2.]])
or as a lambda-function:
>>> L = csgraph.laplacian(G, form="function")
>>> L
<function _laplace.<locals>.<lambda> at 0x0000012AE6F5A598>
>>> L(np.eye(3))
array([[ 2., -1., -1.],
[-1., 2., -1.],
[-1., -1., 2.]])
The Laplacian matrix is used for
spectral data clustering and embedding
as well as for spectral graph partitioning.
Our final example illustrates the latter
for a noisy directed linear graph.
>>> from scipy.sparse import diags, random
>>> from scipy.sparse.linalg import lobpcg
Create a directed linear graph with ``N=35`` vertices
using a sparse adjacency matrix ``G``:
>>> N = 35
>>> G = diags(np.ones(N-1), 1, format="csr")
Fix a random seed ``rng`` and add a random sparse noise to the graph ``G``:
>>> rng = np.random.default_rng()
>>> G += 1e-2 * random(N, N, density=0.1, random_state=rng)
Set initial approximations for eigenvectors:
>>> X = rng.random((N, 2))
The constant vector of ones is always a trivial eigenvector
of the non-normalized Laplacian to be filtered out:
>>> Y = np.ones((N, 1))
Alternating (1) the sign of the graph weights allows determining
labels for spectral max- and min- cuts in a single loop.
Since the graph is undirected, the option ``symmetrized=True``
must be used in the construction of the Laplacian.
The option ``normed=True`` cannot be used in (2) for the negative weights
here as the symmetric normalization evaluates square roots.
The option ``form="lo"`` in (2) is matrix-free, i.e., guarantees
a fixed memory footprint and read-only access to the graph.
Calling the eigenvalue solver ``lobpcg`` (3) computes the Fiedler vector
that determines the labels as the signs of its components in (5).
Since the sign in an eigenvector is not deterministic and can flip,
we fix the sign of the first component to be always +1 in (4).
>>> for cut in ["max", "min"]:
... G = -G # 1.
... L = csgraph.laplacian(G, symmetrized=True, form="lo") # 2.
... _, eves = lobpcg(L, X, Y=Y, largest=False, tol=1e-3) # 3.
... eves *= np.sign(eves[0, 0]) # 4.
... print(cut + "-cut labels:\\n", 1 * (eves[:, 0]>0)) # 5.
max-cut labels:
[1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]
min-cut labels:
[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
As anticipated for a (slightly noisy) linear graph,
the max-cut strips all the edges of the graph coloring all
odd vertices into one color and all even vertices into another one,
while the balanced min-cut partitions the graph
in the middle by deleting a single edge.
Both determined partitions are optimal.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (
np.issubdtype(csgraph.dtype, np.signedinteger)
or np.issubdtype(csgraph.dtype, np.uint)
):
csgraph = csgraph.astype(np.float64)
if form == "array":
create_lap = (
_laplacian_sparse if isspmatrix(csgraph) else _laplacian_dense
)
else:
create_lap = (
_laplacian_sparse_flo
if isspmatrix(csgraph)
else _laplacian_dense_flo
)
degree_axis = 1 if use_out_degree else 0
lap, d = create_lap(
csgraph,
normed=normed,
axis=degree_axis,
copy=copy,
form=form,
dtype=dtype,
symmetrized=symmetrized,
)
if return_diag:
return lap, d
return lap
def _setdiag_dense(m, d):
step = len(d) + 1
m.flat[::step] = d
def _laplace(m, d):
return lambda v: v * d[:, np.newaxis] - m @ v
def _laplace_normed(m, d, nd):
laplace = _laplace(m, d)
return lambda v: nd[:, np.newaxis] * laplace(v * nd[:, np.newaxis])
def _laplace_sym(m, d):
return (
lambda v: v * d[:, np.newaxis]
- m @ v
- np.transpose(np.conjugate(np.transpose(np.conjugate(v)) @ m))
)
def _laplace_normed_sym(m, d, nd):
laplace_sym = _laplace_sym(m, d)
return lambda v: nd[:, np.newaxis] * laplace_sym(v * nd[:, np.newaxis])
def _linearoperator(mv, shape, dtype):
return LinearOperator(matvec=mv, matmat=mv, shape=shape, dtype=dtype)
def _laplacian_sparse_flo(graph, normed, axis, copy, form, dtype, symmetrized):
# The keyword argument `copy` is unused and has no effect here.
del copy
if dtype is None:
dtype = graph.dtype
graph_sum = graph.sum(axis=axis).getA1()
graph_diagonal = graph.diagonal()
diag = graph_sum - graph_diagonal
if symmetrized:
graph_sum += graph.sum(axis=1 - axis).getA1()
diag = graph_sum - graph_diagonal - graph_diagonal
if normed:
isolated_node_mask = diag == 0
w = np.where(isolated_node_mask, 1, np.sqrt(diag))
if symmetrized:
md = _laplace_normed_sym(graph, graph_sum, 1.0 / w)
else:
md = _laplace_normed(graph, graph_sum, 1.0 / w)
if form == "function":
return md, w.astype(dtype, copy=False)
elif form == "lo":
m = _linearoperator(md, shape=graph.shape, dtype=dtype)
return m, w.astype(dtype, copy=False)
else:
raise ValueError(f"Invalid form: {form!r}")
else:
if symmetrized:
md = _laplace_sym(graph, graph_sum)
else:
md = _laplace(graph, graph_sum)
if form == "function":
return md, diag.astype(dtype, copy=False)
elif form == "lo":
m = _linearoperator(md, shape=graph.shape, dtype=dtype)
return m, diag.astype(dtype, copy=False)
else:
raise ValueError(f"Invalid form: {form!r}")
def _laplacian_sparse(graph, normed, axis, copy, form, dtype, symmetrized):
# The keyword argument `form` is unused and has no effect here.
del form
if dtype is None:
dtype = graph.dtype
needs_copy = False
if graph.format in ('lil', 'dok'):
m = graph.tocoo()
else:
m = graph
if copy:
needs_copy = True
if symmetrized:
m += m.T.conj()
w = m.sum(axis=axis).getA1() - m.diagonal()
if normed:
m = m.tocoo(copy=needs_copy)
isolated_node_mask = (w == 0)
w = np.where(isolated_node_mask, 1, np.sqrt(w))
m.data /= w[m.row]
m.data /= w[m.col]
m.data *= -1
m.setdiag(1 - isolated_node_mask)
else:
if m.format == 'dia':
m = m.copy()
else:
m = m.tocoo(copy=needs_copy)
m.data *= -1
m.setdiag(w)
return m.astype(dtype, copy=False), w.astype(dtype)
def _laplacian_dense_flo(graph, normed, axis, copy, form, dtype, symmetrized):
if copy:
m = np.array(graph)
else:
m = np.asarray(graph)
if dtype is None:
dtype = m.dtype
graph_sum = m.sum(axis=axis)
graph_diagonal = m.diagonal()
diag = graph_sum - graph_diagonal
if symmetrized:
graph_sum += m.sum(axis=1 - axis)
diag = graph_sum - graph_diagonal - graph_diagonal
if normed:
isolated_node_mask = diag == 0
w = np.where(isolated_node_mask, 1, np.sqrt(diag))
if symmetrized:
md = _laplace_normed_sym(m, graph_sum, 1.0 / w)
else:
md = _laplace_normed(m, graph_sum, 1.0 / w)
if form == "function":
return md, w.astype(dtype, copy=False)
elif form == "lo":
m = _linearoperator(md, shape=graph.shape, dtype=dtype)
return m, w.astype(dtype, copy=False)
else:
raise ValueError(f"Invalid form: {form!r}")
else:
if symmetrized:
md = _laplace_sym(m, graph_sum)
else:
md = _laplace(m, graph_sum)
if form == "function":
return md, diag.astype(dtype, copy=False)
elif form == "lo":
m = _linearoperator(md, shape=graph.shape, dtype=dtype)
return m, diag.astype(dtype, copy=False)
else:
raise ValueError(f"Invalid form: {form!r}")
def _laplacian_dense(graph, normed, axis, copy, form, dtype, symmetrized):
if form != "array":
raise ValueError(f'{form!r} must be "array"')
if dtype is None:
dtype = graph.dtype
if copy:
m = np.array(graph)
else:
m = np.asarray(graph)
if dtype is None:
dtype = m.dtype
if symmetrized:
m += m.T.conj()
np.fill_diagonal(m, 0)
w = m.sum(axis=axis)
if normed:
isolated_node_mask = (w == 0)
w = np.where(isolated_node_mask, 1, np.sqrt(w))
m /= w
m /= w[:, np.newaxis]
m *= -1
_setdiag_dense(m, 1 - isolated_node_mask)
else:
m *= -1
_setdiag_dense(m, w)
return m.astype(dtype, copy=False), w.astype(dtype, copy=False)
|
import asyncio
import logging
import os
import tempfile
from typing import Any, Dict, Text
import gamla
import yaml
class _HelmException(Exception):
pass
async def _run_in_shell(args, path: str) -> Text:
logging.info(f"Running shell command: {args}.")
process = await asyncio.subprocess.create_subprocess_shell(
cmd=" ".join(args),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
cwd=path,
)
stdout, stderr = await process.communicate()
if process.returncode != 0:
raise _HelmException(stderr.decode("utf-8"))
return stdout.decode("utf-8")
async def releases():
return gamla.pipe(
await _run_in_shell(["helm", "list", "-q"], "./"),
lambda output: output.split("\n"),
frozenset,
)
async def install_release(
chart_name: str,
release_name: str,
chart_values: Dict[Text, Any],
chart_physical_dir: str,
):
handle, filename = tempfile.mkstemp()
os.close(handle)  # close the low-level descriptor; the file is reopened by name below
try:
with open(filename, "w") as values_file:
values_file.write(yaml.dump(chart_values))
await _run_in_shell(
["helm", "upgrade", release_name, chart_name, "--install", "-f", filename],
chart_physical_dir,
)
finally:
os.remove(filename)
async def delete_release(release_name: str):
try:
await _run_in_shell(["helm", "uninstall", release_name], "./")
except _HelmException:
logging.debug(f"Unable to delete release {release_name}.")
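# Minimal usage sketch (hypothetical chart/release names and values):
#
#     import asyncio
#
#     asyncio.run(
#         install_release(
#             chart_name="my-chart",
#             release_name="my-release",
#             chart_values={"replicaCount": 2},
#             chart_physical_dir="./charts",
#         )
#     )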
|
WHITELIST_COUNTIES = [
"Kajiado",
"Nairobi",
]
_CONSTITUENCIES = "Constituencies"
_SUB_COUNTIES = "Sub Counties"
ADMINISTRATIVE_UNITS = {
"Nairobi": {
_CONSTITUENCIES: (
"Dagoretti North",
"Dagoretti South",
"Embakasi Central",
"Embakasi East",
"Embakasi North",
"Embakasi South",
"Embakasi West",
"Kamukunji",
"Kasarani",
"Kibra",
"Langata",
"Makadara",
"Mathare",
"Roysambu",
"Ruaraka",
"Starehe",
"Westlands",
),
_SUB_COUNTIES: {
"Dagoretti North": ("Gatini", "Kabiro", "Kawangware", "Kileleshwa", "Kilimani"),
"Dagoretti South": ("Mutu-Ini", "Ngando", "Riruta", "Uthiru/Ruthimitu", "Waithaka"),
"Embakasi Central": (
"Kayole Central",
"Kayole North",
"Kayole South",
"Komarock",
"Matopeni/Spring Valley",
),
"Embakasi East": (
"Embakasi",
"Lower Savannah",
"Mihango",
"Upper Savannah",
"Utawala",
),
"Embakasi North": (
"Dandora Area I",
"Dandora Area II",
"Dandora Area III",
"Dandora Area Iv",
"Karioboangi North",
),
"Embakasi South": ("Imara Daima", "Kwa Njenga", "Kwa Rueben", "Kware", "Pipeline"),
"Embakasi West": (
"Kariobangi South",
"Maringo/Hamza",
"Mowlem",
"Umoja I",
"Umoja II",
),
"Kamukunji": (
"Airbase",
"California",
"Eastleigh North",
"Eastleigh South",
"Pumwani",
),
"Kasarani": ("Clay City", "Kasarani", "Mwiki", "Njiru", "Ruai"),
"Kibra": (
"Laini Saba",
"Lindi",
"Makina" "Sarang'ombe",
"Woodley/Kenyatta Golf Course",
),
"Langata": ("Karen", "Mugumo-ini", "Nairobi West", "Nyayo Highrise", "South C"),
"Makadara": ("Harambee", "Makongeni", "Maringo/Hamza", "Viwandani"),
"Mathare": ("Hospital", "Huruma", "Kiamaiko", "Mabatini", "Ngei"),
"Roysambu": ("Githurai", "Kahawa", "Kahawa West", "Roysambu", "Zimmerman"),
"Ruaraka": ("Babandogo", "Korogocho", "Lucky Summer", "Mathare North", "Utalii"),
"Starehe": (
"Landimawe",
"Nairobi Central",
"Nairobi South",
"Ngara",
"Pangani",
"Ziwani/Kariokor",
),
"Westlands": ("Kangemi", "Karura", "Kitisuru", "Mountain View", "Parklands/Highridge"),
},
},
"Kajiado": {
_CONSTITUENCIES: (
"Kajiado Central",
"Kajiado East",
"Kajiado North",
"Kajiado West",
"Magadi",
),
_SUB_COUNTIES: {
"Kajiado Central": (
"Dalalekutuk",
"Ildamat",
"Matapato North",
"Matapato South",
"Purko",
),
"Kajiado East": (
"Imaroro",
"Kaputiei North",
"Kenyawa-poka",
"Kitengela",
"Oloosirkon/Sholinke",
),
"Kajiado North": ("Ngong", "Nkaimurunya", "Olkeri", "Oloolua", "Ongata Rongai"),
"Kajiado West": (
"Ewuaso Oo Nkidong'i",
"Iloodokilani",
"Keekonyokie",
"Magadi",
"Mosiro",
),
"Loitokitok": (
"Entonet/Lenkism",
"Imbrikani/Eselelnkei",
"Kimana",
"Kuku",
"Rombo",
),
},
},
}
COUNTRY_CODES = (
("ABW", "Aruba"),
("AFG", "Afghanistan"),
("AGO", "Angola"),
("AIA", "Anguilla"),
("ALA", "\u00c5land Islands"),
("ALB", "Albania"),
("AND", "Andorra"),
("ARE", "United Arab Emirates"),
("ARG", "Argentina"),
("ARM", "Armenia"),
("ASM", "American Samoa"),
("ATA", "Antarctica"),
("ATF", "French Southern Territories"),
("ATG", "Antigua and Barbuda"),
("AUS", "Australia"),
("AUT", "Austria"),
("AZE", "Azerbaijan"),
("BDI", "Burundi"),
("BEL", "Belgium"),
("BEN", "Benin"),
("BES", "Bonaire, Sint Eustatius and Saba"),
("BFA", "Burkina Faso"),
("BGD", "Bangladesh"),
("BGR", "Bulgaria"),
("BHR", "Bahrain"),
("BHS", "Bahamas"),
("BIH", "Bosnia and Herzegovina"),
("BLM", "Saint Barth\u00e9lemy"),
("BLR", "Belarus"),
("BLZ", "Belize"),
("BMU", "Bermuda"),
("BOL", "Bolivia (Plurinational State of)"),
("BRA", "Brazil"),
("BRB", "Barbados"),
("BRN", "Brunei Darussalam"),
("BTN", "Bhutan"),
("BVT", "Bouvet Island"),
("BWA", "Botswana"),
("CAF", "Central African Republic"),
("CAN", "Canada"),
("CCK", "Cocos (Keeling) Islands"),
("CHE", "Switzerland"),
("CHL", "Chile"),
("CHN", "China"),
("CIV", "C\u00f4te d'Ivoire"),
("CMR", "Cameroon"),
("COD", "Congo (Democratic Republic of the)"),
("COG", "Congo"),
("COK", "Cook Islands"),
("COL", "Colombia"),
("COM", "Comoros"),
("CPV", "Cabo Verde"),
("CRI", "Costa Rica"),
("CUB", "Cuba"),
("CUW", "Cura\u00e7ao"),
("CXR", "Christmas Island"),
("CYM", "Cayman Islands"),
("CYP", "Cyprus"),
("CZE", "Czech Republic"),
("DEU", "Germany"),
("DJI", "Djibouti"),
("DMA", "Dominica"),
("DNK", "Denmark"),
("DOM", "Dominican Republic"),
("DZA", "Algeria"),
("ECU", "Ecuador"),
("EGY", "Egypt"),
("ERI", "Eritrea"),
("ESH", "Western Sahara"),
("ESP", "Spain"),
("EST", "Estonia"),
("ETH", "Ethiopia"),
("FIN", "Finland"),
("FJI", "Fiji"),
("FLK", "Falkland Islands (Malvinas)"),
("FRA", "France"),
("FRO", "Faroe Islands"),
("FSM", "Micronesia (Federated States of)"),
("GAB", "Gabon"),
("GBR", "United Kingdom of Great Britain and Northern Ireland"),
("GEO", "Georgia"),
("GGY", "Guernsey"),
("GHA", "Ghana"),
("GIB", "Gibraltar"),
("GIN", "Guinea"),
("GLP", "Guadeloupe"),
("GMB", "Gambia"),
("GNB", "Guinea-Bissau"),
("GNQ", "Equatorial Guinea"),
("GRC", "Greece"),
("GRD", "Grenada"),
("GRL", "Greenland"),
("GTM", "Guatemala"),
("GUF", "French Guiana"),
("GUM", "Guam"),
("GUY", "Guyana"),
("HKG", "Hong Kong"),
("HMD", "Heard Island and McDonald Islands"),
("HND", "Honduras"),
("HRV", "Croatia"),
("HTI", "Haiti"),
("HUN", "Hungary"),
("IDN", "Indonesia"),
("IMN", "Isle of Man"),
("IND", "India"),
("IOT", "British Indian Ocean Territory"),
("IRL", "Ireland"),
("IRN", "Iran (Islamic Republic of)"),
("IRQ", "Iraq"),
("ISL", "Iceland"),
("ISR", "Israel"),
("ITA", "Italy"),
("JAM", "Jamaica"),
("JEY", "Jersey"),
("JOR", "Jordan"),
("JPN", "Japan"),
("KAZ", "Kazakhstan"),
("KEN", "Kenya"),
("KGZ", "Kyrgyzstan"),
("KHM", "Cambodia"),
("KIR", "Kiribati"),
("KNA", "Saint Kitts and Nevis"),
("KOR", "Korea (Republic of)"),
("KWT", "Kuwait"),
("LAO", "Lao People's Democratic Republic"),
("LBN", "Lebanon"),
("LBR", "Liberia"),
("LBY", "Libya"),
("LCA", "Saint Lucia"),
("LIE", "Liechtenstein"),
("LKA", "Sri Lanka"),
("LSO", "Lesotho"),
("LTU", "Lithuania"),
("LUX", "Luxembourg"),
("LVA", "Latvia"),
("MAC", "Macao"),
("MAF", "Saint Martin (French part)"),
("MAR", "Morocco"),
("MCO", "Monaco"),
("MDA", "Moldova (Republic of)"),
("MDG", "Madagascar"),
("MDV", "Maldives"),
("MEX", "Mexico"),
("MHL", "Marshall Islands"),
("MKD", "Macedonia (the former Yugoslav Republic of)"),
("MLI", "Mali"),
("MLT", "Malta"),
("MMR", "Myanmar"),
("MNE", "Montenegro"),
("MNG", "Mongolia"),
("MNP", "Northern Mariana Islands"),
("MOZ", "Mozambique"),
("MRT", "Mauritania"),
("MSR", "Montserrat"),
("MTQ", "Martinique"),
("MUS", "Mauritius"),
("MWI", "Malawi"),
("MYS", "Malaysia"),
("MYT", "Mayotte"),
("NAM", "Namibia"),
("NCL", "New Caledonia"),
("NER", "Niger"),
("NFK", "Norfolk Island"),
("NGA", "Nigeria"),
("NIC", "Nicaragua"),
("NIU", "Niue"),
("NLD", "Netherlands"),
("NOR", "Norway"),
("NPL", "Nepal"),
("NRU", "Nauru"),
("NZL", "New Zealand"),
("OMN", "Oman"),
("PAK", "Pakistan"),
("PAN", "Panama"),
("PCN", "Pitcairn"),
("PER", "Peru"),
("PHL", "Philippines"),
("PLW", "Palau"),
("PNG", "Papua New Guinea"),
("POL", "Poland"),
("PRI", "Puerto Rico"),
("PRK", "Korea (Democratic People's Republic of)"),
("PRT", "Portugal"),
("PRY", "Paraguay"),
("PSE", "Palestine, State of"),
("PYF", "French Polynesia"),
("QAT", "Qatar"),
("REU", "Reunion"),
("ROU", "Romania"),
("RUS", "Russian Federation"),
("RWA", "Rwanda"),
("SAU", "Saudi Arabia"),
("SDN", "Sudan"),
("SEN", "Senegal"),
("SGP", "Singapore"),
("SGS", "South Georgia and the South Sandwich Islands"),
("SHN", "Saint Helena, Ascension and Tristan da Cunha"),
("SJM", "Svalbard and Jan Mayen"),
("SLB", "Solomon Islands"),
("SLE", "Sierra Leone"),
("SLV", "El Salvador"),
("SMR", "San Marino"),
("SOM", "Somalia"),
("SPM", "Saint Pierre and Miquelon"),
("SRB", "Serbia"),
("SSD", "South Sudan"),
("STP", "Sao Tome and Principe"),
("SUR", "Suriname"),
("SVK", "Slovakia"),
("SVN", "Slovenia"),
("SWE", "Sweden"),
("SWZ", "Swaziland"),
("SXM", "Sint Maarten (Dutch part)"),
("SYC", "Seychelles"),
("SYR", "Syrian Arab Republic"),
("TCA", "Turks and Caicos Islands"),
("TCD", "Chad"),
("TGO", "Togo"),
("THA", "Thailand"),
("TJK", "Tajikistan"),
("TKL", "Tokelau"),
("TKM", "Turkmenistan"),
("TLS", "Timor-Leste"),
("TON", "Tonga"),
("TTO", "Trinidad and Tobago"),
("TUN", "Tunisia"),
("TUR", "Turkey"),
("TUV", "Tuvalu"),
("TWN", "Taiwan, Province of China"),
("TZA", "Tanzania, United Republic of"),
("UGA", "Uganda"),
("UKR", "Ukraine"),
("UMI", "United States Minor Outlying Islands"),
("URY", "Uruguay"),
("USA", "United States of America"),
("UZB", "Uzbekistan"),
("VAT", "Holy See"),
("VCT", "Saint Vincent and the Grenadines"),
("VEN", "Venezuela (Bolivarian Republic of)"),
("VGB", "Virgin Islands (British)"),
("VIR", "Virgin Islands (U.S.)"),
("VNM", "Viet Nam"),
("VUT", "Vanuatu"),
("WLF", "Wallis and Futuna"),
("WSM", "Samoa"),
("YEM", "Yemen"),
("ZAF", "South Africa"),
("ZMB", "Zambia"),
("ZWE", "Zimbabwe"),
)
CONTENT_TYPES = (
("image/png", "PNG"),
("image/jpeg", "JPEG"),
("application/pdf", "PDF"),
("application/vnd.ms-excel", "xlsx"),
("application/msword", "doc"),
(
"application/vnd.openxmlformats-officedocument.wordprocessingml.document.docx",
"docx",
),
(
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"xlsx",
),
("text/plain", "text"),
("video/mp4", "MP4 Video"),
("audio/mp4", "MP4 Audio"),
)
IMAGE_TYPES = ["image/png", "image/jpeg"]
|
from flask import Flask, render_template, request,jsonify
import requests
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense, Dropout
from keras.models import load_model
import os
# Create flask instance
app = Flask(__name__,static_folder='static')
def init():
global model
model = load_model('model.h5')
@app.route('/', methods=['GET', 'POST'])
def home():
return render_template("home.html")
@app.route('/predict', methods = ['POST'])
def predict():
#int_features = [int(x) for x in request.form.values()]
#final_features = [np.array(int_features)]
#class_prediction = model.predict(final_features)
init()
data = request.form  # assumption: the posted form fields hold the numeric input features
features = np.array([[float(x) for x in data.values()]])
class_prediction = int(np.argmax(model.predict(features), axis=-1)[0])  # assumes the model outputs per-class scores
if class_prediction == 0:
product = "WALKING"
elif class_prediction == 1:
product = "WALKING_UPSTAIRS"
elif class_prediction == 2:
product = "WALKING_DOWNSTAIRS"
elif class_prediction == 3:
product = "SITTING"
elif class_prediction == 4:
product = "STANDING"
elif class_prediction == 5:
product = "LAYING"
return render_template('result.html', prediction_text='The Activity of Human {}'.format(product))
'''@app.route('/predict_api',methods=['POST'])
def predict_api():
data = request.get_json(force=True)
prediction = predict()
output = prediction[0]
return jsonify(output)'''
if __name__ == "__main__":
app.run(debug=True)
|
""" Tabulate the results """
import tabulate
import numpy as np
import pandas as pd
headers = ['precision', 'recall', 'accuracy', 'f1']  # 'model' and 'time' are dropped from the results below
# Read result file
df=pd.read_json('results.json',lines=True)
df=df.drop(['model','time'],axis=1)
table=list(df.values)
table.sort(key=lambda r:r[-1], reverse=True)
print(tabulate.tabulate(table,headers=headers))
|
import pytest
from commits import get_min_max_amount_of_commits
@pytest.mark.parametrize('year, expected', [
(None, ('2018-02', '2017-01')), # parse the whole file
(2017, ('2017-11', '2017-01')),
(2018, ('2018-02', '2018-10')),
(2019, ('2019-01', '2019-03')),
])
def test_get_min_max_amount_of_commits(year, expected):
actual = get_min_max_amount_of_commits(year=year)
assert actual == expected
|
import os
from bot import Bot
token = os.getenv("TOKEN")
cvv_uname = os.getenv("CVV_UNAME")
cvv_passwd = os.getenv("CVV_PASSWD")
accu_weather_token = os.getenv("ACCU_WEATHER_TOKEN")
if token == "":
raise Exception("Invalid Token")
print("Started")
Bot(token, cvv_uname, cvv_passwd, accu_weather_token).start()
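# Expected environment variables (values are hypothetical placeholders):
#   TOKEN              - API token for the bot (startup fails when missing)
#   CVV_UNAME          - account username
#   CVV_PASSWD         - account password
#   ACCU_WEATHER_TOKEN - AccuWeather API key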
|
# -*- coding: utf-8 -*-
from protocols.ppatProtocol import Circuit,Gate,Worker,Client
from Crypto.Random.random import randint
import nizkproofs.nizkpok as nizk
import mathTools.invMatrix as iM
import mathTools.field as field
import math
import numpy
class LinearSystemPPATSWorker(Worker):
def makeCircuit(self):
inputs = []
for client in self.clientsList :
inputgate = Gate(label = ('input',str(client)),operandname = '',valuedict={'commitment':None},proofdict={'rangeProof':None})
inputs.append(inputgate)
c = Circuit(inputs=inputs,outputs = [], gates=inputs,label = "Linear System")
self.circuit = c
def fillInputGates(self,maxexp,printing=False):
self.maxexp = maxexp
for i in range(len(self.clientsList)) :
client = self.clientsList[i]
inputgate = self.circuit.inputs[i]
e,cproof,mproof = client.getCCEInput(self.maxexp)
c = self.publicKey.derivCom(e)
inputgate.valuedict['commitment'] = c
inputgate.valuedict['ciphertext'] = e
inputgate.proofdict['consist'] = cproof
inputgate.proofdict['rangeProof'] = mproof
if printing : print "input of",str(client),"received"
def recomInputs(self):
self.recommitInputs(['input'])
for g in self.circuit.gates :
if 'recommitedInput' in g.label :
inputgate = g.inputs[0]
clientId = inputgate.label[1]
g.label = ('recommitedInput',clientId)
def decryptAndCheckProofs(self,printing=False):
self.decryptInputs()
def getargconsistproof(gate):
cce = gate.valuedict['ciphertext']
consistproof = gate.proofdict['consist']
return cce, consistproof
def checkconsistproof(cce,consistproof):
return nizk.consistencyProofCheck(self.publicKey,cce,consistproof)
def getargrangeproof(gate):
com = gate.valuedict['commitment']
rproof = gate.proofdict['rangeProof']
return com,rproof
def checkrangeproof(com,rproof):
return True in nizk.base2RangeProofCheck(self.publicKey,com,rproof)
proofCheckOperations = {'consist':(getargconsistproof,checkconsistproof),'rangeProof':(getargrangeproof,checkrangeproof)}
return self.checkInputsProofs(proofCheckOperations,printing)
def solveSystem(self,B=None,printing=False):
def findFactor(n):
a = int(math.ceil(math.sqrt(n)))
b = int(math.floor(math.sqrt(n)))
while not a==0 or not b==0 :
c = int(n/a)
if c*a == n:
return c,a
else :
a -= 1
d = int(n/b)
if d*b == n:
return d,b
else :
b += 1  # advance the upward-searching divisor candidate
l = len(self.clientsList)
m,n = findFactor(l)
Fp = self.publicKey.PPATSpp.h.ECG.F
mList = []
comList = []
randList = []
for g in self.circuit.gates :
if 'recommitedInput' in g.label :
mes = g.valuedict['message']
com = g.valuedict['commitment']
rand = g.valuedict['openingclear']
mList.append(field.FieldElem(mes,Fp))
comList.append(com)
randList.append(rand)
tempL = numpy.array(mList,dtype=object)
tempcomL = numpy.array(comList,dtype=object)
temprandL = numpy.array(randList,dtype=object)
#print tempL,len(tempL)
#assert k == len(tempL)
self.A = numpy.reshape(tempL,(m,n)) # matrix to invert
self.D = numpy.reshape(tempcomL,(m,n)) # same matrix on commitments
self.R = numpy.reshape(temprandL,(m,n)) # same matrix on openings
if printing :
print('rectangular decomposition of the number of clients', l, 'is', m, ',', n, ', this is the size of matrix A:\n', self.A)
zero = Fp.zero()
one = Fp.one()
Id = iM.eye(m,zero,one)
C,self.Ainv,b = iM.invertmatrix(self.A,Id,zero,one) # D is the inverse matrix of A
if printing : print('the inverse of A is :', self.Ainv)
assert iM.equal(C,Id) is True
assert b is True
assert iM.equal(numpy.dot(self.A,self.Ainv),Id) is True
if B == None :
B = numpy.zeros(m,dtype=object)# array containing the public independent coefficients
for i in range(len(B)):
B[i] = randint(0,2**self.maxexp)
Z = numpy.dot(self.Ainv,B) # Solution of the system of size (m,1)
v = {'solution':Z,'matrixsize':(m,n)}
gate = Gate(label=('output','solution'),valuedict = v)
self.circuit.outputs.append(gate)
self.circuit.gates.append(gate)
#ZCom = Z.copy()
#ZOpe = Z.copy()
#for j in range(n):
#Zj = ZCom[j]
#comZj,rj = self.publicKey.commit(Zj)
#ZCom[j] = comZj
#ZOpe[j] = rj
for i in range(m):
mBpi = 0
comBpi,rBpi = self.publicKey.commit(0,0)
order = self.publicKey.PPATSpp.order
for j in range(n):
mBpi = (mBpi+Z[j].val*self.A[i][j].val)%order
comBpi = comBpi+self.D[i][j]*(Z[j].val%order)
rBpi = (rBpi+self.R[i][j]*Z[j].val)%order
assert self.publicKey.verifyCommitment(comBpi,mBpi,rBpi) is True
v = {'message':mBpi,'commitment':comBpi,'openingclear':rBpi}
gate = Gate(label=('output','opening row',str(i)),valuedict = v)
self.circuit.outputs.append(gate)
self.circuit.gates.append(gate)
self.Z = Z
class LinearSystemPPATSClient(Client):
def getCCEInput(self,maxexp=16):
self.maxexp = maxexp
constraint = 2**self.maxexp
m = randint(0,constraint)
r = self.publicKey.random()
cce, cproof = self.publicKey.encryptwithCproof(m,r)
com = self.publicKey.derivCom(cce)
mproof = nizk.base2RangeProof(self.publicKey,com,m,r,maxexp,False)
self.addInputsSend((cce,com,cproof,mproof))
return cce,cproof,mproof
def addInputsSend(self,arg=None):
if arg == None and self.inputsSendDict == None :
self.inputsSendDict = {'all':{},'com':{}}
elif arg == None :
pass
else :
k = len(self.inputsSendDict['all'])
cce,com,cproof,mproof = arg
self.inputsSendDict['all'][k] = (cce,cproof,mproof)
self.inputsSendDict['com'][k] = com
def checkLSCircuitProofs(self,printing=False):
def condRecomProof(gate):
return 'recommitProof' in gate.proofdict
def getargRecomProof(gate):
com1 = gate.inputs[0].valuedict['commitment']
com2 = gate.valuedict['commitment']
recomproof = gate.proofdict['recommitProof']
return com1,com2, recomproof
def checkRecomProof(com1,com2,recomproof):
m3, o3, com3 = recomproof
res1 = m3 == 0
res2 = self.publicKey.verifyOpening(com3,m3,o3)
res3 = com1 == com2+com3
return res1 and res2 and res3
def condRangeproof(gate):
return 'rangeProof' in gate.proofdict
def getargRangeproof(gate):
if 'rangeProof' in gate.proofdict :
com = gate.valuedict['commitment']
rproof = gate.proofdict['rangeProof']
return com,rproof
else :
return None,None
def checkRangeproof(com,rproof):
if not com==None and not rproof==None :
return True in nizk.base2RangeProofCheck(self.publicKey,com,rproof)
else :
return True
def condmygate(gate):
return (str(self) in gate.label) and gate in self.circuit.inputs
def getcommitment(gate):
com = gate.valuedict['commitment']
return (com,)
def checkmygate(com):
return com in self.inputsSendDict['com'].values()
def condsolgate(gate):
return 'solution' in gate.label
def getsolarg(gate):
openingList = []
for g in self.circuit.outputs :
if 'solution' in g.label:
Z = g.valuedict['solution']
m,n = g.valuedict['matrixsize']
else :
a,b,c = g.label
openingList.append((c,g.valuedict))
openingList.sort()
return Z,m,n,openingList
def checkSolution(Z,m,n,openingList):
comList = []
for g in self.circuit.gates :
if 'recommitedInput' in g.label :
com = g.valuedict['commitment']
comList.append(com)
tempcomL = numpy.array(comList,dtype=object)
D = numpy.reshape(tempcomL,(m,n)) # matrix on commitments
res1 = True
for i in range(m):
comBpi,r = self.publicKey.commit(0,0)
order = self.publicKey.PPATSpp.order
for j in range(n):
comBpi = comBpi+D[i][j]*(Z[j].val%order)
oi = openingList[i]
mi = oi[1]['message']
ri = oi[1]['openingclear']
res1 = res1 and self.publicKey.verifyCommitment(comBpi,mi,ri)
return res1
proofCheckOperationsDict = {'input':{'rangeProof':(condRangeproof,getargRangeproof,checkRangeproof)},'output':{'solution':(condsolgate,getsolarg,checkSolution)},'other':{'recomProof':(condRecomProof,getargRecomProof,checkRecomProof)},'my':{'checkmygate':(condmygate,getcommitment,checkmygate)}}
return self.checkCircuitProofs(proofCheckOperationsDict,printing=printing)
|
"""This module was made to fork the rogue access point."""
import os
import sys
import subprocess
import time
from subprocess import check_output
import roguehostapd.apctrl as apctrl
import roguehostapd.config.hostapdconfig as hostapdconfig
DNS_CONF_PATH = '/tmp/dnsmasq.conf'
DHCP_LEASE = "10.0.0.2,10.0.0.100,12h"
PUBLIC_DNS = "8.8.8.8"
NETWORK_GW_IP = "10.0.0.1"
DN = open(os.devnull, 'w')
NETWORK_MASK = "255.255.255.0"
NETWORK_IP = "10.0.0.0"
class AccessPoint(object):
"""This class forks the softAP."""
# Instance will be stored here.
__instance = None
@staticmethod
def get_instance():
"""Return the instance of the class or create new if none exists."""
if AccessPoint.__instance is None:
AccessPoint()
return AccessPoint.__instance
def __init__(self):
# type: () -> None
"""Initialize the class."""
if AccessPoint.__instance:
raise Exception("Error: AccessPoint class is a singleton!")
else:
AccessPoint.__instance = self
self.interface = ""
self.internet_interface = ""
self.channel = ""
self.essid = ""
self.presharedkey = ""
self.force_hostapd = False
# roguehostapd object
self.hostapd_object = None
self.deny_mac_addrs = []
self.dns_conf_path = DNS_CONF_PATH
def start_dhcp_dns(self):
# type: () -> None
"""Start the dhcp server."""
config = ('no-resolv\n' 'interface=%s\n' 'dhcp-range=%s\n')
with open(self.dns_conf_path, 'w') as dhcpconf:
dhcpconf.write(config % (self.interface, DHCP_LEASE))
with open(self.dns_conf_path, 'a+') as dhcpconf:
if self.internet_interface:
dhcpconf.write("server=%s" % (PUBLIC_DNS, ))
else:
# dhcpconf.write("address=/bing.com/127.0.0.1\n")
# dhcpconf.write("address=/www.bing.com/127.0.0.1\n")
# dhcpconf.write("address=/http.com/10.0.0.1\n")
# dhcpconf.write("address=/www.http.com/10.0.0.1\n")
# dhcpconf.write("address=/goole.com/127.0.0.1\n")
# dhcpconf.write("address=/www.google.com/127.0.0.1\n")
# dhcpconf.write("address=/google.com/172.217.5.78\n")
# dhcpconf.write("address=/clients3.google.com/172.217.11.174\n")
dhcpconf.write("address=/#/%s " % (NETWORK_GW_IP, ))
# catch the exception if dnsmasq is not installed
try:
subprocess.Popen(
['dnsmasq', '-C', self.dns_conf_path],
stdout=subprocess.PIPE,
stderr=sys.stdout)
except OSError:
print("[{}!{}] dnsmasq is not installed!".format(
R, W))
raise Exception
subprocess.Popen(
['ifconfig', str(self.interface), 'mtu', '1400'],
stdout=DN,
stderr=DN)
subprocess.Popen(
[
'ifconfig',
str(self.interface), 'up', NETWORK_GW_IP, 'netmask',
NETWORK_MASK
],
stdout=DN,
stderr=DN)
# Give it some time to avoid "SIOCADDRT: Network is unreachable"
time.sleep(1)
# Make sure that we have set the network properly.
proc = subprocess.check_output(['ifconfig', str(self.interface)])
if NETWORK_GW_IP not in proc.decode('utf-8'):
return False
subprocess.call(('route add -net %s netmask %s gw %s' %
(NETWORK_IP, NETWORK_MASK,
NETWORK_GW_IP)),
shell=True)
def start(self, disable_karma=False):
"""Start the softAP."""
# create the configuration for roguehostapd
hostapd_config = {
"ssid": self.essid,
"interface": self.interface,
"channel": self.channel,
"deny_macs": self.deny_mac_addrs,
}
if self.presharedkey:
hostapd_config['wpa2password'] = self.presharedkey
self.hostapd_object = apctrl.Hostapd()
if not self.force_hostapd:
try:
# Enable KARMA attack if needed
if not disable_karma:
hostapd_config["karma_enable"] = 1
# Enable WPSPBC KARMA attack
hostapd_config["wpspbc"] = True
hostapd_options = {
'mute': True,
'timestamp': False,
"eloop_term_disable": True
}
self.hostapd_object.start(hostapd_config, hostapd_options)
except KeyboardInterrupt:
raise Exception
except BaseException:
print(
"[{}!{}] Roguehostapd is not installed in the system! Please install"
" roguehostapd manually (https://github.com/wifiphisher/roguehostapd)"
" and rerun the script. Otherwise, you can run the tool with the"
" --force-hostapd option to use hostapd but please note that using"
" Wifiphisher with hostapd instead of roguehostapd will turn off many"
" significant features of the tool.")
# just raise exception when hostapd is not installed
raise Exception
else:
# use the hostapd on the users' system
self.hostapd_object.create_hostapd_conf_file(hostapd_config, {})
try:
self.hostapd_object = subprocess.Popen(
['hostapd', hostapdconfig.ROGUEHOSTAPD_RUNTIME_CONFIGPATH],
stdout=DN,
stderr=DN)
except OSError:
print(
"[{}!{}] hostapd is not installed in the system! Please download it"
" using your favorite package manager (e.g. apt-get install hostapd) and "
"rerun the script.")
# just raise exception when hostapd is not installed
raise Exception
time.sleep(2)
if self.hostapd_object.poll() is not None:
print("[{}!{}] hostapd failed to lunch!")
raise Exception
def on_exit(self):
# type: () -> None
"""Clean up the resoures when exits."""
subprocess.call('pkill dnsmasq', shell=True)
time.sleep(0.5)
subprocess.Popen(['airmon-ng', 'start', sys.argv[1]], stdout=DN, stderr=DN)
time.sleep(2)
subprocess.Popen(['airmon-ng', 'stop', sys.argv[1]], stdout=DN, stderr=DN)
try:
self.hostapd_object.stop()
except BaseException:
subprocess.call('pkill hostapd', shell=True)
if os.path.isfile(hostapdconfig.ROGUEHOSTAPD_RUNTIME_CONFIGPATH):
os.remove(hostapdconfig.ROGUEHOSTAPD_RUNTIME_CONFIGPATH)
if os.path.isfile(hostapdconfig.ROGUEHOSTAPD_DENY_MACS_CONFIGPATH):
os.remove(hostapdconfig.ROGUEHOSTAPD_DENY_MACS_CONFIGPATH)
if os.path.isfile('/var/lib/misc/dnsmasq.leases'):
os.remove('/var/lib/misc/dnsmasq.leases')
if os.path.isfile('/tmp/dhcpd.conf'):
os.remove('/tmp/dhcpd.conf')
        # Sleep for 2 seconds to make sure all hostapd processes are killed
time.sleep(2)
access_point = AccessPoint()
access_point.interface = sys.argv[1]
access_point.essid = sys.argv[2]
access_point.channel = sys.argv[3]
access_point.start(bool(sys.argv[4]))
# access_point.start_dhcp_dns()
try:
time.sleep(int(sys.argv[5])*60)
access_point.on_exit()
except KeyboardInterrupt:
access_point.on_exit()
|
# Generated by Django 3.2.7 on 2021-12-30 16:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ask_a_mentor', '0003_auto_20211230_2333'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='time',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='post',
name='time',
field=models.DateTimeField(auto_now_add=True),
),
]
|
from math import floor, ceil
HVal=2.0
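# Maximum glyph index used by the emoji charts: every value is scaled to an integer between 0 and HVal.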
def _normalize_vals(vals):
    # The emoji charts support at most 5 data points.
    if len(vals) > 5:
        raise ValueError("expected at most 5 values")
    vals = [float(i) for i in vals]
    mn = min(vals)
    # Shift the series so the smallest value becomes 0.
    norm_vals = [v - mn for v in vals]
    # Left-pad with the first value so there are always 5 points.
    starter = norm_vals[0]
    for _ in range(5 - len(norm_vals)):
        norm_vals.insert(0, starter)
    mx = max(norm_vals)
    return mx, norm_vals
def chartmoji(vals):
mx,norm_vals = _normalize_vals(vals)
return ":chart_ln{}:".format("".join(
[str(int(round( (float(v)/mx) * HVal ))) for v in norm_vals ]
))
def barmoji(vals):
mx,norm_vals = _normalize_vals(vals)
return ":chart_bar{}:".format("".join(
[str(int(round( (float(v)/mx) * HVal ))) for v in norm_vals ]
))
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ImageAccessArgs', 'ImageAccess']
@pulumi.input_type
class ImageAccessArgs:
def __init__(__self__, *,
image_id: pulumi.Input[str],
member_id: pulumi.Input[str],
region: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ImageAccess resource.
:param pulumi.Input[str] image_id: The image ID.
:param pulumi.Input[str] member_id: The member ID, e.g. the target project ID.
:param pulumi.Input[str] region: The region in which to obtain the V2 Glance client.
A Glance client is needed to manage Image members. If omitted, the `region`
argument of the provider is used. Changing this creates a new resource.
:param pulumi.Input[str] status: The member proposal status. Optional if admin wants to
force the member proposal acceptance. Can either be `accepted`, `rejected` or
               `pending`. Defaults to `pending`. Forbidden for non-admin users.
"""
pulumi.set(__self__, "image_id", image_id)
pulumi.set(__self__, "member_id", member_id)
if region is not None:
pulumi.set(__self__, "region", region)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="imageId")
def image_id(self) -> pulumi.Input[str]:
"""
The image ID.
"""
return pulumi.get(self, "image_id")
@image_id.setter
def image_id(self, value: pulumi.Input[str]):
pulumi.set(self, "image_id", value)
@property
@pulumi.getter(name="memberId")
def member_id(self) -> pulumi.Input[str]:
"""
The member ID, e.g. the target project ID.
"""
return pulumi.get(self, "member_id")
@member_id.setter
def member_id(self, value: pulumi.Input[str]):
pulumi.set(self, "member_id", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region in which to obtain the V2 Glance client.
A Glance client is needed to manage Image members. If omitted, the `region`
argument of the provider is used. Changing this creates a new resource.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The member proposal status. Optional if admin wants to
force the member proposal acceptance. Can either be `accepted`, `rejected` or
        `pending`. Defaults to `pending`. Forbidden for non-admin users.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class _ImageAccessState:
def __init__(__self__, *,
created_at: Optional[pulumi.Input[str]] = None,
image_id: Optional[pulumi.Input[str]] = None,
member_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
schema: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
updated_at: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ImageAccess resources.
:param pulumi.Input[str] created_at: The date the image access was created.
:param pulumi.Input[str] image_id: The image ID.
:param pulumi.Input[str] member_id: The member ID, e.g. the target project ID.
:param pulumi.Input[str] region: The region in which to obtain the V2 Glance client.
A Glance client is needed to manage Image members. If omitted, the `region`
argument of the provider is used. Changing this creates a new resource.
:param pulumi.Input[str] schema: The member schema.
:param pulumi.Input[str] status: The member proposal status. Optional if admin wants to
force the member proposal acceptance. Can either be `accepted`, `rejected` or
               `pending`. Defaults to `pending`. Forbidden for non-admin users.
:param pulumi.Input[str] updated_at: The date the image access was last updated.
"""
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if image_id is not None:
pulumi.set(__self__, "image_id", image_id)
if member_id is not None:
pulumi.set(__self__, "member_id", member_id)
if region is not None:
pulumi.set(__self__, "region", region)
if schema is not None:
pulumi.set(__self__, "schema", schema)
if status is not None:
pulumi.set(__self__, "status", status)
if updated_at is not None:
pulumi.set(__self__, "updated_at", updated_at)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[pulumi.Input[str]]:
"""
The date the image access was created.
"""
return pulumi.get(self, "created_at")
@created_at.setter
def created_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_at", value)
@property
@pulumi.getter(name="imageId")
def image_id(self) -> Optional[pulumi.Input[str]]:
"""
The image ID.
"""
return pulumi.get(self, "image_id")
@image_id.setter
def image_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_id", value)
@property
@pulumi.getter(name="memberId")
def member_id(self) -> Optional[pulumi.Input[str]]:
"""
The member ID, e.g. the target project ID.
"""
return pulumi.get(self, "member_id")
@member_id.setter
def member_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "member_id", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region in which to obtain the V2 Glance client.
A Glance client is needed to manage Image members. If omitted, the `region`
argument of the provider is used. Changing this creates a new resource.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter
def schema(self) -> Optional[pulumi.Input[str]]:
"""
The member schema.
"""
return pulumi.get(self, "schema")
@schema.setter
def schema(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schema", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The member proposal status. Optional if admin wants to
force the member proposal acceptance. Can either be `accepted`, `rejected` or
        `pending`. Defaults to `pending`. Forbidden for non-admin users.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> Optional[pulumi.Input[str]]:
"""
The date the image access was last updated.
"""
return pulumi.get(self, "updated_at")
@updated_at.setter
def updated_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "updated_at", value)
class ImageAccess(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
image_id: Optional[pulumi.Input[str]] = None,
member_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages members for the shared OpenStack Glance V2 Image within the source
project, which owns the Image.
## Example Usage
### Unprivileged user
Create a shared image and propose a membership to the
`bed6b6cbb86a4e2d8dc2735c2f1000e4` project ID.
```python
import pulumi
import pulumi_openstack as openstack
rancheros = openstack.images.Image("rancheros",
container_format="bare",
disk_format="qcow2",
image_source_url="https://releases.rancher.com/os/latest/rancheros-openstack.img",
properties={
"key": "value",
},
visibility="shared")
rancheros_member = openstack.images.ImageAccess("rancherosMember",
image_id=rancheros.id,
member_id="bed6b6cbb86a4e2d8dc2735c2f1000e4")
```
### Privileged user
Create a shared image and set a membership to the
`bed6b6cbb86a4e2d8dc2735c2f1000e4` project ID.
```python
import pulumi
import pulumi_openstack as openstack
rancheros = openstack.images.Image("rancheros",
container_format="bare",
disk_format="qcow2",
image_source_url="https://releases.rancher.com/os/latest/rancheros-openstack.img",
properties={
"key": "value",
},
visibility="shared")
rancheros_member = openstack.images.ImageAccess("rancherosMember",
image_id=rancheros.id,
member_id="bed6b6cbb86a4e2d8dc2735c2f1000e4",
status="accepted")
```
## Import
Image access can be imported using the `image_id` and the `member_id`, separated by a slash, e.g.
```sh
$ pulumi import openstack:images/imageAccess:ImageAccess openstack_images_image_access_v2 89c60255-9bd6-460c-822a-e2b959ede9d2/bed6b6cbb86a4e2d8dc2735c2f1000e4
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] image_id: The image ID.
:param pulumi.Input[str] member_id: The member ID, e.g. the target project ID.
:param pulumi.Input[str] region: The region in which to obtain the V2 Glance client.
A Glance client is needed to manage Image members. If omitted, the `region`
argument of the provider is used. Changing this creates a new resource.
:param pulumi.Input[str] status: The member proposal status. Optional if admin wants to
force the member proposal acceptance. Can either be `accepted`, `rejected` or
               `pending`. Defaults to `pending`. Forbidden for non-admin users.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ImageAccessArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages members for the shared OpenStack Glance V2 Image within the source
project, which owns the Image.
## Example Usage
### Unprivileged user
Create a shared image and propose a membership to the
`bed6b6cbb86a4e2d8dc2735c2f1000e4` project ID.
```python
import pulumi
import pulumi_openstack as openstack
rancheros = openstack.images.Image("rancheros",
container_format="bare",
disk_format="qcow2",
image_source_url="https://releases.rancher.com/os/latest/rancheros-openstack.img",
properties={
"key": "value",
},
visibility="shared")
rancheros_member = openstack.images.ImageAccess("rancherosMember",
image_id=rancheros.id,
member_id="bed6b6cbb86a4e2d8dc2735c2f1000e4")
```
### Privileged user
Create a shared image and set a membership to the
`bed6b6cbb86a4e2d8dc2735c2f1000e4` project ID.
```python
import pulumi
import pulumi_openstack as openstack
rancheros = openstack.images.Image("rancheros",
container_format="bare",
disk_format="qcow2",
image_source_url="https://releases.rancher.com/os/latest/rancheros-openstack.img",
properties={
"key": "value",
},
visibility="shared")
rancheros_member = openstack.images.ImageAccess("rancherosMember",
image_id=rancheros.id,
member_id="bed6b6cbb86a4e2d8dc2735c2f1000e4",
status="accepted")
```
## Import
Image access can be imported using the `image_id` and the `member_id`, separated by a slash, e.g.
```sh
$ pulumi import openstack:images/imageAccess:ImageAccess openstack_images_image_access_v2 89c60255-9bd6-460c-822a-e2b959ede9d2/bed6b6cbb86a4e2d8dc2735c2f1000e4
```
:param str resource_name: The name of the resource.
:param ImageAccessArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ImageAccessArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
image_id: Optional[pulumi.Input[str]] = None,
member_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ImageAccessArgs.__new__(ImageAccessArgs)
if image_id is None and not opts.urn:
raise TypeError("Missing required property 'image_id'")
__props__.__dict__["image_id"] = image_id
if member_id is None and not opts.urn:
raise TypeError("Missing required property 'member_id'")
__props__.__dict__["member_id"] = member_id
__props__.__dict__["region"] = region
__props__.__dict__["status"] = status
__props__.__dict__["created_at"] = None
__props__.__dict__["schema"] = None
__props__.__dict__["updated_at"] = None
super(ImageAccess, __self__).__init__(
'openstack:images/imageAccess:ImageAccess',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
created_at: Optional[pulumi.Input[str]] = None,
image_id: Optional[pulumi.Input[str]] = None,
member_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
schema: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
updated_at: Optional[pulumi.Input[str]] = None) -> 'ImageAccess':
"""
Get an existing ImageAccess resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] created_at: The date the image access was created.
:param pulumi.Input[str] image_id: The image ID.
:param pulumi.Input[str] member_id: The member ID, e.g. the target project ID.
:param pulumi.Input[str] region: The region in which to obtain the V2 Glance client.
A Glance client is needed to manage Image members. If omitted, the `region`
argument of the provider is used. Changing this creates a new resource.
:param pulumi.Input[str] schema: The member schema.
:param pulumi.Input[str] status: The member proposal status. Optional if admin wants to
force the member proposal acceptance. Can either be `accepted`, `rejected` or
               `pending`. Defaults to `pending`. Forbidden for non-admin users.
:param pulumi.Input[str] updated_at: The date the image access was last updated.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ImageAccessState.__new__(_ImageAccessState)
__props__.__dict__["created_at"] = created_at
__props__.__dict__["image_id"] = image_id
__props__.__dict__["member_id"] = member_id
__props__.__dict__["region"] = region
__props__.__dict__["schema"] = schema
__props__.__dict__["status"] = status
__props__.__dict__["updated_at"] = updated_at
return ImageAccess(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
The date the image access was created.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="imageId")
def image_id(self) -> pulumi.Output[str]:
"""
The image ID.
"""
return pulumi.get(self, "image_id")
@property
@pulumi.getter(name="memberId")
def member_id(self) -> pulumi.Output[str]:
"""
The member ID, e.g. the target project ID.
"""
return pulumi.get(self, "member_id")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
The region in which to obtain the V2 Glance client.
A Glance client is needed to manage Image members. If omitted, the `region`
argument of the provider is used. Changing this creates a new resource.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter
def schema(self) -> pulumi.Output[str]:
"""
The member schema.
"""
return pulumi.get(self, "schema")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The member proposal status. Optional if admin wants to
force the member proposal acceptance. Can either be `accepted`, `rejected` or
        `pending`. Defaults to `pending`. Forbidden for non-admin users.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> pulumi.Output[str]:
"""
The date the image access was last updated.
"""
return pulumi.get(self, "updated_at")
|
from statistical_modeling.distributions.discrete_uniform import (
Distribution,
Mean,
Variance
)
from typing import Final
import unittest
class TestDiscreteUniform(unittest.TestCase):
d: Final = Distribution(1, 100)
def test_distribution(self):
self.assertEqual(self.d.a, 1)
self.assertEqual(self.d.b, 100)
def test_mean(self):
self.assertAlmostEqual(
Mean(self.d),
50.5
)
def test_variance(self):
self.assertAlmostEqual(
Variance(self.d),
833.25
)
|
images_path = ""
|
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import os
import sys
import re
import tensorflow as tf
tf.keras.backend.clear_session()
from callbacks import CallBacks
from model_factory import GetModel
from preprocess import Preprocess, format_example, format_example_tf, update_status
###############################################################################
# Input Arguments
###############################################################################
parser = argparse.ArgumentParser(description='Run a Siamese Network with a triplet loss on a folder of images.')
parser.add_argument("-t", "--image_dir_train",
dest='image_dir_train',
required=True,
help="File path ending in folders that are to be used for model training")
parser.add_argument("-v", "--image_dir_validation",
dest='image_dir_validation',
default=None,
help="File path ending in folders that are to be used for model validation")
parser.add_argument("-m", "--model-name",
dest='model_name',
default='VGG16',
choices=['DenseNet121',
'DenseNet169',
'DenseNet201',
'InceptionResNetV2',
'InceptionV3',
'MobileNet',
'MobileNetV2',
'NASNetLarge',
'NASNetMobile',
'ResNet50',
'VGG16',
'VGG19',
'Xception'],
help="Models available from tf.keras")
parser.add_argument("-o", "--optimizer-name",
dest='optimizer',
default='Adam',
choices=['Adadelta',
'Adagrad',
'Adam',
'Adamax',
'Ftrl',
'Nadam',
'RMSprop',
'SGD'],
help="Optimizers from tf.keras")
parser.add_argument("-p", "--patch_size",
dest='patch_size',
help="Patch size to use for training",
default=256, type=int)
parser.add_argument("-l", "--log_dir",
dest='log_dir',
default='log_dir',
help="Place to store the tensorboard logs")
parser.add_argument("-r", "--learning-rate",
dest='lr',
help="Learning rate",
default=0.0001, type=float)
parser.add_argument("-L", "--loss-function",
dest='loss_function',
default='BinaryCrossentropy',
choices=['SparseCategoricalCrossentropy',
'CategoricalCrossentropy',
'BinaryCrossentropy'],
help="Loss functions from tf.keras")
parser.add_argument("-e", "--num-epochs",
dest='num_epochs',
help="Number of epochs to use for training",
default=10, type=int)
parser.add_argument("-b", "--batch-size",
dest='BATCH_SIZE',
help="Number of batches to use for training",
default=1, type=int)
parser.add_argument("-w", "--num-workers",
dest='NUM_WORKERS',
help="Number of workers to use for training",
default=1, type=int)
parser.add_argument("--use-multiprocessing",
help="Whether or not to use multiprocessing",
const=True, default=False, nargs='?',
type=bool)
parser.add_argument("-V", "--verbose",
dest="logLevel",
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default="DEBUG",
help="Set the logging level")
parser.add_argument("-F", "--filetype",
dest="filetype",
choices=['tfrecords', 'images'],
default="images",
help="Set the logging level")
parser.add_argument("--tfrecord_image",
dest="tfrecord_image",
default="image/encoded",
help="Set the logging level")
parser.add_argument("--tfrecord_label",
dest="tfrecord_label",
default="null",
help="Set the logging level")
parser.add_argument("--train_num_layers",
dest="train_num_layers",
default=False,
help="Set the logging level")
parser.add_argument("--prev_checkpoint",
dest="prev_checkpoint",
default=False,
help="Set the logging level")
args = parser.parse_args()
logging.basicConfig(stream=sys.stderr, level=args.logLevel,
format='%(name)s (%(levelname)s): %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(args.logLevel)
###############################################################################
# Begin priming the data generation pipeline
###############################################################################
# Get Training and Validation data
train_data = Preprocess(args.image_dir_train, args.filetype, args.tfrecord_image, args.tfrecord_label,
loss_function=args.loss_function)
logger.debug('Completed training dataset Preprocess')
# AUTOTUNE = tf.data.experimental.AUTOTUNE
AUTOTUNE = 1000
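# Fixed number of parallel calls for the dataset map operations; tf.data's AUTOTUNE (commented out above) could be used instead.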
# Update status to Training for map function in the preprocess
update_status(True)
# If input datatype is tfrecords or images
if train_data.filetype != "tfrecords":
t_path_ds = tf.data.Dataset.from_tensor_slices(train_data.files)
t_image_ds = t_path_ds.map(format_example, num_parallel_calls=AUTOTUNE)
t_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(train_data.labels, tf.int64))
t_image_label_ds = tf.data.Dataset.zip((t_image_ds, t_label_ds))
train_ds = t_image_label_ds.shuffle(buffer_size=train_data.min_images).repeat()
else:
t_path_ds = tf.data.TFRecordDataset(train_data.files)
t_image_ds = t_path_ds.map(format_example_tf, num_parallel_calls=AUTOTUNE)
    # min_images should be updated from the number of tfrecord files to the number of images
num_image = 0
for image, label in t_image_ds:
num_image = num_image + 1
train_data.min_images = num_image
t_image_label_ds = tf.data.Dataset.zip(t_image_ds)
# adding these additional steps to avoid shuffling on images and shuffle on imagepaths
t_image_ds = t_path_ds.shuffle(buffer_size=train_data.min_images).repeat().map(format_example_tf,
num_parallel_calls=AUTOTUNE)
train_ds = tf.data.Dataset.zip(t_image_ds)
train_ds = train_ds.batch(args.BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
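# steps_per_epoch is chosen so one epoch passes over every training image once at the configured batch size.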
training_steps = int(train_data.min_images / args.BATCH_SIZE)
logger.debug('Completed Training dataset')
if args.image_dir_validation:
# Get Validation data
# Update status to Testing for map function in the preprocess
update_status(False)
validation_data = Preprocess(args.image_dir_validation, args.filetype, args.tfrecord_image, args.tfrecord_label,
loss_function=args.loss_function)
logger.debug('Completed test dataset Preprocess')
if validation_data.filetype != "tfrecords":
v_path_ds = tf.data.Dataset.from_tensor_slices(validation_data.files)
v_image_ds = v_path_ds.map(format_example, num_parallel_calls=AUTOTUNE)
v_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(validation_data.labels, tf.int64))
v_image_label_ds = tf.data.Dataset.zip((v_image_ds, v_label_ds))
else:
v_path_ds = tf.data.TFRecordDataset(validation_data.files)
v_image_ds = v_path_ds.map(format_example_tf, num_parallel_calls=AUTOTUNE)
        # min_images should be updated from the number of tfrecord files to the number of images
num_image = 0
for image, label in v_image_ds:
num_image = num_image + 1
# print(num_image)
# sys.exit(0)
validation_data.min_images = num_image
v_image_label_ds = tf.data.Dataset.zip(v_image_ds)
validation_ds = v_image_label_ds.shuffle(buffer_size=validation_data.min_images).repeat()
validation_ds = validation_ds.batch(args.BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
validation_steps = int(validation_data.min_images / args.BATCH_SIZE)
logger.debug('Completed Validation dataset')
else:
validation_ds = None
validation_steps = None
out_dir = os.path.join(args.log_dir,
args.model_name + '_' + args.optimizer + '_' + str(args.lr) + '-' + args.loss_function)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
checkpoint_path = os.path.join(out_dir, "cp-{epoch:04d}.ckpt")
checkpoint_dir = os.path.dirname(checkpoint_path)
###############################################################################
# Build the model
###############################################################################
logger.debug('Mirror initialized')
GPU = True
if GPU is True:
# This must be fixed for multi-GPU
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
if args.train_num_layers:
m = GetModel(model_name=args.model_name, img_size=args.patch_size, classes=train_data.classes,
num_layers=int(args.train_num_layers))
else:
m = GetModel(model_name=args.model_name, img_size=args.patch_size, classes=train_data.classes)
# logger.debug('Model constructed')
model = m.compile_model(args.optimizer, args.lr, args.loss_function)
# inside scope
logger.debug('Model compiled')
latest = tf.train.latest_checkpoint(checkpoint_dir)
if not latest:
if args.prev_checkpoint:
model.load_weights(args.prev_checkpoint)
logger.debug('Loading weights from ' + args.prev_checkpoint)
model.save_weights(checkpoint_path.format(epoch=0))
latest = tf.train.latest_checkpoint(checkpoint_dir)
ini_epoch = int(re.findall(r'\b\d+\b', os.path.basename(latest))[0])
logger.debug('Loading initialized model')
model.load_weights(latest)
logger.debug('Loading weights from ' + latest)
logger.debug('Completed loading initialized model')
cb = CallBacks(learning_rate=args.lr, log_dir=out_dir, optimizer=args.optimizer)
logger.debug('Model image saved')
model.fit(train_ds,
steps_per_epoch=training_steps,
epochs=args.num_epochs,
callbacks=cb.get_callbacks(),
validation_data=validation_ds,
validation_steps=validation_steps,
class_weight=None,
max_queue_size=1000,
workers=args.NUM_WORKERS,
use_multiprocessing=args.use_multiprocessing,
shuffle=False, initial_epoch=ini_epoch
)
model.save(os.path.join(out_dir, 'my_model.h5'))
else:
if args.train_num_layers:
m = GetModel(model_name=args.model_name, img_size=args.patch_size, classes=train_data.classes,
num_layers=int(args.train_num_layers))
else:
m = GetModel(model_name=args.model_name, img_size=args.patch_size, classes=train_data.classes)
logger.debug('Model constructed')
model = m.compile_model(args.optimizer, args.lr, args.loss_function)
logger.debug('Model compiled')
model.save_weights(checkpoint_path.format(epoch=0))
latest = tf.train.latest_checkpoint(checkpoint_dir)
if not latest:
model.save_weights(checkpoint_path.format(epoch=0))
latest = tf.train.latest_checkpoint(checkpoint_dir)
ini_epoch = int(re.findall(r'\b\d+\b', os.path.basename(latest))[0])
logger.debug('Loading initialized model')
model.load_weights(latest)
logger.debug('Loading weights from ' + latest)
cb = CallBacks(learning_rate=args.lr, log_dir=out_dir, optimizer=args.optimizer)
logger.debug('Model image saved')
model.fit(train_ds,
steps_per_epoch=training_steps,
epochs=args.num_epochs,
callbacks=cb.get_callbacks(),
validation_data=validation_ds,
validation_steps=validation_steps,
class_weight=None,
max_queue_size=1000,
workers=args.NUM_WORKERS,
use_multiprocessing=args.use_multiprocessing,
shuffle=False, initial_epoch=ini_epoch)
model.save(os.path.join(out_dir, 'my_model.h5'))
|
import pickle
from ds import *
import pandas as pd
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
import numpy as np
from sklearn.impute import SimpleImputer
data_as_list = []
pickle_files = ['data/dataset_0_10000.pkl', 'data/dataset_10000_20000.pkl', 'data/dataset_20000_30000.pkl', 'data/dataset_30000_40000.pkl']
for pickle_file in pickle_files:
pickle_off = open(pickle_file, "rb")
emp = pickle.load(pickle_off)
title_vec_len = emp[0].features.title.vector.shape[0]
story_vec_len = emp[0].features.story.vector.shape[0]
for dataobject in emp:
category = dataobject.features.category
goal = dataobject.features.goal
created = dataobject.features.created
title_vec = dataobject.features.title.vector
story_vec = dataobject.features.story.vector
amt_raised = dataobject.result
feature_vec = [category, goal, created]
feature_vec.extend(title_vec)
feature_vec.extend(story_vec)
feature_vec.append(amt_raised)
data_as_list.append(feature_vec)
headings = ["category", "goal", "created"]
headings.extend(["title_{}".format(i) for i in range(0, title_vec_len)])
headings.extend(["story_{}".format(i) for i in range(0, story_vec_len)])
headings.append("amt_raised")
df = pd.DataFrame(data_as_list, columns = headings)
df['category'] = pd.Categorical(df['category'])
dfDummies = pd.get_dummies(df['category'], prefix='category')
df = pd.concat([df, dfDummies], axis=1)
df.to_pickle("data/output_df.pkl")
print(len(df))
df.dropna(axis=0, how='any', thresh=None, subset=None, inplace=True)
print(len(df))
predictor_variable_indexes = [i for i in range(1,194+1)]
predictor_variable_indexes.extend([i for i in range(196, 214+1)])
response_variable_index = 195
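# Select columns by position: skip the raw category string (column 0), use columns 1-194 plus the one-hot
# category dummies (columns 196-214) as features, and predict amt_raised (column 195).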
X = df.iloc[:, predictor_variable_indexes].values
y = df.iloc[:, response_variable_index].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
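# Fit the scaler on the training split only and reuse it for the test split to avoid data leakage.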
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#regressor = RandomForestRegressor(n_estimators=300, random_state=0, n_jobs=10, verbose=1)
regressor = MLPRegressor(hidden_layer_sizes=(100,), max_iter=100000, verbose=True)
print("Started training")
regressor.fit(X_train, y_train)
print("Finished training")
y_pred = regressor.predict(X_test)
pickle.dump( regressor, open("data/regressor.pkl", "wb" ) )
pickle.dump( X_train, open("data/X_train.pkl", "wb" ) )
pickle.dump( X_test, open("data/X_test.pkl", "wb" ) )
pickle.dump( y_train, open("data/y_train.pkl", "wb" ) )
pickle.dump( y_test, open("data/y_test.pkl", "wb" ) )
pickle.dump( y_pred, open("data/y_pred.pkl", "wb" ) )
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("Done")
|
'''
Approach:
1. Find the PTT Beauty board URL and fetch the board's page source.
2. From the board's page source, extract the list of article URLs with a regex.
3. From each article's page source, extract the list of image URLs with a regex.
4. Download and save each image from its URL (write the binary data to a .jpg file).
'''
from selenium import webdriver
from selenium.webdriver.common.by import By
import requests
import re
import os
from urllib.request import urlretrieve
from urllib import request, error
browser = webdriver.PhantomJS()
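# Note: PhantomJS support was removed from newer Selenium releases; a headless Chrome/Firefox driver may be needed instead.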
url = "https://www.ptt.cc/bbs/beauty/index.html"
browser.get(url)
browser.implicitly_wait(3)
# Fetch the page source with selenium whenever possible; requests may get blocked
content_beauty = browser.page_source
'''
# Deleted articles should not be read
# topic is crawled with selenium and returned as a list of elements
topic = browser.find_elements_by_class_name('title')
'''
# Crawl the article links (topic_url) with a regex; returns a list of strings
topic_url = re.findall('mark.*?title.*?href="(.*?)">.*?</a>', content_beauty, re.S)
# Verify that the article list was fetched
'''
# Deleted articles should not be read
print(len(topic))  # this is a list of elements
'''
print("PTT_Beauty article list\n")
print(topic_url)  # this is a list
print("\n")
# Crawl len(topic_url)-4 articles to skip the pinned board-rules posts at the end
for i in range(0, len(topic_url)-4):
    # topic is a list of elements, so .text is needed to read the content
browser.get("https://www.ptt.cc"+topic_url[i])
topic = browser.find_elements_by_class_name('article-meta-value')
    # Contains author [0], board [1], title [2], time [3]
    print("Title: "+topic[2].text+" Author: "+topic[0].text+" Time: "+topic[3].text+"\n")
    # Get the article's page source; using selenium is safer
content_topic = browser.page_source
    # Extract the image URLs (pic_url) in the article and store them in a list
pic_url = re.findall('<a href=".*?" target="_blank.*?nofollow.*?">(.*?)</a>', content_topic, re.S)
    # Iterate over len(pic_url)-1 links to skip the pinned link at the bottom
    for j in range(0, len(pic_url)-1):
        print(pic_url[j]+"\n")  # print every image link to confirm the URLs are correct
        pic = requests.get(pic_url[j])
        # Use the article title as the filename and the loop index as a suffix; end with .jpg so the image opens easily
        pic_title = topic[2].text+"_"+str(j)+".jpg"
        # Download the image and write its binary content to the file
        with open(pic_title, 'wb') as f:
            f.write(pic.content)
    # Files are saved in the directory where the Python program is run (not where the script itself lives)
browser.close()
|
# Takes a .json input file and formats it to a csv file
# Usage: python json_to_csv.py <input file>.json <output_file>.csv
import json
import sys
import html
# Simple argument handling
input_file = sys.argv[1]
output_file = sys.argv[2]
with open(input_file) as infile, open(output_file, 'w') as outfile:
data = json.load(infile)
def listToString(l):
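        # Unescape HTML entities in each value and join them with "; ", quoting the whole field for CSV output.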
nohtmllist = list(map(html.unescape,l))
return f'"{"; ".join(nohtmllist)}"'
for paper in data:
title = (
f'{listToString(paper["title"])},'
f'{listToString(paper["date"])},'
f'{listToString(paper["email"])},'
f'{paper["url"]}'
)
for key, value in paper.items():
if isinstance(value, dict):
name = listToString(value['name'])
affil = listToString(value['affiliation'])
email = listToString(value['email'])
outstring = f'{title},{name},{affil},{email}\n'
outfile.write(outstring)
|
#!/usr/bin/env python3
"""Test the configuration module."""
import multiprocessing
import os
import sys
import os.path
import unittest
import shutil
import random
import string
import tempfile
import yaml
# Try to create a working PYTHONPATH
TEST_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
SWITCHMAP_DIRECTORY = os.path.abspath(os.path.join(TEST_DIRECTORY, os.pardir))
ROOT_DIRECTORY = os.path.abspath(os.path.join(SWITCHMAP_DIRECTORY, os.pardir))
if TEST_DIRECTORY.endswith('/switchmap-ng/switchmap/test') is True:
sys.path.append(ROOT_DIRECTORY)
else:
print(
'This script is not installed in the "switchmap-ng/bin" directory. '
'Please fix.')
sys.exit(2)
from switchmap.utils import configuration
class TestConfig(unittest.TestCase):
"""Checks all functions and methods."""
#########################################################################
# General object setup
#########################################################################
random_string = ''.join([random.choice(
string.ascii_letters + string.digits) for n in range(9)])
log_directory = tempfile.mkdtemp()
cache_directory = tempfile.mkdtemp()
good_config = ("""\
main:
log_directory: {}
cache_directory: {}
agent_threads: 25
bind_port: 3000
hostnames:
- 192.168.1.1
- 192.168.1.2
- 192.168.1.3
- 192.168.1.4
listen_address: 0.0.0.0
log_level: debug
polling_interval: 20
""".format(log_directory, cache_directory))
# Convert good_config to dictionary
good_dict = yaml.safe_load(bytes(good_config, 'utf-8'))
# Set the environmental variable for the configuration directory
directory = tempfile.mkdtemp()
os.environ['SWITCHMAP_CONFIGDIR'] = directory
config_file = '{}/test_config.yaml'.format(directory)
# Write good_config to file
with open(config_file, 'w') as f_handle:
yaml.dump(good_dict, f_handle, default_flow_style=True)
# Create configuration object
config = configuration.Config()
@classmethod
def tearDownClass(cls):
"""Post test cleanup."""
os.rmdir(cls.log_directory)
os.rmdir(cls.config.topology_directory())
os.rmdir(cls.config.idle_directory())
os.rmdir(cls.cache_directory)
os.remove(cls.config_file)
os.rmdir(cls.directory)
def test_init(self):
"""Testing method init."""
        # Testing with a non-existent directory
directory = 'bogus'
os.environ['SWITCHMAP_CONFIGDIR'] = directory
with self.assertRaises(SystemExit):
configuration.Config()
# Testing with an empty directory
empty_directory = tempfile.mkdtemp()
os.environ['SWITCHMAP_CONFIGDIR'] = empty_directory
with self.assertRaises(SystemExit):
configuration.Config()
# Write bad_config to file
empty_config_file = '{}/test_config.yaml'.format(empty_directory)
with open(empty_config_file, 'w') as f_handle:
f_handle.write('')
# Create configuration object
config = configuration.Config()
with self.assertRaises(SystemExit):
config.log_file()
# Cleanup files in temp directories
_delete_files(directory)
def test_log_file(self):
"""Testing method log_file."""
# Test the log_file with a good_dict
# good key and key_value
result = self.config.log_file()
self.assertEqual(
result, '{}/switchmap-ng.log'.format(self.log_directory))
def test_web_log_file(self):
"""Testing method web_log_file ."""
# Testing web_log_file with a good dictionary.
result = self.config.web_log_file()
self.assertEqual(
result, '{}/switchmap-ng-api.log'.format(self.log_directory))
def test_log_level(self):
"""Testing method log_level."""
        # Testing with a good dictionary
# good key and good key_value
result = self.config.log_level()
self.assertEqual(result, 'debug')
self.assertEqual(result, self.good_dict['main']['log_level'])
# Set the environmental variable for the configuration directory
directory = tempfile.mkdtemp()
os.environ['SWITCHMAP_CONFIGDIR'] = directory
config_file = '{}/test_config.yaml'.format(directory)
# Testing log_level with blank key and blank key_value
key = ''
key_value = ''
bad_config = ("""\
main:
{} {}
""".format(key, key_value))
bad_dict = yaml.safe_load(bytes(bad_config, 'utf-8'))
# Write bad_config to file
with open(config_file, 'w') as f_handle:
yaml.dump(bad_dict, f_handle, default_flow_style=True)
# Create configuration object
config = configuration.Config()
with self.assertRaises(SystemExit):
config.log_level()
# Testing log_level with good key and blank key_value
key = 'log_level:'
key_value = ''
bad_config = ("""\
main:
{} {}
""".format(key, key_value))
bad_dict = yaml.safe_load(bytes(bad_config, 'utf-8'))
# Write bad_config to file
with open(config_file, 'w') as f_handle:
yaml.dump(bad_dict, f_handle, default_flow_style=True)
# Create configuration object
config = configuration.Config()
with self.assertRaises(SystemExit):
config.log_level()
# Cleanup files in temp directories
_delete_files(directory)
def test_cache_directory(self):
"""Testing method cache_directory."""
# Testing cache_directory with temp directory
# Set the environmental variable for the configuration directory
directory = tempfile.mkdtemp()
os.environ['SWITCHMAP_CONFIGDIR'] = directory
config_file = '{}/test_config.yaml'.format(directory)
# Testing cache_directory with blank key_value(filepath)
key = ''
key_value = ''
bad_config = ("""\
main:
{} {}
""".format(key, key_value))
bad_dict = yaml.safe_load(bytes(bad_config, 'utf-8'))
with open(config_file, 'w') as f_handle:
yaml.dump(bad_dict, f_handle, default_flow_style=True)
# Create configuration object
config = configuration.Config()
with self.assertRaises(SystemExit):
config.cache_directory()
# Cleanup files in temp directories
_delete_files(directory)
def test_agent_threads(self):
"""Testing method agent_threads."""
# Testing agent_threads with good_dict
# good key and key_value
result = self.config.agent_threads()
# Get CPU cores
cores = multiprocessing.cpu_count()
desired_max_threads = max(1, cores - 1)
        # We don't want a value so big that the CPU cannot cope
expected = min(result, desired_max_threads)
self.assertEqual(result, expected)
def test_polling_interval(self):
"""Testing method polling_interval."""
# Testing polling_interval with good_dictionary
# good key and key_value
result = self.config.polling_interval()
self.assertEqual(result, 20)
self.assertEqual(result, self.good_dict['main']['polling_interval'])
# Set the environmental variable for the configuration directory
directory = tempfile.mkdtemp()
os.environ['SWITCHMAP_CONFIGDIR'] = directory
config_file = '{}/test_config.yaml'.format(directory)
# Testing polling_interval with blank key and blank key_value
key = ''
key_value = ''
bad_config = ("""\
main:
{} {}
""".format(key, key_value))
bad_dict = yaml.safe_load(bytes(bad_config, 'utf-8'))
# Write bad_config to file
with open(config_file, 'w') as f_handle:
yaml.dump(bad_dict, f_handle, default_flow_style=True)
# Create configuration object
config = configuration.Config()
with self.assertRaises(SystemExit):
config.polling_interval()
# Testing polling_interval with good key and blank key_value
key = 'polling_interval:'
key_value = ''
bad_config = ("""\
main:
{} {}
""".format(key, key_value))
bad_dict = yaml.safe_load(bytes(bad_config, 'utf-8'))
# Write bad_config to file
with open(config_file, 'w') as f_handle:
yaml.dump(bad_dict, f_handle, default_flow_style=True)
# Create configuration object
config = configuration.Config()
result = config.polling_interval()
self.assertEqual(result, 86400)
# Cleanup files in temp directories
_delete_files(directory)
def test_bind_port(self):
"""Testing method bind_port."""
# Testing bind_port with good_dictionary
# good key and key_value
result = self.config.bind_port()
self.assertEqual(result, 3000)
self.assertEqual(result, self.good_dict['main']['bind_port'])
# Set the environmental variable for the configuration directory
directory = tempfile.mkdtemp()
os.environ['SWITCHMAP_CONFIGDIR'] = directory
config_file = '{}/test_config.yaml'.format(directory)
# Testing bind_port with blank key and blank key_value
key = ''
key_value = ''
bad_config = ("""\
main:
{} {}
""".format(key, key_value))
bad_dict = yaml.safe_load(bytes(bad_config, 'utf-8'))
# Write bad_config to file
with open(config_file, 'w') as f_handle:
yaml.dump(bad_dict, f_handle, default_flow_style=True)
# Create configuration object
config = configuration.Config()
with self.assertRaises(SystemExit):
config.bind_port()
# Testing bind_port with good key and blank key_value
key = 'bind_port:'
key_value = ''
bad_config = ("""\
main:
{} {}
""".format(key, key_value))
bad_dict = yaml.safe_load(bytes(bad_config, 'utf-8'))
# Write bad_config to file
with open(config_file, 'w') as f_handle:
yaml.dump(bad_dict, f_handle, default_flow_style=True)
# Create configuration object
config = configuration.Config()
result = config.bind_port()
self.assertEqual(result, 7000)
# Cleanup files in temp directories
_delete_files(directory)
def test_idle_directory(self):
"""Testing function idle_directory."""
# Verify that directory exists
result = self.config.idle_directory()
self.assertEqual(os.path.exists(result), True)
self.assertEqual(os.path.isdir(result), True)
# Doesn't fail because directory now exists
result = self.config.idle_directory()
expected = '{}/idle'.format(
self.good_dict['main']['cache_directory'])
self.assertEqual(result, expected)
def test_topology_directory(self):
"""Testing function topology_directory."""
# Verify that directory exists
result = self.config.topology_directory()
self.assertEqual(os.path.exists(result), True)
self.assertEqual(os.path.isdir(result), True)
# Doesn't fail because directory now exists
result = self.config.topology_directory()
expected = '{}/topology'.format(
self.good_dict['main']['cache_directory'])
self.assertEqual(result, expected)
def test_topology_device_file(self):
"""Testing function topology_device_file."""
# Recreate the path to the device file
result = self.config.topology_device_file(self.random_string)
expected = '{}/{}.yaml'.format(
self.config.topology_directory(), self.random_string)
self.assertEqual(result, expected)
def test_hostnames(self):
"""Testing function hostnames."""
# Test expected versus returned values
result = self.config.hostnames()
expected = sorted(self.good_dict['main']['hostnames'])
self.assertEqual(result, expected)
def test_log_directory(self):
"""Testing method log_directory."""
# Testing log_directory with temp directory
# Set the environmental variable for the configuration directory
directory = tempfile.mkdtemp()
os.environ['SWITCHMAP_CONFIGDIR'] = directory
config_file = '{}/test_config.yaml'.format(directory)
# Testing log_directory with blank key_value(filepath)
key = ''
key_value = ''
bad_config = ("""\
main:
{} {}
""".format(key, key_value))
bad_dict = yaml.safe_load(bytes(bad_config, 'utf-8'))
with open(config_file, 'w') as f_handle:
yaml.dump(bad_dict, f_handle, default_flow_style=True)
# Create configuration object
config = configuration.Config()
with self.assertRaises(SystemExit):
config.log_directory()
# Cleanup files in temp directories
_delete_files(directory)
class TestConfigSNMP(unittest.TestCase):
"""Checks all functions and methods."""
# ---------------------------------------------------------------------- #
# General object setup
# ---------------------------------------------------------------------- #
# Required
maxDiff = None
@classmethod
def setUpClass(cls):
"""Setup the environmental before testing begins."""
# Define agent name
cls.group_name = ''.join([random.choice(
string.ascii_letters + string.digits) for n in range(9)])
# Create logfile
cls.log_file = tempfile.NamedTemporaryFile(delete=False).name
# Create temporary configuration directory
cls.test_config_dir = tempfile.mkdtemp()
# Initializing key variables
text_configuration = ("""
snmp_groups:
- group_name: {}
snmp_version: 3
snmp_secname: woohoo
snmp_community:
snmp_port: 161
snmp_authprotocol: sha
snmp_authpassword: auth123
snmp_privprotocol: des
snmp_privpassword: priv123
- group_name: Remote Sites
snmp_version: 3
snmp_secname: foobar
snmp_community:
snmp_port: 161
snmp_authprotocol: sha
snmp_authpassword: 123auth
snmp_privprotocol: aes
snmp_privpassword: 123priv
""".format(cls.group_name))
cls.configuration_dict = yaml.safe_load(text_configuration)
# Create the configuration file on disk
test_config_file = '{}/config.yaml'.format(cls.test_config_dir)
with open(test_config_file, 'w') as f_handle:
f_handle.write(text_configuration)
# Instantiate object to test
os.environ['SWITCHMAP_CONFIGDIR'] = cls.test_config_dir
cls.testobj = configuration.ConfigSNMP()
@classmethod
def tearDownClass(cls):
"""Cleanup the environmental after testing ends."""
# Cleanup temporary files when done
shutil.rmtree(cls.test_config_dir)
os.remove(cls.log_file)
def test_snmp_auth(self):
"""Testing method / function snmp_auth."""
# Initializing key variables
expected_list = [
{
'group_name': 'Remote Sites',
'snmp_version': 3,
'snmp_secname': 'foobar',
'snmp_community': None,
'snmp_port': 161,
'snmp_authprotocol': 'sha',
'snmp_authpassword': '123auth',
'snmp_privprotocol': 'aes',
'snmp_privpassword': '123priv'
},
{
'group_name': self.group_name,
'snmp_version': 3,
'snmp_secname': 'woohoo',
'snmp_community': None,
'snmp_port': 161,
'snmp_authprotocol': 'sha',
'snmp_authpassword': 'auth123',
'snmp_privprotocol': 'des',
'snmp_privpassword': 'priv123'
}
]
# Get results from configuration file
groups = self.testobj.snmp_auth()
# Iterate through each item in the snmp parameters list received
for group in groups:
for expected_dict in expected_list:
if expected_dict['group_name'] == group['group_name']:
for key in expected_dict.keys():
self.assertEqual(
group[key], expected_dict[key])
def _delete_files(directory):
"""Delete all files in directory."""
# Verify that directory exists
if os.path.isdir(directory) is False:
return
# Cleanup files in temp directories
filenames = [filename for filename in os.listdir(
directory) if os.path.isfile(
os.path.join(directory, filename))]
# Get the full filepath for the cache file and remove filepath
for filename in filenames:
filepath = os.path.join(directory, filename)
os.remove(filepath)
# Remove directory after files are deleted.
os.rmdir(directory)
if __name__ == '__main__':
# Do the unit test
unittest.main()
|
import tensorflow as tf
from models.item_ranking.cdae import CDAE
from utils.evaluation.RankingMetrics import evaluate
class ModifiedCDAE(CDAE):
def __init__(self, sess, num_user, num_item, nn_factors=None, **kwds):
super(ModifiedCDAE, self).__init__(sess, num_user, num_item, **kwds)
self.nn_factors = nn_factors if nn_factors is not None else [512, 1024]
def build_network(self, hidden_neuron=500, corruption_level=0):
super(ModifiedCDAE, self).build_network(corruption_level=corruption_level)
_W = tf.compat.v1.Variable(tf.compat.v1.random_normal([self.num_item, hidden_neuron], stddev=0.01))
_W_prime = tf.compat.v1.Variable(tf.compat.v1.random_normal([hidden_neuron, self.num_item], stddev=0.01))
_V = tf.compat.v1.Variable(tf.compat.v1.random_normal([self.num_user, hidden_neuron], stddev=0.01))
b = tf.compat.v1.Variable(tf.compat.v1.random_normal([hidden_neuron], stddev=0.01))
b_prime = tf.compat.v1.Variable(tf.compat.v1.random_normal([self.num_item], stddev=0.01))
self.nn_factors.append(self.num_item)
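        # The final layer must map back to the item dimension, so num_item is appended as the last layer size.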
nn_weights = [tf.compat.v1.Variable(tf.compat.v1.random_normal([self.num_item, self.nn_factors[0]], stddev=0.01))]
for i in range(1, len(self.nn_factors)):
nn_weights.append(tf.compat.v1.Variable(tf.compat.v1.random_normal([self.nn_factors[i-1], self.nn_factors[i]], stddev=0.01)))
nn_biases = [tf.compat.v1.Variable(tf.compat.v1.random_normal([factor], stddev=0.01)) for factor in self.nn_factors]
self.final_layer = tf.compat.v1.sigmoid(tf.compat.v1.matmul(self.layer_2, nn_weights[0]) + nn_biases[0])
for i in range(1, len(self.nn_factors)):
self.final_layer = tf.compat.v1.sigmoid(tf.compat.v1.matmul(self.final_layer, nn_weights[i]) + nn_biases[i])
self.loss = - tf.compat.v1.reduce_sum(
self.rating_matrix * tf.compat.v1.log(self.final_layer) + (1 - self.rating_matrix) * tf.compat.v1.log(1 - self.final_layer)) + \
self.reg_rate * (tf.compat.v1.nn.l2_loss(_W) + tf.compat.v1.nn.l2_loss(_W_prime) + tf.compat.v1.nn.l2_loss(_V) +
tf.compat.v1.nn.l2_loss(b) + tf.compat.v1.nn.l2_loss(b_prime) +
sum([tf.compat.v1.nn.l2_loss(weight) for weight in nn_weights]) +
sum([tf.compat.v1.nn.l2_loss(bias) for bias in nn_biases]))
self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
def test(self):
self.reconstruction = self.sess.run(self.final_layer, feed_dict={self.corrupted_rating_matrix: self.train_data,
self.user_id: range(self.num_user)})
evaluate(self)
|
#pre-processing for ON hospital data
#read shapefile, output csv
#limit ourselves to hospitals
#Also turn 'POINT' Geometry into lat and lon
import geopandas as gpd
import pandas as pd
#read shapefile with geopandas into geodataframe
sc1=gpd.read_file('/home/csis/codes/shape_to_csv/MOH_SERVICE_LOCATION.shp')
sc1=pd.DataFrame(sc1)
print(sc1.SERV_TYPE.unique())
sc1=sc1.loc[(sc1["SERV_TYPE"]=="Hospital - Corporation") | (sc1["SERV_TYPE"]=="Hospital - Site")]
#sc1=sc1.loc[sc1["SERV_TYPE"]==("Hospital - Site")]
def strip_point(x):
    # Convert a 'POINT (lon lat)' geometry string to a [lon, lat] list of strings
    x = str(x)
    t = x.strip('POINT (')
    t = t.rstrip(')')
    print(t)
    return t.split()
LONGS=[]
LATS=[]
for i in sc1.geometry:
    lon, lat = strip_point(i)
    LONGS.append(lon)
    LATS.append(lat)
sc1["LONGITUDE"]=LONGS
sc1["LATITUDE"]=LATS
print(sc1)
sc1.to_csv('Ontario_hospitals.csv')
|
import math
import numpy
def parenthesis_finder(s):
    # Return the substring between the outermost parentheses
    inner_str = s[s.find("(")+1:s.rfind(")")]
    return inner_str
events = input("Enter event:")
balls = {
"blue":5.0,
"red":6.0,
"white":4.0
}
balls_list = list(balls.values())
total_balls = sum(balls_list)
inside_parenthesis = parenthesis_finder(events)
print(inside_parenthesis)
# event_split = events.split()
# print(event_split)
# for event in event_split:
# event_value = balls[event]
#examples: red, red or white, red and not white, not (blue or white),
#(not blue) or white, red and (not white or blue), red or not white,
#(red and not white) or blue
#all parentheses will be preceded by a logic statement
|
"""docstring for models init file."""
|
#!/usr/bin/env python
from types import SimpleNamespace
from typing import List, Optional, Tuple
import re
def flatten_string_list(l: List[List[str]]) -> List[str]:
"""Flatten a list of list of str
Args:
l (List[List[str]]): [description]
Returns:
List[str]: [description]
"""
return [item for sublist in l for item in sublist]
def split_string(s: str, delimiters: str = ' |, | ,|,') -> List[str]:
"""Split a string using the regex delimiters
Args:
s (str): the string
delimiters (str, optional): regex delimiters. Defaults to ' |, | ,|,'.
Returns:
List[str]: the split string
"""
split_str = re.split(delimiters, s)
return list(filter(None, split_str))
def read_file(filename: str) -> List[str]:
"""Read the data file and returns a list of strings
Args:
filname (str): name of the file to read
Returns:
List[str]: data in the file
"""
with open(filename, 'r') as f:
rawdata = f.readlines()
return rawdata
def replace_ampersand(rawdata: List[str]) -> List[List[str]]:
"""[summary]
Args:
rawdata (List[str]): [description]
Returns:
List[List[str]]: [description]
"""
for il, rd in enumerate(rawdata):
if len(rd) > 0:
if rd.lstrip(' ').startswith('use'):
next_line = il+1
while rawdata[next_line].lstrip(' ').startswith('&'):
name = rd.split()[1].lstrip(',').rstrip(',')
rawdata[next_line] = rawdata[next_line].replace(
'&', ' use %s, only: ' % name)
next_line += 1
return rawdata
def process_data(rawdata: List[str]) -> List[List[str]]:
"""Split the raw data into chunks
Args:
rawdata (List[str]): [description]
Returns:
List[List[str]]: [description]
"""
rawdata = replace_ampersand(rawdata)
return [split_string(rd) if len(rd) > 0 else rd for rd in rawdata]
def separate_scope(data: List[str]) -> List[SimpleNamespace]:
"""Find the scope regions of the data
Args:
data (List[str]): data read in the file
Returns:
List[SimpleNamespace]: one namespace per scope
"""
# identifier for scoping
start_keyword = ['subroutine', 'function', 'module']
end_keyword = ['end', 'end\n']
# get the index of start/end scope
name, idx_start, idx_end = [], [], []
for i, d in enumerate(data):
if len(d) == 0:
continue
if d[0] in start_keyword:
idx_start.append(i)
name.append(d[1].split('(')[0])
if d[0] in end_keyword:
idx_end.append(i)
return [SimpleNamespace(name=name, istart=istart, data=data[istart:iend], module=[]) for name, istart, iend in zip(name, idx_start, idx_end)]
def find_import_var(scope: SimpleNamespace) -> SimpleNamespace:
"""Find variable that are imported in the scope
Args:
scope_data (List[str]): data of the scope
Returns:
SimpleNamespace: namespace containing name, iline, icol of each var in scope
"""
for iline, s in enumerate(scope.data):
if len(s) == 0:
continue
if len(s) == 2 and s[0] == "use":
continue
if len(s) >= 2:
if s[0] == 'use' and s[2].startswith('only'):
module_name = s[1].rstrip('\n')
mod = SimpleNamespace(
name=module_name, iline=iline, total_count=0)
mod.var = []
for icol in range(3, len(s)):
varname = s[icol].rstrip('\n')
if len(varname) > 0:
mod.var.append(SimpleNamespace(name=varname,
count=None))
scope.module.append(mod)
return scope
def count_var(scope: SimpleNamespace) -> SimpleNamespace:
"""[summary]
Args:
scope (SimpleNamespace): [description]
Returns:
SimpleNamespace: [description]
"""
# Avoid counting variables in commented lines:
exclude = ["c", "C", "!"]
data_copy = [var for index, var in enumerate(scope.data)
if var[0] not in exclude]
for mod in scope.module:
for var in mod.var:
c = count(data_copy, var.name)
var.count = c
mod.total_count += c
return scope
def count(scope_data: List[str], varname: str) -> int:
"""Count the number of time a variable appears in the
Args:
scope_data (List[str]): data of the scope
var (str): name of the vairable
Returns:
int: count
"""
joined_data = ' ' + \
' '.join(flatten_string_list(scope_data)) + ' '
pattern = re.compile(r'[\W\s]' + varname + r'[\W\s]', re.IGNORECASE)
return len(pattern.findall(joined_data))-1
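# Note: the trailing "-1" discounts the occurrence of the variable inside the
# "use <module>, only: ..." line itself (that line is part of scope_data), so the
# returned value approximates the number of uses in the body of the scope.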
def clean_raw_data(rawdata: List[str], scope: SimpleNamespace) -> List[str]:
"""
Args:
rawdata (List[str]): [description]
scope (SimpleNamespace): [description]
Returns:
List[str]: [description]
"""
for mod in scope.module:
print(' -- Module : %s' % mod.name)
idx_rawdata = scope.istart + mod.iline
if mod.total_count == 0:
print(' No variable called, removing the entire module')
rawdata[idx_rawdata] = ''
idx_rawdata += 1
while rawdata[idx_rawdata].lstrip(' ').startswith('&'):
rawdata[idx_rawdata] = ''
idx_rawdata += 1
else:
ori_line = rawdata[idx_rawdata]
line = ori_line.split(
'use')[0] + 'use ' + mod.name + ', only: '
for var in mod.var:
if var.count != 0:
line += var.name + ', '
else:
print(' --- removing unused variable %s' %
var.name)
rawdata[idx_rawdata] = line.rstrip(', ') + '\n'
# remove the unwanted
idx_rawdata += 1
while rawdata[idx_rawdata].lstrip(' ').startswith('&'):
rawdata[idx_rawdata] = ''
idx_rawdata += 1
return rawdata
def get_new_filename(filename: str) -> str:
"""[summary]
Args:
filename (str): [description]
Returns:
str: [description]
"""
base, ext = filename.split('.')
return base + '_copy.' + ext
def save_file(filename: str, rawdata: List[str]):
"""[summary]
Args:
filename (str): [description]
scope_data ([type]): [description]
"""
save_data = ''.join(rawdata)
with open(filename, 'w') as f:
f.write(save_data)
print('=')
print('= Output file written in %s' % filename)
print('=')
def clean_use_statement(filename: str, overwrite: bool = False) -> List[SimpleNamespace]:
"""[summary]
Args:
filename (str): [description]
overwrite (bool): [description]
"""
print('=')
print('= Clean Use Statements from %s' % filename)
print('=')
# read the data file and split it
rawdata = read_file(filename)
# splitted data
data = process_data(rawdata)
# separate in scope
scoped_data = separate_scope(data)
# loop over scopes
for scope in scoped_data:
print(' - Scope : %s' % scope.name)
# find variables
scope = find_import_var(scope)
# count the number of var calls per var per module in scope
scope = count_var(scope)
# clean the raw data
rawdata = clean_raw_data(rawdata, scope)
# save file copy
if overwrite:
save_file(filename, rawdata)
else:
new_filename = get_new_filename(filename)
save_file(new_filename, rawdata)
return scoped_data
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="clean_use filename")
parser.add_argument("filename", help="name of the file to clean")
parser.add_argument(
'-ow', '--overwrite', action='store_true', help='overwrite the inputfile')
args = parser.parse_args()
scope = clean_use_statement(args.filename, args.overwrite)
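# Example invocations (the script and file names are illustrative):
#   python clean_use.py my_routine.f        -> writes the cleaned file to my_routine_copy.f
#   python clean_use.py my_routine.f -ow    -> overwrites my_routine.f in place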
|
"""Unit test package for shipflowmotionshelpers."""
import os
path = os.path.dirname(__file__)
path_test_project_1 = os.path.join(path,'test_project_1')
|
### Importing libraries
import pandas as pd
import numpy as np
from datetime import datetime
from lightgbm import LGBMRegressor
# import gresearch_crypto
import traceback
import time
from datetime import datetime
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from sklearn.model_selection import GridSearchCV
import seaborn as sns
from sklearn.model_selection import train_test_split
def read_g_research(path):
df_train = pd.read_csv(path + "train.csv")
df_test = pd.read_csv(path + "example_test.csv")
df_asset_details = pd.read_csv(path + "asset_details.csv")
df_supp_train = pd.read_csv(path + "supplemental_train.csv")
return df_train,df_test,df_asset_details,df_supp_train
# path="F:/g-research-crypto-forecasting/"
# df_train,df_test,df_asset_details,df_supp_train=read_g_research(path)
# print(df_train.describe())
# print(df_train.head())
# Timestamps
# Define a helper function that converts a date string to a timestamp, used for indexing.
# auxiliary function, from datetime to timestamp
totimestamp = lambda s: np.int32(time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple()))
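# Example (illustrative date): totimestamp("01/01/2021") returns the Unix timestamp of
# 2021-01-01 00:00:00 in local time as an np.int32, ready to be compared with the
# "timestamp" index used below.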
## Checking Time Range
def check_time_range(df,asset_id=1):
data_index=df[df_train["Asset_ID"]==asset_id].set_index("timestamp")
beg_time = datetime.fromtimestamp(data_index.index[0]).strftime("%A, %B %d, %Y %I:%M:%S")
end_time = datetime.fromtimestamp(data_index.index[-1]).strftime("%A, %B %d, %Y %I:%M:%S")
# btc = df_train[df_train["Asset_ID"]==1].set_index("timestamp") # Asset_ID = 1 for Bitcoin
# eth = df_train[df_train["Asset_ID"]==6].set_index("timestamp") # Asset_ID = 6 for Ethereum
# bnb = df_train[df_train["Asset_ID"]==0].set_index("timestamp") # Asset_ID = 0 for Binance Coin
# ada = df_train[df_train["Asset_ID"]==3].set_index("timestamp") # Asset_ID = 3 for Cardano
# beg_btc = datetime.fromtimestamp(btc.index[0]).strftime("%A, %B %d, %Y %I:%M:%S")
# end_btc = datetime.fromtimestamp(btc.index[-1]).strftime("%A, %B %d, %Y %I:%M:%S")
# beg_eth = datetime.fromtimestamp(eth.index[0]).strftime("%A, %B %d, %Y %I:%M:%S")
# end_eth = datetime.fromtimestamp(eth.index[-1]).strftime("%A, %B %d, %Y %I:%M:%S")
# beg_bnb = datetime.fromtimestamp(eth.index[0]).strftime("%A, %B %d, %Y %I:%M:%S")
# end_bnb = datetime.fromtimestamp(eth.index[-1]).strftime("%A, %B %d, %Y %I:%M:%S")
# beg_ada = datetime.fromtimestamp(eth.index[0]).strftime("%A, %B %d, %Y %I:%M:%S")
# end_ada = datetime.fromtimestamp(eth.index[-1]).strftime("%A, %B %d, %Y %I:%M:%S")
# print('Bitcoin data goes from ', beg_btc, ' to ', end_btc)
# print('Ethereum data goes from ', beg_eth, ' to ', end_eth)
# print('Binance coin data goes from ', beg_bnb, ' to ', end_bnb)
# print('Cardano data goes from ', beg_ada, ' to ', end_ada)
return beg_time,end_time
def show_heatmap(df):
plt.figure(figsize=(8,6))
sns.heatmap(df[['Count','Open','High','Low','Close','Volume','VWAP','Target']].corr(),
vmin=-1.0, vmax=1.0, annot=True, cmap='coolwarm', linewidths=0.1)
plt.show()
def show_heatmap_(df):
# Heatmap: Coin Correlation (Last 10000 Minutes
data =df[-10000:]
check = pd.DataFrame()
for i in data.Asset_ID.unique():
check[i] = data[data.Asset_ID==i]['Target'].reset_index(drop=True)
plt.figure(figsize=(10,8))
sns.heatmap(check.dropna().corr(), vmin=-1.0, vmax=1.0, annot=True, cmap='coolwarm', linewidths=0.1)
plt.show()
def candlesticks_charts(df):
# Candlesticks Charts for BTC & ETH, Last 200 Minutes
btc_mini = df.iloc[-200:] # Select recent data rows
fig = go.Figure(data=[go.Candlestick(x=btc_mini.index, open=btc_mini['Open'], high=btc_mini['High'], low=btc_mini['Low'], close=btc_mini['Close'])])
fig.update_xaxes(title_text="$")
fig.update_yaxes(title_text="Index")
fig.update_layout(title="Bitcoin Price, 200 Last Minutes")
fig.show()
def show_close_prices(df):
# Plotting BTC and ETH closing prices
f = plt.figure(figsize=(15,4))
# build BTC and ETH series and fill NAs
btc = df[df["Asset_ID"]==1].set_index("timestamp")  # Asset_ID = 1 for Bitcoin
eth = df[df["Asset_ID"]==6].set_index("timestamp")  # Asset_ID = 6 for Ethereum
btc = btc.reindex(range(btc.index[0],btc.index[-1]+60,60),method='pad')
eth = eth.reindex(range(eth.index[0],eth.index[-1]+60,60),method='pad')
ax = f.add_subplot(121)
plt.plot(btc['Close'], color='yellow', label='BTC')
plt.legend()
plt.xlabel('Time (timestamp)')
plt.ylabel('Bitcoin')
ax2 = f.add_subplot(122)
ax2.plot(eth['Close'], color='purple', label='ETH')
plt.legend()
plt.xlabel('Time (timestamp)')
plt.ylabel('Ethereum')
plt.tight_layout()
plt.show()
# Feature Extraction: define helper functions that add features to the list used for prediction.
def hlco_ratio(df):
return (df['High'] - df['Low'])/(df['Close']-df['Open'])
def upper_shadow(df):
return df['High'] - np.maximum(df['Close'], df['Open'])
def lower_shadow(df):
return np.minimum(df['Close'], df['Open']) - df['Low']
def get_features(df):
df_feat = df[['Count', 'Open', 'High', 'Low', 'Close', 'Volume', 'VWAP']].copy()
df_feat['Upper_Shadow'] = upper_shadow(df_feat)
df_feat['hlco_ratio'] = hlco_ratio(df_feat)
df_feat['Lower_Shadow'] = lower_shadow(df_feat)
return df_feat
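# A small illustrative check of the feature helpers on a made-up candle:
# toy = pd.DataFrame({'Count': [1], 'Open': [10.0], 'High': [12.0], 'Low': [9.0],
#                     'Close': [11.0], 'Volume': [100.0], 'VWAP': [10.5]})
# upper_shadow(toy) -> 12 - max(11, 10) = 1.0
# lower_shadow(toy) -> min(11, 10) - 9 = 1.0
# hlco_ratio(toy)   -> (12 - 9) / (11 - 10) = 3.0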
def get_Xy_and_model_for_asset(df_train, asset_id):
df = df_train[df_train["Asset_ID"] == asset_id]
df = df.sample(frac=0.2)
df_proc = get_features(df)
df_proc['y'] = df['Target']
df_proc.replace([np.inf, -np.inf], np.nan, inplace=True)
df_proc = df_proc.dropna(how="any")
X = df_proc.drop("y", axis=1)
print(X)
y = df_proc["y"]
print("===:",y)
model = LGBMRegressor()
model.fit(X, y)
return X, y, model
if __name__=="__main__":
path="F:/g-research-crypto-forecasting/"
df_train,df_test,df_asset_details,df_supp_train=read_g_research(path)
df_feat=get_features(df_train)
print("=========:",df_feat)
# Xs = {}
# ys = {}
# models = {}
# # Prediction
# # train test split df_train into 80% train rows and 20% valid rows
# train_data = df_train
# # train_data = df_train.sample(frac = 0.8)
# # valid_data = df_train.drop(train_data.index)
# for asset_id, asset_name in zip(df_asset_details['Asset_ID'], df_asset_details['Asset_Name']):
# print(f"Training model for {asset_name:<16} (ID={asset_id:<2})")
# X, y, model = get_Xy_and_model_for_asset(train_data, asset_id)
# try:
# Xs[asset_id], ys[asset_id], models[asset_id] = X, y, model
# except:
# Xs[asset_id], ys[asset_id], models[asset_id] = None, None, None
# # Evaluation, Hyperparam Tuning: run GridSearch over the LGBM models for the 14 coins.
# parameters = {
# # 'max_depth': range (2, 10, 1),
# 'num_leaves': range(21, 161, 10),
# 'learning_rate': [0.1, 0.01, 0.05]
# }
# new_models = {}
# for asset_id, asset_name in zip(df_asset_details['Asset_ID'], df_asset_details['Asset_Name']):
# print("GridSearchCV for: " + asset_name)
# grid_search = GridSearchCV(
# estimator=get_Xy_and_model_for_asset(df_train, asset_id)[2], # bitcoin
# param_grid=parameters,
# n_jobs = -1,
# cv = 5,
# verbose=True
# )
# grid_search.fit(Xs[asset_id], ys[asset_id])
# new_models[asset_id] = grid_search.best_estimator_
# grid_search.best_estimator_
# for asset_id, asset_name in zip(df_asset_details['Asset_ID'], df_asset_details['Asset_Name']):
# print(f"Tuned model for {asset_name:<1} (ID={asset_id:})")
# print(new_models[asset_id])
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import ofd
import config
class TestOFDYa(unittest.TestCase):
""" E2E unittest OFD-interactions """
OFD = None
@classmethod
def setUpClass(cls):
""" Setup """
config.debug = False
cls.OFD = ofd.OFDProvider(True).detect(
"t=20170305T005100&s=140.00&fn=8710000100161943&i=8018&fp=2398195357&n=1",
"0000069245023747")
def test_search(self):
self.assertIsNotNone(self.OFD)
def test_items_parsing(self):
self.assertEqual(self.OFD.get_items(), [('Хлеб Ржаной пол. рез. 0,415 кг (Каравай', '-28.40'), ('ФО Картофель, кг (17.9 * 1.132)', '-20.26'), ('ФО Огурцы Эстафета, кг (161.9 * 0.18)', '-29.14'), ('Яйцо фас. С0 10шт ', '-62.20')])
def test_items_count(self):
self.assertEqual(len(self.OFD.get_items()), 4)
def test_first_item(self):
item_name = self.OFD.get_items()[0][0]
self.assertEqual(item_name, "Хлеб Ржаной пол. рез. 0,415 кг (Каравай")
def test_receipt_final_sum(self):
self.assertEqual(self.OFD.raw_sum, '140.00')
if __name__ == '__main__':
unittest.main()
|
# Visit https://www.gluu.org/docs/gluu-server/user-management/scim-scripting/ to learn more
from org.gluu.model.custom.script.type.scim import ScimType
import java
class ScimEventHandler(ScimType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, configurationAttributes):
print "ScimEventHandler (init): Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "ScimEventHandler (destroy): Destroyed successfully"
return True
def getApiVersion(self):
return 5
def createUser(self, user, configurationAttributes):
return True
def updateUser(self, user, configurationAttributes):
return True
def deleteUser(self, user, configurationAttributes):
return True
def createGroup(self, group, configurationAttributes):
return True
def updateGroup(self, group, configurationAttributes):
return True
def deleteGroup(self, group, configurationAttributes):
return True
def postCreateUser(self, user, configurationAttributes):
return True
def postUpdateUser(self, user, configurationAttributes):
return True
def postDeleteUser(self, user, configurationAttributes):
return True
def postUpdateGroup(self, group, configurationAttributes):
return True
def postCreateGroup(self, group, configurationAttributes):
return True
def postDeleteGroup(self, group, configurationAttributes):
return True
def getUser(self, user, configurationAttributes):
return True
def getGroup(self, group, configurationAttributes):
return True
def postSearchUsers(self, results, configurationAttributes):
return True
def postSearchGroups(self, results, configurationAttributes):
return True
def manageResourceOperation(self, context, entity, payload, configurationAttributes):
return None
def manageSearchOperation(self, context, searchRequest, configurationAttributes):
return None
|
#coding=utf-8
from mongoengine import *
import logging
import datetime
from app.customer.models.user import User, UploadImage
from base.core.util.dateutils import datetime_to_timestamp
from django.db import models
from PIL import Image, ImageFilter
import multiprocessing
from base.settings import CHATPAMONGO
from app.customer.models.vip import UserVip
from app.customer.models.community import UserMoment
connect(CHATPAMONGO.db, host=CHATPAMONGO.host, port=CHATPAMONGO.port, username=CHATPAMONGO.username,
password=CHATPAMONGO.password)
class PictureInfo(Document):
PRIVATE = [
(0, u'未公开'),
(1, u'全部公开'),
(2, u'仅好友可见'),
]
STATUS = [
(0, u'可见'),
(1, u'删除'),
]
LOCK = [
(0, u'无锁'),
(1, u'铜锁'), # 1 gravity coin
(2, u'银锁'), # 5 gravity coins
(3, u'金锁'), # 10 gravity coins
]
user_id = IntField(verbose_name=u'用户id', required=True)
created_at = DateTimeField(verbose_name=u'创建时间', default=None)
picture_url = StringField(verbose_name=u'图片url', max_length=256, default=None)
picture_real_url = StringField(verbose_name=u'真实图片url', max_length=256, default=None)
comment = ListField(StringField(verbose_name=u'评论', default=None))
desc = StringField(verbose_name=u'图片描述', max_length=65535, default=None)
picture_type = StringField(verbose_name=u'分类', max_length=1024, default=None)
price = IntField(verbose_name=u'价格', default=0)
is_private = IntField(verbose_name=u'权限', default=0, choices=PRIVATE)
lock_type = IntField(verbose_name=u'锁类型', default=0, choices=LOCK)
lock_count = IntField(verbose_name=u'自动解锁需购买次数', default=0)
purchase_list = ListField(IntField(verbose_name=u'购买人详情', default=None))
location = StringField(verbose_name=u'地点', max_length=256, default=None)
mention = ListField(IntField(verbose_name=u'圈人', default=None))
like_user = ListField(IntField(verbose_name=u'点赞人', default=None))
like_count = IntField(verbose_name=u'点赞数', default=0)
view_count = IntField(verbose_name=u'浏览次数', default=0)
status = IntField(verbose_name=u'状态', default=0)
type = IntField(verbose_name=u'相册类型') # 1: regular album photo 2: featured album photo
show_status = IntField(verbose_name=u'显示状态', default=0) # 1: passed Shumei review 2: blocked by Shumei 3: pending Shumei review
class Meta:
app_label = "picture"
verbose_name = u"图片"
verbose_name_plural = verbose_name
def normal_info(self):
data = {}
data['id'] = str(self.id)
data['user_id'] = self.user_id
data['created_at'] = datetime_to_timestamp(self.created_at)
data['picture_url'] = self.picture_url
data['comment_count'] = len(self.comment)
data['desc'] = self.desc
data['picture_type'] = self.picture_type
data['price'] = self.price
data['is_private'] = self.is_private
data['lock_type'] = self.lock_type
data['lock_count'] = self.lock_count
data['purchase_list'] = self.purchase_list
data['purchase_user_count'] = len(self.purchase_list)
data['location'] = self.location
data['mention'] = self.mention
data['like_user'] = self.like_user
data['like_count'] = self.like_count
data['view_count'] = self.view_count
data['status'] = self.status
return data
def real_info(self):
data = {}
data['id'] = str(self.id)
data['user_id'] = self.user_id
data['created_at'] = datetime_to_timestamp(self.created_at)
data['picture_url'] = self.picture_real_url
data['comment_count'] = len(self.comment)
data['desc'] = self.desc
data['picture_type'] = self.picture_type
data['price'] = self.price
data['is_private'] = self.is_private
data['lock_type'] = self.lock_type
data['lock_count'] = self.lock_count
data['purchase_list'] = self.purchase_list
data['purchase_user_count'] = len(self.purchase_list)
data['location'] = self.location
data['mention'] = self.mention
data['like_user'] = self.like_user
data['like_count'] = self.like_count
data['view_count'] = self.view_count
data['status'] = self.status
return data
@classmethod
def create_picture(cls, user_id, created_at, picture_url, desc=None, picture_type=None, price=0, is_private=1,
lock_type=0, lock_count=0, location=None, mention=None, type=1):
try:
picture = PictureInfo(
user_id=user_id,
created_at=created_at,
picture_url=picture_url,
picture_real_url=picture_url,
comment=None,
desc=desc,
picture_type=picture_type,
price=price,
is_private=is_private,
lock_type=lock_type,
lock_count=lock_count,
purchase_list=None,
location=location,
mention=mention,
like_user=None,
like_count=0,
view_count=0,
status=0,
type=1,
)
if price != 0:
picture.picture_url = 'https://hdlive-10048692.image.myqcloud.com/5c8ff8bdc5a3645edcd8d4f9313f29e7'
picture.save()
# generate_blurred_picture only takes the picture url and id, so pass arguments matching its signature
p = multiprocessing.Process(target=PictureInfo.generate_blurred_picture, args=(picture_url, picture.id))
p.start()
picture.save()
user = User.objects.get(id=user_id)
user.add_experience(2)
except Exception,e:
logging.error("create picture error:{0}".format(e))
return False
return str(picture.id)
@classmethod
def create_comment(cls, picture_id, user_id, reply_id=0, created_at=None, comment=None):
try:
picture = PictureInfo.objects.get(id=picture_id)
comment_id = str(CommentInfo.create_comment(user_id, picture_id, reply_id, created_at, comment))
if comment_id:
picture.comment.append(comment_id)
picture.save()
else:
return False
except Exception,e:
logging.error("create comment error:{0}".format(e))
return False
return True
@classmethod
def create_likeuser(cls, picture_id, user_id):
try:
picture = PictureInfo.objects.get(id=picture_id)
is_like = PictureInfo.check_is_like(picture_id, user_id)
if not is_like:
picture.like_user.append(user_id)
picture.like_count += 1
picture.save()
else:
return False
except Exception,e:
logging.error("like user error:{0}".format(e))
return False
return True
@classmethod
def cancel_likeuser(cls, picture_id, user_id):
try:
picture = PictureInfo.objects.get(id=picture_id)
is_like = PictureInfo.check_is_like(picture_id, user_id)
if is_like:
picture.like_user.remove(user_id)
picture.like_count -= 1
picture.save()
else:
return False
except Exception,e:
logging.error("cancel like user error:{0}".format(e))
return False
return True
@classmethod
def check_is_like(cls, picture_id, user_id):
picture = PictureInfo.objects.get(id=picture_id)
if int(user_id) in picture.like_user:
return True
else:
return False
@classmethod
def purchase_picture(cls, picture_id, user_id):
try:
picture = PictureInfo.objects.get(id=picture_id)
new_url = picture.picture_real_url
is_purchase = PictureInfo.check_is_purchase(picture_id, user_id)
if not is_purchase:
picture.purchase_list.append(int(user_id))
picture.save()
UserPurchase.user_purchase_picture(user_id, picture_id)
else:
return False, None
except Exception,e:
logging.error("purchase picture error:{0}".format(e))
return False, None
return True, new_url
@classmethod
def check_is_purchase(cls, picture_id, user_id):
picture = PictureInfo.objects.get(id=picture_id)
if int(user_id) in picture.purchase_list:
return True
else:
return False
@classmethod
def get_picture_list(cls, page=1, page_count=10):
pictures = PictureInfo.objects.filter(is_private=1, status=0).order_by('-created_at')[(page-1)*page_count:page*page_count]
return pictures
@classmethod
def get_unlock_user_list(cls, picture_id, page=1, page_count=10):
user_list = PictureInfo.objects.get(id=picture_id).purchase_list[(page-1)*page_count:page*page_count]
return user_list
@classmethod
def get_picture_user(cls, user_id):
user = User.objects.get(id=user_id)
return user
@classmethod
def add_viewcount(cls, picture_id):
try:
picture = PictureInfo.objects.get(id=picture_id)
picture.view_count = picture.view_count + 1
picture.save()
except Exception,e:
logging.error("view count error:{0}".format(e))
return False
return picture.view_count
@classmethod
def get_picture_info(cls, picture_id):
try:
picture = PictureInfo.objects.get(id=picture_id)
except Exception,e:
logging.error("get picture info error:{0}".format(e))
return False
return picture
@classmethod
def delete_comment(cls, comment_id, user_id):
try:
comment = CommentInfo.objects.get(id=comment_id)
picture = PictureInfo.objects.get(id=comment.picture_id)
if comment.user_id == int(user_id):
picture.comment.remove(comment_id)
picture.save()
comment.status = 1
comment.save()
else:
return False
except Exception,e:
logging.error("delete comment error:{0}".format(e))
return False
return True
@classmethod
def delete_picture(cls, picture_id, user_id):
try:
picture = PictureInfo.objects.get(id=picture_id)
if picture.user_id == int(user_id):
picture.update(set__status=1)
else:
return False
except Exception,e:
logging.error("delete picture error:{0}".format(e))
return False
return True
@classmethod
def get_user_picture(cls, user_id, page=1, page_count=10):
pictures = PictureInfo.objects.filter(user_id=user_id, status=0).order_by('-created_at')
return pictures
@classmethod
def generate_blurred_picture(cls, picture_url, picture_id):
from PIL.ExifTags import TAGS
import urllib2
img = urllib2.urlopen(picture_url).read()
name = '/mydata/python/live_video/app/download_pic/1.jpg'
new_name = '/mydata/python/live_video/app/download_pic/2.jpg'
pic = open(name, 'wb')
pic.write(img)
pic.close()
radius = 50
image = Image.open(name)
exifinfo = image._getexif()
if exifinfo:
ret = {}
for tag, value in exifinfo.items():
decoded = TAGS.get(tag, tag)
ret[decoded] = value
if 'Orientation' not in ret:
orientation = 1
else:
orientation = ret['Orientation']
if orientation == 1:
# Nothing
mirror = image.copy()
elif orientation == 2:
# Vertical Mirror
mirror = image.transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 3:
# Rotation 180°
mirror = image.transpose(Image.ROTATE_180)
elif orientation == 4:
# Horizontal Mirror
mirror = image.transpose(Image.FLIP_TOP_BOTTOM)
elif orientation == 5:
# Horizontal Mirror + Rotation 90° CCW
mirror = image.transpose(Image.FLIP_TOP_BOTTOM).transpose(Image.ROTATE_90)
elif orientation == 6:
# Rotation 270°
mirror = image.transpose(Image.ROTATE_270)
elif orientation == 7:
# Horizontal Mirror + Rotation 270°
mirror = image.transpose(Image.FLIP_TOP_BOTTOM).transpose(Image.ROTATE_270)
elif orientation == 8:
# Rotation 90°
mirror = image.transpose(Image.ROTATE_90)
mirror.save(name, "JPEG", quality=85)
image = Image.open(name)
image = image.filter(MyGaussianBlur(radius=radius))
image.save(new_name)
new_pic = open(new_name, 'rb')
data = UploadImage.push_binary_to_qclude(new_pic, radius)
new_pic.close()
new_url = data.get("data", {}).get('download_url', '')
picture = PictureInfo.objects.get(id=picture_id)
picture.picture_url = User.convert_http_to_https(new_url)
picture.save()
@classmethod
def check_count(cls, new_count, user, type):
"""
VIP:
3)相册:普通上线20张,精华上线20张
播主VIP:
3)相册:普通上线20张,精华上线20张
播主:
3)相册:普通上线10张,精华上线10张
普通用户:
3)相册:普通上线5张,精华不可上传
"""
vip_count_normal = 20
vip_count = 20
anchor_vip_count = 20
anchor_vip_count_normal = 20
anchor_count_normal = 10
anchor_count = 10
user_count_normal = 5
is_video = user.is_video_auth
user_vip = UserVip.objects.filter(user_id=user.id).first()
now = datetime.datetime.now()
starttime = now.strftime("%Y-%m-%d 00:00:00")
endtime = now.strftime('%Y-%m-%d 23:59:59')
today_count = PictureInfo.objects.filter(user_id=int(user.id), status=0, type=type, show_status__ne=2).count()
code = 1
message = ""
total = today_count + int(new_count)
if type == 1:
# regular album
if user_vip:
if int(is_video) == 1:
# anchor VIP
if total > anchor_vip_count_normal:
code = 2
message = u"播主VIP,普通相册最多20张"
return code, message
else:
# user VIP
if total > vip_count_normal:
code = 2
message = u"用户VIP,普通相册最多20张"
return code, message
else:
if int(is_video) == 1:
# anchor
if total > anchor_count_normal:
code = 2
message = u"播主普通相册最多10张"
return code, message
else:
# regular user
if total > user_count_normal:
code = 2
message = u"普通用户普通相册最多5张"
return code, message
if type == 2:
# featured album
if user_vip:
if int(is_video) == 1:
# anchor VIP
if total > anchor_vip_count:
code = 2
message = u"播主VIP,精美相册最多20张"
return code, message
else:
# user VIP
if total > vip_count:
code = 2
message = u"用户VIP,精美相册最多20张"
return code, message
else:
if int(is_video) == 1:
# anchor
if total > anchor_count:
code = 2
message = u"播主精美相册最多10张"
return code, message
else:
# regular user
if total > 0:
code = 2
message = u"普通用户不可上传精美相册"
return code, message
return code, message
class MyGaussianBlur(ImageFilter.Filter):
name = "GaussianBlur"
def __init__(self, radius=2, bounds=None):
self.radius = radius
self.bounds = bounds
def filter(self, image):
if self.bounds:
clips = image.crop(self.bounds).gaussian_blur(self.radius)
image.paste(clips, self.bounds)
return image
else:
return image.gaussian_blur(self.radius)
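# Note: recent Pillow releases ship ImageFilter.GaussianBlur, so the unbounded case could
# also be written (assuming such a Pillow version) as:
#     blurred = image.filter(ImageFilter.GaussianBlur(radius=50))
# The custom filter class above is kept for the optional bounded-region blur.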
class UserPurchase(Document):
user_id = IntField(verbose_name=u'用户id', required=True)
purchase_picture = ListField(StringField(verbose_name=u'用户购买的图片列表', default=None))
class Meta:
app_label = "picture"
verbose_name = u"图片"
verbose_name_plural = verbose_name
# create a user purchase record
@classmethod
def create_user_purchase(cls, user_id, picture_id):
try:
user = UserPurchase(
user_id=user_id,
purchase_picture=[picture_id],
)
user.save()
except Exception,e:
logging.error("create user purchase error:{0}".format(e))
return False
return True
# add a purchased picture
@classmethod
def user_purchase_picture(cls, user_id, picture_id):
try:
user = UserPurchase.objects.get(user_id=user_id)
user.purchase_picture.insert(0, picture_id)
user.save()
return True
except UserPurchase.DoesNotExist:
status = UserPurchase.create_user_purchase(user_id, picture_id)
return status
class PicturePriceList(Document):
picture_price = IntField(verbose_name=u'图片价格列表', required=True)
price_desc = StringField(verbose_name=u'价格描述', max_length=64, default='')
class Meta:
app_label = "picture"
verbose_name = u"图片价格"
verbose_name_plural = verbose_name
# picture price list
@classmethod
def create_price(cls, picture_price, price_desc=None):
try:
price = PicturePriceList(picture_price=picture_price, price_desc=price_desc)
price.save()
except Exception,e:
logging.error("create price error:{0}".format(e))
return False
return True
@classmethod
def get_price_list(cls):
price_list = cls.objects.all()
return price_list
@classmethod
def get_price_desc(cls, picture_price):
desc = cls.objects.get(picture_price=picture_price).price_desc
return desc
|
import json
import time
import requests
from Lib import DocSegmentation
DOCKER = False
ENDPOINT = "http://localhost:8080/api/v1"
DOCUMENT = 'Document2'
'''
Post request
'''
def post_request(path, json, headers=None):
url = F'{ENDPOINT}/{path}'
headers = headers or {}
headers['Content-Type'] = 'application/json'
response = requests.post(url, json=json, headers=headers)
if response.ok:
return response.json()
else:
raise Exception(str(response))
'''
Split Document Segments
'''
def do_segmentation(txt):
doc = {'Text': txt}
segs = post_request('document/segmentation?lan=es', doc)
jso = json.dumps(segs, indent=2, sort_keys=True)
print(jso)
with open(F'_output/web-{DOCUMENT}-segs.json', 'w') as fp:
fp.write(jso)
return segs
'''
MAIN
'''
def run():
with open(F'_input/{DOCUMENT}.txt', 'r', encoding='UTF-8') as fp:
text = fp.read()
segs = do_segmentation(text)
if __name__ == '__main__':
if DOCKER:
run()
else:
import threading
from app import main
threading.Thread(target=run).start()
main()
|
import pygame
class Ship():
def __init__(self, game_settings, screen):
"""Initialize ship and define ship start position"""
self.screen = screen
self.game_settings = game_settings
self.image = pygame.image.load("Images/kolmnurkvaike.bmp")
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
self.rect.centerx = self.screen_rect.centerx
self.center = float(self.rect.centerx)
self.rect.bottom = self.screen_rect.bottom
self.moving_right = False
self.moving_left = False
def update(self):
"""Update ship position according to moving flag"""
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.game_settings.ship_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -= self.game_settings.ship_speed_factor
self.rect.centerx = self.center
def blitme(self):
"""Draw ship at this position"""
self.screen.blit(self.image, self.rect)
def restartship(self):
self.center = self.screen_rect.centerx
|
# -*- coding: utf-8 -*-
import logging
from pyramid.events import BeforeRender
from pyramid.events import subscriber
from pyramid.renderers import get_renderer
from amnesia import helpers
log = logging.getLogger(__name__) # pylint: disable=invalid-name
def includeme(config):
config.scan(__name__)
@subscriber(BeforeRender)
def add_renderers_global(event):
registry = event['request'].registry
layout = registry.settings.get('amnesia.master_layout')
if layout:
layout = get_renderer(layout).implementation()
event.update({
'h': helpers,
'widgets': registry['widgets'],
'layout': layout
})
|
import asyncio
import logging
import time
from typing import Optional, Any, List, Dict
from collections.abc import Iterable
from .logger import setup_custom_logger
import ray
logger = setup_custom_logger(__name__)
class Empty(Exception):
pass
class Full(Exception):
pass
# TODO(Clark): Update docstrings and examples.
class MultiQueue:
"""A first-in, first-out queue implementation on Ray.
The behavior and use cases are similar to those of the asyncio.Queue class.
Features both sync and async put and get methods. Provides the option to
block until space is available when calling put on a full queue,
or to block until items are available when calling get on an empty queue.
Optionally supports batched put and get operations to minimize
serialization overhead.
Args:
maxsize (optional, int): maximum size of the queue. If zero, size is
unbounded.
actor_options (optional, Dict): Dictionary of options to pass into
the QueueActor during creation. These are directly passed into
QueueActor.options(...). This could be useful if you
need to pass in custom resource requirements, for example.
Examples:
>>> q = MultiQueue(num_queues=2)
>>> items = list(range(10))
>>> for item in items:
>>>     q.put(0, item)
>>> for item in items:
>>>     assert item == q.get(0)
>>> # Create MultiQueue with the underlying actor reserving 1 CPU.
>>> q = MultiQueue(num_queues=2, actor_options={"num_cpus": 1})
"""
def __init__(self,
num_queues: int,
maxsize: int = 0,
name: str = None,
connect: bool = False,
actor_options: Optional[Dict] = None,
connect_retries: int = 5) -> None:
self.num_queues = num_queues
self.maxsize = maxsize
if connect:
logger.info("Will connect to queue actor")
assert actor_options is None
assert name is not None
self.actor = connect_queue_actor(name, connect_retries)
logger.info("Successfully connected to queue actor")
else:
actor_options = actor_options or {}
if name is not None:
actor_options["name"] = name
self.actor = ray.remote(_QueueActor).options(
**actor_options).remote(self.num_queues, self.maxsize)
logger.info("Successfully spun up queue actor")
def __len__(self) -> int:
return sum(
self.size(queue_idx) for queue_idx in range(self.num_queues))
def size(self, queue_idx: int) -> int:
"""The size of the queue."""
return ray.get(self.actor.qsize.remote(queue_idx))
def qsize(self, queue_idx: int) -> int:
"""The size of the queue."""
return self.size(queue_idx)
def empty(self, queue_idx: int) -> bool:
"""Whether the queue is empty."""
return ray.get(self.actor.empty.remote(queue_idx))
def full(self, queue_idx: int) -> bool:
"""Whether the queue is full."""
return ray.get(self.actor.full.remote(queue_idx))
def put(self,
queue_idx: int,
item: Any,
block: bool = True,
timeout: Optional[float] = None) -> None:
"""Adds an item to the queue.
If block is True and the queue is full, blocks until the queue is no
longer full or until timeout.
There is no guarantee of order if multiple producers put to the same
full queue.
Raises:
Full: if the queue is full and blocking is False.
Full: if the queue is full, blocking is True, and it timed out.
ValueError: if timeout is negative.
"""
if not block:
try:
ray.get(self.actor.put_nowait.remote(queue_idx, item))
except asyncio.QueueFull:
raise Full
else:
if timeout is not None and timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
ray.get(self.actor.put.remote(queue_idx, item, timeout))
def put_batch(self,
queue_idx: int,
items: Iterable,
block: bool = True,
timeout: Optional[float] = None) -> None:
"""Adds an item to the queue.
If block is True and the queue is full, blocks until the queue is no
longer full or until timeout.
There is no guarantee of order if multiple producers put to the same
full queue.
Raises:
Full: if the queue is full and blocking is False.
Full: if the queue is full, blocking is True, and it timed out.
ValueError: if timeout is negative.
"""
if not block:
try:
ray.get(self.actor.put_nowait_batch.remote(queue_idx, items))
except asyncio.QueueFull:
raise Full
else:
if timeout is not None and timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
ray.get(self.actor.put_batch.remote(queue_idx, items, timeout))
async def put_async(self,
queue_idx: int,
item: Any,
block: bool = True,
timeout: Optional[float] = None) -> None:
"""Adds an item to the queue.
If block is True and the queue is full,
blocks until the queue is no longer full or until timeout.
There is no guarantee of order if multiple producers put to the same
full queue.
Raises:
Full: if the queue is full and blocking is False.
Full: if the queue is full, blocking is True, and it timed out.
ValueError: if timeout is negative.
"""
if not block:
try:
await self.actor.put_nowait.remote(queue_idx, item)
except asyncio.QueueFull:
raise Full
else:
if timeout is not None and timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
await self.actor.put.remote(queue_idx, item, timeout)
def get(self,
queue_idx: int,
block: bool = True,
timeout: Optional[float] = None) -> Any:
"""Gets an item from the queue.
If block is True and the queue is empty, blocks until the queue is no
longer empty or until timeout.
There is no guarantee of order if multiple consumers get from the
same empty queue.
Returns:
The next item in the queue.
Raises:
Empty: if the queue is empty and blocking is False.
Empty: if the queue is empty, blocking is True, and it timed out.
ValueError: if timeout is negative.
"""
if not block:
try:
return ray.get(self.actor.get_nowait.remote(queue_idx))
except asyncio.QueueEmpty:
raise Empty
else:
if timeout is not None and timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
return ray.get(self.actor.get.remote(queue_idx, timeout))
async def get_async(self,
queue_idx: int,
block: bool = True,
timeout: Optional[float] = None) -> Any:
"""Gets an item from the queue.
There is no guarantee of order if multiple consumers get from the
same empty queue.
Returns:
The next item in the queue.
Raises:
Empty: if the queue is empty and blocking is False.
Empty: if the queue is empty, blocking is True, and it timed out.
ValueError: if timeout is negative.
"""
if not block:
try:
return await self.actor.get_nowait.remote(queue_idx)
except asyncio.QueueEmpty:
raise Empty
else:
if timeout is not None and timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
return await self.actor.get.remote(queue_idx, timeout)
def put_nowait(self, queue_idx: int, item: Any) -> None:
"""Equivalent to put(item, block=False).
Raises:
Full: if the queue is full.
"""
return self.put(queue_idx, item, block=False)
def put_nowait_batch(self, queue_idx: int, items: Iterable) -> None:
"""Takes in a list of items and puts them into the queue in order.
Raises:
Full: if the items will not fit in the queue
"""
if not isinstance(items, Iterable):
raise TypeError("Argument 'items' must be an Iterable")
ray.get(self.actor.put_nowait_batch.remote(queue_idx, items))
def get_nowait(self, queue_idx: int) -> Any:
"""Equivalent to get(block=False).
Raises:
Empty: if the queue is empty.
"""
return self.get(queue_idx, block=False)
def get_nowait_batch(self, queue_idx: int, num_items: int) -> List[Any]:
"""Gets items from the queue and returns them in a
list in order.
Raises:
Empty: if the queue does not contain the desired number of items
"""
if not isinstance(num_items, int):
raise TypeError("Argument 'num_items' must be an int")
if num_items < 0:
raise ValueError("'num_items' must be nonnegative")
return ray.get(
self.actor.get_nowait_batch.remote(queue_idx, num_items))
def shutdown(self, force: bool = False, grace_period_s: int = 5) -> None:
"""Terminates the underlying QueueActor.
All of the resources reserved by the queue will be released.
Args:
force (bool): If True, forcefully kill the actor, causing an
immediate failure. If False, graceful
actor termination will be attempted first, before falling back
to a forceful kill.
grace_period_s (int): If force is False, how long in seconds to
wait for graceful termination before falling back to
forceful kill.
"""
if self.actor:
if force:
ray.kill(self.actor, no_restart=True)
else:
done_ref = self.actor.__ray_terminate__.remote()
done, not_done = ray.wait([done_ref], timeout=grace_period_s)
if not_done:
ray.kill(self.actor, no_restart=True)
self.actor = None
def connect_queue_actor(name, num_retries=5):
"""
Connect to the named actor denoted by `name`, retrying up to
`num_retries` times. Note that the retry uses exponential backoff.
If max retries is reached without connecting, an exception is raised.
"""
retries = 0
sleep_dur = 1
last_exc = None
while retries < num_retries:
try:
return ray.get_actor(name)
except Exception as e:
retries += 1
logger.info(
f"Couldn't connect to queue actor {name}, trying again in "
f"{sleep_dur} seconds: {retries} / {num_retries}, error: "
f"{e!s}")
time.sleep(sleep_dur)
sleep_dur *= 2
last_exc = e
raise ValueError(f"Unable to connect to queue actor {name} after "
f"{num_retries} retries. Last error: {last_exc!s}")
class _QueueActor:
def __init__(self, num_queues, maxsize):
logger.info(f"Initializing _QueueActor with num_queues: {num_queues} and max_size {maxsize} ")
self.maxsize = maxsize
self.queues = [asyncio.Queue(self.maxsize) for _ in range(num_queues)]
def qsize(self, queue_idx: int):
return self.queues[queue_idx].qsize()
def empty(self, queue_idx: int):
return self.queues[queue_idx].empty()
def full(self, queue_idx: int):
return self.queues[queue_idx].full()
async def put(self, queue_idx: int, item, timeout=None):
try:
await asyncio.wait_for(self.queues[queue_idx].put(item), timeout)
except asyncio.TimeoutError:
raise Full
async def put_batch(self, queue_idx: int, items, timeout=None):
for item in items:
try:
await asyncio.wait_for(self.queues[queue_idx].put(item),
timeout)
except asyncio.TimeoutError:
raise Full
async def get(self, queue_idx: int, timeout=None):
try:
return await asyncio.wait_for(self.queues[queue_idx].get(),
timeout)
except asyncio.TimeoutError:
raise Empty
def put_nowait(self, queue_idx: int, item):
self.queues[queue_idx].put_nowait(item)
def put_nowait_batch(self, queue_idx: int, items):
# If maxsize is 0, queue is unbounded, so no need to check size.
if (self.maxsize > 0
and len(items) + self.qsize(queue_idx) > self.maxsize):
raise Full(f"Cannot add {len(items)} items to queue of size "
f"{self.qsize()} and maxsize {self.maxsize}.")
for item in items:
self.queues[queue_idx].put_nowait(item)
def get_nowait(self, queue_idx: int):
return self.queues[queue_idx].get_nowait()
def get_nowait_batch(self, queue_idx: int, num_items):
if num_items > self.qsize(queue_idx):
raise Empty(f"Cannot get {num_items} items from queue of size "
f"{self.qsize()}.")
return [self.queues[queue_idx].get_nowait() for _ in range(num_items)]
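# A minimal usage sketch (assuming ray.init() has already been called; the actor name is
# illustrative). A producer creates the named queue actor, a consumer connects to it:
#
# producer side:
#   q = MultiQueue(num_queues=2, name="shuffle_queue")
#   q.put(0, "item-a")
#
# consumer side (e.g. another driver or worker):
#   q = MultiQueue(num_queues=2, name="shuffle_queue", connect=True)
#   item = q.get(0)  # -> "item-a"
#
# q.shutdown() releases the underlying actor once the queue is no longer needed.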
|
class ThemeBoneColorSet:
active = None
normal = None
select = None
show_colored_constraints = None
|
counter = 0
while counter < 10:
counter += 1
if counter == 3:
continue
print("No teller vi: " + str(counter))
|
"""
572. Subtree of Another Tree
Example 1:
Given tree s:
     3
    / \
   4   5
  / \
 1   2
Given tree t:
   4
  / \
 1   2
Return true, because t has the same structure and node values with a subtree of s.
Example 2:
Given tree s:
     3
    / \
   4   5
  / \
 1   2
    /
   0
Given tree t:
   4
  / \
 1   2
Return false.
"""
class Solution:
def isSubtree(self, s, t):
"""
:type s: TreeNode
:type t: TreeNode
:rtype: bool
"""
samestructure = lambda na, nb: True if not na and not nb \
else False if na and not nb or not na and nb \
else na.val == nb.val and samestructure(na.left, nb.left) and samestructure(na.right, nb.right)
def dfs(s, t):
return samestructure(s, t) or s and any((dfs(s.left, t), dfs(s.right, t)))
return dfs(s,t)
class Solution(object):
def isSubtree(self, s, t):
def convert(p):
return "^" + str(p.val) + "#" + convert(p.left) + convert(p.right) if p else "$"
return convert(t) in convert(s)
class Solution:
def isSubtree(self, s, t):
return self.preorder(s).find(self.preorder(t)) != -1
def preorder(self, node):
stack, ans = [node], ''
while stack:
curr = stack.pop()
if curr:
ans += ',%d' % curr.val
stack.append(curr.right), stack.append(curr.left)
else:
ans += ',#'
return ans
class Solution:
def isSubtree(self, s: TreeNode, t: TreeNode) -> bool:
def serialize(root):
code = ""
stk = [root]
while stk:
top = stk.pop()
if not top:
code += "# "
else:
code += "b"+str(top.val) + "e "
stk.extend([top.left, top.right])
return code
return serialize(t) in serialize(s)
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2012 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.plugins.base import register_plugin
from sleekxmpp.features.feature_preapproval.preapproval import FeaturePreApproval
from sleekxmpp.features.feature_preapproval.stanza import PreApproval
register_plugin(FeaturePreApproval)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 2 11:18:20 2021
@author: h2jw
"""
import pandas as pd
# SELECT TEST VISUALIZATION NUMBER
nb = 4
#%%
topic_desc = f"/Users/h2jw/Documents/GitHub/NLP-FOMC/LDA_qje/LDA QJE test {nb}/topic_description.csv"
t_desc = pd.read_csv(topic_desc)
pres = f"/Users/h2jw/Documents/GitHub/NLP-FOMC/LDA_qje/LDA QJE test {nb}/president_top_topics.csv"
pres_topics = pd.read_csv(pres)
dico = f'/Users/h2jw/Documents/GitHub/NLP-FOMC/LDA_qje/LDA QJE test {nb}/dict.csv'
dico_topics = pd.read_csv(dico)
dt_query = f"/Users/h2jw/Documents/GitHub/NLP-FOMC/LDA_qje/LDA QJE test {nb}/dt_query.csv"
query = pd.read_csv(dt_query)
dt = f"/Users/h2jw/Documents/GitHub/NLP-FOMC/LDA_qje/LDA QJE test {nb}/dt.csv"
dt_df = pd.read_csv(dt)
tr = f"/Users/h2jw/Documents/GitHub/NLP-FOMC/LDA_qje/LDA QJE test {nb}/tfidf_ranking.csv"
dt_tr = pd.read_csv(tr)
df_final = pd.read_csv(f"/Users/h2jw/Documents/GitHub/NLP-FOMC/LDA_qje/LDA QJE test {nb}/final_output_agg.csv")
df_final = df_final.astype({ 'T0':'float64', 'T1':'float64', 'T2':'float64', 'T3':'float64', 'T4':'float64', 'T5':'float64', 'T6':'float64',
'T7':'float64', 'T8':'float64', 'T9':'float64', 'T10':'float64', 'T11':'float64', 'T12':'float64', 'T13':'float64', 'T14':'float64', 'T15':'float64', 'T16':'float64',
'T17':'float64', 'T18':'float64', 'T19':'float64', 'T20':'float64', 'T21':'float64', 'T22':'float64', 'T23':'float64', 'T24':'float64', 'T25':'float64', 'T26':'float64',
'T27':'float64', 'T28':'float64', 'T29':'float64'})
df_heatmap = df_final.drop(columns='year').set_index('chair_in_charge')
#%%
from tqdm import trange
import numpy as np
l_scores = [t_desc.iloc[0].tolist()[1:14]]
l_col0 = t_desc.columns.tolist()[1:14]
l_topics = [l_col0]
for i in trange(1,30):
l_topics.append(t_desc.iloc[2*i-1].tolist()[1:14])
l_scores.append(t_desc.iloc[2*i].tolist()[1:14])
l_scores = [np.float_(elem) for elem in l_scores]
#%% SAME VISUALS AS IN ARTICLE
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(20,10))
sns.heatmap(l_scores,cmap="Purples",annot=l_topics, fmt="")
plt.title("Topics ")
plt.show()
#%% VISUALS PER CHAIR PER YEAR
plt.figure()
df_final2 = df_final.set_index(['chair_in_charge', 'year'])
sns.heatmap(df_final2)
plt.title("Distribution des topics par année")
plt.show()
#%% TFIDF RANK
dt_tr['score']=dt_tr['49.296371']
plt.plot(dt_tr.score)
|
import numpy as np
v = np.array([1,2,3])
print("Vector")
print(v) # [1 2 3]
np.shape(v) # (3,)
# Create a column matrix
v[:, np.newaxis]
"""
v[:, np.newaxis] will be:
array([[1],
[2],
[3]])
"""
v[:,np.newaxis].shape # (3, 1)
# Row matrix
v[np.newaxis,:].shape # (1, 3)
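# A small additional note: the same shapes can be obtained with reshape
v.reshape(3, 1).shape # (3, 1), same as v[:, np.newaxis]
v.reshape(1, 3).shape # (1, 3), same as v[np.newaxis, :]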
|
"""
Given a circular array (the next element of the last element is the first
element of the array), print the Next Greater Number for every element. The
Next Greater Number of a number x is the first greater number to its
traversing-order next in the array, which means you could search circularly
to find its next greater number. If it doesn't exist, output -1 for this
number.
Input: [1,2,1] (circular array) Output: [2,-1,2]
Explanation: The first 1's next greater number is 2; The number 2 can't find
next greater number; The second 1's next greater number needs to search
circularly, which is also 2.
IDEA:
use stack to hold only those elements which are > current
array stack result
-------------------------------------------
[1, 2, 1] [] [0,0,-1]
|
[1, 2, 1] [2] [0,-1,-1] <-- '1' has been removed, '2' was added during normal cycle
|
[1, 2, 1] [2,1] [2,-1,-1] <-- first hit, '1' was added during normal cycle
|
In order to get the correct answer, the 2nd round is needed
[1, 2, 1] [2,1] [2,-1, 2] <-- second hit, first '1' has been removed, then last '1' was added during normal cycle
|
and so on
T(2n + n)
"""
class Solution503:
pass
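# A sketch implementation of the stack approach described in the docstring above.
# The class name below is chosen here (not from the original file); LeetCode 503
# expects a nextGreaterElements(nums) -> List[int] method.
class Solution503Sketch:
    def nextGreaterElements(self, nums):
        n = len(nums)
        res = [-1] * n
        stack = []  # indices whose next greater element is still unknown
        # iterate twice over the array to simulate circularity
        for i in range(2 * n):
            while stack and nums[stack[-1]] < nums[i % n]:
                res[stack.pop()] = nums[i % n]
            if i < n:
                stack.append(i)
        return res

# Example: Solution503Sketch().nextGreaterElements([1, 2, 1]) -> [2, -1, 2]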
|
# Copyright 2018 BLEMUNDSBURY AI LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cape_userdb.user import User
from peewee import IntegrityError
import pytest
def test_user_creation():
try:
del_user = User.get('user_id', 'fake-id')
del_user.delete_instance()
except:
pass
user = User(user_id='fake-id', password='test')
user.save()
test_user = User.get('user_id', 'fake-id')
assert user == test_user
user.delete_instance()
def test_unique_user():
try:
del_user = User.get('user_id', 'fake-id')
del_user.delete_instance()
except:
pass
user = User(user_id='fake-id', password='test')
user.save()
with pytest.raises(IntegrityError):
duplicate_user = User(user_id='fake-id', password='test')
duplicate_user.save()
user.delete_instance()
def test_delete_user():
try:
del_user = User.get('user_id', 'fake-id')
del_user.delete_instance()
except:
pass
user = User(user_id='fake-id', password='test')
user.save()
print(user.__dict__)
user.delete_instance()
test_user = User.get('user_id', 'fake-id')
assert test_user is None
|
#
# PySNMP MIB module JUNIPER-LSYSSP-NATDSTRULE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/JUNIPER-LSYSSP-NATDSTRULE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:49:03 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
jnxLsysSpNATdstrule, = mibBuilder.importSymbols("JUNIPER-LSYS-SECURITYPROFILE-MIB", "jnxLsysSpNATdstrule")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
NotificationType, Integer32, Unsigned32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ObjectIdentity, TimeTicks, Counter64, ModuleIdentity, Bits, iso, IpAddress, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Integer32", "Unsigned32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ObjectIdentity", "TimeTicks", "Counter64", "ModuleIdentity", "Bits", "iso", "IpAddress", "Counter32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
jnxLsysSpNATdstruleMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1))
if mibBuilder.loadTexts: jnxLsysSpNATdstruleMIB.setLastUpdated('201005191644Z')
if mibBuilder.loadTexts: jnxLsysSpNATdstruleMIB.setOrganization('Juniper Networks, Inc.')
jnxLsysSpNATdstruleObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 1))
jnxLsysSpNATdstruleSummary = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 2))
jnxLsysSpNATdstruleTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 1, 1), )
if mibBuilder.loadTexts: jnxLsysSpNATdstruleTable.setStatus('current')
jnxLsysSpNATdstruleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 1, 1, 1), ).setIndexNames((1, "JUNIPER-LSYSSP-NATDSTRULE-MIB", "jnxLsysSpNATdstruleLsysName"))
if mibBuilder.loadTexts: jnxLsysSpNATdstruleEntry.setStatus('current')
jnxLsysSpNATdstruleLsysName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 1, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64)))
if mibBuilder.loadTexts: jnxLsysSpNATdstruleLsysName.setStatus('current')
jnxLsysSpNATdstruleProfileName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 1, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpNATdstruleProfileName.setStatus('current')
jnxLsysSpNATdstruleUsage = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 1, 1, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpNATdstruleUsage.setStatus('current')
jnxLsysSpNATdstruleReserved = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 1, 1, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpNATdstruleReserved.setStatus('current')
jnxLsysSpNATdstruleMaximum = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 1, 1, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpNATdstruleMaximum.setStatus('current')
jnxLsysSpNATdstruleUsedAmount = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 2, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpNATdstruleUsedAmount.setStatus('current')
jnxLsysSpNATdstruleMaxQuota = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 2, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpNATdstruleMaxQuota.setStatus('current')
jnxLsysSpNATdstruleAvailableAmount = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 2, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpNATdstruleAvailableAmount.setStatus('current')
jnxLsysSpNATdstruleHeaviestUsage = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 2, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpNATdstruleHeaviestUsage.setStatus('current')
jnxLsysSpNATdstruleHeaviestUser = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 2, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpNATdstruleHeaviestUser.setStatus('current')
jnxLsysSpNATdstruleLightestUsage = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 2, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpNATdstruleLightestUsage.setStatus('current')
jnxLsysSpNATdstruleLightestUser = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 13, 1, 2, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpNATdstruleLightestUser.setStatus('current')
mibBuilder.exportSymbols("JUNIPER-LSYSSP-NATDSTRULE-MIB", jnxLsysSpNATdstruleHeaviestUser=jnxLsysSpNATdstruleHeaviestUser, jnxLsysSpNATdstruleObjects=jnxLsysSpNATdstruleObjects, jnxLsysSpNATdstruleLightestUsage=jnxLsysSpNATdstruleLightestUsage, jnxLsysSpNATdstruleMIB=jnxLsysSpNATdstruleMIB, jnxLsysSpNATdstruleProfileName=jnxLsysSpNATdstruleProfileName, PYSNMP_MODULE_ID=jnxLsysSpNATdstruleMIB, jnxLsysSpNATdstruleLightestUser=jnxLsysSpNATdstruleLightestUser, jnxLsysSpNATdstruleUsage=jnxLsysSpNATdstruleUsage, jnxLsysSpNATdstruleAvailableAmount=jnxLsysSpNATdstruleAvailableAmount, jnxLsysSpNATdstruleLsysName=jnxLsysSpNATdstruleLsysName, jnxLsysSpNATdstruleMaximum=jnxLsysSpNATdstruleMaximum, jnxLsysSpNATdstruleReserved=jnxLsysSpNATdstruleReserved, jnxLsysSpNATdstruleEntry=jnxLsysSpNATdstruleEntry, jnxLsysSpNATdstruleUsedAmount=jnxLsysSpNATdstruleUsedAmount, jnxLsysSpNATdstruleHeaviestUsage=jnxLsysSpNATdstruleHeaviestUsage, jnxLsysSpNATdstruleMaxQuota=jnxLsysSpNATdstruleMaxQuota, jnxLsysSpNATdstruleSummary=jnxLsysSpNATdstruleSummary, jnxLsysSpNATdstruleTable=jnxLsysSpNATdstruleTable)
|
from django.shortcuts import render
from .models import Carousel, LearnLink, LearnPresentation
from play.models import Game
def homepage(request):
item_list = Carousel.objects.all()
game_list = Game.objects.all()
context = {
'game_list': game_list,
'item_list': item_list,
'item_ids': range(len(item_list)),
}
return render(request, 'homepage.html', context)
def learn(request):
game_list=Game.objects.all()
link_list = LearnLink.objects.all()
presentation_list = LearnPresentation.objects.all()
context = {
'game_list': game_list,
'link_list': link_list,
'presentation_list': presentation_list,
}
return render(request,'learn.html',context)
|
# -*- coding:utf-8 -*-
import re
import os.path
def is_chinese(uchar):
"""判断一个unicode是否是汉字"""
if uchar >= u'\u4e00' and uchar<=u'\u9fa5':
return True
else:
return False
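# Illustrative checks of the range test above (not part of the original script):
#   is_chinese(u'\u4e2d') -> True   (a CJK character)
#   is_chinese(u'a')      -> False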
f1=open('inf.txt','r')
f2=open('outf.txt','w')
with open('word.json','r') as dtf:
pydt=eval(dtf.readline())
newline=''
ish=0  # 1 if the previous character was converted to pinyin (used to insert '/' separators)
for line in f1:
line=line.decode('utf8')
for c in line:
if is_chinese(c):
c=pydt[c]
if ish==1:
c='/'+c
ish=1
else:
ish=0
newline=newline+c
newline=newline.encode('utf8')
f2.write(newline)
newline=''
f1.close()
f2.close()
|
# Copyright 2020 The KNIX Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
import sys
import time
import unittest
sys.path.append("../")
from mfn_test_utils import MFNTest
class TriggersStorageTest(unittest.TestCase):
# @unittest.skip("")
def test_triggers_storage(self):
test = MFNTest(test_name='triggers_storage',
workflow_filename='wf_triggers_storage.json')
nonce = str(int(time.time() * 1000))
input_data = []
input_data.append("wf_triggers_storage")
input_data.append("triggerable_bucket")
input_data.append("triggerable_key")
input_data.append(nonce)
response = test.execute(input_data)
logs = test.get_workflow_logs()
wflog = logs["log"]
log_lines = wflog.split("\n")
        received_response = []
        main_trigger_logs = []
        other_trigger_logs = []
        try:
            received_response = [response["trigger_start_main_wf"], response["explicit_start_main_wf"],
                                 response["trigger_start_other_wf"], response["explicit_start_other_wf"]]
            main_trigger_logs = response["main_trigger_logs"]
            other_trigger_logs = response["other_trigger_logs"]
        except Exception as e:
            print("Error: " + str(e))
        if self.matches_expected_response(received_response) == True and self.log_lines_match(main_trigger_logs, other_trigger_logs, nonce) == True:
test.report(True, str(input_data), input_data, response)
else:
test.report(False, str(input_data), input_data, response)
for line in log_lines:
print(line.strip())
test.undeploy_workflow()
test.cleanup()
    def matches_expected_response(self, received_response):
        expected_response = [4, 1, 2, 0]
        if received_response == expected_response:
            return True
        else:
            print("ERROR: matches_expected_response = False: received response: " +
                  str(received_response))
            return False
def log_lines_match(self, main_trigger_logs, other_trigger_logs, nonce):
main_log_lines_suffix = [1, 2, 3, 4]
if len(main_trigger_logs) != len(main_log_lines_suffix):
print("ERROR: log_lines_match = False, len(main_trigger_logs) does not match")
return False
for i in range(4):
suffix = main_log_lines_suffix[i]
to_match = f"_!_TRIGGER_START_{nonce};{suffix}"
logline = main_trigger_logs[i].strip()
if to_match not in logline:
print("ERROR: log_lines_match = False, main_trigger_logs mismatch: " +
to_match + " not found in " + logline)
return False
other_log_lines_suffix = [1, 3]
if len(other_trigger_logs) != len(other_log_lines_suffix):
print(
"ERROR: log_lines_match = False, len(other_trigger_logs) does not match")
return False
for i in range(2):
suffix = other_log_lines_suffix[i]
to_match = f"_!_TRIGGER_START_{nonce};{suffix}"
logline = other_trigger_logs[i].strip()
if to_match not in logline:
print("ERROR: log_lines_match = False, other_trigger_logs mismatch: " +
to_match + " not found in " + logline)
return False
return True
|
from unittest import TestCase
from earful.contacts import (
EmailAddress,
PhoneNumber,
HipChat,
Recipient,
Group,
)
class ContactInformationTest(TestCase):
def test_hipchat_defaults(self):
instance = HipChat('contactname', 'roomname')
self.assertEqual(instance.name, 'contactname')
self.assertEqual(instance.weight, 100)
self.assertEqual(instance.room, 'roomname')
self.assertTrue(instance.notify)
self.assertFalse(instance.mention)
def test_hipchat_withuser(self):
instance = HipChat('contactname', 'roomname', username='person')
self.assertFalse(instance.notify)
self.assertTrue(instance.mention)
def test_hipchat_setprefs(self):
instance = HipChat('contactname', 'roomname', username='person', notify=True, mention=False)
self.assertTrue(instance.notify)
self.assertFalse(instance.mention)
class RecipientTest(TestCase):
def test_recipient_defaults(self):
r = Recipient('recipientname')
self.assertEqual(list(r.contacts()), [])
def test_simple_recipient(self):
c = [EmailAddress('emailname', 'emailaddr')]
r = Recipient('recipientname', contacts=c)
self.assertEqual(list(r.contacts()), c)
def test_less_simple_recipient(self):
c = [
EmailAddress('emailname', 'emailaddr'),
PhoneNumber('phonename', 'phonenum'),
]
r = Recipient('recipientname', contacts=c)
self.assertEqual(list(r.contacts()), c)
def test_contacts_by_type(self):
c = [
EmailAddress('emailname', 'emailaddr'),
PhoneNumber('phonename', 'phonenum'),
]
r = Recipient('recipientname', contacts=c)
self.assertEqual(list(r.contacts(of_type=EmailAddress)), [c[0]])
def test_contacts_with_weight(self):
c = [
EmailAddress('emailname', 'emailaddr'),
EmailAddress('emailname', 'emailaddr', weight=50),
PhoneNumber('phonename', 'phonenum'),
]
r = Recipient('recipientname', contacts=c)
self.assertEqual(list(r.contacts()), c[1:])
def test_contacts_with_weight_all(self):
c = [
EmailAddress('emailname', 'emailaddr'),
EmailAddress('emailname', 'emailaddr', weight=50),
PhoneNumber('phonename', 'phonenum'),
]
r = Recipient('recipientname', contacts=c)
self.assertEqual(list(r.contacts(include_all=True)), [c[1], c[0], c[2]])
def test_contacts_with_weight_type(self):
c = [
EmailAddress('emailname', 'emailaddr'),
EmailAddress('emailname', 'emailaddr', weight=50),
PhoneNumber('phonename', 'phonenum'),
]
r = Recipient('recipientname', contacts=c)
self.assertEqual(list(r.contacts(of_type=EmailAddress)), [c[1]])
def test_contacts_having(self):
c = [
PhoneNumber('phonename', 'phonenum', sms_ok=False),
PhoneNumber('phonename', 'phonenum', sms_ok=True),
]
r = Recipient('recipientname', contacts=c)
self.assertEqual(list(r.contacts(sms_ok=True)), [c[1]])
class GroupTest(TestCase):
def test_groups(self):
t = EmailAddress('emailname', 'emailaddr')
r = Recipient('recipientname', contacts=[t])
c = Group('c', recipients=[r])
b = Group('b', groups=[c])
a = Group('a', groups=[b])
self.assertEqual(list(a.groups(recursive=False)), [b])
self.assertEqual(list(a.recipients(recursive=False)), [])
self.assertEqual(list(a.contacts(recursive=False)), [])
self.assertEqual(list(a.groups()), [b, c])
self.assertEqual(list(a.recipients()), [r])
self.assertEqual(list(a.contacts()), [t])
|
#!/usr/bin/python
############################################################################
#
# mirror is a tool for handling MIRROR commands.
#
############################################################################
import argparse
import getopt
import json
import os
import re
import sys
import swsssdk
from swsssdk import ConfigDBConnector
from scripts.render_cli import show_cli_output
from os import path
CFG_MIRROR_SESSION_TABLE = "MIRROR_SESSION"
STATE_MIRROR_SESSION_TABLE = "MIRROR_SESSION_TABLE"
def show_session(session_name):
"""
    Show mirror session configuration. Temporary implementation; output will be moved to Jinja templates in a later commit.
:param session_name: Optional. Mirror session name. Filter sessions by specified name.
:return:
"""
configdb = ConfigDBConnector()
configdb.connect()
statedb = swsssdk.SonicV2Connector(host='127.0.0.1')
statedb.connect(statedb.STATE_DB)
sessions_db_info = configdb.get_table(CFG_MIRROR_SESSION_TABLE)
for key in sessions_db_info.keys():
state_db_info = statedb.get_all(statedb.STATE_DB, "{}|{}".format(STATE_MIRROR_SESSION_TABLE, key))
if state_db_info:
status = state_db_info.get("status", "inactive")
else:
status = "error"
sessions_db_info[key]["status"] = status
erspan_header = ("Name", "Status", "SRC IP", "DST IP", "GRE", "DSCP", "TTL", "Queue",
"Policer", "SRC Port", "Direction")
span_header = ("Name", "Status", "DST Port", "SRC Port", "Direction")
erspan_data = []
span_data = []
if session_name is None:
print("\nERSPAN Sessions")
print("---------------------------------------------------------------------------------------------------------")
print("%10s %6s %16s %16s %6s %6s %6s %6s %6s %12s %6s" %("Name", "Status", "SRC IP", "DST IP", "GRE", "DSCP", "TTL", "Queue", "Policer", "SRC Port", "Direction"))
for key, val in sessions_db_info.iteritems():
if session_name and key != session_name:
continue
if "src_ip" in val:
if session_name and key == session_name:
print("\nERSPAN Sessions")
print("---------------------------------------------------------------------------------------------------------")
print("%10s %6s %16s %16s %6s %6s %6s %6s %6s %12s %6s" %("Name", "Status", "SRC IP", "DST IP", "GRE", "DSCP", "TTL", "Queue", "Policer", "SRC Port", "Direction"))
print("%10s %6s %16s %16s %6s %6s %6s %6s %6s %12s %6s" %(key, val.get("status", ""), val.get("src_ip", ""), val.get("dst_ip", ""), val.get("gre_type", ""), val.get("dscp", ""),
val.get("ttl", ""), val.get("queue", ""), val.get("policer", ""),
val.get("src_port", ""), val.get("direction", "")))
if session_name is None:
print("\nSPAN Sessions")
print("---------------------------------------------------------------------------------------------------------")
print("%10s %6s %16s %16s %6s" %("Name", "Status", "DST Port", "SRC Port", "Direction"))
for key, val in sessions_db_info.iteritems():
if session_name and key != session_name:
continue
if "dst_port" in val:
if session_name and key == session_name:
print("\nSPAN Sessions")
print("---------------------------------------------------------------------------------------------------------")
print("%10s %6s %16s %16s %6s" %("Name", "Status", "DST Port", "SRC Port", "Direction"))
print("%10s %6s %16s %16s %6s" %(key, val.get("status", ""), val.get("dst_port", ""), val.get("src_port", ""), val.get("direction", "")))
def session(session_name):
"""
Show mirror session configuration.
:return:
"""
show_session(session_name)
def show_mirror(args):
"""
Add port mirror session
"""
session(args.session)
def config_span(args):
"""
Add port mirror session
"""
config_db = ConfigDBConnector()
config_db.connect()
session_info = {
}
if args.destination is not None:
session_info['dst_port'] = args.destination
if args.source is not None:
session_info['src_port'] = args.source
if args.direction is not None:
session_info['direction'] = args.direction
if args.dst_ip is not None:
session_info['dst_ip'] = args.dst_ip
if args.src_ip is not None:
session_info['src_ip'] = args.src_ip
if args.dscp is not None:
session_info['dscp'] = args.dscp
if args.ttl is not None:
session_info['ttl'] = args.ttl
if args.gre is not None:
session_info['gre_type'] = args.gre
if args.source is not None:
print("sucess. create mirror session " + args.session + " destination " + args.destination + " source " + args.source + " direction " + args.direction)
if args.dst_ip is not None:
print("sucess. create mirror session " + args.session + " dst_ip " + args.dst_ip + " src_ip " + args.src_ip + " dscp " + args.dscp + " ttl " + args.ttl)
config_db.set_entry("MIRROR_SESSION", args.session, session_info)
def remove_span(args):
"""
Delete mirror session
"""
config_db = ConfigDBConnector()
config_db.connect()
print("sucess. remove mirror session " + args.session)
config_db.set_entry("MIRROR_SESSION", args.session, None)
def main():
parser = argparse.ArgumentParser(description='Handles MIRROR commands',
version='1.0.0',
formatter_class=argparse.RawTextHelpFormatter,
epilog="""
    Examples:
    mirror -config -session sessionname -destination portname -source portname -direction rx/tx/both
    mirror -config -session sessionname -src_ip ipaddr -dst_ip ipaddr -dscp value -ttl value -gre value
    mirror -clear -session sessionname
    mirror -show
    mirror -show -session sessionname
""")
parser.add_argument('-clear', '--clear', action='store_true', help='Clear mirror information')
parser.add_argument('-show', '--show', action='store_true', help='Show mirror information')
parser.add_argument('-config', '--config', action='store_true', help='Config mirror information')
parser.add_argument('-session', '--session', type=str, help='mirror session name')
parser.add_argument('-destination', '--destination', help='destination port')
parser.add_argument('-source', '--source', type=str, help='mirror source port')
parser.add_argument('-direction', '--direction', type=str, help='mirror direction')
parser.add_argument('-dst_ip', '--dst_ip', help='ERSPAN destination ip address')
parser.add_argument('-src_ip', '--src_ip', help='ERSPAN source ip address')
parser.add_argument('-dscp', '--dscp', help='ERSPAN dscp')
parser.add_argument('-gre', '--gre', help='ERSPAN gre')
parser.add_argument('-ttl', '--ttl', help='ERSPAN ttl')
args = parser.parse_args()
if args.config:
config_span(args)
elif args.clear:
remove_span(args)
elif args.show:
show_mirror(args)
sys.exit(0)
if __name__ == "__main__":
main()
|
from django.views.generic.base import ContextMixin
from django.utils.translation import ugettext_lazy
# pylint: disable=too-few-public-methods
class OrganizationContextMixin(ContextMixin):
"""
This mixin provides extra context for organization views
"""
extra_context = {
"delete_dialog_title": ugettext_lazy(
"Please confirm that you really want to delete this organization"
),
"delete_dialog_text": ugettext_lazy(
"This will update all pages and users that are part of this organization."
),
}
|
#!/usr/bin/python
#
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
import os
import sys
def save_rmse_results_to_csv(rmses, prefix='', rmses_2=None, label_1=None, label_2=None):
mean_rmses_dataframe = pd.DataFrame()
labels = []
if label_1 and label_2 and rmses_2 is not None:
labels.append(label_1)
labels.append(label_2)
if labels:
mean_rmses_dataframe['Label'] = labels
mean_rmses_list = []
mean_rmses_list.append(rmses.mean())
relative_rmses = []
relative_change_in_rmses = []
if rmses_2 is not None:
mean_rmses_list.append(rmses_2.mean())
relative_rmses.append(mean_rmses_list[0] / mean_rmses_list[1])
relative_rmses.append(mean_rmses_list[1] / mean_rmses_list[0])
relative_change_in_rmses.append(mean_rmses_list[0] / mean_rmses_list[1] - 1.0)
relative_change_in_rmses.append(mean_rmses_list[1] / mean_rmses_list[0] - 1.0)
mean_rmses_dataframe['rel_' + prefix + 'rmse_%'] = relative_rmses
mean_rmses_dataframe['rel_' + prefix + 'rmse_delta_%'] = relative_change_in_rmses
mean_rmses_dataframe['mean_' + prefix + 'rmse'] = mean_rmses_list
mean_rmses_csv_file = 'mean_rmses.csv'
mean_rmses_dataframe.to_csv(mean_rmses_csv_file, index=False, mode='a')
return mean_rmses_list, labels, relative_rmses, relative_change_in_rmses
def rmse_plot(pdf, x_axis_vals, shortened_bag_names, rmses, prefix='', label_1='', rmses_2=None, label_2=''):
plt.figure()
plt.plot(x_axis_vals, rmses, 'b', label=label_1, linestyle='None', marker='o', markeredgewidth=0.1, markersize=10.5)
if rmses_2 is not None:
plt.plot(x_axis_vals,
rmses_2,
'r',
label=label_2,
linestyle='None',
marker='o',
markeredgewidth=0.1,
markersize=10.5)
plt.legend(prop={'size': 8}, bbox_to_anchor=(1.05, 1))
plt.xticks(x_axis_vals, shortened_bag_names, fontsize=7, rotation=20)
plt.ylabel(prefix + ' RMSE')
plt.title(prefix + ' RMSE vs. Bag')
x_range = x_axis_vals[len(x_axis_vals) - 1] - x_axis_vals[0]
x_buffer = x_range * 0.1
# Extend x axis on either side to make data more visible
plt.xlim([x_axis_vals[0] - x_buffer, x_axis_vals[len(x_axis_vals) - 1] + x_buffer])
plt.tight_layout()
pdf.savefig()
plt.close()
def save_rmse_stats_to_plot(pdf, rmses, prefix='', label_1='', rmses_2=None, label_2=''):
# Plot mean rmses
mean_rmses, labels, relative_rmses, relative_change_in_rmses = save_rmse_results_to_csv(
rmses, prefix, rmses_2, label_1, label_2)
mean_rmses_1_string = prefix + 'rmse: ' + str(mean_rmses[0])
if labels:
mean_rmses_1_string += ', label: ' + labels[0]
plt.figure()
plt.axis('off')
plt.text(0.0, 1.0, mean_rmses_1_string)
if len(mean_rmses) > 1:
mean_rmses_2_string = prefix + 'rmse: ' + str(mean_rmses[1])
if labels:
mean_rmses_2_string += ', label: ' + labels[1]
plt.text(0.0, 0.85, mean_rmses_2_string)
relative_rmses_string = prefix + 'rel rmse %: ' + str(100 * relative_rmses[0])
plt.text(0.0, 0.7, relative_rmses_string)
relative_rmses_change_string = prefix + 'rel change in rmse %: ' + str(100 * relative_change_in_rmses[0])
plt.text(0.0, 0.55, relative_rmses_change_string)
pdf.savefig()
def rmse_plots(pdf,
x_axis_vals,
shortened_bag_names,
rmses,
integrated_rmses,
orientation_rmses,
prefix='',
label_1='',
rmses_2=None,
integrated_rmses_2=None,
orientation_rmses_2=None,
label_2=''):
rmse_plot(pdf, x_axis_vals, shortened_bag_names, rmses, prefix + ' ', label_1, rmses_2, label_2)
if integrated_rmses is not None:
rmse_plot(pdf, x_axis_vals, shortened_bag_names, integrated_rmses, prefix + ' Integrated ', label_1,
integrated_rmses_2, label_2)
rmse_plot(pdf, x_axis_vals, shortened_bag_names, orientation_rmses, prefix + ' Orientation ', label_1,
orientation_rmses_2, label_2)
save_rmse_stats_to_plot(pdf, rmses, prefix + ' ', label_1, rmses_2, label_2)
if integrated_rmses is not None:
save_rmse_stats_to_plot(pdf, integrated_rmses, prefix + ' Integrated ', label_1, integrated_rmses_2, label_2)
save_rmse_stats_to_plot(pdf, orientation_rmses, prefix + ' Orientation ', label_1, orientation_rmses_2, label_2)
def create_plot(output_file, csv_file, label_1='', csv_file_2=None, label_2='', imu_augmented_2=True):
dataframe = pd.read_csv(csv_file)
dataframe.sort_values(by=['Bag'], inplace=True)
# Graph rmses
rmses = dataframe['rmse']
integrated_rmses = dataframe['integrated_rmse']
orientation_rmses = dataframe['orientation_rmse']
# IMU augmented rmses
imu_augmented_rmses = dataframe['imu_augmented_rmse']
imu_augmented_integrated_rmses = dataframe['imu_augmented_integrated_rmse']
imu_augmented_orientation_rmses = dataframe['imu_augmented_orientation_rmse']
# IMU bias tester rmses
imu_bias_tester_rmses = dataframe['imu_bias_tester_rmse']
imu_bias_tester_orientation_rmses = dataframe['imu_bias_tester_orientation_rmse']
bag_names = dataframe['Bag'].tolist()
max_name_length = 45
shortened_bag_names = [
bag_name[-1 * max_name_length:] if len(bag_name) > max_name_length else bag_name for bag_name in bag_names
]
x_axis_vals = range(len(shortened_bag_names))
rmses_2 = None
integrated_rmses_2 = None
orientation_rmses_2 = None
imu_augmented_rmses_2 = None
imu_augmented_integrated_rmses_2 = None
imu_augmented_orientation_rmses_2 = None
imu_bias_tester_rmses_2 = None
imu_bias_tester_orientation_rmses_2 = None
if (csv_file_2):
dataframe_2 = pd.read_csv(csv_file_2)
dataframe_2.sort_values(by=['Bag'], inplace=True)
# Graph rmses
rmses_2 = dataframe_2['rmse']
integrated_rmses_2 = dataframe_2['integrated_rmse']
orientation_rmses_2 = dataframe_2['orientation_rmse']
if (imu_augmented_2):
# IMU augmented rmses
imu_augmented_rmses_2 = dataframe_2['imu_augmented_rmse']
imu_augmented_integrated_rmses_2 = dataframe_2['imu_augmented_integrated_rmse']
imu_augmented_orientation_rmses_2 = dataframe_2['imu_augmented_orientation_rmse']
# IMU bias tester rmses
imu_bias_tester_rmses_2 = dataframe_2['imu_bias_tester_rmse']
imu_bias_tester_orientation_rmses_2 = dataframe_2['imu_bias_tester_orientation_rmse']
bag_names_2 = dataframe_2['Bag'].tolist()
if bag_names != bag_names_2:
print('Bag names for first and second csv file are not the same')
exit()
with PdfPages(output_file) as pdf:
rmse_plots(pdf, x_axis_vals, shortened_bag_names, rmses, integrated_rmses, orientation_rmses, '', label_1, rmses_2,
integrated_rmses_2, orientation_rmses_2, label_2)
if imu_augmented_2:
rmse_plots(pdf, x_axis_vals, shortened_bag_names, imu_augmented_rmses, imu_augmented_integrated_rmses,
imu_augmented_orientation_rmses, 'imu_augmented', label_1, imu_augmented_rmses_2,
imu_augmented_integrated_rmses_2, imu_augmented_orientation_rmses_2, label_2)
rmse_plots(pdf, x_axis_vals, shortened_bag_names, imu_bias_tester_rmses, None, imu_bias_tester_orientation_rmses,
'imu_bias_tester', label_1, imu_bias_tester_rmses_2, None, imu_bias_tester_orientation_rmses_2,
label_2)
else:
rmse_plots(pdf, x_axis_vals, shortened_bag_names, imu_augmented_rmses, imu_augmented_integrated_rmses,
imu_augmented_orientation_rmses, 'imu_augmented', label_1, rmses_2, integrated_rmses_2,
orientation_rmses_2, label_2 + ' no imu aug')
rmse_plots(pdf, x_axis_vals, shortened_bag_names, imu_bias_tester_rmses, None, imu_bias_tester_orientation_rmses,
'imu_bias_tester', label_1, imu_bias_tester_rmses_2, None, imu_bias_tester_orientation_rmses_2,
label_2)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Combined csv results, where each row is the result from a bag file
parser.add_argument('csv_file')
parser.add_argument('--output-file', default='bag_sweep_results.pdf')
parser.add_argument('--csv-file2', help='Optional second csv file to plot')
parser.add_argument('--label1', default='', help='Optional label for first csv file')
parser.add_argument('--label2', default='', help='Optional label for second csv file')
parser.add_argument('--no-imu-augmented2', dest='imu_augmented2', action='store_false')
args = parser.parse_args()
create_plot(args.output_file, args.csv_file, args.label1, args.csv_file2, args.label2, args.imu_augmented2)
|
#!/usr/bin/env python
from cStringIO import StringIO
import requests
import tarfile
import sys
install_dir = "/usr/local"
print "Downloading a CheesePi install into "+install_dir+"..."
# Location of the most recent release of the CheesePi code
code_url = "http://cheesepi.sics.se/files/cheesepi.tar.gz"
response = None
try:
response = requests.head(url=code_url)
except Exception as e:
print "Error: Could not make request to CheesePi server "+code_url+": "+str(e)
exit(1)
if response.status_code!=200:
print "Error: file %s was not available on server" % code_url
exit(1)
lastmodified = response.headers['last-modified']
#print lastmodified
# if we have downloaded since it was updated, do nothing
response = requests.get(code_url)
fd = StringIO(response.content)
tfile = tarfile.open(mode="r:gz", fileobj=fd)
try:
# should actually do this into /usr/local (or the correct cheesepi directory)
tfile.extractall(install_dir)
sys.path.append(install_dir)
import cheesepi
# record that we have just updated the code
cheesepi.config.set_last_updated()
except OSError:
print "Error: Can't untar the .tar.gz, you probably do not have permission, try sudo"
exit(1)
|
import os
from glob import glob
from monty.serialization import loadfn
from propnet import logger
from propnet.core.symbols import Symbol
# Auto loading of all allowed properties
# stores all loaded properties as PropertyMetadata instances in a dictionary,
# mapped to their names
DEFAULT_SYMBOLS = {}
_DEFAULT_SYMBOL_TYPE_FILES = glob(os.path.join(os.path.dirname(__file__),
'../symbols/**/*.yaml'),
recursive=True)
for f in _DEFAULT_SYMBOL_TYPE_FILES:
try:
symbol_type = Symbol.from_dict(loadfn(f))
DEFAULT_SYMBOLS[symbol_type.name] = symbol_type
if "{}.yaml".format(symbol_type.name) not in f:
raise ValueError('Name/filename mismatch in {}'.format(f))
except Exception as e:
logger.error('Failed to parse {}, {}.'.format(os.path.basename(f), e))
# Derived globals: units for each loaded symbol, and a tuple of all loaded symbol names.
DEFAULT_UNITS = {name: symbol.units
for name, symbol in DEFAULT_SYMBOLS.items()}
DEFAULT_SYMBOL_TYPE_NAMES = tuple(DEFAULT_SYMBOLS.keys())
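# Minimal lookup sketch (illustrative; which names exist depends on the yaml files
# discovered above, so 'band_gap' is only an assumed example):
#   symbol = DEFAULT_SYMBOLS.get('band_gap')
#   if symbol is not None:
#       print(symbol.name, DEFAULT_UNITS[symbol.name])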
|
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Empire_of_Movies.settings')
import django
django.setup()
from articles.models import Movie
movie_list = Movie.objects.all()
for movie in movie_list:
movie.update()
|
# To generate the password hash and overwrite the setting manually
from notebook.auth import passwd
import os, re, sys
print("Generating config")
config_path = os.path.expanduser('~/.jupyter/jupyter_notebook_config.py')
with open(config_path,'r') as f:
content = f.read()
if "disable_password" in os.environ and os.environ['disable_password'] == '1':
print("disabling password")
content = re.sub(r"^.*c\.NotebookApp\.password .*\n", f"#c.NotebookApp.password = u''",content,flags=re.M)
else:
print("detecting password")
if "jupyter_password" in os.environ and os.environ['jupyter_password'] != '':
print("Password found. configurating hashed password with jupyter")
hashed = passwd(os.environ['jupyter_password'])
os.environ['jupyter_password'] = "" # Reset password env for security reasons
content = re.sub(r"^.*c\.NotebookApp\.password .*\n", f"c.NotebookApp.password = u'{hashed}'\n",content,flags=re.M)
else:
print("No password is specified. Will run jupyter in token mode")
if "base_url" in os.environ:
print("Configurating base url")
base_url = os.environ['base_url']
content = re.sub(r"^.*c\.NotebookApp\.base_url .*\n", f"c.NotebookApp.base_url = {base_url}\n",content,flags=re.M)
# print(re.findall(r"^.*c\.NotebookApp\.base_url .*\n",content,flags=re.M))
with open(config_path,'w') as f:
print("Writing changes to config file")
f.write(content)
# Launch jupyter book from here to prevent a password leak
print("Attempting to launch jupyter")
try:
os.system("/opt/conda/bin/jupyter notebook --notebook-dir=/opt/notebooks --ip='0.0.0.0' --port=8888 --no-browser --allow-root")
except:
print("Unexpected error:", sys.exc_info()[0])
sys.exit("Unable to startup jupyter")
|
from back.models import ClusterLogs
def test_post_model(session):
post = ClusterLogs(DeviceID="1")
session.add(post)
session.commit()
assert post.id > 0
|
from collections import Counter
with open('./inputs/02.txt') as f:
ids = f.readlines()
def count23(id):
count = Counter(id)
exactly_two = bool({k for k, v in count.items() if v == 2})
exactly_three = bool({k for k, v in count.items() if v == 3})
return exactly_two, exactly_three
def solve(ids):
twos = 0
threes = 0
for id in ids:
two, three = count23(id)
twos += two
threes += three
return twos, threes
test = """
abcdef
bababc
abbcde
abcccd
aabcdd
abcdee
ababab
"""
test_ids = test.splitlines()
print(count23('bababc'))
a, b = solve(test_ids)
print(a, b, a*b)
a, b = solve(ids)
print(len(ids))
print(a, b, a*b)
# second part
# distance between ids
def distance(id1, id2):
return sum([0 if c1 == c2 else 1 for c1, c2 in zip(id1, id2)])
print(distance("abcde", "axcye"))
# cycle over all pairs of ids
for i, id1 in enumerate(ids):
for id2 in ids[(i+1):]:
dist = distance(id1, id2)
if dist == 1:
break
if dist == 1:
break
print(id1)
print(id2)
print(''.join([c for i, c in enumerate(id1) if id2[i] == c]))
|
import time
from multiprocessing import cpu_count
import numpy as np
import pandas as pd
from mlforecast.core import TimeSeries
from mlforecast.forecast import Forecast
from sklearn.linear_model import LinearRegression
from window_ops.ewm import ewm_mean
from window_ops.expanding import expanding_mean
def main() -> None:
train = pd.read_csv('data/prepared-data-train.csv')
train['ds'] = pd.to_datetime(train['ds'])
train = train.set_index('unique_id')
ts = TimeSeries(
freq='M',
num_threads=cpu_count(),
lags=list(range(1, 4)),
lag_transforms={
i: [(expanding_mean),
(ewm_mean, 0.1),
(ewm_mean, 0.3),
(ewm_mean, 0.5),
(ewm_mean, 0.7),
(ewm_mean, 0.9)]
for i in range(1, 4)
},
date_features=['year', 'quarter', 'month']
)
start = time.time()
model = LinearRegression()
fcst = Forecast(model, ts)
fcst.fit(train)
forecasts = fcst.predict(4).rename(columns={'y_pred': 'mlforecast_lr'})
end = time.time()
print(f'Time: {end - start}')
forecasts = forecasts.reset_index()
forecasts.to_csv('data/mlforecast-forecasts.csv', index=False)
if __name__ == '__main__':
main()
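# Expected input layout (inferred from the code above, not stated in it):
# data/prepared-data-train.csv with a 'unique_id' column, a 'ds' date column and the
# target series (conventionally named 'y' by mlforecast).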
|
import pytest
from asynctest import (
mock as async_mock,
TestCase as AsyncTestCase,
)
from ......messaging.request_context import RequestContext
from ......messaging.responder import MockResponder
from ......storage.error import StorageNotFoundError
from ......transport.inbound.receipt import MessageReceipt
from ...messages.presentation_request import PresentationRequest
from .. import presentation_request_handler as handler
class TestPresentationRequestHandler(AsyncTestCase):
async def test_called(self):
request_context = RequestContext()
request_context.connection_record = async_mock.MagicMock()
request_context.connection_record.connection_id = "dummy"
request_context.message_receipt = MessageReceipt()
request_context.message = PresentationRequest()
request_context.message.indy_proof_request = async_mock.MagicMock(
return_value=async_mock.MagicMock()
)
with async_mock.patch.object(
handler, "PresentationManager", autospec=True
) as mock_pres_mgr, async_mock.patch.object(
handler, "V10PresentationExchange", autospec=True
) as mock_pres_ex_rec:
mock_pres_ex_rec.retrieve_by_tag_filter = async_mock.CoroutineMock(
return_value=mock_pres_ex_rec
)
mock_pres_mgr.return_value.receive_request = async_mock.CoroutineMock(
return_value=async_mock.MagicMock()
)
mock_pres_mgr.return_value.receive_request.return_value.auto_present = False
request_context.connection_ready = True
handler_inst = handler.PresentationRequestHandler()
responder = MockResponder()
await handler_inst.handle(request_context, responder)
mock_pres_mgr.assert_called_once_with(request_context)
mock_pres_mgr.return_value.receive_request.assert_called_once_with(
mock_pres_ex_rec
)
assert not responder.messages
async def test_called_not_found(self):
request_context = RequestContext()
request_context.connection_record = async_mock.MagicMock()
request_context.connection_record.connection_id = "dummy"
request_context.message_receipt = MessageReceipt()
request_context.message = PresentationRequest()
request_context.message.indy_proof_request = async_mock.MagicMock(
return_value=async_mock.MagicMock()
)
with async_mock.patch.object(
handler, "PresentationManager", autospec=True
) as mock_pres_mgr, async_mock.patch.object(
handler, "V10PresentationExchange", autospec=True
) as mock_pres_ex_rec:
mock_pres_ex_rec.retrieve_by_tag_filter = async_mock.CoroutineMock(
side_effect=StorageNotFoundError
)
mock_pres_ex_rec.return_value = mock_pres_ex_rec
mock_pres_mgr.return_value.receive_request = async_mock.CoroutineMock(
return_value=async_mock.MagicMock()
)
mock_pres_mgr.return_value.receive_request.return_value.auto_present = False
request_context.connection_ready = True
handler_inst = handler.PresentationRequestHandler()
responder = MockResponder()
await handler_inst.handle(request_context, responder)
mock_pres_mgr.assert_called_once_with(request_context)
mock_pres_mgr.return_value.receive_request.assert_called_once_with(
mock_pres_ex_rec
)
assert not responder.messages
async def test_called_auto_present(self):
request_context = RequestContext()
request_context.connection_record = async_mock.MagicMock()
request_context.connection_record.connection_id = "dummy"
request_context.message = PresentationRequest()
request_context.message.indy_proof_request = async_mock.MagicMock(
return_value=async_mock.MagicMock()
)
request_context.message_receipt = MessageReceipt()
with async_mock.patch.object(
handler, "PresentationManager", autospec=True
) as mock_pres_mgr, async_mock.patch.object(
handler, "V10PresentationExchange", autospec=True
) as mock_pres_ex_rec, async_mock.patch.object(
handler, "BaseHolder", autospec=True
) as mock_holder:
request_context.inject = async_mock.CoroutineMock(return_value=mock_holder)
mock_pres_ex_rec.retrieve_by_tag_filter = async_mock.CoroutineMock(
return_value=mock_pres_ex_rec
)
mock_pres_mgr.return_value.receive_request = async_mock.CoroutineMock(
return_value=mock_pres_ex_rec
)
mock_pres_mgr.return_value.receive_request.return_value.auto_present = True
handler.indy_proof_request2indy_requested_creds = async_mock.CoroutineMock(
return_value=async_mock.MagicMock()
)
mock_pres_mgr.return_value.create_presentation = async_mock.CoroutineMock(
return_value=(mock_pres_ex_rec, "presentation_message")
)
request_context.connection_ready = True
handler_inst = handler.PresentationRequestHandler()
responder = MockResponder()
await handler_inst.handle(request_context, responder)
mock_pres_mgr.return_value.create_presentation.assert_called_once()
mock_pres_mgr.assert_called_once_with(request_context)
mock_pres_mgr.return_value.receive_request.assert_called_once_with(
mock_pres_ex_rec
)
messages = responder.messages
assert len(messages) == 1
(result, target) = messages[0]
assert result == "presentation_message"
assert target == {}
async def test_called_auto_present_value_error(self):
request_context = RequestContext()
request_context.connection_record = async_mock.MagicMock()
request_context.connection_record.connection_id = "dummy"
request_context.message = PresentationRequest()
request_context.message.indy_proof_request = async_mock.MagicMock(
return_value=async_mock.MagicMock()
)
request_context.message_receipt = MessageReceipt()
with async_mock.patch.object(
handler, "PresentationManager", autospec=True
) as mock_pres_mgr, async_mock.patch.object(
handler, "V10PresentationExchange", autospec=True
) as mock_pres_ex_rec, async_mock.patch.object(
handler, "BaseHolder", autospec=True
) as mock_holder:
request_context.inject = async_mock.CoroutineMock(return_value=mock_holder)
mock_pres_ex_rec.retrieve_by_tag_filter = async_mock.CoroutineMock(
return_value=mock_pres_ex_rec
)
mock_pres_mgr.return_value.receive_request = async_mock.CoroutineMock(
return_value=mock_pres_ex_rec
)
mock_pres_mgr.return_value.receive_request.return_value.auto_present = True
handler.indy_proof_request2indy_requested_creds = async_mock.CoroutineMock(
side_effect=ValueError
)
mock_pres_mgr.return_value.create_presentation = async_mock.CoroutineMock(
return_value=(mock_pres_ex_rec, "presentation_message")
)
request_context.connection_ready = True
handler_inst = handler.PresentationRequestHandler()
responder = MockResponder()
await handler_inst.handle(request_context, responder)
mock_pres_mgr.return_value.create_presentation.assert_not_called()
mock_pres_mgr.assert_called_once_with(request_context)
mock_pres_mgr.return_value.receive_request.assert_called_once_with(
mock_pres_ex_rec
)
assert not responder.messages
async def test_called_not_ready(self):
request_context = RequestContext()
request_context.message_receipt = MessageReceipt()
with async_mock.patch.object(
handler, "PresentationManager", autospec=True
) as mock_pres_mgr:
mock_pres_mgr.return_value.receive_request = async_mock.CoroutineMock()
request_context.message = PresentationRequest()
request_context.connection_ready = False
handler_inst = handler.PresentationRequestHandler()
responder = MockResponder()
with self.assertRaises(handler.HandlerException):
await handler_inst.handle(request_context, responder)
assert not responder.messages
|
import asyncio
import json
import logging
from typing import Any, Dict
from urllib.parse import unquote
from anyio.exceptions import IncompleteRead
from p2pclient import Client as P2PClient
from p2pclient.pb.p2pd_pb2 import PSMessage
from p2pclient.utils import read_pbmsg_safe
from aleph.services.ipfs.pubsub import sub as sub_ipfs
from aleph.services.utils import pubsub_msg_to_dict
from aleph.types import Protocol
LOGGER = logging.getLogger("P2P.peers")
async def handle_incoming_host(pubsub_msg: Dict[str, Any], source: Protocol = Protocol.P2P):
from aleph.model.p2p import add_peer
sender = pubsub_msg["from"]
try:
LOGGER.debug("New message received %r" % pubsub_msg)
message_data = pubsub_msg.get("data", b"").decode("utf-8")
content = json.loads(unquote(message_data))
# TODO: replace this validation by marshaling (ex: Pydantic)
peer_type = content.get("peer_type", "P2P")
if not isinstance(content["address"], str):
raise ValueError("Bad address")
if not isinstance(content["peer_type"], str):
raise ValueError("Bad peer type")
# TODO: handle interests and save it
if peer_type not in ["P2P", "HTTP", "IPFS"]:
raise ValueError("Unsupported peer type %r" % peer_type)
await add_peer(
address=content["address"],
peer_type=peer_type,
source=source,
sender=sender,
)
except Exception as e:
if isinstance(e, ValueError):
LOGGER.info("Received a bad peer info %s from %s" % (e.args[0], sender))
else:
LOGGER.exception("Exception in pubsub peers monitoring")
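# Shape of a pubsub message accepted by handle_incoming_host (illustrative values only):
#   {"from": "<peer id>", "data": b'{"address": "/ip4/1.2.3.4/tcp/4025", "peer_type": "P2P"}'}
# In practice the "data" payload is URL-quoted JSON; unquote() leaves plain JSON unchanged.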
async def monitor_hosts_p2p(p2p_client: P2PClient, alive_topic: str) -> None:
# The communication with the P2P daemon sometimes fails repeatedly, spamming
# IncompleteRead exceptions. We still want to log these to Sentry without sending
# thousands of logs.
incomplete_read_threshold = 150
incomplete_read_counter = 0
while True:
try:
stream = await p2p_client.pubsub_subscribe(alive_topic)
while True:
pubsub_msg = PSMessage()
await read_pbmsg_safe(stream, pubsub_msg)
msg_dict = pubsub_msg_to_dict(pubsub_msg)
await handle_incoming_host(msg_dict, source=Protocol.P2P)
except IncompleteRead:
if (incomplete_read_counter % incomplete_read_threshold) == 0:
LOGGER.exception(
"Incomplete read (%d times), reconnecting. Try to restart the application.",
incomplete_read_counter,
)
incomplete_read_counter += 1
except Exception:
LOGGER.exception("Exception in pubsub peers monitoring, resubscribing")
await asyncio.sleep(2)
async def monitor_hosts_ipfs(alive_topic: str):
while True:
try:
async for mvalue in sub_ipfs(alive_topic):
await handle_incoming_host(mvalue, source=Protocol.IPFS)
except Exception:
LOGGER.exception("Exception in pubsub peers monitoring, resubscribing")
|
#!/usr/bin/python
from __future__ import unicode_literals, print_function, absolute_import
import gevent.monkey
gevent.monkey.patch_all()
import psycogreen.gevent
psycogreen.gevent.patch_psycopg()
import application
import logging
import os
import os.path
import json
import argparse
import gevent.wsgi
app = application.app
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
folder = os.path.dirname(__file__)
fc = os.path.join(folder, "config.json")
with open(fc, "rb") as file_:
fc_content = file_.read().decode("utf8")
config = json.loads(fc_content)
app.configure(config)
parser = argparse.ArgumentParser(description='')
subparsers = parser.add_subparsers(dest='action')
parser_serve = subparsers.add_parser('serve', help='serves in development mode')
parser_serve.add_argument('-p', '--port', type=int, default=5000)
parser_serve.add_argument('-o', '--host', default="localhost")
def serve():
server = gevent.wsgi.WSGIServer((args.host, args.port), app.web_app)
server.serve_forever()
parser_serve.set_defaults(func=serve)
parser_createdb = subparsers.add_parser('createdb', help='creates the database according to configuration')
parser_createdb.add_argument('-d', '--dev', action="store_true", default=False)
def createdb():
with app:
app.create_tables()
parser_createdb.set_defaults(func=createdb)
args = parser.parse_args()
args.func()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.spectral_norm import spectral_norm
from torch.nn.utils.spectral_norm import remove_spectral_norm
from utils.utils import VGG16Partial, device
class ConvBlock(nn.Module):
'''(conv => BN => LeakyReLU)'''
def __init__(self, in_ch, out_ch, stride=2, dilation=1, first=False):
super(ConvBlock, self).__init__()
if first:
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 4, stride=stride, padding=1, dilation=dilation),
nn.LeakyReLU(0.2, inplace=True)
)
else:
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 4, stride=stride, padding=1, dilation=dilation),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class VGGBlock(nn.Module):
def __init__(self, in_ch, out_ch, small=True):
super(VGGBlock, self).__init__()
if small:
self.block = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1),
nn.LeakyReLU(0.2, inplace=True),
nn.AvgPool2d(kernel_size=2, stride=2, padding=0))
else:
self.block = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1),
nn.LeakyReLU(0.2, inplace=True),
nn.AvgPool2d(kernel_size=2, stride=2, padding=0))
def forward(self, x):
return self.block(x)
class Discriminator(nn.Module):
def __init__(self, channels=None, dilation=None, stride=None):
super(Discriminator, self).__init__()
if channels is None:
self.net = nn.Sequential(ConvBlock(3, 32, first=True),
ConvBlock(32, 64, first=False),
ConvBlock(64, 128, first=False),
ConvBlock(128, 256, first=False))
self.outConv = nn.Conv2d(in_channels=256, out_channels=1, stride=1, kernel_size=4, padding=1)
else:
blocks = [ConvBlock(channels[i], channels[i+1],
stride=stride[i],
dilation=dilation[i],
first=False) for i in range(len(channels)-1)]
self.net = nn.Sequential(*blocks)
self.outConv = nn.Conv2d(in_channels=channels[-1], out_channels=1, stride=1, kernel_size=4, padding=1)
self.loss = nn.MSELoss()
self.sigmoid = nn.Sigmoid()
self.spectral_norm = False
def forward(self, tensorImage):
tensorImage = self.net(tensorImage)
return self.outConv(tensorImage)
def adversarialLoss(self, tensorImage, isReal):
predictions = self.forward(tensorImage)
if isReal:
labels = torch.ones_like(predictions).to(device)
else:
labels = torch.zeros_like(predictions).to(device)
return self.loss(predictions, labels)
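# Instantiation sketch for the parametrised Discriminator above (illustrative only;
# the channel/dilation/stride lists mirror those used by the multi-scale models below):
#   d = Discriminator(channels=[256, 256, 256], dilation=[1, 1], stride=[1, 1])
#   scores = d(torch.randn(1, 256, 64, 64))   # one patch-level score map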
class PerceptualDiscriminator(nn.Module):
def __init__(self):
super(PerceptualDiscriminator, self).__init__()
self.extractor = VGG16Partial().eval()
for p in self.extractor.parameters():
p.requires_grad = False
self.net = nn.Sequential(ConvBlock(256, 256, first=False),
ConvBlock(256, 256, first=False),
ConvBlock(256, 256, first=False))
self.outConv = nn.Conv2d(in_channels=256, out_channels=1, stride=1, kernel_size=4, padding=1)
self.sigmoid = nn.Sigmoid()
self.loss = nn.MSELoss()
self.spectral_norm = False
def forward(self, tensorImage):
vggFeatures = self.extractor(tensorImage)
tensorImage = self.net(vggFeatures[-1])
return self.outConv(tensorImage)
def adversarialLoss(self, tensorImage, isReal):
predictions = self.forward(tensorImage)
loss = 0
if isReal:
labels = torch.ones_like(predictions).to(device)
else:
labels = torch.zeros_like(predictions).to(device)
return self.loss(predictions, labels)
class MultiScalePerceptualDiscriminator(nn.Module):
def __init__(self):
super(MultiScalePerceptualDiscriminator, self).__init__()
self.extractor = VGG16Partial().eval()
for p in self.extractor.parameters():
p.requires_grad = False
self.ConvBlock0 = VGGBlock(3, 64)
self.ConvBlock1 = VGGBlock(128, 128)
self.ConvBlock2 = VGGBlock(256, 256, small=False)
self.localD1 = Discriminator([256, 256, 256], [1, 1], [1, 1])
self.localD2 = Discriminator([512, 256, 256], [1, 1], [2, 1])
self.Dmain = Discriminator([512, 256, 256, 256], [8, 4, 1], [1, 1, 1])
self.sigmoid = nn.Sigmoid()
self.loss = nn.MSELoss()
self.spectral_norm = False
def forward(self, tensorImage):
[vggF1, vggF2, vggF3] = self.extractor(tensorImage)
F1 = self.ConvBlock0(tensorImage)
F2 = self.ConvBlock1(torch.cat([vggF1, F1], dim=1))
F3 = self.ConvBlock2(torch.cat([vggF2, F2], dim=1))
return [self.sigmoid(self.localD1(torch.cat([vggF2, F2], dim=1))),
self.sigmoid(self.localD2(torch.cat([vggF3, F3], dim=1))),
self.sigmoid(self.Dmain(torch.cat([vggF3, F3], dim=1)))]
def adversarialLoss(self, tensorImage, isReal):
predictions = self.forward(tensorImage)
loss = 0
for pred in predictions:
if isReal:
labels = torch.ones_like(pred).to(device)
else:
labels = torch.zeros_like(pred).to(device)
loss += self.loss(pred, labels)
return loss/len(predictions)
class MultiScaleDiscriminator(nn.Module):
def __init__(self):
super(MultiScaleDiscriminator, self).__init__()
self.ConvBlock0 = VGGBlock(3, 64)
self.ConvBlock1 = VGGBlock(64, 128)
self.ConvBlock2 = VGGBlock(128, 256, small=False)
self.localD1 = Discriminator([128, 256, 256], [1, 1], [1, 1])
self.localD2 = Discriminator([256, 256, 256], [1, 1], [2, 1])
self.Dmain = Discriminator([256, 256, 256, 256], [8, 4, 1], [1, 1, 1])
self.sigmoid = nn.Sigmoid()
self.loss = nn.MSELoss()
self.spectral_norm = False
def forward(self, tensorImage):
F1 = self.ConvBlock0(tensorImage)
F2 = self.ConvBlock1(F1)
F3 = self.ConvBlock2(F2)
return [self.sigmoid(self.localD1(F2)),
self.sigmoid(self.localD2(F3)),
self.sigmoid(self.Dmain(F3))]
def adversarialLoss(self, tensorImage, isReal):
predictions = self.forward(tensorImage)
loss = 0
for pred in predictions:
if isReal:
labels = torch.ones_like(pred).to(device)
else:
labels = torch.zeros_like(pred).to(device)
loss += self.loss(pred, labels)
return loss/len(predictions)
class MPDDiscriminator(nn.Module):
def __init__(self):
super(MPDDiscriminator, self).__init__()
self.extractor = VGG16Partial().eval()
for p in self.extractor.parameters():
p.requires_grad = False
self.ConvBlock0 = VGGBlock(4, 64)
self.ConvBlock1 = VGGBlock(128, 128)
self.ConvBlock2 = VGGBlock(256, 256, small=False)
self.localD1 = Discriminator([256, 256, 256], [1, 1], [1, 1])
self.localD2 = Discriminator([512, 256, 256], [1, 1], [2, 1])
self.Dmain = Discriminator([512, 256, 256, 256], [8, 4, 1], [1, 1, 1])
self.sigmoid = nn.Sigmoid()
self.loss = nn.MSELoss()
self.spectral_norm = False
def forward(self, tensorImage, tensorDisparity):
[vggF1, vggF2, vggF3] = self.extractor(tensorImage)
F1 = self.ConvBlock0(torch.cat([tensorImage, tensorDisparity], dim=1))
F2 = self.ConvBlock1(torch.cat([vggF1, F1], dim=1))
F3 = self.ConvBlock2(torch.cat([vggF2, F2], dim=1))
return [self.sigmoid(self.localD1(torch.cat([vggF2, F2], dim=1))),
self.sigmoid(self.localD2(torch.cat([vggF3, F3], dim=1))),
self.sigmoid(self.Dmain(torch.cat([vggF3, F3], dim=1)))]
def adversarialLoss(self, tensorImage, tensorDisparity, isReal):
predictions = self.forward(tensorImage, tensorDisparity)
loss = 0
for pred in predictions:
if isReal:
labels = torch.ones_like(pred).to(device)
else:
labels = torch.zeros_like(pred).to(device)
loss += self.loss(pred, labels)
return loss/len(predictions)
|
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import math
import re
from ml_pipeline_lch import isolate_noncategoricals
def view_dist(df, geo_columns):
'''
Plot distributions of non-categorical columns in a given dataframe
Inputs:
df: pandas dataframe
geo_columns: list of column names corresponding to columns with numeric geographical information (ex: zipcodes)
'''
non_categoricals = isolate_noncategoricals(df, ret_categoricals = False,
geo_cols = geo_columns)
df[non_categoricals].hist(bins = 50, figsize=(20,15), color = 'blue')
plt.show()
def check_corr(df, geo_columns):
'''
Display heatmap of linear correlation between non-categorical columns in a
given dataframe
Inputs:
df: pandas dataframe
geo_columns: list of column names corresponding to columns with numeric
geographical information (ex: zipcodes)
Attribution: Colormap Attribution: adapted from gradiated dataframe at
https://www.datascience.com/blog/introduction-to-correlation-learn-data-science-tutorials and correlation heatmap at https://stackoverflow.com/questions/29432629/correlation-matrix-using-pandas
'''
fig, ax = plt.subplots(figsize=(12, 12))
non_categoricals = isolate_noncategoricals(df, ret_categoricals = False,
geo_cols = geo_columns)
corr = df[non_categoricals].corr(method="pearson")
sns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool),
cmap=plt.get_cmap("coolwarm"), square=True, ax=ax, annot=True)
ax.set_xticks(range(len(non_categoricals)))
ax.set_yticks(range(len(non_categoricals)))
ax.tick_params(direction='inout')
ax.set_xticklabels(non_categoricals, rotation=45, ha='right')
ax.set_yticklabels(non_categoricals, rotation=45, va='top')
plt.show()
def discretize_cols(df, geo_columns, num_bins):
'''
Add columns to discretize and classify non-categorical columns in a given
data frame
Inputs:
df: pandas dataframe
geo_columns: list of column names corresponding to columns with
numeric geographical information (ex: zipcodes)
num_bins: number of groups into which column values should be
discretized
'''
non_categoricals = isolate_noncategoricals(df, ret_categoricals = False,
geo_cols = geo_columns)
for col in non_categoricals:
bin_col = col + "_bin"
if col == "age":
age_bins = math.ceil((df[col].max() - df[col].min()) / 10)
df[bin_col] = pd.cut(df[col], bins = age_bins, right = False,
precision=0)
else:
try:
df[bin_col] = pd.cut(df[col], bins = num_bins, precision=0)
except:
df[bin_col] = pd.cut(df[col], bins = num_bins + 3,
precision=0, duplicates = 'drop')
def create_binary_vars(df, cols_to_dummy, keyword_list):
'''
Create columns of binary values corresponding to values above zero for
selected columns in a given dataframe based on common keywords
Inputs:
df: pandas dataframe
cols_to_dummy: (list of strings) columns in data frame to be evaluated
into dummy variables
keyword_list: (list of strings) words or phrases included in columns
to be evaluated indicating a dummy variable should be created based
on its values
'''
keyword_string = ("|").join(keyword_list)
for col in cols_to_dummy:
colname_trunc = re.sub(keyword_string, '', col)
binary_col_name = 'tf_' + colname_trunc
df[binary_col_name] = df[col].apply(lambda x: x > 0)
def plot_corr(df, geo_columns, color_category):
'''
Observe distributions and correlations of features for non-categorical
Inputs:
df: pandas dataframe
categoricals_list: list of strings corresponding to categorical columns
(ex: zip codes)
'''
non_categoricals = isolate_noncategoricals(df, ret_categoricals = False,
geo_cols = geo_columns)
plot_list = non_categoricals + [color_category]
corr = sns.pairplot(df[plot_list], hue = color_category, palette = "Set2")
def plot_relationship(df, feature_x, xlabel,feature_y, ylabel, xlimit = None,
ylimit = None, color_cat = None):
'''
Plot two features in a given data frame against each other to view
relationship and outliers.
Attribution: adapted from https://s3.amazonaws.com/assets.datacamp.com/blog_assets/Python_Seaborn_Cheat_Sheet.pdf
'''
sns.set_style("whitegrid")
g = sns.lmplot(x = feature_x, y = feature_y, data = df, aspect = 3,
hue = color_cat)
g = (g.set_axis_labels(xlabel,ylabel)).set(xlim = xlimit , ylim = ylimit)
plot_title = ylabel + " by " + xlabel
plt.title(plot_title)
plt.show(g)
def eval_ratios(df, include_cols, category_cols, method = "count",
pct = False):
'''
Evaluate specific features via grouping on one or more category
Inputs:
df: (dataframe) pandas dataframe
include_cols: (list of strings) column names to be aggregated or
grouped
category_cols: (list of strings) column name(s) for variable(s) used
for grouping
method: (string) groupby aggregation method for column values
Output:
ratio_df: pandas data frame of grouped data
'''
if method == "count":
ratio_df = df[include_cols].groupby(category_cols).count()
if pct:
single_col = include_cols[-1] + " Percentage"
ratio_df[single_col] = ((df[include_cols].groupby(category_cols).count() /
df[include_cols].groupby(category_cols).count().sum()) * 100)
elif method == "sum":
ratio_df = df[include_cols].groupby(category_cols).sum()
if pct:
single_col = include_cols[-1] + " Percentage"
ratio_df[single_col] = ((df[include_cols].groupby(category_cols).sum() /
df[include_cols].groupby(category_cols).sum().sum()) * 100)
return ratio_df
def feature_by_geo(df, geo, expl_var, num_var, method = "median"):
'''
Evaluate specific features by geography (ex: zip code)
Inputs:
df: (dataframe) pandas dataframe
geo: (string) column name corresponding to geography used for grouping
expl_var: (string) column name for exploratory variable used for
grouping
num_var: (string) column name for numeric variable/ feature to be
aggregated
method: (string) groupby aggregation method for column values
Output:
geo_features: pandas data frame of grouped data
'''
df_geo = df[(df[geo] != 0)]
groupby_list = [geo] + expl_var
if method == "median":
geo_features = df_geo.groupby(groupby_list)[num_var].median().unstack(level = 1)
if method == "count":
geo_features = df_geo.groupby(groupby_list)[num_var].count().unstack(level = 1)
geo_features.fillna(value = "", inplace = True)
return geo_features
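# A minimal usage sketch (assumptions: a dataframe `df` loaded elsewhere with
# hypothetical columns 'zipcode', 'age' and 'label'; none of these names come from
# the functions above):
#   geo_cols = ['zipcode']
#   view_dist(df, geo_cols)
#   check_corr(df, geo_cols)
#   discretize_cols(df, geo_cols, num_bins=5)
#   ratios = eval_ratios(df, ['label', 'age'], ['label'], method='count', pct=True)
#   by_geo = feature_by_geo(df, 'zipcode', ['label'], 'age', method='median')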
|
from django.core.management.base import BaseCommand
from django.core.serializers.base import ProgressBar
from django.core.cache import caches
from evap.evaluation.models import Evaluation
from evap.results.tools import collect_results
from evap.results.views import warm_up_template_cache
class Command(BaseCommand):
args = ''
help = 'Clears the cache and pre-warms it with the results of all evaluations'
requires_migrations_checks = True
def handle(self, *args, **options):
self.stdout.write("Clearing results cache...")
caches['results'].clear()
total_count = Evaluation.objects.count()
self.stdout.write("Calculating results for all evaluations...")
self.stdout.ending = None
progress_bar = ProgressBar(self.stdout, total_count)
for counter, evaluation in enumerate(Evaluation.objects.all()):
progress_bar.update(counter + 1)
collect_results(evaluation)
self.stdout.write("Prerendering result index page...\n")
warm_up_template_cache(Evaluation.objects.filter(state='published'))
self.stdout.write("Results cache has been refreshed.\n")
|
prefixes = "JKLMNOPQ"
for letter in prefixes:
if letter[0] == "Q" or letter[0] == "O":
print(letter + "uack")
else:
print(letter + "ack")
#https://pt.stackoverflow.com/q/305011/101
|
import os
import glob
import sys
root_py_folder = "klampt"
if sys.version_info[0] == 2:
#python2 version has drifted from current Python3 version... maintain separate packages list for compatibility
root_py_folder = "python2_version/klampt"
subpackages = ['apps','io','math','model','model/create','plan','plan/kinetrajopt','sim','vis','vis/backends','vis/ipython']
else:
subpackages = ['apps','control','control/blocks','control/io','io','math','math/autodiff','model','model/create','plan','plan/kinetrajopt','sim','vis','vis/backends','vis/ipython']
#need to grab the existing C extension module .py and .so files
import site
import glob
pip_klampt_version = '0.8.5'
py_version = '%d.%d'%(sys.version_info[0],sys.version_info[1])
klampt_path = None
for path in site.getsitepackages():
if os.path.exists(os.path.join(path,'klampt')):
klampt_path = os.path.join(path,'klampt')
break
if klampt_path is None:
print("Klampt",pip_klampt_version,"wasn't installed by pip?")
exit(1)
import shutil
dontcopy = ['robotsim.py','motionplanning.py']
def docopy(fn):
basefn = fn[len('klampt/'):]
print("Copying",fn,"to",os.path.join(klampt_path,basefn))
shutil.copy(fn,os.path.join(klampt_path,basefn))
for path in [root_py_folder] + [os.path.join(root_py_folder,p) for p in subpackages]:
for fn in glob.glob(os.path.join(path,'*.py')):
if os.path.basename(fn) not in dontcopy:
docopy(fn)
for fn in glob.glob(os.path.join(root_py_folder,'data/*.*')):
if os.path.basename(fn) not in dontcopy:
docopy(fn)
print("Klampt pip install patching complete")
|
"""Extras URLs."""
# Django
from django.urls import include, path
# Django REST Framework
from rest_framework.routers import DefaultRouter
# Views
from compras import views
router = DefaultRouter()
router.register(r'compras/views/pasarela.html', views.CompraViewSet, basename='compras')
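# Note: router.register()'s first argument is a URL prefix, so the list endpoint
# registered here resolves to compras/views/pasarela.html/ (unusual, but as written).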
urlpatterns = [
path('', include(router.urls))
]
|
#######################################################################################################
# Python program to create long biographies from the provided .csv file #
# Code adapted from Jia Zhang #
# Written by Zainab Alasadi #
# Saturday 14th September 2019 #
#######################################################################################################
import csv
import random
import numpy as np
from random import shuffle
import locale
locale.setlocale(locale.LC_ALL, '')
VISCURCF = 122
def clearInts(row, indexStart, indexEnd):
if indexStart == indexEnd:
row[indexStart] = ""
else:
for i in range(indexStart, indexEnd):
row[i] = ""
def randomPop(array, num):
shuffle(array)
for i in range(0, len(array)):
if len(array) >= num:
array.pop()
def joinStrings(string, array):
if array is None or len(array) == 0:
return("")
elif len(array) == 1:
return(string + array[0] + ".")
elif len(array) == 2:
return(string + array[0] + ' and ' + array[1] + ".")
return(string + ', '.join(array[:-1]) + ' and ' + array[-1] + ".")
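# Illustrative behaviour of joinStrings (example values, not from the survey data):
#   joinStrings("I volunteer in ", ["welfare", "sports", "health services"])
#   -> "I volunteer in welfare, sports and health services."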
def homeReturn(row, index, count):
home = int([row[index]][0])
if home == 10:
if count == 0:
return("I'm travelling")
elif count >= 1:
return("travel")
elif home == 11:
if count == 0:
return("of work")
elif count >= 1:
return("work")
elif home == 12:
if count == 0:
return("I'm house sitting")
elif count >= 1:
return("house sitting")
elif home == 14:
if count == 0:
return("I recently moved")
elif count >= 1:
return("moving homes")
elif home == 15:
if count == 0:
return("I'm renovating")
elif count >= 1:
return("renovations")
elif home == 16:
if count == 0:
return("the tight housing market")
elif count >= 1:
return("tight housing market")
elif home == 17:
if count == 0:
return("of domestic violence")
elif count >= 1:
return("domestic violence")
elif home == 18:
if count == 0:
return("of alcohol and drugs")
elif count >= 1:
return("alcohol and drugs")
elif home == 19:
if count == 0:
return("of family problems")
elif count >= 1:
return("family problems")
elif home == 20:
if count == 0:
return("of financial problems")
elif count >= 1:
return("financial problems")
elif home == 21:
if count == 0:
return("of mental illness")
elif count >= 1:
return("mental illness")
elif home == 22:
if count == 0:
return("I lost my job")
elif count >= 1:
return("unemployment")
elif home == 23:
if count == 0:
return("of gambling")
elif count >= 1:
return("gambling")
elif home == 24:
if count == 0:
return("of eviction")
elif count >= 1:
return("eviction")
elif home == 25:
if count == 0:
return("of natural disaster")
elif count >= 1:
return("natural disaster")
else:
return("")
# Reasons for ever being without a permanent place to live - ALLHOM
def withoutHome(row, index):
homeless = int([row[index]][0])
if homeless == 98:
row[index] = "I've never been homeless."
else:
a = []
count = 0
# generate an array of problems
for i in range(1, 17):
addOn = homeReturn(row, index + i, count)
if len(addOn) != 0:
a.append(addOn)
count = count + 1
# add problems to base sentence
if len(a) != 0:
if len(a) >= 5:
randomPop(a, 5)
row[index] = joinStrings("I don't have a permanent place to live because ", a)
else:
row[index] = "I don't have a permanent place to live."
clearInts(row, index + 1, index + 17)
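# Example of a sentence withoutHome can produce (illustrative values only):
#   "I don't have a permanent place to live because of financial problems,
#    unemployment and eviction."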
# Victim of physical or threatened violence in last 12 months - ASSAULT
def assault(row, index):
assault = int([row[index]][0])
if assault == 1:
row[index] = "I'm a victim of violence."
else:
row[index] = ""
# Perceived level of difficulty with transport - ATRQ01CF
def transportDifficulty(row, index):
transport = int([row[index]][0])
if transport == 3:
row[index] = "I have difficulty travelling where I live."
elif transport == 4:
row[index] = "I'm housebound."
else:
row[index] = ""
# Family composition of household - FAMCOMB
def familyComp(row, index):
family = int([row[index]][0])
clearInts(row, index, index)
if family == 1:
return("my partner and children.")
elif family == 2:
return("my children.")
elif family == 3:
return("my partner.")
elif family == 4:
return("my extended family.")
elif family == 5:
return("multiple families.")
elif family == 6:
return("alone")
elif family == 7:
return("my friends.")
# Number of bedrooms - BEDCURF
def numBedrooms(row, index):
bedroom = int([row[index]][0])
family = str(familyComp(row, 43))
if bedroom == 1:
if len(family) == 0:
row[index] = "I live in a 1 bedroom home."
elif family == "alone":
row[index] = "I live alone in a 1 bedroom home."
else:
row[index] = "I live in a 1 bedroom home with " + family
elif bedroom == 2:
if len(family) == 0:
row[index] = "I live in a 2 bedroom home."
elif family == "alone":
row[index] = "I live alone in a 2 bedroom home."
else:
row[index] = "I live in a 2 bedroom home with " + family
elif bedroom == 3:
if len(family) == 0:
row[index] = "I live in a 3 bedroom home."
elif family == "alone":
row[index] = "I live alone in a 3 bedroom home."
else:
row[index] = "I live in a 3 bedroom home with " + family
elif bedroom == 4:
if len(family) == 0:
row[index] = "I live in a 4 bedroom home."
elif family == "alone":
row[index] = "I live alone in a 4 bedroom home."
else:
row[index] = "I live in a 4 bedroom home with " + family
elif bedroom == 5:
if len(family) == 0:
row[index] = "I live in a 5+ bedroom home."
elif family == "alone":
row[index] = "I live alone in a 5+ bedroom home."
else:
row[index] = "I live in a 5+ bedroom home with " + family
else:
row[index] = ""
def cashReturn(row, index):
cash = int([row[index]][0])
if cash == 1:
return("basic bills")
elif cash == 2:
return("my mortage")
elif cash == 3:
return("insurance")
elif cash == 4:
return("my credit card")
elif cash == 6:
return("meals")
else:
return("")
# Type(s) of cash flow problem - CASHFLT
def cashProblems(row, index):
cash = int([row[index]][0])
if cash == 0 or cash == 11:
row[index] = ""
elif cash == 10:
row[index] = "I've never had any money problems."
else:
a = []
# generate an array of problems
for i in range(1, 11):
addOn = cashReturn(row, index + i)
if len(addOn) != 0:
a.append(addOn)
# add problems to base sentence
if len(a) != 0:
if len(a) >= 5:
randomPop(a, 5)
row[index] = joinStrings("I've had difficulty paying for ", a)
else:
row[index] = "I've had difficulty paying for my everyday living."
clearInts(row, index + 1, index + 11)
# Country of birth - COBBC
def birthCountry(row, index):
birth = int([row[index]][0])
if birth == 1:
row[index] = "I was born in Australia."
elif birth == 3:
row[index] = "I wasn't born in an English-speaking country."
else:
row[index] = ""
# Whether used a computer at home in last 12 months - COMHOM
def computerUse(row, index):
computer = int([row[index]][0])
if computer == 3:
row[index] = "I don't have a computer at home."
else:
row[index] = ""
# Value of consumer debt - COTVALCF
def debtValue(row, index):
debt = int([row[index]][0])
if debt == 0:
row[index] = "I don't have any consumer debt."
elif debt == 2:
row[index] = "I have $" + str(f'{random.randint(5000, 9999):n}') + " in consumer debt."
elif debt == 3:
row[index] = "I have $" + str(f'{random.randint(10000, 49999):n}') + " in consumer debt."
elif debt == 4:
row[index] = "I have more than $50K consumer debt."
else:
row[index] = ""
def disReturn(row, index):
dis = int([row[index]][0])
if dis == 1:
return("sensory")
elif dis == 2:
return("physical")
elif dis == 3:
return("intellectual")
elif dis == 4:
return("psychological")
else:
return("")
# Disability type - DISTYPC
def disabilityType(row, index):
dis = int([row[index]][0])
if dis == 0:
row[index] = ""
elif dis == 6:
row[index] = "I don't have any disabilities."
else:
a = []
string = "I have "
# generate an array of disabilities
for i in range(1, 5):
addOn = disReturn(row, index + i)
if len(addOn) != 0:
a.append(addOn)
# add disabilities to base sentence
if a is None or len(a) == 0:
sentence = string + "a " + "disability."
elif len(a) == 1:
sentence = string + "a " + a[0] + " disability."
else:
a[-1] = a[-1] + " disabilities"
sentence = joinStrings(string, a)
row[index] = sentence
clearInts(row, index + 1, index + 5)
# Dwelling structure - DWSTBC
def dwellingType(row, index):
dwell = int([row[index]][0])
if dwell == 1:
row[index] = "I live in a house."
elif dwell == 3:
row[index] = "I live in a flat."
else:
row[index] = ""
# Main field of highest educational attainment - EDFIECF
def fieldEducation(row, index):
field = int([row[index]][0])
clearInts(row, index, index)
if field == 1:
return("sciences.")
elif field == 2:
return("IT.")
elif field == 3:
return("engineering.")
elif field == 4:
return("architecture.")
elif field == 5:
return("environmental studies.")
elif field == 6:
return("health.")
elif field == 7:
return("education.")
elif field == 8:
return("commerce.")
elif field == 9:
return("society and culture.")
elif field == 10:
return("creative arts.")
elif field == 11:
return("hospitality.")
else:
return("")
# Main reason did not study although wanted to - MRDSTU
def whyStopStudy(row, index):
stop = int([row[index]][0])
clearInts(row, index, index)
if stop == 17:
return("I have a disability.")
elif stop == 18:
return("I had to care for my family.")
elif stop == 19:
return("I had no time.")
elif stop == 20:
return("of financial reasons.")
elif stop == 23:
return("I lacked the basic skills for further study.")
elif stop == 24:
return("of discrimination that I've experienced.")
elif stop == 97:
return("I didn't like school.")
else:
return("")
# Highest educational attainment - EDATTBC
def educationHighest(row, index):
edu = int([row[index]][0])
field = fieldEducation(row, 41)
whyStop = whyStopStudy(row, 60)
    if edu == 1:
        if len(field) != 0:
            # use the field text already computed from the EDFIECF column above
            row[index] = "I have a Postgraduate degree in " + field
        else:
            row[index] = "I have a Postgraduate degree."
    elif edu == 2:
        if len(field) != 0:
            row[index] = "I have a Bachelor's degree in " + field
        else:
            row[index] = "I have a Bachelor's degree."
    elif edu == 3 or edu == 4 or edu == 5 or edu == 6:
        if len(field) != 0:
            row[index] = "I have a certificate in " + field
        else:
            row[index] = "I have a professional certification."
elif edu == 7:
if len(whyStop) == 0:
row[index] = "I completed up to year 12."
else:
row[index] = "I completed up to year 12." + " I didn't continue studying because " + whyStop
elif edu == 8:
if len(whyStop) == 0:
row[index] = "I completed up to year 11."
else:
row[index] = "I completed up to year 11." + " I didn't finish studying because " + whyStop
elif edu == 9:
if len(whyStop) == 0:
row[index] = "I completed up to year 10."
else:
row[index] = "I completed up to year 10." + " I didn't finish studying because " + whyStop
elif edu == 10:
if len(whyStop) == 0:
row[index] = "I completed up to year 9."
else:
row[index] = "I completed up to year 9." + " I didn't finish studying because " + whyStop
elif edu == 11:
if len(whyStop) == 0:
row[index] = "I didn't finish primary school."
else:
row[index] = "I didn't finish primary school because " + whyStop
else:
row[index] = ""
# Whether ever experienced homelessness - EVRHMLES
def homelessness(row, index):
home = int([row[index]][0])
if home == 1:
row[index] = ("I've experienced homelessness.")
else:
row[index] = ""
# Full-time/part-time study - FPTSTUDY
def studyStatus(row, index):
study = int([row[index]][0])
if study == 1:
row[index] = "I'm a full-time student."
elif study == 2:
row[index] = "I'm a part-time student."
else:
row[index] = ""
# Full-time/part-time status - FPTSTA
def workStatus(row, index):
work = int([row[index]][0])
if work == 1:
row[index] = "I work full-time."
elif work == 2:
row[index] = "I work part-time."
elif work == 3 or work == 4:
row[index] = "I'm currently looking for work."
else:
row[index] = ""
# Frequency of voluntary work for organisation - FREQVORG
def volunteer(row, index):
volunteer = int([row[index]][0])
if volunteer == 1:
row[index] = "I volunteer at least once a week."
elif volunteer == 2:
row[index] = "I volunteer at least once fortnight."
elif volunteer == 3:
row[index] = "I volunteer whenever I can, at least once every month."
else:
row[index] = ""
# Frequency in experiencing difficulty in paying bills - FSRQ03
def billPaying(row, index):
bill = int([row[index]][0])
if bill == 1 or bill == 2:
row[index] = "I have little difficulty paying bills."
elif bill == 3 or bill == 4:
row[index] = "I've had some difficulties paying bills this year."
elif bill == 5 or bill == 6:
row[index] = "I've had a lot of difficulties paying bills this year."
else:
row[index] = ""
def homelessReturn(row, index):
home = int([row[index]][0])
if home == 10:
return("stayed with relatives")
elif home == 11:
return("stayed at a friend's house")
elif home == 12:
return("stayed at a caravan park")
elif home == 13:
return("stayed at a hostel")
elif home == 14:
return("stayed in a night shelter")
elif home == 15:
return("stayed in a homeless shelter")
elif home == 16:
return("stayed at a refuge")
elif home == 17:
return("squatted in an abandoned building")
elif home == 18:
return("slept rough")
else:
return("")
# All situations ever experienced because did not have a permanent place to live - HOMQ01
def homelessExperience(row, index):
home = int([row[index]][0])
if home == 0 or home == 20 or home == 19:
row[index] = ""
else:
a = []
# generate an array of problems
for i in range(1, 10):
addOn = homelessReturn(row, index + i)
if len(addOn) != 0:
a.append(addOn)
# add problems to base sentence
if len(a) != 0:
if len(a) >= 4:
randomPop(a, 4)
row[index] = joinStrings("When I was homeless, I ", a)
else:
row[index] = ""
clearInts(row, index + 1, index + 10)
# Hours usually worked in all jobs - HRSWKBC
def hoursWork(row, index):
hours = int([row[index]][0])
if hours == 1:
row[index] = "I work " + str(random.randint(1, 15)) + " hours a week."
elif hours == 2:
row[index] = "I work " + str(random.randint(16, 24)) + " hours a week."
elif hours == 3:
row[index] = "I work " + str(random.randint(25, 34)) + " hours a week."
elif hours == 4:
row[index] = "I work " + str(random.randint(35, 39)) + " hours a week."
elif hours == 5:
row[index] = "I work 40 hours a week."
elif hours == 6:
row[index] = "I work " + str(random.randint(41, 49)) + " hours a week."
elif hours == 7:
row[index] = "I work at least 50 hours a week."
else:
row[index] = ""
# Acceptance of different cultures - LEVTOL
def cultureAccept(row, index):
culture = int([row[index]][0])
if culture == 4 or culture == 5:
row[index] = "I find it difficult to accept other cultures other than my own."
elif culture == 1:
row[index] = "I embrace cultures outside my own."
else:
row[index] = ""
# Multiple job holder - MULTIJOB
def multipleJobs(row, index):
multWork = int([row[index]][0])
if multWork == 1:
row[index] = "I have multiple jobs."
else:
row[index] = ""
# Occupation in main job - OCCBC
def mainOccupation(row, index):
occup = int([row[index]][0])
if occup == 1:
row[index] = "I'm a manager."
elif occup == 4:
row[index] = "I'm a social worker."
elif occup == 5:
row[index] = "I'm an administrative worker."
elif occup == 6:
row[index] = "I'm a salesperson."
elif occup == 8:
row[index] = "I'm a labourer."
else:
row[index] = ""
# Overall Life Satisfaction - OLSQ01
def lifeSatisfaction(row, index):
life = int([row[index]][0])
if life == 1 or life == 2:
row[index] = "I'm happy with my life."
elif life == 3:
row[index] = "I'm mostly satisfied with my life."
elif life == 7 or life == 6:
row[index] = "I hate my life."
else:
row[index] = ""
# Frequency of telephone email and mail contact with family or friends - OTHRCON
def familyContact(row, index):
contact = int([row[index]][0])
if contact == 1:
row[index] = "I contact my family and friends a few times a day."
elif contact == 2:
row[index] = "I contact my family and friends everyday."
elif contact == 3 or contact == 4:
row[index] = "I contact my family and friends every week."
elif contact == 5 or contact == 6:
row[index] = "I contact my family and friends every year."
elif contact == 7:
row[index] = "I don't have contact with my family and friends."
elif contact == 8:
row[index] = "I don't have any living family and friends."
else:
row[index] = ""
# Registered marital status - REGMAR
def maritalStatus(row, index):
marry = int([row[index]][0])
if marry == 1:
row[index] = "I've never been married."
elif marry == 2:
row[index] = "I'm a widow."
elif marry == 3:
row[index] = "I'm divorced."
elif marry == 4:
row[index] = "I'm separated from my spouse."
elif marry == 5:
row[index] = "I'm married."
else:
row[index] = ""
# Weekly rent payments - RENTBCF
def rent(row, index):
rent = int([row[index]][0])
if rent == 1:
row[index] = "I pay less than $60 rent a week."
elif rent == 2:
row[index] = "I pay $" + str(random.randint(60, 99)) + " rent a week."
elif rent == 3:
row[index] = "I pay $" + str(random.randint(100, 149)) + " rent a week."
elif rent == 4:
row[index] = "I pay $" + str(random.randint(150, 199)) + " rent a week."
elif rent == 5:
row[index] = "I pay $" + str(random.randint(200, 249)) + " rent a week."
elif rent == 6:
row[index] = "I pay $" + str(random.randint(250, 399)) + " rent a week."
elif rent == 7:
row[index] = "I pay $" + str(random.randint(300, 349)) + " rent a week."
elif rent == 8:
row[index] = "I pay $" + str(random.randint(350, 399)) + " rent a week."
elif rent == 9:
row[index] = "I pay $" + str(random.randint(400, 449)) + " rent a week."
elif rent == 10:
row[index] = "I pay more than $500 rent a week."
else:
row[index] = ""
# Retirement status - RETSTACF
def retireStatus(row, index):
retired = int([row[index]][0])
if retired == 4:
row[index] = "I'm retired."
elif retired == 5:
row[index] = "I've never worked for more than 2 weeks in my life."
else:
row[index] = ""
# Feelings of safety at home alone during day - SAFEQ01
def safeDay(row, index):
safe = int([row[index]][0])
if safe == 4 or safe == 5:
row[index] = "I don't feel safe at home during the day."
elif safe == 6:
row[index] = "I'm never home during the day."
else:
row[index] = ""
# Feelings of safety at home alone after dark - SAFEQ02
def safeNight(row, index):
safe = int([row[index]][0])
if safe == 4 or safe == 5:
row[index] = "I don't feel safe at home at night."
elif safe == 6:
row[index] = "I'm never home during the night."
else:
row[index] = ""
# Feelings of safety walking alone in local area after dark - SAFEQ03
def safeWalkNight(row, index):
safe = int([row[index]][0])
if safe == 4:
row[index] = "I fear for my safety when walking home at night."
elif safe == 5:
row[index] = "I feel very unsafe when walking home at night."
elif safe == 6:
row[index] = "I'm never home during the night."
else:
row[index] = ""
def serviceReturn(row, index):
service = int([row[index]][0])
if service == 12:
return("disability services")
elif service == 13:
return("dental services")
elif service == 14:
return("doctors")
elif service == 16:
return("hospitals")
elif service == 15:
return("employment services")
elif service == 17:
return("legal services")
elif service == 18:
return("mental health services")
else:
return("")
# Services had difficulty accessing - SERDIFF
def serviceAccess(row, index):
service = int([row[index]][0])
a = []
# generate an array of service
for i in range(1, 11):
addOn = serviceReturn(row, index + i)
if len(addOn) != 0:
a.append(addOn)
# add service to base sentence
if len(a) != 0:
if len(a) >= 4:
randomPop(a, 4)
row[index] = joinStrings("I have difficulty accessing ", a)
else:
row[index] = "I have difficulty accessing basic services."
clearInts(row, index + 1, index + 11)
# Whether provided unpaid care help - SOHQ01A
def unpaidCarer(row, index):
carer = int([row[index]][0])
if carer == 1:
row[index] = "I'm an unpaid carer."
else:
row[index] = ""
# Delayed medical consultation because could not afford it - SPHQ02
def medicalAfford(row, index):
med = int([row[index]][0])
if med == 1:
row[index] = "I have trouble paying for healthcare."
else:
row[index] = ""
# Proficiency in spoken English - SPOKENG
def englishProf(row, index):
english = int([row[index]][0])
if english == 3:
row[index] = "My English is poor."
elif english == 4:
row[index] = "I don't speak any English."
else:
row[index] = ""
# State or Territory of residence - STATEUR
def stateReside(row, index):
state = int([row[index]][0])
if state == 1:
row[index] = "I live in New South Wales."
elif state == 2:
row[index] = "I live in Victoria."
elif state == 3:
row[index] = "I live in Queensland."
elif state == 4:
row[index] = "I live in South Australia."
elif state == 5:
row[index] = "I live in Western Australia."
elif state == 6:
row[index] = "I live in Tasmania."
elif state == 7:
row[index] = "I live in Northern Territory."
elif state == 8:
row[index] = "I live in Canberra."
else:
row[index] = ""
def stressReturn(row, index):
    stress = int([row[index]][0])  # cast like the other helpers so the numeric comparisons below work
if stress == 10:
return("a recent divorce")
elif stress == 11:
return("a recent death")
elif stress == 12:
return("an illness")
elif stress == 13:
return("a serious accident")
elif stress == 14:
return("alcohol and drug")
elif stress == 15:
return("mental illness")
elif stress == 16:
return("my disability")
elif stress == 17:
return("unemployment")
elif stress == 18:
return("involuntary redundancy")
elif stress == 19:
return("witnessing a violence")
elif stress == 20:
return("abuse")
elif stress == 21:
return("trouble with the police")
elif stress == 22:
return("a gambling problem")
elif stress == 23:
return("discrimination")
else:
return("")
# Personal stressors experienced in last 12 months - STRESS
def stress(row, index):
stress = int([row[index]][0])
if stress == 25:
row[index] = "I haven't been stressed in the past year."
else:
a = []
# generate an array of stresser
for i in range(1, 16):
addOn = stressReturn(row, index + i)
if len(addOn) != 0:
a.append(addOn)
# add stresser to base sentence
if len(a) != 0:
if len(a) >= 5:
randomPop(a, 5)
row[index] = joinStrings("I have stress from ", a)
else:
row[index] = ""
clearInts(row, index + 1, index + 16)
# Government support in last 2 years - TIMEGVBC
def govSupport(row, index):
support = int([row[index]][0])
if support == 4:
row[index] = "I've been on government support for " + str(random.randint(9, 11)) + " months."
elif support == 5:
row[index] = "I've been on government support for " + str(random.randint(12, 17)) + " months."
elif support == 6:
row[index] = "I've been on government support for " + str(random.randint(18, 23)) + " months."
elif support == 7:
row[index] = "I've been on government support for the past 2 years."
else:
row[index] = ""
# Time travel work daily - TRAVEL
def travelWork(row, index):
travel = int([row[index]][0])
if travel == 1:
row[index] = "I live 10 min away from work."
elif travel == 2:
row[index] = "My commute is " + str(random.randint(11, 29)) + " min long."
elif travel == 3:
row[index] = "My commute is " + str(random.randint(30, 59)) + " min long."
elif travel == 4:
row[index] = "My commute is several hours long."
elif travel == 6:
row[index] = "I work from home."
else:
row[index] = ""
# Level of trust in police in local area - TRSQ04
def trustPolice(row, index):
trust = int([row[index]][0])
if trust == 1:
row[index] = "I trust the police in my area."
elif trust == 5 or trust == 4:
row[index] = "I don't trust the police in my area."
else:
row[index] = ""
# TYPORG
def volReturn(row, index):
volunteer = int([row[index]][0])
if volunteer == 10:
return("heritage services")
elif volunteer == 11:
return("unions")
elif volunteer == 12:
return("welfare")
elif volunteer == 13:
return("education services")
elif volunteer == 14:
return("youth services")
elif volunteer == 15:
return("emergency services")
elif volunteer == 16:
return("environmental organisations")
elif volunteer == 17:
return("animal welfare")
elif volunteer == 18:
return("international aid")
elif volunteer == 19:
return("health services")
elif volunteer == 20:
return("justice")
elif volunteer == 21:
return("religious organisations")
elif volunteer == 22:
return("sports")
elif volunteer == 24:
return("ethnic groups")
else:
return("")
# Orgs volunteered for in last 12 months - TYPORG
def typeVolunteer(row, index):
volunteer = int([row[index]][0])
if volunteer == 25 or volunteer == 23:
row[index] = "I volunteer in my free time."
elif volunteer == 26:
row[index] = ""
else:
a = []
# generate an array of volunteer organisations
for i in range(1, 16):
addOn = volReturn(row, index + i)
if len(addOn) != 0:
a.append(addOn)
# add volunteer organisations to base sentence
if len(a) != 0:
if len(a) >= 4:
randomPop(a, 4)
row[index] = joinStrings("I volunteer in ", a)
else:
row[index] = ""
clearInts(row, index + 1, index + 16)
# Home broken into in past 12 months - VICTIM
def breakInVictim(row, index):
breakIn = int([row[index]][0])
if breakIn == 1:
row[index] = "My home was broken into this year."
else:
row[index] = ""
# Visa type - VISCURCF
def visaType(row, index):
    visa = int([row[index]][0])  # cast like the other helpers so the comparisons below work
clearInts(row, index, index)
if visa == 1:
return("permanent visa.")
elif visa == 2:
return("temporary visa.")
else:
return("")
# Year arrived in Australia - YRARRBC
def yearArrived(row, index):
year = int([row[index]][0])
visa = visaType(row, VISCURCF)
if year == 1:
row[index] = "I was born in Australia."
elif year == 2:
row[index] = "I moved to Australia before 1990."
elif year == 3:
if len(visa) == 0:
row[index] = "I came to Australia in " + str(random.randint(1991, 1995)) + "."
else:
row[index] = "I came to Australia in " + str(random.randint(1991, 1995)) + \
" with a " + visaType(row, index)
elif year == 4:
row[index] = "I moved to Australia in " + str(random.randint(1996, 2000)) + "."
elif year == 5:
row[index] = "I arrived in Australia in " + str(random.randint(2001, 2005)) + "."
elif year == 6:
if len(visa) == 0:
row[index] = "I came to Australia in " + str(random.randint(2006, 2010)) + "."
else:
row[index] = "I came to Australia in " + str(random.randint(2006, 2010)) + \
" with a " + visa
else:
row[index] = ""
# Define headers in input .csv file
headers = ["ALLHOMA", "ALLHOMB", "ALLHOMC", "ALLHOMD", "ALLHOME", "ALLHOMF", "ALLHOMG",
"ALLHOMH", "ALLHOMI", "ALLHOMJ", "ALLHOMK", "ALLHOML", "ALLHOMM", "ALLHOMN", "ALLHOMO",
"ALLHOMP", "ALLHOMQ", "ASSAULT", "ATRQ01CF", "BEDCURF", "CASHFLTA", "CASHFLTB", "CASHFLTC",
"CASHFLTD", "CASHFLTE", "CASHFLTF", "CASHFLTG", "CASHFLTH", "CASHFLTI", "CASHFLTJ", "CASHFLTK",
"COBBC", "COMHOM", "COTVALCF", "DISTYPCA", "DISTYPCB", "DISTYPCC", "DISTYPCD", "DISTYPCE",
"DWSTBC", "EDATTBC", "EDFIECF", "EVRHMLES", "FAMCOMB", "FPTSTA", "FPTSTUDY", "FREQVORG",
"FSRQ03", "HOMQ01A", "HOMQ01B", "HOMQ01C", "HOMQ01D", "HOMQ01E", "HOMQ01F",
"HOMQ01G", "HOMQ01H", "HOMQ01I", "HOMQ01J", "HRSWKBC", "LEVTOL", "MRDSTU",
"MULTIJOB", "OCCBC", "OLSQ01", "OTHRCON", "REGMAR", "RENTBCF", "RETSTACF", "SAFEQ01", "SAFEQ02",
"SAFEQ03", "SERDIFFA", "SERDIFFB", "SERDIFFC", "SERDIFFD", "SERDIFFE", "SERDIFFF", "SERDIFFG",
"SERDIFFH", "SERDIFFI", "SERDIFFJ", "SERDIFFK", "SOHQ01A", "SPHQ02", "SPOKENG", "STATEUR",
"STRESSA", "STRESSB", "STRESSC", "STRESSD", "STRESSE", "STRESSF", "STRESSG", "STRESSH",
"STRESSI", "STRESSJ", "STRESSK", "STRESSL", "STRESSM", "STRESSN", "STRESSO", "STRESSOP",
"TIMEGVBC", "TRAVEL", "TRSQ04", "TYPORGA", "TYPORGB", "TYPORGC", "TYPORGD", "TYPORGE",
"TYPORGF", "TYPORGG", "TYPORGH", "TYPORGI", "TYPORGJ", "TYPORGK", "TYPORGL", "TYPORGM",
"TYPORGN", "TYPORGO", "TYPORGP", "VICTIM", "VISCURCF", "YRARRBC"]
# Method to derive row of headers
# Returns row of headers
def dataHeaders():
# open file, read binary
# was rb
with open('input.csv', 'r') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
return row
# Method to get header indexes
# Returns list of column indexes for the needed headers
def getHeaderIndex():
headerDictionary = {}
indexList = []
for header in headers:
# print(header)
headerIndex = dataHeaders().index(header)
headerDictionary[header] = headerIndex
indexList.append(headerIndex)
# return headerDictionary
return indexList
# Method to reduce the csv by column of needed headers
# Returns none
def reduceDataByColumn(infile, outfile):
print("Reducing to useful columns...")
indexList = getHeaderIndex()
reducedRowsList = []
# open output file for binary writing
with open(outfile, 'w') as outputFile:
spamwriter = csv.writer(outputFile)
# open infile for binary reading
with open(infile, 'r') as csvfile:
spamreader = csv.reader(csvfile)
# headerDictionary = replaceHeaderCodes()
rowsDone = 0
for row in spamreader:
reducedRow = []
for index in indexList:
reducedRow.append(row[index])
                if reducedRow in reducedRowsList:
                    print("Duplicate row")
                else:
                    reducedRowsList.append(reducedRow)
                    spamwriter.writerow(reducedRow)
# print(reducedRow)
# Method to translate ints to sentences
# Returns none
def fillInData(infile, outfile):
print("Filling in data ...")
rowsDone = 0
# open outfile for binary writing
with open(outfile, 'w') as outputfile:
w = csv.writer(outputfile)
# open infile for binary reading
with open(infile, 'r') as datafile:
r = csv.reader(datafile)
# headers = r.next()
headers = next(r)
print(headers)
#print(currentIndex)
for row in r:
rowsDone += 1
print("Computing row " + str(f'{rowsDone:n}') + "...")
if rowsDone == 28404:
#28404
print("Computation complete.")
break
for i in headers:
currentIndex = headers.index(i)
if i == "ALLHOMA":
withoutHome(row, currentIndex)
elif i == "ASSAULT":
assault(row, currentIndex)
elif i == "ATRQ01CF":
transportDifficulty(row, currentIndex)
elif i == "BEDCURF":
numBedrooms(row, currentIndex)
elif i == "CASHFLTA":
cashProblems(row, currentIndex)
elif i == "COBBC":
birthCountry(row, currentIndex)
elif i == "COMHOM":
computerUse(row, currentIndex)
elif i == "COTVALCF":
debtValue(row, currentIndex)
elif i == "DISTYPCA":
disabilityType(row, currentIndex)
elif i == "DWSTBC":
dwellingType(row, currentIndex)
elif i == "EDATTBC":
educationHighest(row, currentIndex)
elif i == "EVRHMLES":
homelessness(row, currentIndex)
elif i == "FPTSTA":
workStatus(row, currentIndex)
elif i == "FPTSTUDY":
studyStatus(row, currentIndex)
elif i == "FREQVORG":
volunteer(row, currentIndex)
elif i == "FSRQ03":
billPaying(row, currentIndex)
elif i == "HOMQ01A":
homelessExperience(row, currentIndex)
elif i == "HRSWKBC":
hoursWork(row, currentIndex)
elif i == "LEVTOL":
cultureAccept(row, currentIndex)
elif i == "MULTIJOB":
multipleJobs(row, currentIndex)
elif i == "OCCBC":
mainOccupation(row, currentIndex)
elif i == "OLSQ01":
lifeSatisfaction(row, currentIndex)
elif i == "OTHRCON":
familyContact(row, currentIndex)
elif i == "REGMAR":
maritalStatus(row, currentIndex)
elif i == "RENTBCF":
rent(row, currentIndex)
elif i == "RETSTACF":
retireStatus(row, currentIndex)
elif i == "SAFEQ01":
safeDay(row, currentIndex)
elif i == "SAFEQ02":
safeNight(row, currentIndex)
elif i == "SAFEQ03":
safeWalkNight(row, currentIndex)
elif i == "SERDIFFA":
serviceAccess(row, currentIndex)
elif i == "SOHQ01A":
unpaidCarer(row, currentIndex)
elif i == "SPHQ02":
medicalAfford(row, currentIndex)
elif i == "SPOKENG":
englishProf(row, currentIndex)
elif i == "STATEUR":
stateReside(row, currentIndex)
elif i == "STRESSA":
stress(row, currentIndex)
elif i == "TIMEGVBC":
govSupport(row, currentIndex)
elif i == "TRAVEL":
travelWork(row, currentIndex)
elif i == "TRSQ04":
trustPolice(row, currentIndex)
elif i == "TYPORGA":
typeVolunteer(row, currentIndex)
elif i == "VICTIM":
breakInVictim(row, currentIndex)
elif i == "VISCURCF":
visaType(row, currentIndex)
elif i == "YRARRBC":
yearArrived(row, currentIndex)
w.writerow(row)
states = ['input']
fileRoot = ''
for i in range(len(states)):
print(i)
infile = fileRoot + states[i] + ".csv"
outfile = fileRoot + states[i] + "_out.csv"
outfile2 = fileRoot + states[i] + "_filledin.csv"
print(infile, outfile, outfile2)
reduceDataByColumn(infile, outfile)
fillInData(outfile, outfile2)
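# Pipeline summary: input.csv -> reduceDataByColumn -> input_out.csv
#                   -> fillInData -> input_filledin.csv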
|
import torch
import numpy as np
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import os
from torch.autograd import Variable
from torch.nn.functional import conv1d
from scipy import signal
import torch.nn.functional as F
import pdb
from visdom import Visdom  # customLoss below instantiates Visdom; the import was missing
class ordLoss(nn.Module):
"""
Ordinal loss is defined as the average of pixelwise ordinal loss F(h, w, X, O)
over the entire image domain:
"""
def __init__(self):
super(ordLoss, self).__init__()
self.loss = 0.0
def forward(self, orig_ord_labels, orig_target):
"""
:param ord_labels: ordinal labels for each position of Image I.
        :param target: the ground truth discretized using the SID strategy.
:return: ordinal loss
"""
device = orig_ord_labels.device
ord_labels = orig_ord_labels.clone()
# ord_labels = ord_labels.unsqueeze(0)
ord_labels = torch.transpose(ord_labels, 1, 2)
N, C, W = ord_labels.size()
ord_num = C
self.loss = 0.0
# faster version
if torch.cuda.is_available():
K = torch.zeros((N, C, W), dtype=torch.int).to(device)
for i in range(ord_num):
K[:, i, :] = K[:, i, :] + i * \
torch.ones((N, W), dtype=torch.int).to(device)
else:
K = torch.zeros((N, C, W), dtype=torch.int)
for i in range(ord_num):
K[:, i, :] = K[:, i, :] + i * \
torch.ones((N, W), dtype=torch.int)
# pdb.set_trace()
# target = orig_target.clone().type(torch.DoubleTensor)
if device == torch.device('cpu'):
target = orig_target.clone().type(torch.IntTensor)
else:
target = orig_target.clone().type(torch.cuda.IntTensor)
mask_0 = torch.zeros((N, C, W), dtype=torch.bool)
mask_1 = torch.zeros((N, C, W), dtype=torch.bool)
for i in range(N):
mask_0[i] = (K[i] <= target[i]).detach()
mask_1[i] = (K[i] > target[i]).detach()
one = torch.ones(ord_labels[mask_1].size())
if torch.cuda.is_available():
one = one.to(device)
self.loss += torch.sum(torch.log(torch.clamp(ord_labels[mask_0], min=1e-8, max=1e8))) \
+ torch.sum(torch.log(torch.clamp(one - ord_labels[mask_1], min=1e-8, max=1e8)))
N = N * W
self.loss /= (-N) # negative
# pdb.set_trace()
return self.loss
class customLoss(nn.Module):
"""
This customize loss is contained of Ordloss and MSELoss of the frequency magnitude
"""
def __init__(self, device):
super(customLoss, self).__init__()
self.loss = 0.0
self.ord = ordLoss()
self.vis = Visdom(port=8093, env='main')
# self.cross = torch.nn.CrossEntropyLoss()
# self.cross = torch.nn.NLLLoss()
# self.cross = torch.nn.MSELoss()
self.reg = regressLoss()
# self.weight = torch.autograd.Variable(torch.tensor(1.0), requires_grad=True).to(device)
self.weight = nn.Linear(2,1).to(device)
with torch.no_grad():
self.weight.weight.copy_(torch.tensor([1.0,1.0]))
pdb.set_trace()
self.t = torch.tensor([2.0,2.0]).to(device)
self.device = device
def forward(self, predict, true_rPPG):
self.loss1 = self.ord(predict[0], true_rPPG)
self.true_fft = self.torch_style_fft(true_rPPG) # (batch size x 60)
self.predict_fft = self.torch_style_fft(predict[1]) # (batch size x 60)
self.loss2 = self.reg(self.predict_fft, self.true_fft)
if torch.isnan(self.loss2):
pdb.set_trace()
# self.loss = self.loss1 + self.weight * self.loss2
# pdb.set_trace()
self.t1 = self.weight(self.t)
self.loss = self.weight(torch.stack([self.loss1, self.loss2]))
pdb.set_trace()
return self.loss
# pdb.set_trace()
def torch_style_fft(self, sig):
# pdb.set_trace()
S, _ = torch_welch(sig, fps = 30)
return S
class regressLoss(nn.Module):
def __init__(self):
super(regressLoss, self).__init__()
self.softmax = nn.Softmax(dim=1)
# self.weight = weight
def forward(self, outputs, targets):
preoutput = outputs.clone()
if torch.isnan(preoutput.cpu().detach()).any():
pdb.set_trace()
# small_number = torch.tensor(1e-45).to(targets.get_device())
targets = self.softmax(targets)
outputs = self.softmax(outputs)
if torch.isnan(outputs.cpu().detach()).any():
pdb.set_trace()
# outputs = outputs + small_number
loss = -targets.float() * torch.log(outputs)
# if np.isnan(torch.mean(loss).cpu().detach().numpy()):
# pdb.set_trace()
return torch.mean(loss)
class KLDivLoss(nn.Module):
def __init__(self, reduction="mean"):
super(KLDivLoss, self).__init__()
self.criterion = torch.nn.KLDivLoss(reduction=reduction)
# self.weight = weight
def forward(self, outputs, targets):
out = outputs.clone()
tar = targets.clone()
out.uniform_(0, 1)
tar.uniform_(0, 1)
# loss = self.criterion(F.log_softmax(out, -1), tar)
loss = self.criterion(F.log_softmax(outputs, dim=1), F.softmax(targets, dim=1))
return loss
def torch_welch(sig, fps):
nperseg = sig.size(1)
nfft = sig.size(1)
noverlap = nperseg//2
# pdb.set_trace()
sig = sig.type(torch.cuda.FloatTensor)
win = torch.from_numpy(signal.hann(sig.size(1))).to(sig.get_device()).type(torch.cuda.FloatTensor)
sig = sig.unsqueeze(1)
# pdb.set_trace()
'''detrend'''
sig = sig - torch.from_numpy(np.expand_dims(np.mean(sig.detach().cpu().numpy(), -1), -1)).to(sig.get_device())
sig = sig * win
S = torch.rfft(sig, 1, normalized=True, onesided=True)
S = torch.sqrt(S[..., 0]**2 + S[..., 1]**2)
freqs = torch.from_numpy(np.fft.rfftfreq(nfft, 1/float(fps)))
S = S.squeeze(1)
return S, freqs
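# Minimal CPU smoke test for the losses above (illustrative only; the tensor shapes
# are assumptions: ordLoss appears to expect per-bin probabilities of shape
# (batch, width, num_bins) and integer bin targets of shape (batch, width)).
if __name__ == "__main__":
    torch.manual_seed(0)
    probs = torch.sigmoid(torch.randn(2, 8, 5))   # (N, W, C) ordinal probabilities
    target = torch.randint(0, 5, (2, 8))          # discretized ground-truth bins
    print("ordLoss:", ordLoss()(probs, target).item())
    out, tgt = torch.randn(4, 60), torch.randn(4, 60)
    print("KLDivLoss:", KLDivLoss()(out, tgt).item())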
|
from django.conf.urls import include, patterns, url
from tastypie.api import Api
import mozillians.groups.api
import mozillians.users.api
v1_api = Api(api_name='v1')
v1_api.register(mozillians.users.api.UserResource())
v1_api.register(mozillians.groups.api.GroupResource())
v1_api.register(mozillians.groups.api.SkillResource())
urlpatterns = patterns(
'',
url(r'', include(v1_api.urls)),)
|
import random
import time
def temp_model(filename):
    # ans: meant to be handed off to eval_image_classifier.py (translated from the
    # original Korean comment); the actual classification call is not part of this stub.
    ans = filename  # placeholder so the stub runs; the real value would come from the classifier
    print('[ {} ]'.format(ans), flush=True)
    return ans
|
import pytest
from k8_vmware.helpers.TestCase_VM import TestCase_VM
from k8_vmware.vsphere.Datastore import Datastore
from k8_vmware.vsphere.VM_Device import VM_Device
class test_VM_Device(TestCase_VM):
vm_name = f"tests__unit__" + __name__
def setUp(self) -> None:
self.vm_name = test_VM_Device.vm_name
self.vm_device = VM_Device(vm=self.vm)
def test_cdrom_iso_add_to_vm(self):
iso_paths = Datastore().files_paths("*.iso")
if len(iso_paths) == 0:
pytest.skip(f"target server did not have an ISO we can use")
iso_path = iso_paths.pop()
assert len(self.vm_device.vm.devices()) == 9
self.vm_device.cdrom_iso_add_to_vm(iso_path)
assert len(self.vm_device.vm.devices()) == 10
cdrom = self.vm_device.vm.devices_Cdroms().pop()
assert cdrom.deviceInfo.label == 'CD/DVD drive 1'
        # the replace calls below work around a difference between running the tests locally (macOS) and on GitHub Actions (Linux),
        # where the deviceInfo.summary produced locally doesn't have the '/' between the datastore name and the path
assert cdrom.deviceInfo.summary.replace('/',' ') == f'ISO {iso_path}'.replace('/',' ')
assert cdrom.backing.fileName.replace('/',' ') == iso_path.replace('/',' ')
self.vm_device.remove_device(cdrom)
def test_cdrom_add_to_vm(self):
assert len(self.vm_device.vm.devices()) == 9
self.vm_device.cdrom_add_to_vm()
assert len(self.vm_device.vm.devices()) == 10
cdrom = self.vm_device.vm.devices_Cdroms().pop()
assert cdrom.deviceInfo.label == 'CD/DVD drive 1'
self.vm_device.remove_device(cdrom)
assert len(self.vm_device.vm.devices()) == 9
def test_disk_ide_add_to_vm(self):
assert len(self.vm_device.vm.devices()) == 9
self.vm_device.disk_ide_add_to_vm(100)
self.vm_device.disk_ide_add_to_vm(20)
        # self.vm_device.disk_ide_add_to_vm(20) # at the moment an error occurs when trying to add more than 2 disks (related to unit_number)
disks = self.vm_device.vm.devices_Disks()
self.vm_device.remove_device(disks[0]) # remove disk 1 from vm
self.vm_device.remove_device(disks[1]) # remove disk 2 from vm
self.vm_device.disk_delete (disks[0]) # delete disk 1 vmdk file
self.vm_device.disk_delete (disks[1]) # delete disk 2 vmdk file
assert len(self.vm_device.vm.devices()) == 9
def test_disk_scsi_add_to_vm(self):
disk_1_size = 10
disk_2_size = 20
assert self.vm_device.vm.controller_scsi() is None
assert len(self.vm_device.vm.devices() ) == 9
assert len(self.vm_device.vm.devices_Disks()) == 0
self.vm_device.scsi_controller__add_to_vm() # add scsi controller
self.vm_device.disk_scsi_add_to_vm(disk_1_size)
self.vm_device.disk_scsi_add_to_vm(disk_2_size)
disks= self.vm_device.vm.devices_Disks()
assert disks[0].capacityInBytes == disk_1_size * 1024 * 1024 * 1024
assert disks[1].capacityInBytes == disk_2_size * 1024 * 1024 * 1024
assert len(self.vm_device.vm.devices()) == 12
controller_scsi = self.vm_device.vm.controller_scsi()
self.vm_device.remove_device(disks[0]) # remove disk 1 from vm
self.vm_device.remove_device(disks[1]) # remove disk 2 from vm
self.vm_device.remove_device(controller_scsi) # remove scsi controller
self.vm_device.disk_delete(disks[0]) # delete disk 1 vmdk file
self.vm_device.disk_delete(disks[1]) # delete disk 2 vmdk file
assert len(self.vm_device.vm.devices() ) == 9
assert len(self.vm_device.vm.devices_Disks()) == 0
assert self.vm_device.vm.controller_scsi() is None
def test_scsi_controller__add_to_vm(self):
assert len(self.vm_device.vm.devices()) == 9
assert self.vm_device.vm.controller_scsi() is None
self.vm_device.scsi_controller__add_to_vm()
scsi_controller = self.vm_device.vm.controller_scsi()
assert scsi_controller.deviceInfo.label == 'SCSI controller 0'
assert len(self.vm_device.vm.devices()) == 10
self.vm_device.remove_device(scsi_controller)
        assert self.vm_device.vm.controller_scsi() is None
assert len(self.vm_device.vm.devices()) == 9
|
# -*- coding: utf-8 -*-
from sqlalchemy import Column, String
from sqlalchemy.orm import declarative_base
from zvt.contract import Mixin
from zvt.contract.register import register_schema
NewsBase = declarative_base()
class StockNews(NewsBase, Mixin):
__tablename__ = "stock_news"
#: 新闻标题
news_title = Column(String)
register_schema(providers=["em"], db_name="stock_news", schema_base=NewsBase, entity_type="stock")
# the __all__ is generated
__all__ = ["StockNews"]
|
import smtplib
from email import encoders
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
EMAIL = 'sender@gmail.com'
try:
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.ehlo()
with open('password.txt', 'r') as f:
        password = f.read().strip()  # drop the trailing newline so login doesn't fail
server.login(EMAIL, password)
except :
print('Login error')
msg = MIMEMultipart()
msg['From'] = 'Joon J'
msg['To'] = 'to@email.com'
msg['Subject'] = 'Just a test message'
with open('message.txt','r') as f:
message = f.read()
msg.attach(MIMEText(message, 'plain'))
filename = 'test.jpg'
with open(filename, 'rb') as attachment:
    p = MIMEBase('application', 'octet-stream')
    p.set_payload(attachment.read())
encoders.encode_base64(p)
p.add_header('Content-Disposition', f'attachment; filename={filename}')
msg.attach(p)
text = msg.as_string()
try:
server.sendmail(EMAIL,'to@email.com', text)
server.quit()
except:
print('Send error')
|
"""
transformDiagMatDemo: demo for transfDiagMagDemo
- source:https://github.com/flaberenne/python3rep/transformDiagMatDemo.py
- author:flaberenne
"""
import transformDiagMat as f
a = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20], [21, 22, 23, 24, 25]]
print("*************")
print("Original matrix")
print("*************")
for matLine in a:
print(*matLine)
print("\n")
print("*************")
print("Data on diagonal SW => NE")
print("*************")
r=f.transformDiagMat(a,0)
for matLine in r:
print(*matLine)
print("\n")
print("*************")
print("Data on diagonal NE => SW")
print("*************")
r=f.transformDiagMat(a,1)
for matLine in r:
print(*matLine)
print("\n")
print("*************")
print("Alternate")
print("*************")
r=f.transformDiagMat(a,2)
for matLine in r:
print(*matLine)
|
#!/usr/bin/python
import commands
import glob
import itertools
import Queue
import os
import os.path
import shutil
import subprocess
import sys
import threading
import time
import traceback
import zipfile
import class_cache
import symlink
BUILD_DIR = "build"
ICBM_PATH = os.path.abspath(os.path.dirname(__file__))
class BuildError(Exception):
def __init__(self, target):
Exception.__init__(self, "Error building %s" % target.Name())
class Engine(object):
def __init__(self):
# target name -> target
self.targets = {}
# target -> set(filename)
self.target_deps = {}
# filename -> target
self.target_provides = {}
self.ready_queue = Queue.Queue()
self.waitor_lock = threading.Lock()
self.done = set()
self.waitors = []
self.build_visited = set()
self.success = True
self.class_cache = class_cache.ClassCache(
os.path.join(BUILD_DIR, "classcache"))
def Worker(self):
while True:
try:
item = self.ready_queue.get()
except:
return
with self.waitor_lock:
print "building", item.Name(), time.time()
try:
item.Setup(self)
if not item.Run(self):
raise BuildError(item)
self.done.add(item)
except Exception:
traceback.print_exc()
self.success = False
with self.waitor_lock:
self.EvalWaitors()
self.ready_queue.task_done()
def EvalWaitors(self):
todel = []
for waitor in self.waitors:
deps = set()
for f in self.target_deps[waitor]:
deps.add(self.target_provides[f])
if not (deps - self.done):
todel.append(waitor)
for waitor in todel:
self.waitors.remove(waitor)
self.ready_queue.put(waitor)
def Depend(self, target, f):
#print "----- Requiring", target, f
self.target_deps.setdefault(target, set()).add(f)
def Provide(self, target, f):
#print "----- Providing", target, f
assert f not in self.target_provides
self.target_provides[f] = target
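    # Illustrative use of the dependency model (hypothetical paths): a JavaCompile
    # target calls engine.Depend(self, "common/src/Foo.java") for each input file
    # and engine.Provide(self, "server") for the output name it produces.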
def AddTarget(self, target):
assert target.Name() not in self.targets, "duplicate target: %s" % target.Name()
self.targets[target.Name()] = target
def ComputeDependencies(self):
for target in self.targets.itervalues():
target.AddDependencies(self)
def GetTarget(self, target):
return self.targets.get(target)
def GetFilename(self, path):
if path.startswith("/"):
return path
assert path in self.target_provides, "path not provided: %s" % path
return os.path.abspath(self.target_provides[path].GetOutput(path))
def BuildTarget(self, target):
if target in self.build_visited:
return
deps = self.target_deps.get(target, [])
for f in deps:
assert f in self.target_provides, "No target provides %s" % f
self.BuildTarget(self.target_provides[f])
if not deps:
self.ready_queue.put(target)
else:
self.waitors.append(target)
self.build_visited.add(target)
def Go(self, workers=4):
# Start up workers
for i in xrange(workers):
t = threading.Thread(target=self.Worker)
t.daemon = True
t.start()
self.ready_queue.join()
if self.waitors:
print "Following targets not built:", map(
lambda x: x.name, self.waitors)
return self.success
def VerifyGraph(self, target, current=None, seen=None):
# Make sure that there aren't any cyclical dependencies. Does
# a DFS, keeping track of the current path so far to make sure
# that there are no cycles, as well as a list of nodes that
# have been verified as "good" and don't need to be recursed
# down.
        # NOTE: the cycle check described above is not implemented yet; always passes.
        return True
class Target(object):
def __init__(self, path, name):
self.path = path
self.name = name
def Name(self):
return self.name
def AddDependencies(self, engine):
raise NotImplementedError
def Setup(self, engine):
raise NotImplementedError
def Run(self, engine):
raise NotImplementedError
def GetOutput(self, path):
raise NotImplementedError
@staticmethod
def NewerChanges(paths, timestamp):
"""Computes whether the task needs to do any changes
Iterates through all the paths and recursively finds the
newest file. If it was modified after the timestamp, then
there are changes that need to be addressed by the target.
Args:
paths: An array of paths. Directories are walked recursively.
Returns True if the target needs to perform work.
"""
if not os.path.exists(timestamp):
return True
newest = [0]
def _Update(path):
s = os.stat(path)
if s.st_mtime > newest[0]:
newest[0] = s.st_mtime
def _Visit(arg, dirname, names):
for name in names:
path = os.path.join(dirname, name)
if not os.path.isfile(path):
continue
if os.path.samefile(path, timestamp):
continue
_Update(path)
for path in paths:
if os.path.isdir(path):
os.path.walk(path, _Visit, newest)
else:
_Update(path)
return newest[0] > os.stat(timestamp).st_mtime
@staticmethod
def DependenciesChanged(depstr, store):
if not os.path.exists(store):
return True
f = open(store)
with f:
stored = f.read()
return stored != depstr
class JavaCompile(Target):
def __init__(self, path, name, sources, jars, data, main, flags):
Target.__init__(self, path, name)
self.sources = dict(sources)
self.jars = dict(jars)
self.data = dict(data)
self.main = main
self.flags = flags
def AddDependencies(self, engine):
if self.flags:
engine.Depend(self, "flag_processor")
for fake, real in self.sources.iteritems():
if not real.startswith("/"):
engine.Depend(self, real)
for fake, real in self.data.iteritems():
if not real.startswith("/"):
engine.Depend(self, real)
engine.Provide(self, self.name)
def Setup(self, engine):
# Create the prefix where we're going to build everything
prefix = self.prefix = os.path.join(BUILD_DIR, self.name)
if not os.path.exists(prefix):
os.makedirs(prefix)
# Link in the compile.xml which will tell ant to build things
compile_xml = os.path.join(prefix, "compile.xml")
if not os.path.exists(compile_xml):
symlink.symlink(
os.path.join(ICBM_PATH, "compile.xml"),
compile_xml)
# Set up the src/ directory, by symlinking in all the
# depending source files.
srcprefix = self.srcprefix = os.path.join(prefix, "src")
if os.path.exists(srcprefix):
shutil.rmtree(srcprefix)
os.makedirs(srcprefix)
for source, filename in self.sources.iteritems():
path = os.path.join(srcprefix, os.path.dirname(source))
if not os.path.exists(path):
os.makedirs(path)
dest = os.path.join(path, os.path.basename(source))
symlink.symlink(engine.GetFilename(filename), dest)
# Set up the jars/ directory by symlinking in all the depending jars.
jarprefix = self.jarprefix = os.path.join(prefix, "jars")
if os.path.exists(jarprefix):
shutil.rmtree(jarprefix)
os.makedirs(jarprefix)
for jar, filename in self.jars.iteritems():
symlink.symlink(engine.GetFilename(filename),
os.path.join(jarprefix, os.path.basename(jar)))
# Set up the output directory where all the class files will go
outprefix = self.outprefix = os.path.join(prefix, "classes")
if not os.path.exists(outprefix):
os.makedirs(outprefix)
# Data files are meant to be on the classpath, so put them
# into classes as well.
for data, filename in self.data.iteritems():
path = os.path.join(outprefix, os.path.dirname(data))
if not os.path.exists(path):
os.makedirs(path)
dest = os.path.join(path, os.path.basename(data))
if os.path.exists(dest):
os.unlink(dest)
symlink.symlink(engine.GetFilename(filename), dest)
# Map in any existing class files from the class cache
engine.class_cache.PopulateFromCache(outprefix, self.sources)
# Create an eclipse file
with open(os.path.join(prefix, ".classpath"), "w") as f:
f.write("""<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="src" path="src"/>
<classpathentry kind="output" path="classes"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
""")
for jar in self.jars:
f.write('<classpathentry kind="lib" path="jars/%s"/>\n' %
os.path.basename(jar))
f.write("</classpath>\n")
# Create a findbugs file
with open(os.path.join(prefix, "findbugs.fbp"), "w") as f:
loc = os.path.abspath(prefix)
f.write('<Project projectName="">\n')
for jar in self.jars:
f.write("<AuxClasspathEntry>%s</AuxClasspathEntry>\n" %
os.path.join(loc, "jars", os.path.basename(jar)))
f.write("<Jar>%s</Jar>\n" % os.path.join(loc, "classes"))
f.write("<SrcDir>%s</SrcDir>\n" % os.path.join(loc, "src"))
f.write("""<SuppressionFilter>
<LastVersion value="-1" relOp="NEQ"/>
</SuppressionFilter>
""")
f.write("</Project>\n")
def GenerateRunner(self):
# Create a script to run the whole thing with appropriate
# class path and main class.
srcrunner = open(ICBM_PATH + "/java_run.sh")
with srcrunner:
text = srcrunner.read()
runner_path = os.path.join(self.prefix, self.name)
if not os.path.exists(os.path.dirname(runner_path)):
os.makedirs(os.path.dirname(runner_path))
outrunner = open(runner_path, "w")
with outrunner:
outrunner.write(text % {"main_class": self.main})
os.chmod(runner_path, 0755)
srcdebugger = open(ICBM_PATH + "/java_debug.sh")
with srcdebugger:
text = srcdebugger.read()
debugger_path = os.path.join(self.prefix, "%s-debug" % self.name)
if not os.path.exists(os.path.dirname(debugger_path)):
os.makedirs(os.path.dirname(debugger_path))
outdebugger = open(debugger_path, "w")
with outdebugger:
outdebugger.write(text % {"main_class": self.main})
os.chmod(debugger_path, 0755)
def Run(self, engine):
# Ant is slow at figuring out that it has nothing to do, so
# check for a build tstamp, and compare against files. If none
# of them are newer, skip this step.
depstr = "%r%r%r%r%r" % (
self.sources, self.jars, self.data, self.main, self.flags)
deplist = os.path.join(self.prefix, ".deplist")
tstamp_path = os.path.join(self.prefix, self.name)
if (not self.NewerChanges(
[self.srcprefix, self.jarprefix], tstamp_path) and
not self.DependenciesChanged(depstr, deplist)):
return True
cmd = ["ant", "-f", os.path.join(self.prefix, "compile.xml")]
print cmd
p = subprocess.Popen(cmd,
bufsize=1,
#stdout=subprocess.STDOUT,
#stderr=subprocess.STDOUT,
close_fds=True,
shell=False)
p.wait()
engine.class_cache.UpdateCache(self.outprefix)
if p.returncode != 0:
return False
if not self.flags:
self.Complete(deplist, depstr)
return True
# Execute the flagprocessor with all of its classpath, as well
# as with the classpath of the target. We can assume that the
# target is a java_binary, so it has a fairly standard layout.
#
# java -cp flag_processor/*:target/* \
# com.alphaco.util.flags.FlagProcessor target/classes
flags = subprocess.Popen(
"java -cp flag_processor/classes:flag_processor/jars/* "
"com.alphaco.util.flags.FlagProcessor "
"%(target)s/classes "
"'%(target)s/jars/*'" % {"target" : self.name},
cwd=BUILD_DIR,
bufsize=1,
stdout=subprocess.PIPE,
close_fds=True,
shell=True)
output = flags.stdout.read()
if flags.wait() != 0:
return False
f = open(os.path.join(self.outprefix, "flagdescriptors.cfg"), "w")
with f:
f.write(output)
self.Complete(deplist, depstr)
return True
def Complete(self, deplist, depstr):
self.GenerateRunner()
f = open(deplist, "w")
with f:
f.write(depstr)
def GetOutput(self, path):
assert path == os.path.join(self.name, self.name)
return os.path.join(self.prefix, self.name)
class JarBuild(Target):
def __init__(self, path, name, target, jars, main, premain):
Target.__init__(self, path, name)
self.target = target
self.jars = dict(jars)
self.main = main
self.premain = premain
def AddDependencies(self, engine):
engine.Depend(self, self.target)
engine.Provide(self, self.name)
def Setup(self, engine):
pass
def Run(self, engine):
prefix = os.path.join(BUILD_DIR, self.target, "classes")
# Verify that we actually need to do something. Otherwise
# leave it alone.
tstamp_path = os.path.join(BUILD_DIR, self.name)
if not self.NewerChanges(self.jars.values() + [prefix], tstamp_path):
return True
# Put together the classes dir from the compiles, as well as
# all of the jars into a single jar.
out = os.path.join(BUILD_DIR, ".%s" % self.name)
f = open(out, "wb")
os.fchmod(f.fileno(), 0755)
f.write("""#!/bin/sh
exec java ${JVM_ARGS} -jar $0 "$@"
""")
f = zipfile.ZipFile(f, "w")
added = set()
def _Add(arg, dirname, files):
for fn in files:
fn = os.path.join(dirname, fn)
if os.path.isfile(fn):
f.write(fn, os.path.relpath(fn, arg))
added.add(os.path.relpath(fn, arg))
os.path.walk(prefix, _Add, prefix)
def _Exclude(fn):
# Don't include manifest file or signatures
if fn.startswith("META-INF/"):
for end in ("MANIFEST.MF", ".SF", ".RSA"):
if fn.endswith(end):
return True
# Don't include play.plugins file as this causes play to load
# duplicate plugins
if fn == "play.plugins":
return True
if fn in added:
return True
return False
for jar, filename in self.jars.iteritems():
j = zipfile.ZipFile(engine.GetFilename(filename), "r")
for info in j.infolist():
if not _Exclude(info.filename):
contents = j.open(info).read()
f.writestr(info, contents)
# Clear VERSIONER_PYTHON_VERSION for mac, so that hg can use the default python version
rev = commands.getoutput("unset VERSIONER_PYTHON_VERSION; hg parent --template '{rev}:{node}\\n'")
rev_hash = ''
if rev and ":" in rev:
rev, rev_hash = rev.split(":")
premain = "Premain-Class: %s\n" % self.premain if self.premain else ""
manifest = (
"""Manifest-Version: 1.0
Main-Class: %s
%sBuilt-By: %s
Built-On: %s
Build-Revision: %s
Build-Revision-Hash: %s
Yext-Jar: %s
""" % (self.main,
premain,
os.getenv("USER"),
time.strftime("%b %d, %Y %I:%M:%S %p"),
rev.strip(),
rev_hash,
self.target))
f.writestr("META-INF/MANIFEST.MF", manifest)
f.close()
os.rename(out, os.path.join(BUILD_DIR, self.name))
return True
def GetOutput(self, path):
assert path == self.name
return os.path.join(BUILD_DIR, self.name)
class WarBuild(Target):
def __init__(self, path, name, data, target, jars):
Target.__init__(self, path, name)
self.data = dict(data)
self.target = target
self.jars = dict(jars)
def AddDependencies(self, engine):
engine.Depend(self, self.target)
for fake, real in self.data.iteritems():
if not real.startswith("/"):
engine.Depend(self, real)
engine.Provide(self, self.name)
def Setup(self, engine):
pass
def Run(self, engine):
prefix = os.path.join(BUILD_DIR, self.target, "classes")
# Verify that we actually need to do something. Otherwise
# leave it alone.
tstamp_path = os.path.join(BUILD_DIR, self.name)
if not self.NewerChanges(
self.jars.values() + self.data.values() + [prefix], tstamp_path):
return True
# Put together the classes dir from the compiles, as well as
# all of the jars into a single jar.
out = os.path.join(BUILD_DIR, ".%s" % self.name)
f = zipfile.ZipFile(out, "w")
for fake, fn in self.data.iteritems():
fn = engine.GetFilename(fn)
if os.path.isfile(fn):
f.write(fn, fake)
def _Add(arg, dirname, files):
for fn in files:
fn = os.path.join(dirname, fn)
if os.path.isfile(fn):
f.write(fn, os.path.join("WEB-INF/classes", os.path.relpath(fn, arg)))
os.path.walk(prefix, _Add, prefix)
for jar, fn in self.jars.iteritems():
fn = engine.GetFilename(fn)
f.write(fn, os.path.join("WEB-INF/lib", jar))
# Clear VERSIONER_PYTHON_VERSION for mac, so that hg can use the default python version
rev = commands.getoutput("unset VERSIONER_PYTHON_VERSION; hg parent -q")
if rev and ":" in rev:
rev = rev.split(":")[0]
manifest = (
"""Manifest-Version: 1.0
Built-By: %s
Built-On: %s
Build-Revision: %s
""" % (os.getenv("USER"), time.strftime("%b %d, %Y %I:%M:%S %p"), rev.strip()))
f.writestr("META-INF/MANIFEST.MF", manifest)
f.close()
os.rename(out, os.path.join(BUILD_DIR, self.name))
return True
def GetOutput(self, path):
assert path == self.name
return os.path.join(BUILD_DIR, self.name)
class PlayCompile(Target):
def __init__(self, path, name, modules, deps, data, play_home):
Target.__init__(self, path, name)
self.modules = modules
self.deps = deps
self.data = dict(data)
self.play_home = play_home
def AddDependencies(self, engine):
for dep in self.deps:
engine.Depend(self, dep)
for fake, real in self.data.iteritems():
if not real.startswith("/"):
engine.Depend(self, real)
engine.Provide(self, self.name)
def Setup(self, engine):
# Make the directory, set up symlinks
prefix = self.prefix = os.path.join(BUILD_DIR, self.name.rsplit(".zip")[0])
if not os.path.exists(self.prefix):
os.makedirs(self.prefix)
def Run(self, engine):
# Always run the precompilation for now
def _CopyPlayApp(src):
for dir in ("app", "conf", "public"):
for root, dirs, files in os.walk(os.path.join(src, dir)):
dest_dir = os.path.join(self.prefix, root)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for file in files:
dest = os.path.join(dest_dir, file)
if not os.path.exists(dest):
symlink.symlink(
os.path.realpath(os.path.join(root, file)),
dest)
# Copy over the play modules
for module in self.modules:
_CopyPlayApp(module)
# Execute the play compiler
generate = subprocess.Popen(
[self.play_home + '/play',
'precompile',
os.path.join(self.prefix, self.modules[0])],
bufsize=1,
close_fds=True,
shell=False)
if generate.wait() != 0:
return False
# Copy all the data file as well
for data, filename in self.data.iteritems():
srcprefix = os.path.join(self.prefix, "src")
path = os.path.join(srcprefix, os.path.dirname(data))
if not os.path.exists(path):
os.makedirs(path)
dest = os.path.join(path, os.path.basename(data))
if os.path.exists(dest):
os.unlink(dest)
symlink.symlink(engine.GetFilename(filename), dest)
# Zip up the compiled play application
tmp = os.path.join(self.prefix, ".%s" % self.name)
f = zipfile.ZipFile(tmp, "w")
for root, dirs, files in os.walk(self.prefix):
for name in files:
path = os.path.join(root, name)
f.write(path, os.path.relpath(path, self.prefix))
f.close()
os.rename(tmp, os.path.join(BUILD_DIR, self.name))
return True
def GetOutput(self, path):
assert path == self.name
return os.path.join(BUILD_DIR, self.name)
class Generate(Target):
def __init__(self, path, name, compiler, args, sources, outputs, deps):
Target.__init__(self, path, name)
self.sources = sources
self.outputs = set(outputs)
self.compiler = compiler
self.args = args or []
self.deps = deps
def AddDependencies(self, engine):
for dep in self.deps:
engine.Depend(self, dep)
for fake, real in self.sources:
if not real.startswith("/"):
engine.Depend(self, real)
for out in self.outputs:
engine.Provide(self, out)
def Setup(self, engine):
# Make the directory, set up symlinks
prefix = self.prefix = os.path.join(BUILD_DIR, self.name)
if not os.path.exists(self.prefix):
os.makedirs(self.prefix)
for fake, real in self.sources:
path = os.path.join(prefix, os.path.dirname(fake))
if not os.path.exists(path):
os.makedirs(path)
dest = os.path.join(path, os.path.basename(fake))
if not os.path.exists(dest):
symlink.symlink(engine.GetFilename(real), dest)
for out in self.outputs:
path = os.path.join(prefix, os.path.dirname(out))
if not os.path.exists(path):
os.makedirs(path)
def Run(self, engine):
# The assumption is that the generation is fully dependent on
# the inputs. So if none of them have changed, then no need to
# do anything.
tstamp_path = os.path.join(self.prefix, "TIMESTAMP")
if not self.NewerChanges([self.prefix], tstamp_path):
return True
# Execute the compiler in the prefix cwd with the sources and
# outputs as the arguments. It is assumed that it will know
# what to do with them.
args = ([self.compiler] + self.args + list(x[0] for x in self.sources) +
list(self.outputs))
print args
generate = subprocess.Popen(
args,
cwd=self.prefix,
bufsize=1,
close_fds=True,
shell=False)
if generate.wait() != 0:
return False
with open(tstamp_path, "w"):
pass
return True
def GetOutput(self, path):
assert path in self.outputs, path
return os.path.join(self.prefix, path)
class Alias(Target):
def __init__(self, path, name, deps):
Target.__init__(self, path, name)
self.deps = deps
def AddDependencies(self, engine):
for dep in self.deps:
engine.Depend(self, dep)
engine.Provide(self, self.name)
def Setup(self, engine):
pass
def Run(self, engine):
return True
def GetOutput(self, path):
return path
|
from django.contrib import auth
from django.contrib.auth import logout
from django.shortcuts import redirect, render_to_response
from django.template import RequestContext
from django.template.context_processors import csrf
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required
from userprofile.models import UserSettings, UserRssPortals, User
from news.models import NewsPortal, NewsCategory, RssPortals
from .forms import UserCreationFormNew, UserAuthenticationForm
from .models import UserProfile
import uuid
import datetime
import json
import string
from random import choice, randint
from django.contrib.sites.models import Site, RequestSite
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
SESSION_LIFE_TIME = 86400
SESSION_LIFE_TIME_REMEMBERED = 31536000
def login(request):
args = {}
args.update(csrf(request))
args["form"] = UserAuthenticationForm(request.POST)
if auth.get_user(request).is_authenticated():
return redirect("/")
else:
if request.POST and ("pause" not in request.session):
username = request.POST.get('username', '')
password = request.POST.get('password', '')
user = auth.authenticate(username=username, password=password)
if user is not None:
auth.login(request, user)
if "remember-true" in request.POST:
request.session.set_expiry(SESSION_LIFE_TIME_REMEMBERED)
request.session["pause"] = True
else:
request.session.set_expiry(SESSION_LIFE_TIME)
request.session["pause"] = True
return redirect('/')
else:
args['login_error'] = 'User not found. Please try again.'
return render_to_response('login.html', args)
else:
# args["img-num"] = randint(1, 4)
args["background_url"] = "/static/static/img/login/{file_num}.jpg".format(file_num=randint(1, 27))
return render_to_response('login.html', args, context_instance=RequestContext(request))
@login_required(login_url='/auth/login/')
def user_logout(request):
logout(request)
return HttpResponseRedirect("/?next=%s" % request.get_full_path())
def register(request):
if auth.get_user(request).is_authenticated():
return redirect("/")
else:
args = {}
args.update(csrf(request))
args['form'] = UserCreationFormNew()
if request.POST:
new_user_form = UserCreationFormNew(request.POST)
user_name = request.POST['username']
            # check_username returns an HttpResponse, so query the user table directly here
            if not User.objects.filter(username=user_name).exists():
if new_user_form.is_valid():
new_user_form.save()
new_user = auth.authenticate(username=user_name,
password=request.POST['password1'])
auth.login(request, new_user)
# User settings
UserSettings.objects.create(
user_id=User.objects.get(username=auth.get_user(request).username).id,
)
user_email = request.POST["email"]
if User.objects.filter(email=user_email).exists():
return HttpResponseRedirect("/auth/register/", {"ce": "Current email is used"})
user_phone = "+0-000-000-00-00"
# request.POST["phone"]
UserProfile.objects.create(
user_id=User.objects.get(username=auth.get_user(request).username).id,
confirmation_code=''.join(choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _
in range(33)),
user_cell_number=user_phone,
uuid=set_uuid(User.objects.get(username=auth.get_user(request).username).id)
)
list_portals = RssPortals.objects.all().values()
                    for portal in list_portals:
                        UserRssPortals.objects.create(
                            user_id=User.objects.get(username=auth.get_user(request).username).id,
                            portal_id=int(portal["id"]),
                            check=False
                        )
mail_subject = "Confirm your account on Insydia, %s" % user_name
user_instance = User.objects.get(username=user_name)
text_content = 'This is an important message.'
htmly = render_to_string("confirm.html",
{'username': user_instance.username,
'site': get_site(request),
'email': user_email,
'ucid': user_instance.profile.confirmation_code,
'uuid': user_instance.profile.uuid})
html_content = htmly
mail_from = settings.DEFAULT_FROM_EMAIL
mail_to = user_email
msg = EmailMultiAlternatives(mail_subject, text_content, mail_from, [mail_to])
msg.attach_alternative(html_content, "text/html")
msg.send()
instance = User.objects.get(username=auth.get_user(request).username)
instance.is_active = False
instance.email = user_email
instance.save()
return redirect('/')
# args["img-num"] = randint(1, 4)
args["background_url"] = "/static/static/img/login/{file_num}.jpg".format(file_num=randint(1, 27))
return render_to_response('register.html', args)
@login_required(login_url="/auth/login/")
def render_user_preferences_categories_page(request):
if User.objects.get(username=auth.get_user(request).username).is_active:
return HttpResponseRedirect("/")
else:
args = {
"username": auth.get_user(request).username,
"categories": get_categories_names(request),
}
args.update(csrf(request))
return render_to_response("user_preferences_categories.html", args)
@login_required(login_url="/auth/login/")
def render_user_preferences_portal_page(request):
if User.objects.get(username=auth.get_user(request).username).is_active:
return HttpResponseRedirect("/")
else:
args = {
"username": auth.get_user(request).username,
"portals": get_portals_names(request),
}
args.update(csrf(request))
return render_to_response("user_preferences_portals.html", args)
@login_required(login_url="/auth/login/")
def skip_preferences(request):
instance = User.objects.get(username=auth.get_user(request).username)
instance.is_active = True
instance.save()
return HttpResponseRedirect("/")
def get_portals_names(request):
return NewsPortal.objects.all()
def get_categories_names(request):
return NewsCategory.objects.all()
def pref_cat_save(request):
args = {}
args.update(csrf(request))
settings_instance = UserSettings.objects.get(user_id=User.objects.get(username=auth.get_user(request).username).id)
if request.POST:
categories_list = request.POST.getlist("categories[]")
        for i in categories_list:
            if i not in settings_instance.categories_to_show:
                settings_instance.categories_to_show += "%s," % i
                settings_instance.save()
return HttpResponseRedirect("/auth/preferences=portals")
def pref_portals_save(request):
portals_settings = UserSettings.objects.get(user_id=User.objects.get(username=auth.get_user(request).username).id)
if request.POST:
portals_list = request.POST.getlist("portals[]")
for i in portals_list:
if i not in portals_settings.portals_to_show:
portals_settings.portals_to_show += "%s," % i
portals_settings.save()
user_instance = User.objects.get(username=auth.get_user(request).username)
user_instance.is_active = True
user_instance.save()
return HttpResponseRedirect("/")
def confirm_email(request, confirm_code, user_uuid):
user_take = UserProfile.objects.get(uuid=user_uuid.replace('-', ''))
user_instance = User.objects.get(id=user_take.user_id)
if confirm_code == user_instance.profile.confirmation_code:
user_instance.is_active = True
user_instance.save()
return HttpResponseRedirect('/')
# ################################## SMS PIN #########################################
def send_message_via_sms(request, verify_code, phone_number):
from twilio.rest import TwilioRestClient
account_sid = "AC23d3af9ee2f38d74d4217e1ddb7b4c1c"
auth_token = "6037a6a6474cf31ff68cf0b13146da45"
client = TwilioRestClient(account_sid, auth_token)
text = ", thank you for registration. Your verification code: %s" % verify_code
client.messages.create(to=phone_number, from_="+12166001832", body=text,)
def set_uuid(user_id):
user_instance = User.objects.get(id=user_id)
return uuid.uuid3(uuid.NAMESPACE_DNS, "%s %s" % (user_instance.username, datetime.datetime.now()))
def check_email(request):
if request.POST:
email = request.POST['email']
if User.objects.filter(email=email).exists():
return HttpResponse(json.dumps({"data": True}), content_type="application/json")
else:
return HttpResponse(json.dumps({"data": False}), content_type="application/json")
else:
return HttpResponse(json.dumps({"data": False}), content_type="application/json")
def check_username(request, username):
if User.objects.filter(username=username).exists():
return HttpResponse(json.dumps({"data": True}), content_type="application/json")
else:
return HttpResponse(json.dumps({"data": False}), content_type="application/json")
def render_help_login(request):
from password_reset.forms import PasswordRecoveryForm
args = {
"form": PasswordRecoveryForm,
}
if auth.get_user(request).username:
args["username"] = User.objects.get(username=auth.get_user(request).username)
args.update(csrf(request))
args["background_url"] = "/static/static/img/login/{file_num}.jpg".format(file_num=randint(1, 27))
return render_to_response("cant_login.html", args)
def get_site(request):
if Site._meta.installed:
return Site.objects.get_current()
else:
return RequestSite(request)
|
import base64
import json
import zlib
from typing import Any, List
import pytest
from aws_lambda_powertools.utilities.parser import ValidationError, envelopes, event_parser
from aws_lambda_powertools.utilities.parser.models import CloudWatchLogsLogEvent, CloudWatchLogsModel
from aws_lambda_powertools.utilities.typing import LambdaContext
from tests.functional.parser.schemas import MyCloudWatchBusiness
from tests.functional.utils import load_event
@event_parser(model=MyCloudWatchBusiness, envelope=envelopes.CloudWatchLogsEnvelope)
def handle_cloudwatch_logs(event: List[MyCloudWatchBusiness], _: LambdaContext):
assert len(event) == 1
log: MyCloudWatchBusiness = event[0]
assert log.my_message == "hello"
assert log.user == "test"
@event_parser(model=CloudWatchLogsModel)
def handle_cloudwatch_logs_no_envelope(event: CloudWatchLogsModel, _: LambdaContext):
assert event.awslogs.decoded_data.owner == "123456789123"
assert event.awslogs.decoded_data.logGroup == "testLogGroup"
assert event.awslogs.decoded_data.logStream == "testLogStream"
assert event.awslogs.decoded_data.subscriptionFilters == ["testFilter"]
assert event.awslogs.decoded_data.messageType == "DATA_MESSAGE"
assert len(event.awslogs.decoded_data.logEvents) == 2
log_record: CloudWatchLogsLogEvent = event.awslogs.decoded_data.logEvents[0]
assert log_record.id == "eventId1"
convert_time = int(round(log_record.timestamp.timestamp() * 1000))
assert convert_time == 1440442987000
assert log_record.message == "[ERROR] First test message"
log_record: CloudWatchLogsLogEvent = event.awslogs.decoded_data.logEvents[1]
assert log_record.id == "eventId2"
convert_time = int(round(log_record.timestamp.timestamp() * 1000))
assert convert_time == 1440442987001
assert log_record.message == "[ERROR] Second test message"
def test_validate_event_user_model_with_envelope():
my_log_message = {"my_message": "hello", "user": "test"}
inner_event_dict = {
"messageType": "DATA_MESSAGE",
"owner": "123456789123",
"logGroup": "testLogGroup",
"logStream": "testLogStream",
"subscriptionFilters": ["testFilter"],
"logEvents": [{"id": "eventId1", "timestamp": 1440442987000, "message": json.dumps(my_log_message)}],
}
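    # Build the awslogs "data" field the way the handler expects it: the inner JSON payload
    # is compressed and then base64-encoded.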
dict_str = json.dumps(inner_event_dict)
    compressed_str = zlib.compress(str.encode(dict_str), -1)
    event_dict = {"awslogs": {"data": base64.b64encode(compressed_str)}}
handle_cloudwatch_logs(event_dict, LambdaContext())
def test_validate_event_does_not_conform_with_user_dict_model():
event_dict = load_event("cloudWatchLogEvent.json")
with pytest.raises(ValidationError):
handle_cloudwatch_logs(event_dict, LambdaContext())
def test_handle_cloudwatch_trigger_event_no_envelope():
event_dict = load_event("cloudWatchLogEvent.json")
handle_cloudwatch_logs_no_envelope(event_dict, LambdaContext())
def test_handle_invalid_cloudwatch_trigger_event_no_envelope():
event_dict: Any = {"awslogs": {"data": "invalid_data"}}
with pytest.raises(ValidationError) as context:
handle_cloudwatch_logs_no_envelope(event_dict, LambdaContext())
assert context.value.errors()[0]["msg"] == "unable to decompress data"
def test_handle_invalid_event_with_envelope():
with pytest.raises(ValidationError):
handle_cloudwatch_logs(event={}, context=LambdaContext())
|
#!/usr/bin/env python3
from copy import deepcopy
import os
from typing import List, Union
from .config import Config, ConfigDict
from .env import docker
class Image:
def __init__(
self,
name: str,
image_cfg: ConfigDict,
) -> None:
self.name = name
self.path = image_cfg.path
self.labels = image_cfg.labels
def __repr__(self) -> str:
return (
f"Image: {self.name}\n"
f"\tpath: {self.path}\n"
f"\tlabels: {self.labels}\n"
)
def build(self):
print(f"Building {self.name}...")
image = docker.build(
context_path=self.path,
file=os.path.join(self.path, f"{self.name}.dockerfile"),
labels=self.labels,
tags=[self.name],
            progress=False,  # False to suppress progress output
)
assert image is not None, f"ERR: {self.name} did not build correctly"
return image
def remove(self):
print(f"Trying to remove {self.name}")
# docker.image.prune(all=True, filter=self.labels) # doesn't work!
image = docker.image.inspect(self.name)
image.remove()
class ImageCollection:
def __init__(
self,
images: List[str],
cfg: Config,
) -> None:
if len(set(images)) < len(images):
print("WARN: there are duplicate images")
images = list(set(images))
self.names = images
default_path = deepcopy(cfg.image_path)
default_labels = deepcopy(cfg.labels)
image_cfg = {}
for image in images:
if image not in cfg.keys():
cfg[image] = {
"name": image,
"path": default_path,
"labels": default_labels,
}
else:
keys = cfg[image].keys()
# name
if "name" not in keys:
cfg[image].name = image
# path
if "path" not in keys:
cfg[image].path = default_path
# labels
labels = {}
labels.update(default_labels)
if "labels" in keys:
labels.update(cfg[image].labels)
cfg[image].labels = labels
image_cfg[image] = cfg[image]
self.image_cfg = ConfigDict(image_cfg)
self.images = {}
for k, v in self.image_cfg.items():
self.images[k] = Image(
name=k,
image_cfg=v,
)
def __len__(self) -> int:
return len(self.names)
def __getitem__(self, i: Union[int, str]) -> Image:
if isinstance(i, int):
name = self.names[i]
elif isinstance(i, str):
            name = i
        else:
            raise TypeError(f"Image index must be int or str, got {type(i).__name__}")
assert name in self.images.keys(), \
f"ERR: {name} is not a valid image name"
return self.images[name]
|
# -*- coding: utf-8 -*-
import layers
import tensorflow as tf
from network import Network
class QNetwork(Network):
def __init__(self, conf):
super(QNetwork, self).__init__(conf)
with tf.variable_scope(self.name):
self.target_ph = tf.placeholder('float32', [None], name='target')
encoded_state = self._build_encoder()
self.loss = self._build_q_head(encoded_state)
self._build_gradient_ops(self.loss)
def _build_q_head(self, input_state):
self.w_out, self.b_out, self.output_layer = layers.fc('fc_out', input_state, self.num_actions, activation="linear")
self.q_selected_action = tf.reduce_sum(self.output_layer * self.selected_action_ph, axis=1)
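        # selected_action_ph (defined in the parent Network class) is assumed to be a one-hot
        # action mask, so the element-wise product and reduce_sum extract Q(s, a) for the taken action.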
diff = tf.subtract(self.target_ph, self.q_selected_action)
return self._value_function_loss(diff)
|
import ConfigParser, json, logging, socket, time
import pika
import pika.exceptions
from multiprocessing import Process, Queue
from Queue import Empty
from ssl import CERT_OPTIONAL
from monitor.delayqueue import *
import monitor.core
class Message(object):
"""
Data for a message to be pushed to RabbitMQ
"""
def __init__(self, message_id, data, binlog_filename=None, binlog_position=None):
"""
:param str message_id:
:param dict data: Message data to be serialized into a JSON string
:param str binlog_filename:
:param int binlog_position:
"""
self.message_id = message_id
self.data = data
self.binlog_filename = binlog_filename
self.binlog_position = binlog_position
# String version of the data, excluding timestamps, is used to test for uniqueness in delayqueue.py
data_filtered = {k: v for k, v in data.iteritems() if k not in ['timestamp', 'binlog_timestamp']}
self.unique_data_str = json.dumps(data_filtered, sort_keys=True)
class Amqp(Process):
"""
Generic queue object that should be sub-classed for specific queues
"""
def __init__(self, config, config_section, message_process_queue):
"""
:param ConfigParser.RawConfigParser config: Application configuration
:param str config_section: Configuration section which should be looked at for connection info
:param Queue message_process_queue: Inter-process queue where messages to be sent are pushed for this process to handle
"""
super(Amqp, self).__init__(name=type(self).__name__)
self._config = config # type: ConfigParser.RawConfigParser
self._config_section = config_section
self._retry_count = 0 # On message delivery failure keep track of retry attempts
self._message_process_queue = message_process_queue # type: Queue
self._last_sent_time = 0.0
self._state = monitor.core.State(config.get('monitor', 'state_path'))
self._amqp_exchange = config.get(self._config_section, 'exchange')
self._amqp_exchange_type = config.get(self._config_section, 'exchange_type')
self._amqp_routing_key = config.get(self._config_section, 'routing_key')
self._connection = None
self._channel = None
# Confirmed delivery will throw warning if there are no client queues connected to the exchange
self._confirm_queued = False
def __del__(self):
if self._connection:
self._connection.close()
def run(self):
try:
self.connect()
while True:
try:
message = self._message_process_queue.get(False) # type: Message
self._publish(message)
except Empty:
self._heartbeat()
time.sleep(0.5)
except KeyboardInterrupt:
pass
except Exception as e:
logging.error(e.message)
def connect(self):
logging.info("Connecting to %s...", self._config_section)
parameters = pika.ConnectionParameters(
host=self._config.get(self._config_section, 'host'),
port=self._config.getint(self._config_section, 'port'),
ssl=self._config.getboolean(self._config_section, 'ssl'),
ssl_options={
'ca_certs': self._config.get(self._config_section, 'ca_certs'),
'cert_reqs': CERT_OPTIONAL
},
virtual_host=self._config.get(self._config_section, 'vhost'),
credentials=pika.PlainCredentials(
self._config.get(self._config_section, 'user'), self._config.get(self._config_section, 'password')
),
connection_attempts=5,
retry_delay=5
)
self._connection = pika.BlockingConnection(parameters)
channel_number = 1
self._channel = self._connection.channel(channel_number)
# self._channel.confirm_delivery()
self._setup_channel()
def _setup_channel(self):
logging.info("Configuring AMQP exchange...")
self._channel.exchange_declare(
exchange=self._amqp_exchange,
exchange_type=self._amqp_exchange_type,
passive=False,
durable=True,
auto_delete=False
)
def _publish(self, message):
"""
Write a message to the connected RabbitMQ exchange
:param Message message:
"""
data = message.data
message_id = message.message_id
# Append UNIX timestamp to every message
timestamp = int(round(time.time()))
data['timestamp'] = timestamp
# Set last sent time now to avoid stacking up heartbeat messages if connection is closed
self._last_sent_time = time.time()
try:
published = self._channel.basic_publish(
self._amqp_exchange,
self._amqp_routing_key,
json.dumps(data),
pika.BasicProperties(
content_type="application/json",
delivery_mode=2,
message_id=message_id
),
mandatory=self._confirm_queued
)
# Confirm delivery or retry
if published:
self._retry_count = 0
# Save state often, but not for every message.
# In production we may process hundreds per second.
if message.binlog_filename and timestamp % 2 == 0:
self._state.binlog_filename = message.binlog_filename
self._state.binlog_position = message.binlog_position
self._state.save()
else:
logging.warning("Message publish to queue could not be confirmed.")
raise EnqueueException("Message publish to queue could not be confirmed.")
except (EnqueueException, pika.exceptions.AMQPChannelError, pika.exceptions.AMQPConnectionError,
pika.exceptions.ChannelClosed, pika.exceptions.ConnectionClosed, pika.exceptions.UnexpectedFrameError,
pika.exceptions.UnroutableError, socket.timeout) as e:
self._retry_count += 1
if self._retry_count < 5:
logging.warning(
"Reconnecting to %s and sending message again (Attempt # %d)",
self._config_section, self._retry_count
)
if self._connection.is_open:
try:
self._connection.close()
except:
pass
self.connect()
self._publish(message)
else:
raise e
def _heartbeat(self):
"""
Send a heartbeat message through RabbitMQ if we've been inactive for a time.
This is necessary because our connections to Rabbit time out when quiet for too long. This may be fixed in the
latest pre-release updates to the pika library. The proper solution is for pika to internally use the
heartbeat feature of RabbitMQ. This method is a workaround, although it also lets our clients on the other
side of queues see that we're up and running.
See https://github.com/pika/pika/issues/418
See https://stackoverflow.com/questions/14572020/handling-long-running-tasks-in-pika-rabbitmq
"""
if time.time() - self._last_sent_time > 30:
self._publish(Message('hb-' + str(time.time()), {"type": "heartbeat"}))
class BufferedAmqp(Amqp):
"""
    Publishes database updates, as they are analyzed by an instance of Processor,
    into a fanout queue for subscribers.
Includes a configurable time delay buffer. This is useful to allow time for a slave DB to write updates
before a worker processes a queue message.
"""
def __init__(self, config, message_process_queue):
"""
:param ConfigParser.RawConfigParser config: Application configuration
:param Queue message_process_queue: Queue where messages to be sent are pushed for this process to handle
"""
super(BufferedAmqp, self).__init__(config, 'amqp', message_process_queue)
self.buffer = None # type: MessageDelayQueue
if config.get('monitor', 'delay') and int(config.get('monitor', 'delay')) > 0:
self.buffer_delay = int(config.get('monitor', 'delay'))
else:
self.buffer_delay = 0
def run(self):
try:
self.connect()
except KeyboardInterrupt:
pass
except Exception as e:
logging.error(e.message)
return
if self.buffer_delay:
self.buffer = MessageDelayQueue(self.buffer_delay)
try:
# Loop indefinitely to process queues
self._publish_from_queues()
except KeyboardInterrupt:
# Flush whatever is left in queues
self._flush_queues()
def _publish_from_queues(self):
"""
Loop indefinitely on the process queues and publish messages
"""
while True:
message_q_empty = False
buffer_empty = False
# First pick off the in-bound inter-process message queue
try:
message = self._message_process_queue.get(False) # type: Message
if self.buffer:
self.buffer.put(message)
else:
self._publish(message)
except Empty:
message_q_empty = True
# Then check the delay buffer
if self.buffer:
try:
message = self.buffer.pop() # type: Message
self._publish(message)
except (MessageDelayQueueEmpty, MessageDelayQueueNotReady):
# Nothing to do
buffer_empty = True
if message_q_empty and buffer_empty:
self._heartbeat()
time.sleep(0.5)
def _flush_queues(self):
"""
Process whatever is left in the queue
"""
if self.buffer:
while True:
try:
message = self.buffer.pop(True) # type: Message
self._publish(message)
except MessageDelayQueueEmpty:
break
while True:
try:
message = self._message_process_queue.get(False)
self._publish(message)
except Empty:
break
class EnqueueException(Exception):
pass
|
#! /usr/local/bin/python
"""
See LICENSE file for copyright and license details.
"""
from modules.constant import *
from sqlalchemy import Column, Integer, String, Numeric
from meta import Base
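# Each view is mapped with autoload=True so its columns are reflected from the database;
# views expose no primary key, so one column is declared explicitly to give the ORM a key to map on.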
class V_FINANCE(Base):
""" V_FINANCE """
__tablename__ = View.FINANCE
__table_args__ = {'autoload':True}
finance_id = Column('finance_id', Integer, primary_key=True)
class V_COMMODITY(Base):
""" V_COMMODITY """
__tablename__ = View.COMMODITY
__table_args__ = {'autoload':True}
commodity_id = Column('commodity_id', Integer, primary_key=True)
class V_MARKET(Base):
""" V_MARKET """
__tablename__ = View.MARKET
__table_args__ = {'autoload':True}
market_id = Column('market_id', Integer, primary_key=True)
class V_ACCOUNT(Base):
""" V_ACCOUNT """
__tablename__ = View.ACCOUNT
__table_args__ = {'autoload':True}
account_id = Column('account_id', Integer, primary_key=True)
class V_CURRENCY(Base):
""" V_CURRENCY """
__tablename__ = View.CURRENCY
__table_args__ = {'autoload':True}
currency_id = Column('currency_id', Integer, primary_key=True)
class V_CURRENCY_EXCHANGE(Base):
""" V_CURRENCY_EXCHANGE """
__tablename__ = View.CURRENCY_EXCHANGE
__table_args__ = {'autoload':True}
currency_exchange_id = Column('currency_exchange_id', Integer, primary_key=True)
class V_TRADE(Base):
""" V_TRADE """
__tablename__ = View.TRADE
__table_args__ = {'autoload':True}
trade_id = Column('trade_id', Integer, primary_key=True)
class V_RATE(Base):
""" V_RATE """
__tablename__ = View.RATE
__table_args__ = {'autoload':True}
rate_id = Column('rate_id', Integer, primary_key=True)
class V_DRAWDOWN(Base):
""" V_DRAWDOWN """
__tablename__ = View.DRAWDOWN
__table_args__ = {'autoload':True}
drawdown_id = Column('drawdown_id', Integer, primary_key=True)
class V_PARAMETER(Base):
""" V_PARAMETER """
__tablename__ = View.PARAMETER
__table_args__ = {'autoload':True}
parameter_id = Column('parameter_id', Integer, primary_key=True)
class V_REP_CHECK_TOTAL(Base):
""" V_REP_CHECK_TOTAL """
__tablename__ = View.REP_CHECK_TOTAL
__table_args__ = {'autoload':True}
    account_name = Column('account_name', String(6), primary_key=True)
class V_POOL(Base):
""" V_POOL """
__tablename__ = View.POOL
__table_args__ = {'autoload':True}
pool_id = Column('pool_id', Integer, primary_key=True)
class V_ACCOUNT_NAME(Base):
""" V_ACCOUNT_NAME """
__tablename__ = View.ACCOUNT_NAME
__table_args__ = {'autoload':True}
pool_id = Column('account_id', Integer, primary_key=True)
class V_EXPECTANCY(Base):
""" V_EXPECTANCY """
__tablename__ = View.EXPECTANCY
__table_args__ = {'autoload':True}
expectancy = Column('expectancy', Numeric, primary_key=True)
class V_COMMODITY_INFO(Base):
""" V_COMMODITY_INFO """
__tablename__ = View.COMMODITY_INFO
__table_args__ = {'autoload':True}
commodity_id = Column('commodity_id', Numeric, primary_key=True)
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017 beyond-blockchain.org.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import os
import sys
sys.path.extend(["../../", os.path.abspath(os.path.dirname(__file__))])
from bbc1.core.message_key_types import to_2byte, PayloadType, KeyType, InfraMessageCategory
import bbclib
from bbc1.core import query_management, message_key_types, logger
ticker = query_management.get_ticker()
def direct_send_to_user(sock, msg, name=None):
if name is None:
sock.sendall(message_key_types.make_message(PayloadType.Type_msgpack, msg))
else:
sock.sendall(message_key_types.make_message(PayloadType.Type_encrypted_msgpack, msg, key_name=name))
class UserMessageRouting:
"""Handle message for clients"""
REFRESH_FORWARDING_LIST_INTERVAL = 300
RESOLVE_TIMEOUT = 5
MAX_CROSS_REF_STOCK = 10
RESOLVE_USER_LOCATION = to_2byte(0)
RESPONSE_USER_LOCATION = to_2byte(1)
RESPONSE_NO_SUCH_USER = to_2byte(2)
JOIN_MULTICAST_RECEIVER = to_2byte(3)
LEAVE_MULTICAST_RECEIVER = to_2byte(4)
CROSS_REF_ASSIGNMENT = to_2byte(5)
def __init__(self, networking, domain_id, loglevel="all", logname=None):
self.networking = networking
self.stats = networking.core.stats
self.domain_id = domain_id
self.logger = logger.get_logger(key="user_message_routing", level=loglevel, logname=logname)
self.aes_name_list = dict()
self.cross_ref_list = list()
self.registered_users = dict()
self.forwarding_entries = dict()
self.on_going_timers = set()
def stop_all_timers(self):
"""Cancel all running timers"""
for user_id in self.forwarding_entries.keys():
if self.forwarding_entries[user_id]['refresh'] is not None:
self.forwarding_entries[user_id]['refresh'].deactivate()
for q in self.on_going_timers:
ticker.get_entry(q).deactivate()
def set_aes_name(self, socket, name):
"""Set name for specifying AES key for message encryption
Args:
socket (Socket): socket for the client
name (bytes): name of the client (4-byte random value generated in message_key_types.get_ECDH_parameters)
"""
self.aes_name_list[socket] = name
def register_user(self, user_id, socket, on_multiple_nodes=False):
"""Register user to forward message
Args:
user_id (bytes): user_id of the client
socket (Socket): socket for the client
on_multiple_nodes (bool): If True, the user_id is also registered in other nodes, meaning multicasting.
"""
self.registered_users.setdefault(user_id, set())
self.registered_users[user_id].add(socket)
if on_multiple_nodes:
self.send_multicast_join(user_id)
def unregister_user(self, user_id, socket):
"""Unregister user from the list and delete AES key if exists
Args:
user_id (bytes): user_id of the client
socket (Socket): socket for the client
"""
if user_id not in self.registered_users:
return
self.registered_users[user_id].remove(socket)
if len(self.registered_users[user_id]) == 0:
self.registered_users.pop(user_id, None)
if socket in self.aes_name_list:
message_key_types.unset_cipher(self.aes_name_list[socket])
del self.aes_name_list[socket]
self.send_multicast_leave(user_id=user_id)
def _add_user_for_forwarding(self, user_id, node_id, permanent=False):
"""Register user to forwarding list
Args:
user_id (bytes): target user_id
node_id (bytes): node_id which the client with the user_id connects to
            permanent (bool): If True, the entry won't expire
"""
self.forwarding_entries.setdefault(user_id, dict())
if not permanent:
if 'refresh' not in self.forwarding_entries[user_id]:
query_entry = query_management.QueryEntry(expire_after=UserMessageRouting.REFRESH_FORWARDING_LIST_INTERVAL,
callback_expire=self._remove_user_from_forwarding,
data={
KeyType.user_id: user_id,
}, retry_count=0)
self.forwarding_entries[user_id]['refresh'] = query_entry
else:
self.forwarding_entries[user_id]['refresh'].update(fire_after=UserMessageRouting.REFRESH_FORWARDING_LIST_INTERVAL)
self.forwarding_entries[user_id].setdefault('nodes', set())
self.forwarding_entries[user_id]['nodes'].add(node_id)
self.stats.update_stats("user_message", "registered_users_in_forwarding_list", len(self.forwarding_entries))
def _remove_user_from_forwarding(self, query_entry=None, user_id=None, node_id=None):
"""Unregister user to forwarding list"""
if query_entry is not None:
user_id = query_entry.data[KeyType.user_id]
self.forwarding_entries.pop(user_id, None)
return
if user_id not in self.forwarding_entries:
return
self.forwarding_entries[user_id]['nodes'].remove(node_id)
if len(self.forwarding_entries[user_id]['nodes']) == 0:
if 'refresh' in self.forwarding_entries[user_id]:
self.forwarding_entries[user_id]['refresh'].deactivate()
self.forwarding_entries.pop(user_id, None)
self.stats.update_stats("user_message", "registered_users_in_forwarding_list", len(self.forwarding_entries))
def send_message_to_user(self, msg, direct_only=False):
"""Forward message to connecting user
Args:
msg (dict): message to send
direct_only (bool): If True, _forward_message_to_another_node is not called.
"""
if KeyType.destination_user_id not in msg:
return True
msg[KeyType.infra_msg_type] = InfraMessageCategory.CATEGORY_USER
if msg.get(KeyType.is_anycast, False):
return self._send_anycast_message(msg)
socks = self.registered_users.get(msg[KeyType.destination_user_id], None)
if socks is None:
if direct_only:
return False
self._forward_message_to_another_node(msg)
return True
count = len(socks)
for s in socks:
if not self._send(s, msg):
count -= 1
return count > 0
def _send(self, sock, msg):
"""Raw function to send a message"""
try:
if sock in self.aes_name_list:
direct_send_to_user(sock, msg, name=self.aes_name_list[sock])
else:
direct_send_to_user(sock, msg)
self.stats.update_stats_increment("user_message", "sent_msg_to_user", 1)
except:
return False
return True
def _send_anycast_message(self, msg):
"""Send message as anycast"""
dst_user_id = msg[KeyType.destination_user_id]
if dst_user_id not in self.forwarding_entries:
return False
ttl = msg.get(KeyType.anycast_ttl, 0)
if ttl == 0:
return False
randmax = len(self.forwarding_entries[dst_user_id]['nodes'])
if dst_user_id in self.registered_users:
randmax += 1
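        # randmax spans the known forwarding nodes plus, when the destination user is also
        # connected locally, one extra slot that stands for local delivery.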
while ttl > 0:
idx = random.randrange(randmax)
msg[KeyType.anycast_ttl] = ttl - 1
ttl -= 1
if idx == randmax - 1:
                if dst_user_id in self.registered_users:
                    sock = random.choice(tuple(self.registered_users[dst_user_id]))
if sock is not None and self._send(sock, msg):
return True
else:
try:
msg[KeyType.destination_node_id] = random.choice(tuple(self.forwarding_entries[dst_user_id]['nodes']))
self.networking.send_message_in_network(nodeinfo=None, payload_type=PayloadType.Type_any,
domain_id=self.domain_id, msg=msg)
except:
import traceback
traceback.print_exc()
continue
return True
return False
def _forward_message_to_another_node(self, msg):
"""Try to forward message to another node"""
dst_user_id = msg[KeyType.destination_user_id]
if dst_user_id in self.forwarding_entries:
for dst_node_id in self.forwarding_entries[dst_user_id]['nodes']:
msg[KeyType.destination_node_id] = dst_node_id
try:
self.networking.send_message_in_network(nodeinfo=None, payload_type=PayloadType.Type_any,
domain_id=self.domain_id, msg=msg)
except:
import traceback
traceback.print_exc()
pass
return
src_user_id = msg[KeyType.source_user_id]
self._resolve_accommodating_core_node(dst_user_id, src_user_id, msg)
def _resolve_accommodating_core_node(self, dst_user_id, src_user_id, orig_msg=None):
"""Resolve which node the user connects to
Find the node that accommodates the user_id first, and then, send the message to the node.
Args:
dst_user_id (bytes): destination user_id
src_user_id (bytes): source user_id
orig_msg (dict): message to send
"""
if orig_msg is not None:
query_entry = query_management.QueryEntry(expire_after=UserMessageRouting.RESOLVE_TIMEOUT,
callback_expire=self._resolve_failure,
callback=self._resolve_success,
data={
KeyType.message: orig_msg,
},
retry_count=0)
self.on_going_timers.add(query_entry.nonce)
msg = {
KeyType.infra_msg_type: InfraMessageCategory.CATEGORY_USER,
KeyType.domain_id: self.domain_id,
KeyType.infra_command: UserMessageRouting.RESOLVE_USER_LOCATION,
KeyType.destination_user_id: dst_user_id,
}
if orig_msg is not None:
msg[KeyType.nonce] = query_entry.nonce
if src_user_id is not None:
msg[KeyType.source_user_id] = src_user_id
self.networking.broadcast_message_in_network(domain_id=self.domain_id, msg=msg)
def _resolve_success(self, query_entry):
"""Callback for successful of resolving the location"""
self.on_going_timers.remove(query_entry.nonce)
msg = query_entry.data[KeyType.message]
self._forward_message_to_another_node(msg=msg)
def _resolve_failure(self, query_entry):
"""Callback for failure of resolving the location"""
self.on_going_timers.remove(query_entry.nonce)
msg = query_entry.data[KeyType.message]
msg[KeyType.destination_user_id] = msg[KeyType.source_user_id]
msg[KeyType.result] = False
msg[KeyType.reason] = "Cannot find such user"
self.send_message_to_user(msg)
def send_multicast_join(self, user_id, permanent=False):
"""Broadcast JOIN_MULTICAST_RECEIVER"""
msg = {
KeyType.infra_msg_type: InfraMessageCategory.CATEGORY_USER,
KeyType.domain_id: self.domain_id,
KeyType.infra_command: UserMessageRouting.JOIN_MULTICAST_RECEIVER,
KeyType.user_id: user_id,
KeyType.static_entry: permanent,
}
self.stats.update_stats_increment("multicast", "join", 1)
self.networking.broadcast_message_in_network(domain_id=self.domain_id, msg=msg)
def send_multicast_leave(self, user_id):
"""Broadcast LEAVE_MULTICAST_RECEIVER"""
msg = {
KeyType.domain_id: self.domain_id,
KeyType.infra_msg_type: InfraMessageCategory.CATEGORY_USER,
KeyType.infra_command: UserMessageRouting.LEAVE_MULTICAST_RECEIVER,
KeyType.user_id: user_id,
}
self.stats.update_stats_increment("multicast", "leave", 1)
self.networking.broadcast_message_in_network(domain_id=self.domain_id, msg=msg)
def _distribute_cross_refs_to_clients(self):
"""Distribute cross ref assined by the domain0_manager to client"""
if len(self.registered_users) == 0:
return
try:
for i in range(len(self.cross_ref_list)):
msg = {
KeyType.domain_id: self.domain_id,
KeyType.command: bbclib.MsgType.NOTIFY_CROSS_REF,
KeyType.destination_user_id: random.choice(tuple(self.registered_users.keys())),
KeyType.cross_ref: self.cross_ref_list.pop(0),
}
self.send_message_to_user(msg)
except:
import traceback
traceback.print_exc()
return
def process_message(self, msg):
"""Process received message
Args:
msg (dict): received message
"""
if KeyType.infra_command in msg:
if msg[KeyType.infra_command] == UserMessageRouting.RESOLVE_USER_LOCATION:
self.stats.update_stats_increment("user_message", "RESOLVE_USER_LOCATION", 1)
user_id = msg[KeyType.destination_user_id]
if user_id not in self.registered_users:
return
self._add_user_for_forwarding(msg[KeyType.source_user_id], msg[KeyType.source_node_id])
msg[KeyType.destination_node_id] = msg[KeyType.source_node_id]
if KeyType.source_user_id in msg:
msg[KeyType.destination_user_id] = msg[KeyType.source_user_id]
msg[KeyType.source_user_id] = user_id
msg[KeyType.infra_command] = UserMessageRouting.RESPONSE_USER_LOCATION
self.networking.send_message_in_network(nodeinfo=None, payload_type=PayloadType.Type_any,
domain_id=self.domain_id, msg=msg)
elif msg[KeyType.infra_command] == UserMessageRouting.RESPONSE_USER_LOCATION:
self.stats.update_stats_increment("user_message", "RESPONSE_USER_LOCATION", 1)
self._add_user_for_forwarding(msg[KeyType.source_user_id], msg[KeyType.source_node_id])
if KeyType.nonce in msg:
query_entry = ticker.get_entry(msg[KeyType.nonce])
if query_entry is not None and query_entry.active:
query_entry.callback()
elif msg[KeyType.infra_command] == UserMessageRouting.RESPONSE_NO_SUCH_USER:
self.stats.update_stats_increment("user_message", "RESPONSE_NO_SUCH_USER", 1)
self._remove_user_from_forwarding(user_id=msg[KeyType.user_id], node_id=msg[KeyType.source_node_id])
elif msg[KeyType.infra_command] == UserMessageRouting.JOIN_MULTICAST_RECEIVER:
self.stats.update_stats_increment("user_message", "JOIN_MULTICAST_RECEIVER", 1)
self._add_user_for_forwarding(msg[KeyType.user_id], msg[KeyType.source_node_id],
permanent=msg.get(KeyType.static_entry, False))
elif msg[KeyType.infra_command] == UserMessageRouting.LEAVE_MULTICAST_RECEIVER:
self.stats.update_stats_increment("user_message", "LEAVE_MULTICAST_RECEIVER", 1)
if msg[KeyType.user_id] in self.forwarding_entries:
self._remove_user_from_forwarding(user_id=msg[KeyType.user_id],
node_id=msg[KeyType.source_node_id])
elif msg[KeyType.infra_command] == UserMessageRouting.CROSS_REF_ASSIGNMENT:
self.stats.update_stats_increment("user_message", "CROSS_REF_ASSIGNMENT", 1)
if KeyType.cross_ref in msg:
self.cross_ref_list.append(msg[KeyType.cross_ref])
if len(self.cross_ref_list) > UserMessageRouting.MAX_CROSS_REF_STOCK:
self._distribute_cross_refs_to_clients()
return
src_user_id = msg[KeyType.source_user_id]
if src_user_id in self.forwarding_entries:
self.forwarding_entries[src_user_id]['refresh'].update(
fire_after=UserMessageRouting.REFRESH_FORWARDING_LIST_INTERVAL)
dst_user_id = msg[KeyType.destination_user_id]
if dst_user_id not in self.registered_users:
if msg.get(KeyType.is_anycast, False):
self._send_anycast_message(msg)
return
retmsg = {
KeyType.domain_id: self.domain_id,
KeyType.infra_msg_type: InfraMessageCategory.CATEGORY_USER,
KeyType.destination_node_id: msg[KeyType.source_node_id],
KeyType.infra_command: UserMessageRouting.RESPONSE_NO_SUCH_USER,
KeyType.user_id: dst_user_id,
}
self.stats.update_stats_increment("user_message", "fail_to_find_user", 1)
self.networking.send_message_in_network(nodeinfo=None, payload_type=PayloadType.Type_any,
domain_id=self.domain_id, msg=retmsg)
return
if KeyType.is_anycast in msg:
del msg[KeyType.is_anycast]
self.stats.update_stats_increment("user_message", "send_to_user", 1)
self.send_message_to_user(msg)
class UserMessageRoutingDummy(UserMessageRouting):
"""Dummy class for bbc_core.py"""
def stop_all_timers(self):
pass
def register_user(self, user_id, socket, on_multiple_nodes=False):
pass
def unregister_user(self, user_id, socket=None):
pass
def _add_user_for_forwarding(self, user_id, node_id, permanent=False):
pass
def _remove_user_from_forwarding(self, query_entry=None, user_id=None, node_id=None):
pass
def send_message_to_user(self, msg, direct_only=False):
pass
def _forward_message_to_another_node(self, msg):
pass
def _resolve_accommodating_core_node(self, dst_user_id, src_user_id, orig_msg=None):
pass
def _resolve_success(self, query_entry):
pass
def _resolve_failure(self, query_entry):
pass
def send_multicast_join(self, user_id, permanent=False):
pass
def process_message(self, msg):
pass
|
import os
import pytest
from parso.utils import PythonVersionInfo
from jedi.inference.gradual import typeshed, stub_value
from jedi.inference.value import TreeInstance, BoundMethod, FunctionValue, \
MethodValue, ClassValue
TYPESHED_PYTHON3 = os.path.join(typeshed.TYPESHED_PATH, 'stdlib', '3')
def test_get_typeshed_directories():
def get_dirs(version_info):
return {
d.replace(typeshed.TYPESHED_PATH, '').lstrip(os.path.sep)
for d in typeshed._get_typeshed_directories(version_info)
}
def transform(set_):
return {x.replace('/', os.path.sep) for x in set_}
dirs = get_dirs(PythonVersionInfo(2, 7))
assert dirs == transform({'stdlib/2and3', 'stdlib/2', 'third_party/2and3', 'third_party/2'})
dirs = get_dirs(PythonVersionInfo(3, 4))
assert dirs == transform({'stdlib/2and3', 'stdlib/3', 'third_party/2and3', 'third_party/3'})
dirs = get_dirs(PythonVersionInfo(3, 5))
assert dirs == transform({'stdlib/2and3', 'stdlib/3', 'stdlib/3.5',
'third_party/2and3', 'third_party/3', 'third_party/3.5'})
dirs = get_dirs(PythonVersionInfo(3, 6))
assert dirs == transform({'stdlib/2and3', 'stdlib/3', 'stdlib/3.5',
'stdlib/3.6', 'third_party/2and3',
'third_party/3', 'third_party/3.5', 'third_party/3.6'})
def test_get_stub_files():
def get_map(version_info):
return typeshed._create_stub_map(version_info)
map_ = typeshed._create_stub_map(TYPESHED_PYTHON3)
assert map_['functools'] == os.path.join(TYPESHED_PYTHON3, 'functools.pyi')
def test_function(Script, environment):
code = 'import threading; threading.current_thread'
def_, = Script(code).goto_definitions()
value = def_._name._value
assert isinstance(value, FunctionValue), value
def_, = Script(code + '()').goto_definitions()
value = def_._name._value
assert isinstance(value, TreeInstance)
def_, = Script('import threading; threading.Thread').goto_definitions()
assert isinstance(def_._name._value, ClassValue), def_
def test_keywords_variable(Script):
code = 'import keyword; keyword.kwlist'
for seq in Script(code).goto_definitions():
assert seq.name == 'Sequence'
# This points towards the typeshed implementation
stub_seq, = seq.goto_assignments(only_stubs=True)
assert typeshed.TYPESHED_PATH in stub_seq.module_path
def test_class(Script):
def_, = Script('import threading; threading.Thread').goto_definitions()
value = def_._name._value
assert isinstance(value, ClassValue), value
def test_instance(Script):
def_, = Script('import threading; threading.Thread()').goto_definitions()
value = def_._name._value
assert isinstance(value, TreeInstance)
def test_class_function(Script):
def_, = Script('import threading; threading.Thread.getName').goto_definitions()
value = def_._name._value
assert isinstance(value, MethodValue), value
def test_method(Script):
code = 'import threading; threading.Thread().getName'
def_, = Script(code).goto_definitions()
value = def_._name._value
assert isinstance(value, BoundMethod), value
assert isinstance(value._wrapped_value, MethodValue), value
def_, = Script(code + '()').goto_definitions()
value = def_._name._value
assert isinstance(value, TreeInstance)
assert value.class_value.py__name__() == 'str'
def test_sys_exc_info(Script):
code = 'import sys; sys.exc_info()'
none, def_ = Script(code + '[1]').goto_definitions()
# It's an optional.
assert def_.name == 'BaseException'
assert def_.type == 'instance'
assert none.name == 'NoneType'
none, def_ = Script(code + '[0]').goto_definitions()
assert def_.name == 'BaseException'
assert def_.type == 'class'
def test_sys_getwindowsversion(Script, environment):
# This should only exist on Windows, but type inference should happen
# everywhere.
definitions = Script('import sys; sys.getwindowsversion().major').goto_definitions()
if environment.version_info.major == 2:
assert not definitions
else:
def_, = definitions
assert def_.name == 'int'
def test_sys_hexversion(Script):
script = Script('import sys; sys.hexversion')
def_, = script.completions()
assert isinstance(def_._name, stub_value._StubName), def_._name
assert typeshed.TYPESHED_PATH in def_.module_path
def_, = script.goto_definitions()
assert def_.name == 'int'
def test_math(Script):
def_, = Script('import math; math.acos()').goto_definitions()
assert def_.name == 'float'
value = def_._name._value
assert value
def test_type_var(Script):
def_, = Script('import typing; T = typing.TypeVar("T1")').goto_definitions()
assert def_.name == 'TypeVar'
assert def_.description == 'TypeVar = object()'
@pytest.mark.parametrize(
'code, full_name', (
('import math', 'math'),
('from math import cos', 'math.cos')
)
)
def test_math_is_stub(Script, code, full_name):
s = Script(code)
cos, = s.goto_definitions()
wanted = os.path.join('typeshed', 'stdlib', '2and3', 'math.pyi')
assert cos.module_path.endswith(wanted)
assert cos.is_stub() is True
assert cos.goto_assignments(only_stubs=True) == [cos]
assert cos.full_name == full_name
cos, = s.goto_assignments()
assert cos.module_path.endswith(wanted)
assert cos.goto_assignments(only_stubs=True) == [cos]
assert cos.is_stub() is True
assert cos.full_name == full_name
def test_goto_stubs(Script):
s = Script('import os; os')
os_module, = s.goto_definitions()
assert os_module.full_name == 'os'
assert os_module.is_stub() is False
stub, = os_module.goto_assignments(only_stubs=True)
assert stub.is_stub() is True
os_module, = s.goto_assignments()
def _assert_is_same(d1, d2):
assert d1.name == d2.name
assert d1.module_path == d2.module_path
assert d1.line == d2.line
assert d1.column == d2.column
@pytest.mark.parametrize('type_', ['goto', 'infer'])
@pytest.mark.parametrize(
'code', [
'import os; os.walk',
'from collections import Counter; Counter',
'from collections import Counter; Counter()',
'from collections import Counter; Counter.most_common',
])
def test_goto_stubs_on_itself(Script, code, type_):
"""
If goto_stubs is used on an identifier in e.g. the stdlib, we should goto
the stub of it.
"""
s = Script(code)
if type_ == 'infer':
def_, = s.goto_definitions()
else:
def_, = s.goto_assignments(follow_imports=True)
stub, = def_.goto_assignments(only_stubs=True)
script_on_source = Script(
path=def_.module_path,
line=def_.line,
column=def_.column
)
if type_ == 'infer':
definition, = script_on_source.goto_definitions()
else:
definition, = script_on_source.goto_assignments()
same_stub, = definition.goto_assignments(only_stubs=True)
_assert_is_same(same_stub, stub)
_assert_is_same(definition, def_)
assert same_stub.module_path != def_.module_path
# And the reverse.
script_on_stub = Script(
path=same_stub.module_path,
line=same_stub.line,
column=same_stub.column
)
if type_ == 'infer':
same_definition, = script_on_stub.goto_definitions()
same_definition2, = same_stub.infer()
else:
same_definition, = script_on_stub.goto_assignments()
same_definition2, = same_stub.goto_assignments()
_assert_is_same(same_definition, definition)
_assert_is_same(same_definition, same_definition2)
|
import re
import unidecode
from src import *
def clean_url(url_str):
"""
Cleans a given URL.
:param url_str: String formatted URL.
:return: Cleaned string formatted URL.
"""
url_str = url_str.lower()
url_str = url_str.strip()
return url_str
def clean_name(name_str):
"""
Cleans a given name (song or artist).
:param name_str: String formatted song.
:return: Cleaned string formatted song.
"""
name_str = name_str.lower()
name_str = name_str.strip()
name_str = unidecode.unidecode(name_str)
return name_str
def clean_lyrics(lyrics_str):
"""
Cleans a given string where song lyrics are.
:param lyrics_str: String formatted lyrics.
:return: Cleaned string formatted lyrics.
"""
lyrics_str = lyrics_str.lower()
lyrics_str = lyrics_str.strip()
lyrics_str = unidecode.unidecode(lyrics_str)
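    # Drop parenthesised or bracketed annotations such as "[Chorus]" or "(x2)".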
lyrics_str = re.sub('[(\[].*?[)\]]', '', lyrics_str)
for _ in range(0, STR_CLEAN_TIMES):
for to_be_replaced, to_replace in STR_CLEAN_DICT.items():
lyrics_str = lyrics_str.replace(to_be_replaced, to_replace)
lyrics_str = lyrics_str.strip()
return lyrics_str
|
import pytest
from aioresponses import aioresponses
from demo_project.api.dependencies import azure_scheme
from demo_project.core.config import settings
from demo_project.main import app
from pytest_cases import parametrize_with_cases
from tests.utils import build_openid_keys, keys_url, openid_config_url, openid_configuration
from fastapi_azure_auth import SingleTenantAzureAuthorizationCodeBearer
@pytest.mark.parametrize('version', [1, 2])
def token_version(version):
"""
This will make your test _run_ multiple times, with given parameter.
"""
return version
@pytest.fixture
@parametrize_with_cases('token_version', cases=token_version)
def single_tenant_app(token_version):
"""
Single tenant app fixture, which also inherits token_version. Every single tenant test is run twice,
either with v1 or v2 tokens
"""
    azure_scheme_overrides = SingleTenantAzureAuthorizationCodeBearer(
        app_client_id=settings.APP_CLIENT_ID,
        scopes={
            f'api://{settings.APP_CLIENT_ID}/user_impersonation': 'User impersonation',
        },
        tenant_id=settings.TENANT_ID,
        token_version=token_version,
    )
    app.dependency_overrides[azure_scheme] = azure_scheme_overrides
    yield azure_scheme
@pytest.fixture
@parametrize_with_cases('token_version', cases=token_version)
def mock_openid_v1_v2(token_version):
with aioresponses() as mock:
mock.get(
openid_config_url(version=token_version),
payload=openid_configuration(version=token_version),
)
yield mock
@pytest.fixture
@parametrize_with_cases('token_version', cases=token_version)
def mock_openid_and_keys_v1_v2(mock_openid_v1_v2, token_version):
mock_openid_v1_v2.get(
keys_url(version=token_version),
payload=build_openid_keys(),
)
yield mock_openid_v1_v2
@pytest.fixture
@parametrize_with_cases('token_version', cases=token_version)
def mock_openid_and_empty_keys_v1_v2(mock_openid_v1_v2, token_version):
mock_openid_v1_v2.get(
keys_url(version=token_version),
payload=build_openid_keys(empty_keys=True),
)
yield mock_openid_v1_v2
@pytest.fixture
@parametrize_with_cases('token_version', cases=token_version)
def mock_openid_ok_then_empty_v1_v2(mock_openid_v1_v2, token_version):
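    # aioresponses consumes matching responses in registration order: the first keys fetch
    # succeeds, the second returns an empty key set, and the extra config responses cover the refresh path.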
mock_openid_v1_v2.get(
keys_url(version=token_version),
payload=build_openid_keys(),
)
mock_openid_v1_v2.get(
keys_url(version=token_version),
payload=build_openid_keys(empty_keys=True),
)
mock_openid_v1_v2.get(
openid_config_url(version=token_version),
payload=openid_configuration(version=token_version),
)
mock_openid_v1_v2.get(
openid_config_url(version=token_version),
payload=openid_configuration(version=token_version),
)
yield mock_openid_v1_v2
@pytest.fixture
@parametrize_with_cases('token_version', cases=token_version)
def mock_openid_and_no_valid_keys_v1_v2(mock_openid_v1_v2, token_version):
mock_openid_v1_v2.get(
keys_url(version=token_version),
payload=build_openid_keys(no_valid_keys=True),
)
yield mock_openid_v1_v2
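# Example consumer (sketch, not part of the original conftest): a test that uses the
# fixtures above runs once per token version thanks to parametrize_with_cases. The
# '/api/v1/hello' route, the build_access_token helper and the httpx AsyncClient import
# are assumptions for illustration, so the example is left commented out.
#
# @pytest.mark.anyio
# async def test_normal_user(single_tenant_app, mock_openid_and_keys_v1_v2):
#     async with AsyncClient(app=app, base_url='http://test',
#                            headers={'Authorization': 'Bearer ' + build_access_token()}) as ac:
#         response = await ac.get('/api/v1/hello')
#     assert response.status_code == 200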
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Credits for this code goes to https://github.com/dcos/dcos-cli. Please
# note, it might have been slightly edited.
import requests
from requests.auth import AuthBase
from six.moves.urllib.parse import urlparse
from mesos import util
from mesos.errors import (MesosAuthenticationException,
MesosAuthorizationException, MesosBadRequest,
MesosConnectionError, MesosException,
MesosHTTPException, MesosUnprocessableException)
logger = util.get_logger(__name__)
DEFAULT_TIMEOUT = 5
def _default_is_success(status_code):
"""Returns true if the success status is between [200, 300).
:param response_status: the http response status
:type response_status: int
:returns: True for success status; False otherwise
:rtype: bool
"""
return 200 <= status_code < 300
@util.duration
def _request(method,
url,
is_success=_default_is_success,
timeout=DEFAULT_TIMEOUT,
auth=None,
verify=None,
toml_config=None,
**kwargs):
"""Sends an HTTP request.
:param method: method for the new Request object
:type method: str
:param url: URL for the new Request object
:type url: str
:param is_success: Defines successful status codes for the request
:type is_success: Function from int to bool
:param timeout: request timeout
:type timeout: int
:param auth: authentication
:type auth: AuthBase
:param verify: whether to verify SSL certs or path to cert(s)
:type verify: bool | str
:param toml_config: cluster config to use
:type toml_config: Toml
:param kwargs: Additional arguments to requests.request
(see http://docs.python-requests.org/en/latest/api/#requests.request)
:type kwargs: dict
:rtype: Response
"""
if 'headers' not in kwargs:
kwargs['headers'] = {'Accept': 'application/json'}
    # NOTE: SSL certificate verification is disabled unconditionally here, overriding
    # the `verify` argument; urllib3's 'Unverified HTTPS request' warnings are silenced.
    verify = False
    silence_requests_warnings()
logger.info(
'Sending HTTP [%r] to [%r]: %r',
method,
url,
kwargs.get('headers'))
try:
response = requests.request(
method=method,
url=url,
timeout=timeout,
auth=auth,
verify=verify,
**kwargs)
except requests.exceptions.SSLError as e:
logger.exception("HTTP SSL Error")
msg = ("An SSL error occurred.")
if description is not None:
msg += "\n<value>: {}".format(description)
raise MesosException(msg)
except requests.exceptions.ConnectionError as e:
logger.exception("HTTP Connection Error")
raise MesosConnectionError(url)
except requests.exceptions.Timeout as e:
logger.exception("HTTP Timeout")
raise MesosException('Request to URL [{0}] timed out.'.format(url))
except requests.exceptions.RequestException as e:
logger.exception("HTTP Exception")
raise MesosException('HTTP Exception: {}'.format(e))
logger.info('Received HTTP response [%r]: %r',
response.status_code,
response.headers)
return response
def request(method,
url,
is_success=_default_is_success,
timeout=DEFAULT_TIMEOUT,
verify=None,
toml_config=None,
**kwargs):
"""Sends an HTTP request. If the server responds with a 401, ask the
user for their credentials, and try request again (up to 3 times).
:param method: method for the new Request object
:type method: str
:param url: URL for the new Request object
:type url: str
:param is_success: Defines successful status codes for the request
:type is_success: Function from int to bool
:param timeout: request timeout
:type timeout: int
:param verify: whether to verify SSL certs or path to cert(s)
:type verify: bool | str
:param toml_config: cluster config to use
:type toml_config: Toml
:param kwargs: Additional arguments to requests.request
(see http://docs.python-requests.org/en/latest/api/#requests.request)
:type kwargs: dict
:rtype: Response
"""
response = _request(method, url, is_success, timeout,
verify=verify, toml_config=toml_config,
**kwargs)
if is_success(response.status_code):
return response
elif response.status_code == 401:
raise MesosAuthenticationException(response)
elif response.status_code == 422:
raise MesosUnprocessableException(response)
elif response.status_code == 403:
raise MesosAuthorizationException(response)
elif response.status_code == 400:
raise MesosBadRequest(response)
else:
raise MesosHTTPException(response)
def head(url, **kwargs):
"""Sends a HEAD request.
:param url: URL for the new Request object
:type url: str
:param kwargs: Additional arguments to requests.request
(see py:func:`request`)
:type kwargs: dict
:rtype: Response
"""
return request('head', url, **kwargs)
def get(url, **kwargs):
"""Sends a GET request.
:param url: URL for the new Request object
:type url: str
:param kwargs: Additional arguments to requests.request
(see py:func:`request`)
:type kwargs: dict
:rtype: Response
"""
return request('get', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
"""Sends a POST request.
:param url: URL for the new Request object
:type url: str
:param data: Request body
:type data: dict, bytes, or file-like object
:param json: JSON request body
    :type json: dict
:param kwargs: Additional arguments to requests.request
(see py:func:`request`)
:type kwargs: dict
:rtype: Response
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
"""Sends a PUT request.
:param url: URL for the new Request object
:type url: str
:param data: Request body
:type data: dict, bytes, or file-like object
:param kwargs: Additional arguments to requests.request
(see py:func:`request`)
:type kwargs: dict
:rtype: Response
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""Sends a PATCH request.
:param url: URL for the new Request object
:type url: str
:param data: Request body
:type data: dict, bytes, or file-like object
:param kwargs: Additional arguments to requests.request
(see py:func:`request`)
:type kwargs: dict
:rtype: Response
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
"""Sends a DELETE request.
:param url: URL for the new Request object
:type url: str
:param kwargs: Additional arguments to requests.request
(see py:func:`request`)
:type kwargs: dict
:rtype: Response
"""
return request('delete', url, **kwargs)
def silence_requests_warnings():
"""Silence warnings from requests.packages.urllib3."""
requests.packages.urllib3.disable_warnings()
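# Usage sketch (not part of the original module): fetching a Mesos master's state
# endpoint with the helpers above. The master URL is a placeholder assumption.
if __name__ == '__main__':
    state = get('http://localhost:5050/master/state', timeout=10)
    print(state.json().get('version'))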
|
# Read entries from JSON file; create new file with entries
# in a new (randomized) order
import json
import random
# Read in the file (assumes it is in current working directory)
with open('questions.json') as f:
questionDict = json.load(f)
# There is only the 1 list as the 1 object's value
questionList = list(questionDict.values())[0]
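# Assumed shape of questions.json (illustration only; only the single top-level key
# holding a list is taken from this script, the per-question fields are assumptions):
# {
#     "questions": [
#         {"question": "2 + 2 = ?", "answer": "4"},
#         {"question": "Capital of France?", "answer": "Paris"}
#     ]
# }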
'''
# For debugging
for q in questionList:
print(q)
'''
# Shuffle the order of the questions in the list
random.shuffle(questionList)
'''
# For debugging
for q in questionList:
print(q)
'''
# Recreate the "outer-level" dictionary, then
# write it to file in current working directory
newQuestionDict = {'questions': questionList}
with open('questionsShuffled.json', 'w') as f:
json.dump(newQuestionDict, f, indent=4)
|
import torch
import torch.nn.functional as F
def reduce_mean(tensor, dim=None, keepdim=False, out=None):
"""
    Returns the mean of the input tensor over the given dimension(s) dim,
    or over all elements when dim is None. Supports reducing multiple dimensions at once.
:param tensor: the input tensor
:type tensor: torch.Tensor
:param dim: the dimension to reduce
:type dim: int or list[int]
:param keepdim: whether the output tensor has dim retained or not
:type keepdim: bool
:param out: the output tensor
:type out: torch.Tensor
:return: mean result
:rtype: torch.Tensor
"""
# mean all dims
if dim is None:
return torch.mean(tensor)
# prepare dim
if isinstance(dim, int):
dim = [dim]
dim = sorted(dim)
# get mean dim by dim
for d in dim:
tensor = tensor.mean(dim=d, keepdim=True)
# squeeze reduced dimensions if not keeping dim
if not keepdim:
for cnt, d in enumerate(dim):
tensor.squeeze_(d - cnt)
if out is not None:
out.copy_(tensor)
return tensor
def reduce_sum(tensor, dim=None, keepdim=False, out=None):
"""
    Returns the sum of the input tensor over the given dimension(s) dim,
    or over all elements when dim is None. Supports reducing multiple dimensions at once.
:param tensor: the input tensor
:type tensor: torch.Tensor
:param dim: the dimension to reduce
:type dim: int or list[int]
:param keepdim: whether the output tensor has dim retained or not
:type keepdim: bool
:param out: the output tensor
:type out: torch.Tensor
:return: sum result
:rtype: torch.Tensor
"""
# summarize all dims
if dim is None:
return torch.sum(tensor)
# prepare dim
if isinstance(dim, int):
dim = [dim]
dim = sorted(dim)
# get summary dim by dim
for d in dim:
tensor = tensor.sum(dim=d, keepdim=True)
# squeeze reduced dimensions if not keeping dim
if not keepdim:
for cnt, d in enumerate(dim):
tensor.squeeze_(d - cnt)
if out is not None:
out.copy_(tensor)
return tensor
def tensor_equal(a, b, eps=1e-5):
"""
Compare two tensors
:param a: input tensor a
:type a: torch.Tensor
:param b: input tensor b
:type b: torch.Tensor
:param eps: epsilon
:type eps: float
:return: whether two tensors are equal
:rtype: bool
"""
if a.shape != b.shape:
return False
return 0 <= float(torch.max(torch.abs(a - b))) <= eps
def split_channel(tensor, split_type='simple'):
"""
Split channels of tensor
:param tensor: input tensor
:type tensor: torch.Tensor
:param split_type: type of splitting
:type split_type: str
:return: split tensor
:rtype: tuple(torch.Tensor, torch.Tensor)
"""
assert len(tensor.shape) == 4
assert split_type in ['simple', 'cross']
nc = tensor.shape[1]
if split_type == 'simple':
return tensor[:, :nc // 2, ...], tensor[:, nc // 2:, ...]
elif split_type == 'cross':
return tensor[:, 0::2, ...], tensor[:, 1::2, ...]
def cat_channel(*args):
"""
Concatenates channels of tensors
:return: concatenated tensor
:rtype: torch.Tensor
"""
return torch.cat(args, dim=1)
def cat_batch(*args):
"""
Concatenates batches of tensors
:return: concatenated tensor
:rtype: torch.Tensor
"""
return torch.cat(args, dim=0)
def count_pixels(tensor):
"""
Count number of pixels in given tensor
:param tensor: input tensor
:type tensor: torch.Tensor
:return: number of pixels
:rtype: int
"""
assert len(tensor.shape) == 4
return int(tensor.shape[2] * tensor.shape[3])
def onehot(y, num_classes):
"""
Generate one-hot vector
:param y: ground truth labels
:type y: torch.Tensor
    :param num_classes: number of classes
:type num_classes: int
:return: one-hot vector generated from labels
:rtype: torch.Tensor
"""
assert len(y.shape) in [1, 2], "Label y should be 1D or 2D vector"
y_onehot = torch.zeros(y.shape[0], num_classes).to(y.device, non_blocking=True)
if len(y.shape) == 1:
y_onehot = y_onehot.scatter_(1, y.unsqueeze(-1), 1)
else:
y_onehot = y_onehot.scatter_(1, y, 1)
return y_onehot
def de_onehot(y_onehot):
"""
Convert one-hot vector back to class label
:param y_onehot: one-hot label
:type y_onehot: torch.Tensor
:return: corresponding class
    :rtype: torch.Tensor
"""
assert len(y_onehot.shape) in [1, 2], \
"Label y_onehot should be 1D or 2D vector"
if len(y_onehot.shape) == 1:
return torch.argmax(y_onehot)
else:
return torch.argmax(y_onehot, dim=1)
def resize_feature_map(x, out_shape, interpolate_mode='nearest'):
"""
Resize feature map into desired shape
:param x: input feature map
:type x: torch.Tensor
:param out_shape: desired tensor shape
:type out_shape: tuple(int) or list[int]
:param interpolate_mode: mode for interpolation
:type interpolate_mode: str
:return: resized feature map
:rtype: torch.Tensor
"""
in_shape = list(x.shape)
if not isinstance(out_shape, list):
out_shape = list(out_shape)
if len(out_shape) == 3 and len(in_shape) == 4:
out_shape.insert(0, in_shape[0])
assert len(in_shape) == len(out_shape) and in_shape[0] == out_shape[0], \
'Cannot resize tensor from {} to {}'.format(tuple(in_shape), tuple(out_shape))
# shrink channels
if in_shape[1] > out_shape[1]:
x = x[:, :out_shape[1]]
# shrink spatial axes.
if len(in_shape) == 4 and (in_shape[2] > out_shape[2] or in_shape[3] > out_shape[3]):
assert in_shape[2] % out_shape[2] == 0 and in_shape[3] % out_shape[3] == 0
scale_factor = (in_shape[2] // out_shape[2],
in_shape[3] // out_shape[3])
x = F.avg_pool2d(x,
kernel_size=scale_factor,
stride=scale_factor,
ceil_mode=False,
padding=0,
count_include_pad=False)
# extend spatial axes
if in_shape[2] < out_shape[2]:
assert out_shape[2] % in_shape[2] == 0 and \
out_shape[2] / in_shape[2] == out_shape[3] / in_shape[3]
scale_factor = out_shape[2] // in_shape[2]
if interpolate_mode == 'bilinear':
x = F.interpolate(x, scale_factor=scale_factor, mode='bilinear', align_corners=True)
else:
x = F.interpolate(x, scale_factor=scale_factor, mode=interpolate_mode)
# extend channels
if in_shape[1] < out_shape[1]:
z = torch.zeros([x.shape[0], out_shape[1] - in_shape[1]] + out_shape[2:]).to(x.device)
x = torch.cat([x, z], 1)
return x
def flatten(tensor):
"""
    Flatten the input tensor to shape (nb, nf)
:param tensor: input Tensor
:type tensor: torch.Tensor
:return: flattened tensor
:rtype: torch.Tensor
"""
assert len(tensor.shape) >= 2
if len(tensor.shape) > 2:
flattened = tensor.view(tensor.shape[0], -1)
else:
flattened = tensor
return flattened
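# Quick sanity checks (sketch, not part of the original module) exercising the
# reduction, split/concat, one-hot and resize helpers defined above on CPU tensors.
if __name__ == '__main__':
    x = torch.randn(2, 3, 4, 4)
    assert reduce_mean(x, dim=[2, 3]).shape == (2, 3)
    assert reduce_sum(x, dim=[2, 3], keepdim=True).shape == (2, 3, 1, 1)
    a, b = split_channel(x, split_type='cross')
    assert cat_channel(a, b).shape == x.shape
    labels = torch.tensor([1, 0, 2])
    assert tensor_equal(de_onehot(onehot(labels, 3)), labels)
    assert resize_feature_map(x, [2, 3, 2, 2]).shape == (2, 3, 2, 2)
    assert flatten(x).shape == (2, 3 * 4 * 4)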
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration test for App Engine.
Primarily tests the deploy operation and upsert load balancer pipeline stage,
which are relatively complex and not well covered by unit tests.
Sample Usage:
Before running this test, verify that the App Engine application
in your GCP project has a default service. If it does not, deploy
any App Engine version to your application that will use the default service.
Assuming you have created $PASSPHRASE_FILE (which you should chmod 400)
and $CITEST_ROOT points to the root directory of the citest library.
The passphrase file can be omitted if you run ssh-agent and add
.ssh/compute_google_engine.
PYTHONPATH=$CITEST_ROOT \
python spinnaker/testing/citest/tests/appengine_smoke_test.py \
--gce_ssh_passphrase_file=$PASSPHRASE_FILE \
--gce_project=$PROJECT \
--gce_zone=$ZONE \
--gce_instance=$INSTANCE
or
PYTHONPATH=$CITEST_ROOT \
python spinnaker/testing/citest/tests/appengine_smoke_test.py \
--native_hostname=host-running-smoke-test
"""
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import citest.gcp_testing as gcp
import citest.json_contract as jc
import citest.json_predicate as jp
import citest.service_testing as st
ov_factory = jc.ObservationPredicateFactory()
import spinnaker_testing as sk
import spinnaker_testing.gate as gate
import spinnaker_testing.frigga as frigga
import citest.base
class AppengineSmokeTestScenario(sk.SpinnakerTestScenario):
"""Defines the scenario for the integration test.
We're going to:
Create a Spinnaker Application
Create a Spinnaker Server Group (implicitly creates a Load Balancer)
Create a Pipeline with the following stages
- Deploy
- Upsert Load Balancer
Delete Load Balancer (implicitly destroys the Server Groups
created within this test)
Delete Application
"""
@classmethod
def new_agent(cls, bindings):
return gate.new_agent(bindings)
@classmethod
def initArgumentParser(cls, parser, defaults=None):
"""Initialize command line argument parser."""
super(AppengineSmokeTestScenario, cls).initArgumentParser(
parser, defaults=defaults)
parser.add_argument(
'--test_gcs_bucket', default=None,
help='URL to use for testing appengine deployment from a bucket.'
' The test will write into this bucket'
' then deploy what it writes.')
parser.add_argument(
'--test_storage_account_name', default=None,
help='Storage account when testing GCS buckets.'
' If not specified, use the application default credentials.')
parser.add_argument('--git_repo_url', default=None,
help='URL of a GIT source code repository used by Spinnaker to deploy to App Engine.')
parser.add_argument('--branch', default='master',
help='Git branch to be used when deploying from source code repository.')
parser.add_argument('--app_directory_root', default=None,
help='Path from the root of source code repository to the application directory.')
def __init__(self, bindings, agent=None):
super(AppengineSmokeTestScenario, self).__init__(bindings, agent)
if not bindings['GIT_REPO_URL']:
raise ValueError('Must supply value for --git_repo_url')
if not bindings['APP_DIRECTORY_ROOT']:
raise ValueError('Must supply value for --app_directory_root')
self.TEST_APP = bindings['TEST_APP']
self.TEST_STACK = bindings['TEST_STACK']
self.__path = 'applications/%s/tasks' % self.TEST_APP
self.__gcp_project = bindings['APPENGINE_PRIMARY_MANAGED_PROJECT_ID']
self.__cluster_name = frigga.Naming.cluster(self.TEST_APP, self.TEST_STACK)
self.__server_group_name = frigga.Naming.server_group(self.TEST_APP, self.TEST_STACK)
self.__lb_name = self.__cluster_name
# Python is clearly hard-coded as the runtime here, but we're just asking App Engine to be a static file server.
self.__app_yaml = ('\n'.join(['runtime: python27',
'api_version: 1',
'threadsafe: true',
'service: {service}',
'handlers:',
' - url: /.*',
' static_dir: .']).format(service=self.__lb_name))
self.__app_directory_root = bindings['APP_DIRECTORY_ROOT']
self.__branch = bindings['BRANCH']
self.pipeline_id = None
test_bucket = bindings['TEST_GCS_BUCKET']
if test_bucket:
self.__prepare_bucket(test_bucket)
self.__test_repository_url = 'gs://' + test_bucket
else:
self.__test_repository_url = bindings['GIT_REPO_URL']
def __prepare_bucket(self, bucket):
root = self.bindings['APP_DIRECTORY_ROOT']
temp = tempfile.mkdtemp()
local_path = os.path.join(temp, root)
branch = self.bindings['BRANCH']
git_repo = self.bindings['GIT_REPO_URL']
gcs_path = 'gs://{bucket}/{root}'.format(
bucket=self.bindings['TEST_GCS_BUCKET'], root=root)
try:
command = 'git clone {repo} -b {branch} {dir}'.format(
repo=git_repo, branch=branch, dir=temp)
logging.info('Fetching %s', git_repo)
subprocess.Popen(command, stderr=sys.stderr, shell=True).wait()
command = 'gsutil -m rsync {local} {gcs}'.format(
local=local_path, gcs=gcs_path)
logging.info('Preparing %s', gcs_path)
subprocess.Popen(command, stderr=sys.stderr, shell=True).wait()
finally:
shutil.rmtree(local_path)
def create_app(self):
# Not testing create_app, since the operation is well tested elsewhere.
# Retryable to handle platform flakiness.
contract = jc.Contract()
return st.OperationContract(
self.agent.make_create_app_operation(
bindings=self.bindings,
application=self.TEST_APP,
account_name=self.bindings['SPINNAKER_APPENGINE_ACCOUNT']),
contract=contract)
def delete_app(self):
# Not testing delete_app, since the operation is well tested elsewhere.
# Retryable to handle platform flakiness.
contract = jc.Contract()
return st.OperationContract(
self.agent.make_delete_app_operation(
application=self.TEST_APP,
account_name=self.bindings['SPINNAKER_APPENGINE_ACCOUNT']),
contract=contract)
def create_server_group(self):
group_name = frigga.Naming.server_group(
app=self.TEST_APP,
stack=self.bindings['TEST_STACK'],
version='v000')
job_spec = {
'application': self.TEST_APP,
'stack': self.TEST_STACK,
'credentials': self.bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'repositoryUrl': self.__test_repository_url,
'applicationDirectoryRoot': self.__app_directory_root,
'configFiles': [self.__app_yaml],
'type': 'createServerGroup',
'cloudProvider': 'appengine',
'region': self.bindings['TEST_GCE_REGION']
}
storageAccountName = self.bindings.get('TEST_STORAGE_ACCOUNT_NAME')
if storageAccountName is not None:
job_spec['storageAccountName'] = storageAccountName
if not self.__test_repository_url.startswith('gs://'):
job_spec.update({
'gitCredentialType': 'NONE',
'branch': self.__branch
})
payload = self.agent.make_json_payload_from_kwargs(job=[job_spec],
description='Create Server Group in ' + group_name,
application=self.TEST_APP)
builder = gcp.GcpContractBuilder(self.appengine_observer)
(builder.new_clause_builder('Version Added', retryable_for_secs=30)
.inspect_resource('apps.services.versions',
group_name,
appsId=self.__gcp_project,
servicesId=self.__lb_name)
.EXPECT(ov_factory.value_list_path_contains(
'servingStatus', jp.STR_EQ('SERVING'))))
return st.OperationContract(
self.new_post_operation(
title='create_server_group', data=payload, path='tasks'),
contract=builder.build())
def make_deploy_stage(self):
cluster_spec = {
'account': self.bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'applicationDirectoryRoot': self.__app_directory_root,
'configFiles': [self.__app_yaml],
'application': self.TEST_APP,
'cloudProvider': 'appengine',
'provider': 'appengine',
'region': self.bindings['TEST_GCE_REGION'],
'repositoryUrl': self.__test_repository_url,
'stack': self.TEST_STACK
}
if not self.__test_repository_url.startswith('gs://'):
cluster_spec.update({
'gitCredentialType': 'NONE',
'branch': self.__branch
})
result = {
'clusters': [cluster_spec],
'name': 'Deploy',
'refId': '1',
'requisiteStageRefIds': [],
'type': 'deploy'
}
return result
def make_upsert_load_balancer_stage(self):
result = {
'cloudProvider': 'appengine',
'loadBalancers': [
{
'cloudProvider': 'appengine',
'credentials': self.bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'loadBalancerName': self.__lb_name,
'migrateTraffic': False,
'name': self.__lb_name,
'region': self.bindings['TEST_GCE_REGION'],
'splitDescription': {
'allocationDescriptions': [
{
'allocation': 0.1,
'cluster': self.__cluster_name,
'locatorType': 'targetCoordinate',
'target': 'current_asg_dynamic'
},
{
'allocation': 0.9,
'cluster': self.__cluster_name,
'locatorType': 'targetCoordinate',
'target': 'ancestor_asg_dynamic'
}
],
'shardBy': 'IP'
}
}
],
'name': 'Edit Load Balancer',
'refId': '2',
'requisiteStageRefIds': ['1'],
'type': 'upsertAppEngineLoadBalancers'
}
return result
def create_deploy_upsert_load_balancer_pipeline(self):
name = 'promoteServerGroupPipeline'
self.pipeline_id = name
deploy_stage = self.make_deploy_stage()
upsert_load_balancer_stage = self.make_upsert_load_balancer_stage()
pipeline_spec = dict(
name=name,
stages=[deploy_stage, upsert_load_balancer_stage],
triggers=[],
application=self.TEST_APP,
stageCounter=2,
parallel=True,
limitConcurrent=True,
appConfig={},
index=0
)
payload = self.agent.make_json_payload_from_kwargs(**pipeline_spec)
builder = st.HttpContractBuilder(self.agent)
(builder.new_clause_builder('Has Pipeline',
retryable_for_secs=5)
.get_url_path('applications/{0}/pipelineConfigs'.format(self.TEST_APP))
.contains_path_value(None, pipeline_spec))
return st.OperationContract(
self.new_post_operation(
title='create_deploy_upsert_load_balancer_pipeline', data=payload, path='pipelines',
status_class=st.SynchronousHttpOperationStatus),
contract=builder.build())
def run_deploy_upsert_load_balancer_pipeline(self):
url_path = 'pipelines/{0}/{1}'.format(self.TEST_APP, self.pipeline_id)
previous_group_name = frigga.Naming.server_group(
app=self.TEST_APP,
stack=self.TEST_STACK,
version='v000')
deployed_group_name = frigga.Naming.server_group(
app=self.TEST_APP,
stack=self.TEST_STACK,
version='v001')
payload = self.agent.make_json_payload_from_kwargs(
type='manual',
user='[anonymous]')
builder = gcp.GcpContractBuilder(self.appengine_observer)
(builder.new_clause_builder('Service Modified', retryable_for_secs=30)
.inspect_resource('apps.services',
self.__lb_name,
appsId=self.__gcp_project)
.EXPECT(
ov_factory.value_list_path_contains(
jp.build_path('split', 'allocations'),
jp.DICT_MATCHES({previous_group_name: jp.NUM_EQ(0.9),
deployed_group_name: jp.NUM_EQ(0.1)}))))
return st.OperationContract(
self.new_post_operation(
title='run_deploy_upsert_load_balancer_pipeline',
data=payload, path=url_path),
builder.build())
def delete_load_balancer(self):
bindings = self.bindings
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'type': 'deleteLoadBalancer',
'cloudProvider': 'appengine',
'loadBalancerName': self.__lb_name,
'account': bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'credentials': bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'user': '[anonymous]'
}],
description='Delete Load Balancer: {0} in {1}'.format(
self.__lb_name,
bindings['SPINNAKER_APPENGINE_ACCOUNT']),
application=self.TEST_APP)
builder = gcp.GcpContractBuilder(self.appengine_observer)
(builder.new_clause_builder('Service Deleted', retryable_for_secs=30)
.inspect_resource('apps.services',
self.__lb_name,
appsId=self.__gcp_project)
.EXPECT(
ov_factory.error_list_contains(gcp.HttpErrorPredicate(http_code=404))))
return st.OperationContract(
self.new_post_operation(
title='delete_load_balancer', data=payload, path='tasks'),
contract=builder.build())
class AppengineSmokeTest(st.AgentTestCase):
@property
def scenario(self):
return citest.base.TestRunner.global_runner().get_shared_data(AppengineSmokeTestScenario)
def test_a_create_app(self):
self.run_test_case(self.scenario.create_app(),
retry_interval_secs=8, max_retries=8)
def test_b_create_server_group(self):
self.run_test_case(self.scenario.create_server_group())
def test_c_create_pipeline(self):
self.run_test_case(self.scenario.create_deploy_upsert_load_balancer_pipeline())
def test_d_run_pipeline(self):
self.run_test_case(self.scenario.run_deploy_upsert_load_balancer_pipeline())
def test_y_delete_load_balancer(self):
self.run_test_case(self.scenario.delete_load_balancer(),
retry_interval_secs=8, max_retries=8)
def test_z_delete_app(self):
self.run_test_case(self.scenario.delete_app(),
retry_interval_secs=8, max_retries=8)
def main():
defaults = {
'TEST_STACK': AppengineSmokeTestScenario.DEFAULT_TEST_ID,
'TEST_APP': 'gaesmoketest' + AppengineSmokeTestScenario.DEFAULT_TEST_ID,
}
return citest.base.TestRunner.main(
parser_inits=[AppengineSmokeTestScenario.initArgumentParser],
default_binding_overrides=defaults,
test_case_list=[AppengineSmokeTest])
if __name__ == '__main__':
sys.exit(main())
|
# Copyright (c) 2020 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model-based RL Algorithm."""
import numpy as np
import gin
import torch
import torch.nn as nn
import torch.distributions as td
from typing import Callable
from alf.algorithms.config import TrainerConfig
from alf.algorithms.off_policy_algorithm import OffPolicyAlgorithm
from alf.algorithms.one_step_loss import OneStepTDLoss
from alf.algorithms.rl_algorithm import RLAlgorithm
from alf.data_structures import (AlgStep, Experience, LossInfo, namedtuple,
TimeStep)
from alf.nest import nest
from alf.networks import ActorDistributionNetwork, CriticNetwork
from alf.tensor_specs import TensorSpec, BoundedTensorSpec
from alf.utils import losses, common, dist_utils, tensor_utils
from alf.utils.math_ops import add_ignore_empty
from alf.algorithms.dynamics_learning_algorithm import DynamicsLearningAlgorithm
from alf.algorithms.reward_learning_algorithm import RewardEstimationAlgorithm
from alf.algorithms.planning_algorithm import PlanAlgorithm
MbrlState = namedtuple("MbrlState", ["dynamics", "reward", "planner"])
MbrlInfo = namedtuple(
"MbrlInfo", ["dynamics", "reward", "planner"], default_value=())
@gin.configurable
class MbrlAlgorithm(OffPolicyAlgorithm):
"""Model-based RL algorithm
"""
def __init__(self,
observation_spec,
feature_spec,
action_spec,
dynamics_module: DynamicsLearningAlgorithm,
reward_module: RewardEstimationAlgorithm,
planner_module: PlanAlgorithm,
env=None,
config: TrainerConfig = None,
dynamics_optimizer=None,
reward_optimizer=None,
planner_optimizer=None,
debug_summaries=False,
name="MbrlAlgorithm"):
"""Create an MbrlAlgorithm.
The MbrlAlgorithm takes as input the following set of modules for
making decisions on actions based on the current observation:
1) learnable/fixed dynamics module
2) learnable/fixed reward module
3) learnable/fixed planner module
Args:
action_spec (nested BoundedTensorSpec): representing the actions.
dynamics_module (DynamicsLearningAlgorithm): module for learning to
predict the next feature based on the previous feature and action.
It should accept input with spec [feature_spec,
encoded_action_spec] and output a tensor of shape
                feature_spec. For a discrete action, encoded_action is a one-hot
                representation of the action. For a continuous action, the encoded
                action is the same as the original action.
reward_module (RewardEstimationAlgorithm): module for calculating
the reward, i.e., evaluating the reward for a (s, a) pair
planner_module (PlanAlgorithm): module for generating planned action
based on specified reward function and dynamics function
env (Environment): The environment to interact with. env is a batched
environment, which means that it runs multiple simulations
                simultaneously. env only needs to be provided to the root
Algorithm.
config (TrainerConfig): config for training. config only needs to be
provided to the algorithm which performs `train_iter()` by
itself.
debug_summaries (bool): True if debug summaries should be created.
name (str): The name of this algorithm.
"""
train_state_spec = MbrlState(
dynamics=dynamics_module.train_state_spec,
reward=reward_module.train_state_spec,
planner=planner_module.train_state_spec)
super().__init__(
feature_spec,
action_spec,
train_state_spec=train_state_spec,
env=env,
config=config,
debug_summaries=debug_summaries,
name=name)
flat_action_spec = nest.flatten(action_spec)
action_spec = flat_action_spec[0]
        assert action_spec.is_continuous, "only support \
            continuous control"
num_actions = action_spec.shape[-1]
flat_feature_spec = nest.flatten(feature_spec)
assert len(flat_feature_spec) == 1, "Mbrl doesn't support nested \
feature_spec"
self._action_spec = action_spec
self._num_actions = num_actions
if dynamics_optimizer is not None:
self.add_optimizer(dynamics_optimizer, [dynamics_module])
if planner_optimizer is not None:
self.add_optimizer(planner_optimizer, [planner_module])
if reward_optimizer is not None:
self.add_optimizer(reward_optimizer, [reward_module])
self._dynamics_module = dynamics_module
self._reward_module = reward_module
self._planner_module = planner_module
self._planner_module.set_reward_func(self._calc_step_reward)
self._planner_module.set_dynamics_func(self._predict_next_step)
def _predict_next_step(self, time_step, state: MbrlState):
"""Predict the next step (observation and state) based on the current
time step and state
Args:
time_step (TimeStep): input data for next step prediction
state (MbrlState): input state next step prediction
Returns:
next_time_step (TimeStep): updated time_step with observation
predicted from the dynamics module
next_state (MbrlState): updated state from the dynamics module
"""
with torch.no_grad():
dynamics_step = self._dynamics_module.predict_step(
time_step, state.dynamics)
pred_obs = dynamics_step.output
next_time_step = time_step._replace(observation=pred_obs)
next_state = state._replace(dynamics=dynamics_step.state)
return next_time_step, next_state
def _calc_step_reward(self, obs, action, state: MbrlState):
"""Calculate the step reward based on the given observation, action
and state.
Args:
obs (Tensor): observation
action (Tensor): action
state: state for reward calculation
Returns:
            reward (Tensor): computed reward for the given input
updated_state (MbrlState): updated state from the reward module
"""
reward, reward_state = self._reward_module.compute_reward(
obs, action, state.reward)
updated_state = state._replace(reward=reward_state)
return reward, updated_state
def _predict_with_planning(self, time_step: TimeStep, state,
epsilon_greedy):
# full state in
action = self._planner_module.generate_plan(time_step, state,
epsilon_greedy)
dynamics_state = self._dynamics_module.update_state(
time_step, state.dynamics)
return AlgStep(
output=action,
state=state._replace(dynamics=dynamics_state),
info=MbrlInfo())
def predict_step(self, time_step: TimeStep, state, epsilon_greedy=0.0):
return self._predict_with_planning(time_step, state, epsilon_greedy)
def rollout_step(self, time_step: TimeStep, state):
        # epsilon_greedy could be raised (e.g. to 0.1) for random exploration;
        # rollout here uses the greedy plan (epsilon_greedy=0.0).
return self._predict_with_planning(
time_step, state, epsilon_greedy=0.0)
def train_step(self, exp: Experience, state: MbrlState):
action = exp.action
dynamics_step = self._dynamics_module.train_step(exp, state.dynamics)
reward_step = self._reward_module.train_step(exp, state.reward)
plan_step = self._planner_module.train_step(exp, state.planner)
state = MbrlState(
dynamics=dynamics_step.state,
reward=reward_step.state,
planner=plan_step.state)
info = MbrlInfo(
dynamics=dynamics_step.info,
reward=reward_step.info,
planner=plan_step.info)
return AlgStep(action, state, info)
def calc_loss(self, experience, training_info: MbrlInfo):
loss = training_info.dynamics.loss
loss = add_ignore_empty(loss, training_info.reward)
loss = add_ignore_empty(loss, training_info.planner)
return LossInfo(loss=loss.loss, extra=(loss.loss))
def after_update(self, experience, training_info):
self._planner_module.after_update(
training_info._replace(planner=training_info.planner))
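# Note (summary sketch, not from the source): at plan time the planner module calls back
# into _calc_step_reward and _predict_next_step, which __init__ registers via
# set_reward_func and set_dynamics_func, so candidate action sequences can be scored by
# rolling the learned dynamics and reward models forward.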
|