from flask import Flask, render_template,request,redirect,url_for # For flask implementation
from bson import ObjectId # For ObjectId to work
from pymongo import MongoClient
import os
app = Flask(__name__)
title = "TODO with Flask"
heading = "ToDo Reminder"
@app.route("/")
def index():
    return "Hello World!"
# Defined for IIS module registration.
wsgi_app = app.wsgi_app
if __name__ == "__main__":
app.run()
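# Usage sketch (assumption, not part of the original snippet): running
# `python app.py` starts the Flask development server, and GET / returns
# "Hello World!".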
|
from rest_framework import serializers
from dateflix_api.models import Movie
class MovieSerializer(serializers.ModelSerializer):
class Meta:
model = Movie
fields = ["id", "title", "netflix_url", "image", "description"]
|
import os
import pprint
import jsonref
import numpy as np
import cv2
import pydicom
from pathlib import Path
from logs import logDecorator as lD
# ----------------------------------
config = jsonref.load(open("../config/config.json"))
logBase = config["logging"]["logBase"] + ".modules.cropPreprocessing.cropPreprocessing"
config_cropPre = jsonref.load(open("../config/modules/cropPreprocessing.json"))
@lD.log(logBase + ".minMaxNormalise")
def minMaxNormalise(logger, img):
"""
This function does min-max normalisation on
the given image.
Parameters
----------
img : {numpy.ndarray}
The image to normalise.
Returns
-------
norm_img: {numpy.ndarray}
The min-max normalised image.
"""
try:
norm_img = (img - img.min()) / (img.max() - img.min())
except Exception as e:
# logger.error(f'Unable to minMaxNormalise!\n{e}')
print((f"Unable to get minMaxNormalise!\n{e}"))
return norm_img
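# Worked example (illustrative, not part of the original module). The lD.log
# decorator injects `logger`, so callers pass only `img`, as cropPreprocess()
# does below:
#   demo = np.array([[0.0, 5.0], [10.0, 20.0]], dtype=np.float32)
#   minMaxNormalise(img=demo)  # -> [[0.0, 0.25], [0.5, 1.0]]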
@lD.log(logBase + ".clahe")
def clahe(logger, img, clip=2.0, tile=(8, 8)):
"""
This function applies the Contrast-Limited Adaptive
Histogram Equalisation filter to a given image.
Parameters
----------
img : {numpy.ndarray}
The image to edit.
    clip : {int or float}
Threshold for contrast limiting.
tile : {tuple (int, int)}
Size of grid for histogram equalization. Input
image will be divided into equally sized
rectangular tiles. `tile` defines the number of
tiles in row and column.
Returns
-------
clahe_img : {numpy.ndarray, np.uint8}
The CLAHE edited image, with values ranging from [0, 255]
"""
try:
# Convert to uint8.
# img = skimage.img_as_ubyte(img)
img = cv2.normalize(
img,
None,
alpha=0,
beta=255,
norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_32F,
)
img_uint8 = img.astype("uint8")
# img = cv2.normalize(
# img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U
# )
clahe_create = cv2.createCLAHE(clipLimit=clip, tileGridSize=tile)
clahe_img = clahe_create.apply(img_uint8)
except Exception as e:
# logger.error(f'Unable to clahe!\n{e}')
print((f"Unable to get clahe!\n{e}"))
return clahe_img
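# Usage sketch (illustrative): with the decorator injecting `logger`, a call
# such as clahe(img=norm_img, clip=2.0, tile=(8, 8)) returns a uint8 image
# with values in [0, 255], as cropPreprocess() does below.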
@lD.log(logBase + ".pad")
def pad(logger, img):
"""
This function pads a given image with black pixels,
along its shorter side, into a square and returns
the square image.
If the image is portrait, black pixels will be
padded on the right to form a square.
If the image is landscape, black pixels will be
padded on the bottom to form a square.
Parameters
----------
img : {numpy.ndarray}
The image to pad.
Returns
-------
padded_img : {numpy.ndarray}
The padded square image, if padding was required
and done.
img : {numpy.ndarray}
The original image, if no padding was required.
"""
try:
nrows, ncols = img.shape
# If padding is required...
if nrows != ncols:
# Take the longer side as the target shape.
if ncols < nrows:
target_shape = (nrows, nrows)
elif nrows < ncols:
target_shape = (ncols, ncols)
# pad.
padded_img = np.zeros(shape=target_shape)
padded_img[:nrows, :ncols] = img
# If padding is not required...
elif nrows == ncols:
# Return original image.
padded_img = img
except Exception as e:
# logger.error(f'Unable to pad!\n{e}')
print((f"Unable to pad!\n{e}"))
return padded_img
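# Shape example (illustrative): a (1024, 768) portrait image becomes a
# (1024, 1024) square with zero-valued columns appended on the right, while a
# (768, 1024) landscape image becomes (1024, 1024) with zero-valued rows
# appended at the bottom.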
@lD.log(logBase + ".centerCrop")
def centerCrop(logger, img):
"""
This function takes a center square crop of a given image, with the length
of the square equal to the shorter length of the original image.
e.g. The original image (height, width) = (x, y), where x < y.
Then the square crop will be of sides with length (x, x).
Parameters
----------
img : {numpy.ndarray}
The image to crop.
Returns
-------
sq_img : {numpy.ndarray}
The cropped square image.
img : {numpy.ndarray}
        The original image, if no cropping was required.
"""
try:
h, w = img.shape
# If cropping is required...
if h != w:
# Take the shorter side as the square length.
if w < h: # Vertical rectangle, use w as square length.
start_w = 0
end_w = w
start_h = h // 2 - w // 2
end_h = start_h + w
elif h < w: # Horizontal rectangle, use h as square length.
start_h = 0
end_h = h
start_w = w // 2 - h // 2
end_w = start_w + h
# Crop.
sq_img = img[start_h:end_h, start_w:end_w]
return sq_img
        # If cropping is not required...
elif w == h:
# Return original image.
return img
except Exception as e:
# logger.error(f'Unable to centerCrop!\n{e}')
print((f"Unable to centerCrop!\n{e}"))
@lD.log(logBase + ".cropPreprocess")
def cropPreprocess(
logger,
img,
clip,
tile,
):
"""
This function chains and executes all the preprocessing
steps for a cropped ROI image, in the following order:
Step 1 - Min-max normalise
    Step 2 - CLAHE enhancement
Step 3 - Center square crop
Step 4 - Min-max normalise
Parameters
----------
img : {numpy.ndarray}
The cropped ROI image to preprocess.
Returns
-------
img_pre : {numpy.ndarray}
The preprocessed cropped ROI image.
"""
try:
# Step 1: Min-max normalise.
norm_img = minMaxNormalise(img=img)
# cv2.imwrite("../data/preprocessed/Mass/testing/normed.png", norm_img)
# Step 2: CLAHE enhancement.
clahe_img = clahe(img=norm_img, clip=clip, tile=(tile, tile))
# cv2.imwrite("../data/preprocessed/Mass/testing/clahe_img.png", clahe_img)
# Step 3: Crop.
sq_img = centerCrop(img=clahe_img)
# Step 4: Min-max normalise.
img_pre = minMaxNormalise(img=sq_img)
# cv2.imwrite("../data/preprocessed/Mass/testing/img_pre.png", img_pre)
# padded_img = cv2.normalize(
# padded_img,
# None,
# alpha=0,
# beta=255,
# norm_type=cv2.NORM_MINMAX,
# dtype=cv2.CV_32F,
# )
# cv2.imwrite("../data/preprocessed/Mass/testing/padded_img.png", padded_img)
except Exception as e:
# logger.error(f'Unable to cropPreprocess!\n{e}')
print((f"Unable to cropPreprocess!\n{e}"))
return img_pre
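# End-to-end sketch (illustrative; clip=2.0 and tile=8 are assumed example
# values, the real ones come from cropPreprocessing.json):
#   pre = cropPreprocess(img=cropImg, clip=2.0, tile=8)
# `pre` is a min-max normalised square crop in [0, 1]; main() below rescales
# it to [0, 255] before writing it with cv2.imwrite.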
# ----------------------------------
@lD.log(logBase + ".main")
def main(logger, resultsDict):
"""main function for cropPreprocessing module.
The hyperparameters in this module can be tuned in
the "../config/modules/cropPreprocessing.json" file.
Parameters
----------
logger : {logging.Logger}
The logger used for logging error information
resultsDict: {dict}
        A dictionary containing information about the
command line arguments. These can be used for
overwriting command line arguments as needed.
"""
print("=" * 30)
print("Main function of cropPreprocessing.")
print("=" * 30)
print("We get a copy of the result dictionary over here ...")
pprint.pprint(resultsDict)
# Get path of the folder containing .dcm files.
input_path = config_cropPre["paths"]["input"]
output_path = config_cropPre["paths"]["output"]
# Output format.
output_format = config_cropPre["output_format"]
# Get individual .dcm paths.
dcm_paths = []
for curdir, dirs, files in os.walk(input_path):
files.sort()
for f in files:
if f.endswith(".dcm"):
dcm_paths.append(os.path.join(curdir, f))
# Get paths of full mammograms and ROI masks.
crop_paths = [f for f in dcm_paths if ("CROP" in f)]
count = 0
for crop_path in crop_paths:
# Read full mammogram .dcm file.
ds = pydicom.dcmread(crop_path)
# Get relevant metadata from .dcm file.
patient_id = ds.PatientID
        # Calc-Test masks did not have the "Calc-Test_" prefix when originally
        # downloaded (masks from Calc-Train, Mass-Test and Mass-Train all have
        # their corresponding prefixes).
patient_id = patient_id.replace(".dcm", "")
patient_id = patient_id.replace("Calc-Test_", "")
print(patient_id)
cropImg = ds.pixel_array
# ===================
# Preprocess Crop Img
# ===================
# Get all hyperparameters.
clip = config_cropPre["clahe"]["clip"]
tile = config_cropPre["clahe"]["tile"]
# Preprocess cropped ROI image.
cropImg_pre = cropPreprocess(img=cropImg, clip=clip, tile=tile)
# Need to normalise to [0, 255] before saving as .png.
cropImg_pre_norm = cv2.normalize(
cropImg_pre,
None,
alpha=0,
beta=255,
norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_32F,
)
# Save preprocessed crop image.
save_filename = (
os.path.basename(crop_path).replace(".dcm", "") + "___PRE" + output_format
)
save_path = os.path.join(output_path, save_filename)
print(save_path)
cv2.imwrite(save_path, cropImg_pre_norm)
print(f"DONE FULL: {crop_path}")
count += 1
# if count == 10:
# break
print(f"Total count = {count}")
print()
print("Getting out of cropPreprocessing module.")
print("-" * 30)
return
|
import os
import sys
root_path = os.path.abspath(os.path.dirname(__file__)).split('src')
sys.path.extend([root_path[0] + 'src'])
from sequence_process.DPCP import DPCP
import numpy as np
from sequence_process.sequence_process_def import get_cell_line_seq
names = ['GM12878', 'HeLa-S3', 'HUVEC', 'IMR90', 'K562', 'NHEK']
cell_name = names[0]  # names[7] was out of range for this 6-entry list; pick the desired cell line here
data_source = "epivan"  # assumed default; "epivan" is the only data source referenced below
feature_name = "dpcp"
feature_dir, \
enhancers_tra, promoters_tra, y_tra, \
im_enhancers_tra, im_promoters_tra, \
y_imtra, enhancers_tes, promoters_tes, y_tes = get_cell_line_seq(data_source, cell_name, feature_name)
set_pc_list = ["Base stacking", "Protein induced deformability", "B-DNA twist", "A-philicity", "Propeller twist",
"Duplex stability (freeenergy)", "Duplex stability (disruptenergy)", "DNA denaturation",
"Bending stiffness", "Protein DNA twist", "Stabilising energy of Z-DNA", "Aida_BA_transition",
"Breslauer_dG", "Breslauer_dH", "Breslauer_dS", "Electron_interaction", "Hartman_trans_free_energy",
"Helix-Coil_transition", "Ivanov_BA_transition", "Lisser_BZ_transition", "Polar_interaction"]
def get_data(enhancers, promoters):
dpcp = DPCP(2, set_pc_list, n_jobs=1)
X_en = dpcp.run_DPCP(enhancers)
# print(X_en)
X_pr = dpcp.run_DPCP(promoters)
# print(X_pr)
return np.array(X_en), np.array(X_pr)
"""
get and save
"""
X_en_tra, X_pr_tra = get_data(enhancers_tra, promoters_tra)
np.savez(feature_dir + '%s_train.npz' % cell_name, X_en_tra=X_en_tra, X_pr_tra=X_pr_tra, y_tra=y_tra)
if data_source == "epivan":
X_en_imtra, X_pr_imtra = get_data(im_enhancers_tra, im_promoters_tra)
np.savez(feature_dir + 'im_%s_train.npz' % cell_name, X_en_tra=X_en_imtra, X_pr_tra=X_pr_imtra, y_tra=y_imtra)
X_en_tes, X_pr_tes = get_data(enhancers_tes, promoters_tes)
np.savez(feature_dir + '%s_test.npz' % cell_name, X_en_tes=X_en_tes, X_pr_tes=X_pr_tes, y_tes=y_tes)
|
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin
from flask_dance.consumer.storage.sqla import OAuthConsumerMixin
from werkzeug.security import generate_password_hash, check_password_hash
db = SQLAlchemy()
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
login_name = db.Column(db.String)
password = db.Column(db.String)
phone = db.Column(db.Integer, unique=True)
email = db.Column(db.String(256), unique=True)
address = db.Column(db.String)
gender = db.Column(db.String)
name = db.Column(db.String(256))
img_url = db.Column(db.String())
store = db.Column(db.Boolean, default=False)
store_name = db.Column(db.String)
product_own = db.relationship('Product', backref='user', lazy=True)
orders = db.relationship('Order', backref='user', lazy=True)
def set_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password, password)
def jsonize(self):
return {
"id" : self.id,
"login_name": self.login_name,
"phone": self.phone,
"email": self.email,
"address": self.address,
"gender": self.gender,
"name": self.name,
"img_url": self.img_url,
"store": self.store,
"store_name": self.store_name,
}
class OAuth(OAuthConsumerMixin, db.Model):
provider_user_id = db.Column(db.String(256), unique=True, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)
user = db.relationship(User)
class Token(db.Model):
id = db.Column(db.Integer, primary_key=True)
uuid = db.Column(db.String, unique=True)
user_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)
user = db.relationship(User)
# setup login manager
login_manager = LoginManager()
@login_manager.user_loader
def load_user(user_id):
    # flask_login passes the user id as a string
    return User.query.get(int(user_id))
@login_manager.request_loader
def load_user_from_request(request):
api_key = request.headers.get('Authorization')
if api_key:
api_key = api_key.replace('Token ', '', 1)
token = Token.query.filter_by(uuid=api_key).first()
if token:
return token.user
return None
class Order(db.Model):
__tablename__ = 'orders'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
order_item = db.relationship('Order_item', backref='order', lazy = True)
ship = db.relationship('Shipment', backref='order', lazy = True)
invoice = db.relationship('Invoice', backref='order', lazy = True)
class Order_item(db.Model):
__tablename__ = 'order_items'
id = db.Column(db.Integer, primary_key=True)
quantity = db.Column(db.Integer, nullable=False)
total_price = db.Column(db.Integer)
date_of_sell = db.Column(db.Date)
order_id = db.Column(db.Integer, db.ForeignKey('orders.id'), nullable=False)
product_id = db.Column(db.Integer, db.ForeignKey('products.id'), nullable=False)
order_status_id = db.Column(db.Integer, db.ForeignKey('order_statuses.id'), default=5)
def jsonize(self):
return {
"id" : self.id,
"user_id" : self.order.user_id,
"product_id" : self.product_id,
"product" : self.product.name,
"order_status" : self.status.status,
"quantity" : self.quantity,
"total_price" : self.total_price,
"img_url" : self.product.img_url
}
class Order_status(db.Model):
__tablename__ = 'order_statuses'
id = db.Column(db.Integer, primary_key=True)
status = db.Column(db.String)
order_id = db.relationship('Order_item', backref='status', lazy=True)
|
from setuptools import setup, find_packages
version = '1.0.11'
setup(name='holland.lib.lvm',
version=version,
description="LVM support",
long_description="""\
""",
keywords='holland lib lvm',
author='Rackspace',
author_email='holland-devel@googlegroups.com',
url='http://www.hollandbackup.org/',
license='GPLv2',
packages=find_packages(exclude=['ez_setup',
'examples',
'tests',
'tests.*',]),
zip_safe=True,
namespace_packages=['holland','holland.lib']
)
|
# -*- coding: utf-8 -*-
#############################################################################
# _________ ____________ ___ #
# / _____// _____/\ \/ / ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# \_____ \/ \ ___ \ / THE E(X)TENDED (S)ELFISH (G)ENE ALGORITHM #
# / \ \_\ \/ \ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# /_________/\________/___/\ \ https://github.com/squillero/sgx #
# \_/ #
# #
# A quick 'n dirty versatile population-less evolutionary optimizer loosely #
# inspired by a cool interpretation of the Darwinian theory. #
# #
#############################################################################
# Copyright 2020 Giovanni Squillero
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
from sgx import __name__, __version__
with open('pypi-description.md', 'r', encoding='utf-8') as fh:
long_description = fh.read()
OPTIONAL = ['coloredlogs', 'matplotlib', 'psutil']
with open('requirements.txt', 'r', encoding='utf-8') as fh:
requirements = [r for r in fh.readlines() if not any(o in r for o in OPTIONAL)]
setuptools.setup(
name=__name__,
version=__version__,
author="Giovanni Squillero",
author_email="squillero@polito.it",
license="Apache-2.0",
description="A population-less EA loosely inspired by a cool interpretation of the Darwinian theory",
long_description=long_description,
#long_description_content_type="text/x-rst",
long_description_content_type='text/markdown',
url="https://github.com/squillero/sgx",
project_urls={
'Bug Tracker': "https://github.com/squillero/sgx/issues",
#'Documentation': "https://microgp4.readthedocs.io/en/pre-alpha/",
'Source Code': "https://github.com/squillero/sgx",
},
keywords="optimization evolutionary-algorithm computational-intelligence",
packages=setuptools.find_packages(),
package_dir={'': '.'},
package_data={'': ['requirements.txt', 'pypi-description.md']},
data_files=[('.', ['requirements.txt', 'pypi-description.md'])],
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Mathematics",
"License :: OSI Approved :: Apache Software License"
],
install_requires=requirements,
)
|
import os, gnupg, time, ConfigParser
class encrypt:
def __init__(self):
self.gpg = gnupg.GPG()
self.key_data = open('backup_key.asc').read()
self.import_result = self.gpg.import_keys(self.key_data)
print(self.import_result.results)
config = ConfigParser.SafeConfigParser()
config.read('options.conf')
self.fingerprint = config.get('gpg','fingerprint',1)
self.encrypted = config.get('folder','temp',1)
def encrypt(self,file_path):
        print(file_path)
newpath = os.path.basename(file_path)
encrypted_path = os.path.join(self.encrypted,newpath)
statinfo = os.stat(file_path)
if statinfo.st_size<100000000:
with open(file_path, 'rb') as f:
message = str(self.gpg.encrypt(f.read(),self.fingerprint,output=encrypted_path+'.gpg'))
return (encrypted_path+'.gpg')
|
from setuptools import setup, find_packages
VERSION = '1.1.0'
DESCRIPTION = 'The package allows you to download, process and visualize climatological data from reliable sources'
README = open('README.md', 'r', encoding='utf8').read()
setup(
name='cloupy',
version=VERSION,
description=DESCRIPTION,
long_description=README,
long_description_content_type='text/markdown',
author='Kamil Grala',
author_email='kamil.grala32466@gmail.com',
url='https://github.com/pdGruby/cloupy',
license='MIT',
packages=['cloupy'],
install_requires=[
'pandas>=1.3.3,<=1.3.5',
'matplotlib>=3.4.3,<=3.5.1',
'requests>=2.26.0,<=2.27.1',
'beautifulsoup4>=4.9.3,<=4.10.0',
'numpy>=1.21.4,<=1.22.1',
'pyshp==2.1.3',
'pyproj>=3.2.1,<=3.3.0',
'scipy>=1.7.2,<=1.7.3',
'Pillow>=8.4.0,<=9.0.0',
'cycler==0.11.0'
],
tests_require=[
'pytest>=6.2.5',
'mock>=4.0.3'
],
package_data={
'cloupy': [
'data_processing/*',
'maps/*',
'maps/world/*',
'scraping/*',
'diagrams/*',
'test/test_integration/*',
'test/test_unit/*',
],
},
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Operating System :: Microsoft :: Windows :: Windows 10',
'Operating System :: POSIX :: Linux',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Framework :: Matplotlib',
'Topic :: Scientific/Engineering :: GIS',
'Topic :: Scientific/Engineering :: Atmospheric Science',
'Topic :: Scientific/Engineering :: Visualization'
]
)
|
# Distributed under MIT License
# Copyright (c) 2021 Remi BERTHOLET
""" Temperature homekit accessory """
from homekit import *
class TemperatureSensor(Accessory):
""" Temperature homekit accessory """
def __init__(self, **kwargs):
""" Create temperature accessory. Parameters : name(string), temperature(float) and all Accessory parameters """
Accessory.__init__(self, Accessory.CID_SENSOR, **kwargs)
self.server = Server(name=kwargs.get("name","Temperature"), server_uuid=Server.UUID_TEMPERATURE_SENSOR)
self.temperature = charact_float_create (Charact.UUID_CURRENT_TEMPERATURE, Charact.PERM_RE, kwargs.get("temperature",20.))
self.temperature.set_constraint(0.0, 100.0, 0.1)
self.temperature.set_unit(Charact.UNIT_CELSIUS)
self.server.add_charact(self.temperature)
self.add_server(self.server)
def set_temperature(self, temp):
""" Set the temperature """
self.temperature.set_value(temp)
def main():
""" Test """
# Initialize homekit engine
Homekit.init()
# Create temperature sensor
sensor = TemperatureSensor(name="My temperature sensor", temperature=10.)
# Create accessory
Homekit.play(sensor)
import time
temperature = 0.0
# Manage the temperature (simple sample)
while True:
time.sleep(2)
temperature += 1.
if temperature > 100.0:
temperature = 0.0
# Change the accessory temperature
sensor.set_temperature(temperature)
if __name__ == "__main__":
main()
|
import sys
sys.path.insert (0, '..')
import unittest
from categorizer_service import get_categorizer
from constants import KNOWN_CATEGORIES
class CategorizerServiceTest(unittest.TestCase):
def tests_categorizer_returns_a_known_category(self):
categorizer = get_categorizer()
sample_product = {
"title": "carrinho relampago macqueen",
"query": "carrinho de controle remoto",
"concatenated_tags": "pixar carrinho brinquedo",
}
inferred_category = categorizer(sample_product)
self.assertIn(inferred_category, KNOWN_CATEGORIES)
|
import logging
import argparse
import os
import shutil
import yaml
import glob
import hashlib
import errno
import tarfile
logging.getLogger("paramiko").setLevel(logging.WARNING)
logger = logging.getLogger("deploy")
deploy_modules = ['framework', 'common', 'controller', 'agent']
class mapconnect(object):
SSHOPTIONS = "-oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no" # default ssh options
def __init__(self, conf):
self.proxy = conf['proxy']
self.target = conf['target']
self.__connect__()
def __connect__(self):
''' connect sftp and ssh clients '''
import paramiko
# start sftp connection
t = paramiko.Transport((self.proxy['ip'], 22))
t.connect(username=self.proxy['user'], password=self.proxy['pass'])
logger.info("connect SFTP @{}".format(self.proxy['ip']))
self.sftp = paramiko.SFTPClient.from_transport(t)
# start ssh connection
self.client = paramiko.SSHClient()
self.client.load_system_host_keys()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
logger.info("connect SSH {}@{}".format(self.proxy['user'], self.proxy['ip']))
self.client.connect(self.proxy['ip'], port=22,
username=self.proxy['user'], password=self.proxy['pass'])
def run(self, commands=[]):
''' Run commands on target via proxy '''
for c in commands:
logger.debug("@{} --> {}".format(self.proxy['ip'], c))
stdin, stdout, stderr = self.client.exec_command(c)
if stdout.channel.recv_exit_status():
logger.error("Command failed:\n{}\nexit code - {}, stderr - {}".format(
c, stdout.channel.recv_exit_status(), stderr.read()))
def upload(self, files, path):
''' Uploads files to the target via proxy
files - local files
path - path in target
'''
# update list - (local_path, proxy_path, perm, md5sum)
update_list = [(os.path.abspath(f), '/tmp/%s' % (os.path.basename(f)),
format(os.stat(f).st_mode & 0o0777, 'o'),
hashlib.md5(open(f, 'rb').read()).hexdigest()) for f in files]
# copy to proxy
for lpath, ppath, perm, md5sum in update_list:
logger.info("Upload {} (md5sum {}) to {}:{} via {}".format(
lpath, md5sum, self.target['ip'], path, self.proxy['ip']))
self.sftp.put(lpath, ppath)
ssh_cmd_template = 'sshpass -p {} ssh {} {}@{}'.format(
self.target['pass'], mapconnect.SSHOPTIONS, self.target['user'], self.target['ip'])
mkdir_cmd = '{} "mkdir -p {}"'.format(ssh_cmd_template, path)
scp_cmd = 'sshpass -p {} scp -r {} {} {}@{}:{}'.format(
self.target['pass'],
mapconnect.SSHOPTIONS,
' '.join(['{}'.format(ppath) for lpath, ppath, perm, md5sum in update_list]),
self.target['user'],
self.target['ip'],
path)
chmod_cmd = '{} "{}"'.format(
ssh_cmd_template,
';'.join(['chmod {} {}'.format(
perm,
os.path.join(path, os.path.basename(lpath)))
for lpath, ppath, perm, md5sum in update_list]))
# run commands at proxy
self.run([mkdir_cmd, scp_cmd, chmod_cmd])
# delete from proxy
for lpath, ppath, perm, md5sum in update_list:
self.sftp.remove(ppath)
class mapcopy(object):
def __init__(self, args):
if args.verbose:
logger.setLevel(logging.DEBUG)
self.modules_dir = os.path.realpath(args.map_path)
conf_file = args.conf if args.conf else os.path.realpath(self.modules_dir + '/deploy.yaml')
with open(conf_file, 'r') as f:
            self.conf = yaml.safe_load(f)
self.connect = mapconnect(self.conf)
logger.debug("copy configuration: {}".format(self.conf))
logger.info("copy {} to target @{}".format(args.file, args.path))
self.connect.upload(args.file, args.path)
@staticmethod
def configure_parser(parser=argparse.ArgumentParser(prog='copy')):
parser.help = "multiap_sw standalone deploy module"
parser.add_argument('file', nargs="+", help="only upload a file to target")
parser.add_argument("--path", "-p", default="/tmp/", help="path to copy on in target")
parser.add_argument("--conf", "-c", help="path to deploy.yaml")
parser.add_argument("--verbose", "-v", action="store_true", help="verbosity on")
return parser
def __str__(self):
return str(self.args)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def reset(tarinfo):
tarinfo.uid = tarinfo.gid = 0
tarinfo.uname = tarinfo.gname = "root"
return tarinfo
class mapdeploy(object):
def __init__(self, args):
if args.verbose:
logger.setLevel(logging.DEBUG)
modules = deploy_modules if 'all' in args.modules else [
m for m in deploy_modules if m in args.modules]
self.modules_dir = os.path.realpath(args.map_path)
self.build_dir = os.path.realpath(self.modules_dir + '/build')
self.pack_dir = os.path.realpath(self.build_dir + '/pack')
conf_file = args.conf if args.conf else os.path.realpath(self.modules_dir + '/deploy.yaml')
logger.debug("modules_dir={}, build_dir={}, conf={}".format(
self.modules_dir, self.build_dir, conf_file))
with open(conf_file, 'r') as f:
            self.conf = yaml.safe_load(f)
if not args.pack_only:
self.connect = mapconnect(self.conf)
self.os = self.conf['target']['type']
logger.debug("deploy configuration: {}".format(self.conf))
logger.info("{} deploy {}".format(self.os, modules))
if os.path.exists(self.pack_dir):
logger.info("Delete previous packing {}".format(self.pack_dir))
shutil.rmtree(self.pack_dir)
for m in modules:
logger.debug("pack {}".format(m))
self.pack(m)
# create multiap_deploy.tar.gz
archive = os.path.join(self.pack_dir, "multiap_deploy.tar.gz")
with tarfile.open(archive, "w:gz") as tar:
tar.add(self.pack_dir, arcname='/', filter=reset)
deploy_sh = os.path.dirname(os.path.realpath(__file__)) + '/deploy_%s.sh' % self.os
shutil.copy(deploy_sh, os.path.join(self.pack_dir, os.path.basename(deploy_sh)))
# upload to target
if not args.pack_only:
self.connect.upload([archive, deploy_sh], args.path)
def pack(self, name):
pack_dir = self.pack_dir
out_dir = os.path.join(self.build_dir, name, 'out')
pack_dirs = [(d, self.conf['deploy'][self.os][name][d])
for d in os.listdir(out_dir) if os.path.isdir(os.path.join(out_dir, d))]
for src, dst in pack_dirs:
src_path = os.path.join(out_dir, src)
dst_path = os.path.join(pack_dir, os.path.relpath(dst, '/'))
mkdir_p(dst_path)
files = glob.glob('{}/*'.format(src_path))
for f in files:
md5sum = hashlib.md5(open(f, 'rb').read()).hexdigest()
logger.debug("packing {} (md5sum {})".format(f, md5sum))
if (os.path.islink(f)):
os.symlink(os.readlink(f), os.path.join(dst_path, os.path.basename(f)))
else:
shutil.copy(os.path.abspath(f), os.path.join(dst_path, os.path.basename(f)))
@staticmethod
def configure_parser(parser=argparse.ArgumentParser(prog='deploy')):
parser.help = "multiap_sw standalone deploy module"
parser.add_argument('modules', choices=['all'] +
deploy_modules, nargs='+', help='module[s] to deploy')
parser.add_argument("--verbose", "-v", action="store_true", help="verbosity on")
parser.add_argument("--pack-only", action="store_true",
help="only pack multiap for later deployment "
"(multiap_deploy.tar.gz and deploy.sh)")
parser.add_argument("--path", "-p", default="/tmp/multiap/deploy/",
help="path to deploy on in target")
parser.add_argument("--conf", "-c", help="path to deploy.yaml")
return parser
def __str__(self):
return str(self.args)
|
from operator import and_, not_, or_, xor
from typing import Dict, Iterator
from parse_2d import Diagram, Index, TinyTokenizer, tokenize
from samples.circuit_diagram.ast import OpNode
__all__ = ["parse_logic_gates"]
nand = lambda a, b: not a & b
nor = lambda a, b: not a | b
xnor = lambda a, b: not a ^ b
const_0 = lambda: False
const_1 = lambda: True
logic_gate_tokenizers = [
TinyTokenizer("a", (and_, 2)),
TinyTokenizer("A", (nand, 2)),
TinyTokenizer("o", (or_, 2)),
TinyTokenizer("O", (nor, 2)),
TinyTokenizer("x", (xor, 2)),
TinyTokenizer("X", (xnor, 2)),
TinyTokenizer("~", (not_, 1)),
TinyTokenizer("(", (const_0, 0)),
TinyTokenizer(")", (const_1, 0)),
]
def parse_logic_gates(
diagram: Diagram[str], node_ids: Iterator[int]
) -> Dict[Index, OpNode]:
logic_gates = {
token.region.location: OpNode(next(node_ids), *token.value)
for token in tokenize(diagram, logic_gate_tokenizers)
}
for logic_gate_index in logic_gates:
diagram[logic_gate_index] = "logic_gate"
return logic_gates
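# Quick sanity check of the gate lambdas above (illustrative, not part of the
# original module):
#   nand(True, True) -> False    nor(False, False) -> True
#   xnor(True, False) -> False   const_0() -> False, const_1() -> True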
|
from setuptools import find_packages, setup
setup(
name="yalexs",
version="1.1.17",
python_requires=">=3.6",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
url="https://github.com/bdraco/yalexs",
license="MIT",
author="bdraco",
author_email="nick@koston.org",
description="Python API for Yale Access (formerly August) Smart Lock and Doorbell",
packages=find_packages(include=["yalexs", "yalexs.*"]),
install_requires=[
"requests",
"vol",
"python-dateutil",
"aiohttp",
"aiofiles",
"pubnub>=5.5.0",
],
)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ParcelServiceType(Document):
pass
def match_parcel_service_type_alias(parcel_service_type, parcel_service):
    # Match parcel_service_type against the Parcel Service Type Aliases of the
    # given Parcel Service and return the parent Parcel Service Type if an
    # alias exists.
if frappe.db.exists('Parcel Service', parcel_service):
matched_parcel_service_type = \
frappe.db.get_value('Parcel Service Type Alias', {
'parcel_type_alias': parcel_service_type,
'parcel_service': parcel_service
}, 'parent')
if matched_parcel_service_type:
parcel_service_type = matched_parcel_service_type
return parcel_service_type
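# Illustrative call (hypothetical document names, not from the original code):
#   match_parcel_service_type_alias('Ground', 'FedEx')
# returns the parent Parcel Service Type of a matching alias row if one
# exists, otherwise the parcel_service_type argument unchanged.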
|
import marso
def issues(code):
grammar = marso.load_grammar()
module = marso.parse(code)
return grammar._get_normalizer_issues(module)
def test_eof_newline():
def assert_issue(code):
found = issues(code)
assert len(found) == 1
issue, = found
assert issue.code == 292
assert not issues('asdf = 1\n')
assert_issue('asdf = 1')
assert_issue('asdf = 1\n# foo')
assert_issue('# foobar')
assert_issue('')
assert_issue('foo = 1 # comment')
def test_eof_blankline():
def assert_issue(code):
found = issues(code)
assert len(found) == 1
issue, = found
assert issue.code == 391
assert_issue('asdf = 1\n\n')
assert_issue('# foobar\n\n')
assert_issue('\n\n')
def test_shebang():
assert not issues('#!\n')
assert not issues('#!/foo\n')
assert not issues('#! python\n')
|
from __future__ import division
from functools import partial
import numpy as np
from menpofit.fitter import raise_costs_warning
from menpofit.math import IRLRegression, IIRLRegression
from menpofit.result import euclidean_bb_normalised_error
from menpofit.sdm.algorithm.base import (BaseSupervisedDescentAlgorithm,
compute_parametric_delta_x,
update_parametric_estimates,
print_parametric_info)
from menpofit.visualize import print_progress
class ParametricSupervisedDescentAlgorithm(BaseSupervisedDescentAlgorithm):
r"""
Base class for defining a cascaded-regression Supervised Descent Algorithm
given a trained AAM model.
Parameters
----------
aam_interface : The AAM interface class from `menpofit.aam.algorithm.lk`.
Existing interfaces include:
============================== =============================
'LucasKanadeStandardInterface' Suitable for holistic AAMs
'LucasKanadeLinearInterface' Suitable for linear AAMs
'LucasKanadePatchInterface' Suitable for patch-based AAMs
============================== =============================
n_iterations : `int`, optional
The number of iterations (cascades).
compute_error : `callable`, optional
The function to be used for computing the fitting error when training
each cascade.
"""
def __init__(self, aam_interface, n_iterations=10,
compute_error=euclidean_bb_normalised_error):
super(ParametricSupervisedDescentAlgorithm, self).__init__()
self.interface = aam_interface
self.n_iterations = n_iterations
self._compute_error = compute_error
self._precompute()
@property
def appearance_model(self):
r"""
Returns the appearance model of the AAM.
:type: `menpo.model.PCAModel`
"""
return self.interface.appearance_model
@property
def transform(self):
r"""
Returns the model driven differential transform object of the AAM, e.g.
:map:`DifferentiablePiecewiseAffine` or
:map:`DifferentiableThinPlateSplines`.
:type: `subclass` of :map:`DL` and :map:`DX`
"""
return self.interface.transform
def _precompute(self):
# Grab appearance model mean
a_bar = self.appearance_model.mean()
# Vectorise it and mask it
self.a_bar_m = a_bar.as_vector()[self.interface.i_mask]
def _compute_delta_x(self, gt_shapes, current_shapes):
# This is called first - so train shape model here
return compute_parametric_delta_x(gt_shapes, current_shapes,
self.transform)
def _update_estimates(self, estimated_delta_x, delta_x, gt_x,
current_shapes):
update_parametric_estimates(estimated_delta_x, delta_x, gt_x,
current_shapes, self.transform)
def _compute_training_features(self, images, gt_shapes, current_shapes,
prefix='', verbose=False):
wrap = partial(print_progress,
prefix='{}Extracting patches'.format(prefix),
end_with_newline=not prefix, verbose=verbose)
features = []
for im, shapes in wrap(list(zip(images, current_shapes))):
for s in shapes:
param_feature = self._compute_test_features(im, s)
features.append(param_feature)
return np.vstack(features)
def _compute_test_features(self, image, current_shape):
# Make sure you call: self.transform.set_target(current_shape)
# before calculating the warp
raise NotImplementedError()
def _print_regression_info(self, _, gt_shapes, n_perturbations,
delta_x, estimated_delta_x, level_index,
prefix=''):
print_parametric_info(self.transform, gt_shapes, n_perturbations,
delta_x, estimated_delta_x, level_index,
self._compute_error, prefix=prefix)
def run(self, image, initial_shape, gt_shape=None, return_costs=False,
**kwargs):
r"""
Run the algorithm to an image given an initial shape.
Parameters
----------
image : `menpo.image.Image` or subclass
The image to be fitted.
initial_shape : `menpo.shape.PointCloud`
The initial shape from which the fitting procedure will start.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated to the image.
return_costs : `bool`, optional
If ``True``, then the cost function values will be computed
during the fitting procedure. Then these cost values will be
assigned to the returned `fitting_result`. *Note that this
argument currently has no effect and will raise a warning if set
to ``True``. This is because it is not possible to evaluate the
cost function of this algorithm.*
Returns
-------
fitting_result : :map:`AAMAlgorithmResult`
The parametric iterative fitting result.
"""
# costs warning
if return_costs:
raise_costs_warning(self)
# initialize transform
self.transform.set_target(initial_shape)
p_list = [self.transform.as_vector()]
shapes = [self.transform.target]
# Cascaded Regression loop
for r in self.regressors:
# Assumes that the transform is correctly set
features = self._compute_test_features(image,
self.transform.target)
# solve for increments on the shape parameters
dx = r.predict(features)
# We need to update the transform to set the state for the warping
# of the image above.
new_x = p_list[-1] + dx
self.transform._from_vector_inplace(new_x)
p_list.append(new_x)
shapes.append(self.transform.target)
# return algorithm result
return self.interface.algorithm_result(
image=image, shapes=shapes, shape_parameters=p_list,
initial_shape=initial_shape, gt_shape=gt_shape)
class MeanTemplate(ParametricSupervisedDescentAlgorithm):
r"""
Base class for defining a cascaded-regression Supervised Descent Algorithm
given a trained AAM model. The algorithm uses the centered appearance vectors
as features in the regression.
"""
def _compute_test_features(self, image, current_shape):
self.transform.set_target(current_shape)
i = self.interface.warp(image)
i_m = i.as_vector()[self.interface.i_mask]
return i_m - self.a_bar_m
class MeanTemplateNewton(MeanTemplate):
r"""
Class for training a cascaded-regression Newton algorithm using Incremental
Regularized Linear Regression (:map:`IRLRegression`) given a trained AAM
model. The algorithm uses the centered appearance vectors as features in
the regression.
Parameters
----------
aam_interface : The AAM interface class from `menpofit.aam.algorithm.lk`.
Existing interfaces include:
============================== =============================
'LucasKanadeStandardInterface' Suitable for holistic AAMs
'LucasKanadeLinearInterface' Suitable for linear AAMs
'LucasKanadePatchInterface' Suitable for patch-based AAMs
============================== =============================
n_iterations : `int`, optional
The number of iterations (cascades).
compute_error : `callable`, optional
The function to be used for computing the fitting error when training
each cascade.
alpha : `float`, optional
The regularization parameter.
bias : `bool`, optional
Flag that controls whether to use a bias term.
"""
def __init__(self, aam_interface, n_iterations=3,
compute_error=euclidean_bb_normalised_error,
alpha=0, bias=True):
super(MeanTemplateNewton, self).__init__(
aam_interface, n_iterations=n_iterations,
compute_error=compute_error)
self._regressor_cls = partial(IRLRegression, alpha=alpha, bias=bias)
class MeanTemplateGaussNewton(MeanTemplate):
r"""
Class for training a cascaded-regression Gauss-Newton algorithm using
Indirect Incremental Regularized Linear Regression (:map:`IIRLRegression`)
given a trained AAM model. The algorithm uses the centered appearance
vectors as features in the regression.
Parameters
----------
aam_interface : The AAM interface class from `menpofit.aam.algorithm.lk`.
Existing interfaces include:
============================== =============================
'LucasKanadeStandardInterface' Suitable for holistic AAMs
'LucasKanadeLinearInterface' Suitable for linear AAMs
'LucasKanadePatchInterface' Suitable for patch-based AAMs
============================== =============================
n_iterations : `int`, optional
The number of iterations (cascades).
compute_error : `callable`, optional
The function to be used for computing the fitting error when training
each cascade.
alpha : `float`, optional
The regularization parameter.
alpha2 : `float`, optional
The regularization parameter of the Hessian matrix.
bias : `bool`, optional
Flag that controls whether to use a bias term.
"""
def __init__(self, aam_interface, n_iterations=3,
compute_error=euclidean_bb_normalised_error,
alpha=0, alpha2=0, bias=True):
super(MeanTemplateGaussNewton, self).__init__(
aam_interface, n_iterations=n_iterations,
compute_error=compute_error)
self._regressor_cls = partial(IIRLRegression, alpha=alpha,
alpha2=alpha2, bias=bias)
class ProjectOut(ParametricSupervisedDescentAlgorithm):
r"""
Base class for defining a cascaded-regression Supervised Descent Algorithm
given a trained AAM model. The algorithm uses the projected-out appearance
vectors as features in the regression.
"""
def _precompute(self):
super(ProjectOut, self)._precompute()
A = self.appearance_model.components
self.A_m = A.T[self.interface.i_mask, :]
self.pinv_A_m = np.linalg.pinv(self.A_m)
def project_out(self, J):
r"""
Projects-out the appearance subspace from a given vector or matrix.
:type: `ndarray`
"""
# Project-out appearance bases from a particular vector or matrix
return J - self.A_m.dot(self.pinv_A_m.dot(J))
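    # Note (added for clarity): the expression above computes
    # J - A_m (A_m)^+ J, i.e. it removes the component of J that lies in the
    # span of the appearance components, which is what "projecting out" the
    # appearance subspace means.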
def _compute_test_features(self, image, current_shape):
self.transform.set_target(current_shape)
i = self.interface.warp(image)
i_m = i.as_vector()[self.interface.i_mask]
# TODO: This project out could actually be cached at test time -
# but we need to think about the best way to implement this and still
# allow incrementing
e_m = i_m - self.a_bar_m
return self.project_out(e_m)
class ProjectOutNewton(ProjectOut):
r"""
Class for training a cascaded-regression Newton algorithm using Incremental
Regularized Linear Regression (:map:`IRLRegression`) given a trained AAM
model. The algorithm uses the projected-out appearance vectors as
features in the regression.
Parameters
----------
aam_interface : The AAM interface class from `menpofit.aam.algorithm.lk`.
Existing interfaces include:
============================== =============================
Class AAM
============================== =============================
'LucasKanadeStandardInterface' Suitable for holistic AAMs
'LucasKanadeLinearInterface' Suitable for linear AAMs
'LucasKanadePatchInterface' Suitable for patch-based AAMs
============================== =============================
n_iterations : `int`, optional
The number of iterations (cascades).
compute_error : `callable`, optional
The function to be used for computing the fitting error when training
each cascade.
alpha : `float`, optional
The regularization parameter.
bias : `bool`, optional
Flag that controls whether to use a bias term.
"""
def __init__(self, aam_interface, n_iterations=3,
compute_error=euclidean_bb_normalised_error,
alpha=0, bias=True):
super(ProjectOutNewton, self).__init__(
aam_interface, n_iterations=n_iterations,
compute_error=compute_error)
self._regressor_cls = partial(IRLRegression, alpha=alpha, bias=bias)
class ProjectOutGaussNewton(ProjectOut):
r"""
Class for training a cascaded-regression Gauss-Newton algorithm using
Indirect Incremental Regularized Linear Regression (:map:`IIRLRegression`)
given a trained AAM model. The algorithm uses the projected-out
appearance vectors as features in the regression.
Parameters
----------
aam_interface : The AAM interface class from `menpofit.aam.algorithm.lk`.
Existing interfaces include:
============================== =============================
'LucasKanadeStandardInterface' Suitable for holistic AAMs
'LucasKanadeLinearInterface' Suitable for linear AAMs
'LucasKanadePatchInterface' Suitable for patch-based AAMs
============================== =============================
n_iterations : `int`, optional
The number of iterations (cascades).
compute_error : `callable`, optional
The function to be used for computing the fitting error when training
each cascade.
alpha : `float`, optional
The regularization parameter.
alpha2 : `float`, optional
The regularization parameter of the Hessian matrix.
bias : `bool`, optional
Flag that controls whether to use a bias term.
"""
def __init__(self, aam_interface, n_iterations=3,
compute_error=euclidean_bb_normalised_error,
alpha=0, alpha2=0, bias=True):
super(ProjectOutGaussNewton, self).__init__(
aam_interface, n_iterations=n_iterations,
compute_error=compute_error)
self._regressor_cls = partial(IIRLRegression, alpha=alpha,
alpha2=alpha2, bias=bias)
class AppearanceWeights(ParametricSupervisedDescentAlgorithm):
r"""
Base class for defining a cascaded-regression Supervised Descent Algorithm
given a trained AAM model. The algorithm uses the projection weights of the
appearance vectors as features in the regression.
"""
def _precompute(self):
super(AppearanceWeights, self)._precompute()
A = self.appearance_model.components
A_m = A.T[self.interface.i_mask, :]
self.pinv_A_m = np.linalg.pinv(A_m)
def project(self, J):
r"""
Projects a given vector or matrix onto the appearance subspace.
:type: `ndarray`
"""
# Project a particular vector or matrix onto the appearance bases
return self.pinv_A_m.dot(J - self.a_bar_m)
def _compute_test_features(self, image, current_shape):
self.transform.set_target(current_shape)
i = self.interface.warp(image)
i_m = i.as_vector()[self.interface.i_mask]
# Project image onto the appearance model
return self.project(i_m)
class AppearanceWeightsNewton(AppearanceWeights):
r"""
Class for training a cascaded-regression Newton algorithm using Incremental
Regularized Linear Regression (:map:`IRLRegression`) given a trained AAM
model. The algorithm uses the projection weights of the appearance
vectors as features in the regression.
Parameters
----------
aam_interface : The AAM interface class from `menpofit.aam.algorithm.lk`.
Existing interfaces include:
============================== =============================
'LucasKanadeStandardInterface' Suitable for holistic AAMs
'LucasKanadeLinearInterface' Suitable for linear AAMs
'LucasKanadePatchInterface' Suitable for patch-based AAMs
============================== =============================
n_iterations : `int`, optional
The number of iterations (cascades).
compute_error : `callable`, optional
The function to be used for computing the fitting error when training
each cascade.
alpha : `float`, optional
The regularization parameter.
bias : `bool`, optional
Flag that controls whether to use a bias term.
"""
def __init__(self, aam_interface, n_iterations=3,
compute_error=euclidean_bb_normalised_error,
alpha=0, bias=True):
super(AppearanceWeightsNewton, self).__init__(
aam_interface, n_iterations=n_iterations,
compute_error=compute_error)
self._regressor_cls = partial(IRLRegression, alpha=alpha,
bias=bias)
class AppearanceWeightsGaussNewton(AppearanceWeights):
r"""
Class for training a cascaded-regression Gauss-Newton algorithm using
Indirect Incremental Regularized Linear Regression (:map:`IIRLRegression`)
given a trained AAM model. The algorithm uses the projection weights of
the appearance vectors as features in the regression.
Parameters
----------
aam_interface : The AAM interface class from `menpofit.aam.algorithm.lk`.
Existing interfaces include:
============================== =============================
'LucasKanadeStandardInterface' Suitable for holistic AAMs
'LucasKanadeLinearInterface' Suitable for linear AAMs
'LucasKanadePatchInterface' Suitable for patch-based AAMs
============================== =============================
n_iterations : `int`, optional
The number of iterations (cascades).
compute_error : `callable`, optional
The function to be used for computing the fitting error when training
each cascade.
alpha : `float`, optional
The regularization parameter.
alpha2 : `float`, optional
The regularization parameter of the Hessian matrix.
bias : `bool`, optional
Flag that controls whether to use a bias term.
"""
def __init__(self, aam_interface, n_iterations=3,
compute_error=euclidean_bb_normalised_error,
alpha=0, alpha2=0, bias=True):
super(AppearanceWeightsGaussNewton, self).__init__(
aam_interface, n_iterations=n_iterations,
compute_error=compute_error)
self._regressor_cls = partial(IIRLRegression, alpha=alpha,
alpha2=alpha2, bias=bias)
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isSubtree(self, s: TreeNode, t: TreeNode) -> bool:
if not s or not t:
return False
s1 = self.helper(s,[])
t1 = self.helper(t,[])
for i in t1:
if i not in s1:
return False
        # Join with a "," before every token so that a value like 3 cannot
        # falsely match inside a longer value like 23.
        new1 = "," + ",".join(s1)
        new2 = "," + ",".join(t1)
if new2 in new1:
return True
return False
def helper(self,root,path):
if not root:
path.append("#")
return
else:
path.append(str(root.val))
self.helper(root.left,path)
self.helper(root.right,path)
return path
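# Why the "," delimiter matters (illustrative): for s = [23, 3, 9] and
# t = [3, 3, 9], a plain "".join gives "233##9##" and "33##9##", so the
# substring test wrongly succeeds; with the leading "," the strings become
# ",23,3,#,#,9,#,#" and ",3,3,#,#,9,#,#" and the test correctly fails.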
|
"""
Contains custom asserts
"""
from ei_graph import EIGraph
def assert_node_is_item(nodeId):
if not EIGraph.nid_is_item(nodeId):
raise ValueError(
"Invalid item id: %d. Item ids are even." % nodeId
)
def assert_node_is_entity(nodeId):
if not EIGraph.nid_is_entity(nodeId):
raise ValueError(
"Invalid entity id: %d. Entity ids are odd." % nodeId
)
def assert_node_exists(nodeId, EIG):
if not EIG.has_node(nodeId):
raise ValueError("Node id %d does not exist in the graph." % nodeId)
|
# -*-coding:utf-8-*-
print('hello world')
|
import numpy as np
from cpymad.madx import Madx
import xline
# run MADX tests
mad = Madx()
mad.call("rf_multipole.madx")
# create xline rfmultipole
mad_sequence = mad.sequence["sequ_rfmultipole"]
rf_multipole_mad = mad_sequence.elements[1]
freq = rf_multipole_mad.freq * 1e6  # MAD units are MHz
knl = rf_multipole_mad.knl
pn = np.array(rf_multipole_mad.pnl) * 360  # MAD units are 2pi
lag = rf_multipole_mad.lag * 360  # MAD units are 2pi
rf_multipole = xline.elements.RFMultipole(
voltage=0, frequency=freq, lag=lag, knl=knl, ksl=[0], pn=pn, ps=[0]
)
# track xline
mad_part = xline.Particles.from_madx_track(mad)
p1 = mad_part.copy(0)
p2 = mad_part.copy(1)
p3 = p1.copy()
rf_multipole.track(p3)
# compare
p2.compare(p3)
# test conversion
line = xline.Line.from_madx_sequence(mad_sequence)
tw = mad.twiss(betx=1, bety=1, x=0.1, t=0.5)
p_mad = xline.Particles.from_madx_twiss(tw)
p_six = mad_part.copy(0)
p_out = xline.Particles.from_list(
line.track_elem_by_elem(p_six, start=False)
)
|
#!/usr/bin/env python3
# remove-duplicates.py: remove tweets with identical text from stdin
# usage: remove-duplicates.py < file
# 20201113 erikt(at)xs4all.nl
import csv
import re
import sys
seen_text = {}
csvreader = csv.reader(sys.stdin)
csvwriter = csv.writer(sys.stdout)
for row in csvreader:
    text = row[4].lower()
    text = re.sub(r"https?://\S*", "", text)
    text = re.sub(r"\\n", " ", text)
    text = re.sub(r"\s+", " ", text)
    text = re.sub(r"^rt\s+\S+:\s*", "", text)
if text.strip() not in seen_text:
csvwriter.writerow(row)
seen_text[text.strip()] = True
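# Example of the normalisation above (illustrative): after lowercasing,
# stripping URLs, literal "\n" sequences, extra whitespace and the leading
# "rt @user:" marker, a retweet like
#   "RT @user: Check https://t.co/abc   out"
# reduces to "check out", so retweets and URL variants of the same tweet
# collapse onto one key in seen_text.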
|
# Get information about the process's memory usage. Ideally this would just
# be some simple calls to resource, but that library is almost useless. The
# python standard library is usually good, but here it's terrible.
# If we wanted to introduce an external dependency we could try psutil,
# though I would avoid it if possible.
# It looks like ctypes would work for this. It has some overhead, but this
# doesn't need to be that fast (and proc reading has overhead too)
import resource, os, ctypes
def current(): return fallback([(linux_current, IOError), (mac_current, AttributeError)])
def resident(): return fallback([(linux_resident, IOError), (mac_resident, AttributeError)])
def max(): return fallback([(linux_max, IOError), (mac_max, AttributeError)])
def fallback(things, default=lambda:0):
for func, exceptions in things:
try: return func()
except exceptions: pass
return default()
#### Linux stuff ####
def linux_current():
with open("/proc/self/statm","r") as f:
return int(f.readline().split()[0])*resource.getpagesize()
def linux_resident():
with open("/proc/self/status","r") as f:
for line in f:
toks = line.split()
if toks[0] == "VmRSS:":
return int(toks[1])*1024
def linux_max():
with open("/proc/self/status","r") as f:
for line in f:
toks = line.split()
if toks[0] == "VmPeak:":
return int(toks[1])*1024
##### MacOs stuff #####
def mac_current(): return get_mac_taskinfo().virtual_size
def mac_resident(): return get_mac_taskinfo().resident_size
def mac_max(): return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss # bytes on mac
# Taskinfo stuff for memory lookups on macs
_libc = None
def get_mac_taskinfo():
# Cache libc so we avoid a file system search every time this is called
global _libc
if _libc is None: _libc = ctypes.cdll.LoadLibrary(None)
# Fail immediately if task_info doesn't exist
_libc.task_info
    # Define data structures
from ctypes import c_int, c_uint, c_ulong
class time_value_t(ctypes.Structure):
_fields_ = [("seconds", c_int), ("microseconds", c_int)]
class task_basic_info(ctypes.Structure):
_pack_ = 4
_fields_ = [("suspend_count", c_int), ("virtual_size", c_ulong), ("resident_size", c_ulong),
("user_time", time_value_t), ("system_time", time_value_t), ("policy", c_int)]
count = c_uint(ctypes.sizeof(task_basic_info)//ctypes.sizeof(c_uint))
# Define function interfaces
task_self = _libc.mach_task_self
task_self.restype = c_uint
task_self.argtypes = []
me = task_self()
task_info = _libc.task_info
task_info.restype = c_int
task_info.argtypes = [c_uint, c_uint, ctypes.POINTER(task_basic_info), ctypes.POINTER(c_uint)]
info = task_basic_info()
status = _libc.task_info(me, 5, ctypes.byref(info), ctypes.byref(count))
return info if status == 0 else None
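# Minimal usage sketch (assumption: executed as a script; the helpers fall
# back to 0 when neither the Linux proc files nor the mac task_info path is
# available):
if __name__ == "__main__":
    print("current :", current(), "bytes")
    print("resident:", resident(), "bytes")
    print("max     :", max(), "bytes")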
|
import sys
sys.path.append('../')
import boto
s3_conn = boto.connect_s3()
import multiprocessing
cores = multiprocessing.cpu_count()
import pandas as pd
from skills_utils.time import datetime_to_quarter
from skills_ml.job_postings.common_schema import JobPostingGenerator
from skills_ml.job_postings.corpora import Doc2VecGensimCorpusCreator, Word2VecGensimCorpusCreator
from skills_ml.algorithms.embedding.train import EmbeddingTrainer
def get_time_range(start='2011-01-01', freq='Q', periods=24):
return list(map(lambda x: datetime_to_quarter(x), pd.date_range(start=start, freq=freq, periods=periods)))
if __name__ == '__main__':
time_range = get_time_range(start='2011-01-01', freq='Q', periods=1)
job_postings_generator = JobPostingGenerator(s3_conn=s3_conn, quarters=time_range, s3_path='open-skills-private/job_postings_common', source="all")
corpus_generator = Word2VecGensimCorpusCreator(job_postings_generator)
trainer = EmbeddingTrainer(s3_conn=s3_conn,
corpus_generator = corpus_generator,
model_s3_path='open-skills-private/model_cache/embedding/',
batch_size=4000,
model_type='word2vec')
    # The train method takes whatever arguments gensim.models.word2vec.Word2Vec or gensim.models.doc2vec.Doc2Vec has
trainer.train(size=100, iter=4, window=8, workers=cores)
|
import os
SORTS_DIR = os.path.join(os.path.dirname(__file__), 'sorts')
TITLE = 'Python AlgoAnim'
|
# Generated by Django 3.1.1 on 2021-02-25 17:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ColDocApp', '0005_20201013_latex_macros'),
]
operations = [
migrations.RemoveField(
model_name='dcoldoc',
name='editor',
),
]
|
VERSION = 0.1
print(f"VGE {VERSION}")
|
""" library to take autodiff and execute a computation graph """
from __future__ import absolute_import
import numpy as np
# import scipy.sparse
from scipy.sparse import spmatrix, coo_matrix
from .. import ndarray
from .._base import DNNL_LIB
from ..cpu_links import array_set as cpu_array_set
from .Variable import PlaceholderOp # add for optimizer
from ..dataloader import DataloaderOp, GNNDataLoaderOp
from .AllReduceCommunicate import AllReduceCommunicateOp
from .ParameterServerCommunicate import ParameterServerCommunicateOp, ParameterServerSparsePullOp, parameterServerSparsePull_op
from .DataTransfer import DataH2DOp, DataD2HOp, DataD2HSparseOp
from .EmbeddingLookUp import EmbeddingLookUp, EmbeddingLookUp_Gradient
from . import OnesLike
from ..stream import *
from ..communicator.mpi_nccl_comm import ncclDataType_t, ncclRedOp_t, mpi_nccl_communicator
from operator import add
from functools import reduce
import ctypes
import os
from time import time
FLAG_SHOW_GRAPH = False
G_NODE_ID = 0
def path_to_lib(name):
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
lib_path = os.path.join(curr_path, '../../../build/lib/')
return os.path.join(lib_path, name)
def mpi_nccl_init():
global nccl_comm
nccl_comm = mpi_nccl_communicator()
nccl_comm.ncclInit()
device_id = nccl_comm.device_id.value
return nccl_comm, device_id
def mpi_nccl_finish(comm = None):
comm.ncclFinish()
def get_nccl_communicate():
global nccl_comm
return nccl_comm
def get_worker_communicate():
global ps_comm
return ps_comm
def worker_init():
global ps_comm
ll = ctypes.cdll.LoadLibrary
ps_comm = ll(path_to_lib("libps.so"))
ps_comm.Init()
os.environ['HEAPPROFILE'] = "./W" + str(ps_comm.rank())
def worker_finish():
ps_comm.Finalize()
def server_init():
global ps_comm
ll = ctypes.cdll.LoadLibrary
ps_comm = ll(path_to_lib("libps.so"))
ps_comm.Init()
ps_comm.StartServer()
os.environ['HEAPPROFILE'] = "./S"+ str(ps_comm.rank())
def server_finish():
ps_comm.Finalize()
def scheduler_init():
global ps_comm
ll = ctypes.cdll.LoadLibrary
ps_comm = ll(path_to_lib("libps.so"))
ps_comm.Init()
def scheduler_finish():
ps_comm.Finalize()
class AthenaConfig(object):
__slots__ = [
'eval_node_list',
'context',
'seed',
'np_rand',
'comm_mode',
'stream_mode',
'ps_comm',
'nccl_comm',
'ctx_infer_mode',
'worker_id',
'worker_num',
'comp_stream',
'nccl_stream',
'h2d_stream',
'd2h_stream',
'h2d_ops',
'd2h_ops',
'ps_map',
'dataloader_name',
'dataloader_ops',
'use_sparse_pull',
'cstable_policy',
'inference',
'enable_lazy',
'bsp',
'prefetch',
'cache_bound',
'log_path',
]
def __init__(
self,
eval_node_list,
ctx=ndarray.cpu(0),
seed=None,
comm_mode=None,
stream_mode='AllStreams',
ctx_infer_mode='use_default',
dataloader_name='',
use_sparse_pull=False,
cstable_policy=None,
inference = False,
bsp=False,
prefetch=True,
enable_lazy=True,
cache_bound=100,
log_path=None,
):
'''
context: default device context
comm_mode: communication mode, should be one of the following
None -> Single GPU
PS -> Parameter Server
            AllReduce -> MPI AllReduce
Hybrid -> Parameter Server for Sparse Parameter and MPI AllReduce for Dense Parameter
stream_mode: None or ComputeStream or AllStreams
None -> do not use any streams (deprecated, bugs exist)
ComputeStream -> only use stream for computation (deprecated, bugs exist)
AllStreams -> use 3 streams for h2d, d2h and computation
streams should be used only when is_gpu_ctx(context) is True
        ctx_infer_mode: use_default or from_prev, for nodes whose context is not specified
use_default -> use default context
from_prev -> use inputs nodes' context if possible, else use default
'''
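        # A minimal construction sketch (illustrative assumption, not from the original
        # source): `loss` and `train_op` stand for graph nodes built elsewhere with this
        # package's ops. With a CPU context, stream_mode is reset to None automatically.
        #   config = AthenaConfig(eval_node_list=[loss, train_op],
        #                         ctx=ndarray.cpu(0),
        #                         comm_mode=None)
        #   executor = Executor([loss, train_op], config=config)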
self.eval_node_list = eval_node_list
# check context
assert ctx, 'Default context should be determined.'
self.context = ctx
# variables initialization
self.seed = seed if seed else np.int64(time())
self.np_rand = np.random.RandomState(self.seed)
# get attribute of communication mode
self.comm_mode = comm_mode
self.ps_comm = None
self.nccl_comm = None
if self.comm_mode == 'PS' or self.comm_mode == 'Hybrid':
worker_init()
self.ps_comm = get_worker_communicate()
self.worker_id = os.getenv('HEAPPROFILE')
self.worker_num = int(os.environ['DMLC_NUM_WORKER']) if 'DMLC_NUM_WORKER' in os.environ else 1
self.nccl_stream = None
if self.comm_mode == "Hybrid" or self.comm_mode == "AllReduce":
if ndarray.is_gpu_ctx(ctx):
self.nccl_stream = create_stream_handle(ctx)
self.nccl_comm = get_nccl_communicate()
# check stream mode
if stream_mode is not None:
if not ndarray.is_gpu_ctx(ctx):
stream_mode = None
assert stream_mode in (None, 'ComputeStream', 'AllStreams'), \
'Stream mode should be None, ComputeStream or AllStreams'
self.stream_mode = stream_mode
# define streams
self.comp_stream = None if stream_mode is None else create_stream_handle(ctx)
if stream_mode == 'AllStreams':
self.h2d_stream = create_stream_handle(ctx)
self.d2h_stream = create_stream_handle(ctx)
else:
self.h2d_stream = None
self.d2h_stream = None
# check ctx infer mode
assert ctx_infer_mode in ('from_prev', 'use_default'), \
'Context inference mode should be from_prev or use_default.'
self.ctx_infer_mode = ctx_infer_mode
self.use_sparse_pull = use_sparse_pull if self.comm_mode == 'PS' or self.comm_mode == "Hybrid" else False
self.cstable_policy = cstable_policy if self.comm_mode == 'PS' or self.comm_mode == "Hybrid" else None
self.prefetch = prefetch if self.comm_mode == 'PS' or self.comm_mode == 'Hybrid' else False
if self.cstable_policy is not None:
self.cstable_policy = self.cstable_policy.upper()
self.use_sparse_pull = False
self.h2d_ops = {}
self.d2h_ops = {}
self.ps_map = {}
self.dataloader_name = dataloader_name
self.inference = inference
        self.enable_lazy = (not inference) and enable_lazy  # in inference (actually in PS) we do not use lazy evaluation
self.bsp = bsp
self.cache_bound = int(cache_bound)
self.log_path = log_path
if log_path is not None and (self.comm_mode == 'PS' or self.comm_mode == "Hybrid"):
assert os.path.isdir(log_path), 'Need to specify a work directory to save logs.'
self.ps_comm.startRecord(ctypes.c_char_p(bytes(log_path, 'utf-8')))
class Executor(object):
"""Executor computes values for given set of nodes in computation graph."""
def __init__(self, eval_node_list, config=None, **kargs):
"""
Parameters
----------
eval_node_list: list of nodes whose values need to be computed.
topo_order: list of nodes in topological order
node_to_shape_map: dict from node to shape of the node
node_to_arr_map: dict from node to ndarray.NDArray allocated for node
feed_shapes: shapes of feed_dict from last run(...)
"""
if config is None:
config = AthenaConfig(eval_node_list=eval_node_list, **kargs)
assert isinstance(config, AthenaConfig), 'Config type %s invalid.' % str(type(config))
self.eval_node_list = eval_node_list
self.config = config
# In this topo sort, the backward_hook will be called in backward phase;
# when previous nodes finish, the forward hook will be called.
# Can be used to add ops (if added in backward_hook, the added ops will be searched; not true in forward_hook).
# Can be used to determine context (now in forward_hook).
# Now the data transfer ops are added in forward_hook, the communicator ops (ps, allreduce) are added in backward_hook.
        if not config.inference:
topo_sort_with_hook(self.eval_node_list, self.config)
# the real topo order, considering all ops
self.topo_order = find_topo_sort(self.eval_node_list)
else: # in inference phase
            if self.config.use_sparse_pull or self.config.cstable_policy is not None:
# topo_sort_with_hook(self.eval_node_list, self.config)
# insert ps_sparse_pull_op
self.topo_order = find_topo_sort_inference(self.eval_node_list)
# fetch sparse parameter
fetch_sparse_parameter_value(self.topo_order, self.config)
else:
self.topo_order = find_topo_sort(self.eval_node_list)
# fetch dense parameter
# fetch_dense_parameter_value(self.topo_order, self.config)
# main structures, nodes' shapes and arrays
self.node_to_shape_map = {}
self.node_to_arr_map = {}
# inherit from configurations
self.comm_mode = self.config.comm_mode
self.ps_comm = self.config.ps_comm
self.nccl_comm = self.config.nccl_comm
self.comp_stream = self.config.comp_stream
self.h2d_stream = self.config.h2d_stream
self.d2h_stream = self.config.d2h_stream
self.nccl_stream = self.config.nccl_stream
self.param_psval_map = self.config.ps_map
self.dataloader_name = self.config.dataloader_name
self.use_sparse_pull = self.config.use_sparse_pull
self.cstable_policy = self.config.cstable_policy
# assisting structures, improve performance
self.need_feed_nodes = []
self.param_nodes = []
self.dataloader_nodes = []
self.computing_nodes = []
for node in self.topo_order:
if isinstance(node, DataloaderOp) or isinstance(node , GNNDataLoaderOp):
self.dataloader_nodes.append(node)
elif isinstance(node, PlaceholderOp):
if node.shape is None:
self.need_feed_nodes.append(node)
elif node.trainable:
self.param_nodes.append(node)
elif not ((self.use_sparse_pull or self.cstable_policy) and isinstance(node, EmbeddingLookUp) and self.config.prefetch):
self.computing_nodes.append(node)
self.batch_num = set([node.get_batch_num(self.dataloader_name) for node in self.dataloader_nodes])
        assert len(self.batch_num) <= 1, 'Batch nums of dataloaders do not conform.'
self.batch_num = None if len(self.batch_num) == 0 else self.batch_num.pop()
self.init_need_allocation = (self.need_feed_nodes == []) and (self.dataloader_nodes == [])
def infer_shape(self, feed_shapes):
"""Given shapes of feed_dict nodes, infer shape for all nodes in graph.
Implementation note:
Iteratively calls node.infer_shape to infer shapes.
Node shapes stored in self.node_to_shape_map.
Parameters
----------
feed_shapes: node->shapes mapping for feed_dict nodes.
"""
self.node_to_shape_map = {}
for node in self.topo_order:
if node in feed_shapes:
self.node_to_shape_map[node] = tuple(feed_shapes[node])
else:
input_shapes = [self.node_to_shape_map[n] for n in node.inputs]
cur_shape = node.infer_shape(input_shapes)
self.node_to_shape_map[node] = cur_shape if cur_shape is None else tuple(cur_shape)
# print(node.name, self.node_to_shape_map[node])
def memory_plan(self):
"""Allocates ndarray.NDArray for every node except feed_dict nodes.
Parameters
----------
"""
for node, shape in self.node_to_shape_map.items():
if isinstance(node, PlaceholderOp):
if node.tensor_value is not None:
self.node_to_arr_map[node] = node.tensor_value
elif node not in self.node_to_arr_map:
self.node_to_arr_map[node] = None
elif not isinstance(node, DataloaderOp) and not isinstance(node, GNNDataLoaderOp):
# add for OptimizerOp and ParameterServerOp
if shape is None:
self.node_to_arr_map[node] = None
continue
if isinstance(node, (EmbeddingLookUp_Gradient, DataD2HSparseOp)):
self.node_to_arr_map[node] = ndarray.IndexedSlices(dense_shape=shape)
continue
if isinstance(node, EmbeddingLookUp) and (self.use_sparse_pull or self.cstable_policy) and self.config.prefetch:
self.node_to_arr_map[node] = self.param_psval_map[node.inputs[0]]
continue
if node.on_gpu:
if node.inplace:
self.node_to_arr_map[node] = ndarray.NDArray(None)
else:
self.node_to_arr_map[node] = ndarray.empty(shape, ctx=node.ctx)
else:
self.node_to_arr_map[node] = ndarray.empty(shape, ctx=node.ctx)
def run(self, feed_dict = {}, convert_to_numpy_ret_vals=False):
"""
Parameters
----------
feed_dict: a dictionary of node->np.ndarray supplied by user.
convert_to_numpy_ret_vals: whether to convert ret vals to np.array
Returns
-------
A list of values for nodes in eval_node_list. NDArray or np.ndarray.
"""
assert len(feed_dict) == len(self.need_feed_nodes), 'Feed dict invalid.'
feed_shapes = {}
need_reallocation = self.init_need_allocation
# get feed in values
for node, value in feed_dict.items():
assert node in self.need_feed_nodes, 'Only allow feed in PlaceholderOp with no values, here got %s:%s.' % (str(type(node)), node.name)
local_shape = tuple(value.shape)
local_realloc = node not in self.node_to_shape_map or \
local_shape != self.node_to_shape_map[node]
need_reallocation = need_reallocation or local_realloc
if node.on_cpu:
assert isinstance(value, (np.ndarray, spmatrix, ndarray.NDArray)), \
"feed_dict value type not supported"
if isinstance(value, np.ndarray):
if local_realloc:
self.node_to_arr_map[node] = ndarray.empty(local_shape, ctx=node.ctx)
self.node_to_arr_map[node][:] = value
else:
self.node_to_arr_map[node] = value
else:
if isinstance(value, np.ndarray):
if local_realloc:
self.node_to_arr_map[node] = ndarray.array(value, ctx=node.ctx)
else:
self.node_to_arr_map[node][:] = value
elif isinstance(value, spmatrix):
value = coo_matrix(value)
value = ndarray.sparse_array(value.data,
(value.row, value.col), shape = local_shape, ctx=node.ctx)
self.node_to_arr_map[node] = value
elif isinstance(value, ndarray.NDArray):
if value.ctx == node.ctx:
self.node_to_arr_map[node] = value
else:
if local_realloc:
self.node_to_arr_map[node] = ndarray.empty(local_shape, ctx=node.ctx)
else:
self.node_to_arr_map[node][:] = value
elif isinstance(value, ndarray.ND_Sparse_Array):
self.node_to_arr_map[node] = value
else:
assert False, "feed_dict value type not supported"
feed_shapes[node] = local_shape
# get dataloader values
for node in self.dataloader_nodes:
local_shape = node.get_cur_shape(self.dataloader_name)
local_realloc = node not in self.node_to_shape_map or \
local_shape != self.node_to_shape_map[node]
need_reallocation = need_reallocation or local_realloc
self.node_to_arr_map[node] = node.get_arr(self.dataloader_name)
feed_shapes[node] = local_shape
# reallocation, infer shapes and allocate memory
if need_reallocation:
self.infer_shape(feed_shapes)
self.memory_plan()
# computing
for node in self.computing_nodes:
if node.on_cpu and isinstance(self.node_to_arr_map[node], ndarray.NDArray):
if DNNL_LIB['cpu_ArraySet'] and not isinstance(node, DataD2HOp):
cpu_array_set(self.node_to_arr_map[node], 0.0)
else:
                    # here we assume DNNL_LIB is not used
# self.node_to_arr_map[node][:] = np.zeros(self.node_to_shape_map[node]).astype(np.float32)
pass
input_vals = [self.node_to_arr_map[n] for n in node.inputs]
node_val = self.node_to_arr_map[node]
for n in node.inputs:
if n.event:
n.event.sync()
if isinstance(node, (ParameterServerCommunicateOp, ParameterServerSparsePullOp)):
# Here we use d2h stream in ps op, since the stream is used for d2h data transfer.
# Please take care at this part.
node.compute(input_vals, node_val, self.d2h_stream)
elif isinstance(node, AllReduceCommunicateOp):
node.compute(input_vals, node_val, self.nccl_comm, self.nccl_stream)
elif isinstance(node, DataH2DOp):
node.compute(input_vals, node_val, self.h2d_stream)
elif isinstance(node, (DataD2HOp, DataD2HSparseOp)):
node.compute(input_vals, node_val, self.d2h_stream)
else:
node.compute(input_vals, node_val, self.comp_stream)
if isinstance(node.event, Event):
# for d2h op / eval nodes / nodes before allreduce or ps nodes
node.event.record(self.comp_stream)
for n in self.eval_node_list:
# every node in eval_node_list should have an event (except dataloader/optimizer...)
if n.event:
n.event.sync()
# get results
results = [self.node_to_arr_map[n] for n in self.eval_node_list]
if convert_to_numpy_ret_vals:
for i in range(len(results)):
if results[i] is not None:
results[i] = results[i].asnumpy()
return results
def save(self, file_path):
assert os.path.isdir(file_path), 'Need to specify a work directory to save parameters.'
if self.comm_mode in (None, 'AllReduce'):
# when using allreduce, users need to specify the worker whose rank equals 0 to save
for node in self.topo_order:
if isinstance(node, PlaceholderOp) and node.trainable:
np.save(os.path.join(file_path, node.name + '.npy'), node.tensor_value.asnumpy())
else:
self.ps_comm.BarrierWorker()
if self.config.worker_id == './W0':
for node in self.topo_order:
if isinstance(node, PlaceholderOp) and node.trainable:
if node.is_embed or self.comm_mode == 'PS':
node.event.sync()
nodeid = ctypes.c_int(node.id)
self.ps_comm.SaveParam(nodeid, ctypes.c_char_p(bytes(file_path, 'utf-8')))
self.ps_comm.Wait(nodeid)
else:
np.save(os.path.join(file_path, node.name + '.npy'), node.tensor_value.asnumpy())
self.ps_comm.BarrierWorker()
def load(self, file_path):
assert os.path.isdir(file_path), 'Need to specify a work directory to load parameters.'
if self.comm_mode in (None, 'AllReduce'):
for node in self.topo_order:
if isinstance(node, PlaceholderOp) and node.trainable:
node.tensor_value[:] = np.load(os.path.join(file_path, node.name + '.npy'))
else:
self.ps_comm.BarrierWorker()
if self.config.worker_id == './W0':
for node in self.topo_order:
if isinstance(node, PlaceholderOp) and node.trainable:
if node.is_embed or self.comm_mode == 'PS':
node.event.sync()
nodeid = ctypes.c_int(node.id)
self.ps_comm.LoadParam(nodeid, ctypes.c_char_p(bytes(file_path, 'utf-8')))
node.event.update()
self.ps_comm.BarrierWorker()
for node in self.topo_order:
if isinstance(node, PlaceholderOp) and node.trainable and not node.is_embed:
if self.comm_mode == 'PS':
node.event.sync()
nodeid = ctypes.c_int(node.id)
self.ps_comm.Pull(nodeid, self.param_psval_map[node].handle)
node.event.update()
else:
node.tensor_value[:] = np.load(os.path.join(file_path, node.name + '.npy'))
elif isinstance(node, EmbeddingLookUp) and self.config.prefetch:
node.event.sync()
nodeid = ctypes.c_int(node.inputs[0].id)
self.ps_comm.SparsePull(nodeid, node.inputs[1].get_next_arr(self.dataloader_name).handle, self.param_psval_map[node.inputs[0]].handle)
node.event.update()
self.ps_comm.BarrierWorker()
def recordLoads(self):
for node in self.param_psval_map:
node.event.sync()
self.ps_comm.getLoads()
def __del__(self):
if self.comp_stream is not None:
self.comp_stream.sync()
if self.h2d_stream is not None:
self.h2d_stream.sync()
if self.d2h_stream is not None:
self.d2h_stream.sync()
if self.nccl_stream is not None:
self.nccl_stream.sync()
for node in self.param_nodes:
if node.event:
node.event.sync()
if self.comm_mode == 'PS' or self.comm_mode == 'Hybrid':
worker_finish()
def gradients(output_node, node_list):
"""Take gradient of output node with respect to each node in node_list.
Parameters
----------
output_node: output node that we are taking derivative of.
node_list: list of nodes that we are taking derivative wrt.
Returns
-------
A list of gradient values, one for each node in node_list respectively.
"""
node_to_output_grads_list = {}
node_to_output_grads_list[output_node] = [OnesLike.oneslike_op(output_node)]
node_to_output_grad = {}
# Traverse forward graph in reverse topological order
reverse_topo_order = reversed(find_topo_sort([output_node]))
for node in reverse_topo_order:
output_grad = sum_node_list(node_to_output_grads_list[node])
if output_grad is None:
for n in node.inputs:
if n not in node_to_output_grads_list:
node_to_output_grads_list[n] = []
continue
node_to_output_grad[node] = output_grad
input_grads_list = node.gradient(output_grad)
for i in range(len(node.inputs)):
if node.inputs[i] not in node_to_output_grads_list:
node_to_output_grads_list[node.inputs[i]] = []
# Calculate partial adjoint for input nodes.
node_to_output_grads_list[node.inputs[i]].append(
input_grads_list[i])
grad_node_list = [node_to_output_grad[node] for node in node_list]
return grad_node_list
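# Usage sketch for reverse-mode autodiff with this module (node names are hypothetical;
# the ops themselves come from elsewhere in this package):
#   grads = gradients(loss, [W, b])            # symbolic gradient nodes
#   executor = Executor([loss] + grads)
#   loss_val, dW, db = executor.run(feed_dict={X: x_batch, y_: y_batch})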
##################
# Helper Methods #
##################
def topo_sort_with_hook(node_list, config):
visited = set()
for node in node_list:
topo_sort_dfs_with_hook(node, visited, config)
def topo_sort_dfs_with_hook(node, visited, config):
if node in visited:
return
visited.add(node)
node.backward_hook(config)
for n in node.inputs:
topo_sort_dfs_with_hook(n, visited, config)
node.forward_hook(config)
def find_topo_sort(node_list):
"""Given a list of nodes, return a topo ordering of nodes ending in them.
A simple algorithm is to do a post-order DFS traversal on the given nodes,
going backwards based on input edges. Since a node is added to the ordering
after all its predecessors are traversed due to post-order DFS, we get a
topological sort.
"""
visited = set()
topo_order = []
for node in node_list:
topo_sort_dfs(node, visited, topo_order)
return topo_order
def topo_sort_dfs(node, visited, topo_order):
"""Post-order DFS"""
if node in visited:
return
visited.add(node)
for n in node.inputs:
topo_sort_dfs(n, visited, topo_order)
topo_order.append(node)
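# Example: for c built as c = a + b from placeholder nodes a and b, find_topo_sort([c])
# returns [a, b, c] -- every node appears after all of its inputs.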
def find_topo_sort_inference(node_list):
topo_order = find_topo_sort(node_list)
embedding_list = list()
embedding_outputs = dict()
embedding_cnt = dict()
for node in topo_order:
if isinstance(node, EmbeddingLookUp):
embedding_outputs[node] = list()
embedding_cnt[node] = 0
embedding_list.append(node)
else:
for input_node in node.inputs:
if isinstance(input_node, EmbeddingLookUp):
embedding_outputs[input_node].append(node)
embedding_cnt[input_node] += 1
# parameterServerSparsePull_op(embedding, *outputs)
topo_order_inference = list()
for node in topo_order:
topo_order_inference.append(node)
for embedding in embedding_list:
if node in embedding_outputs[embedding]:
embedding_cnt[embedding] -= 1
if embedding_cnt[embedding] == 0:
topo_order_inference.append(parameterServerSparsePull_op(embedding, embedding_outputs[embedding]))
embedding_list.remove(embedding)
return topo_order_inference
def fetch_sparse_parameter_value(node_list, config):
for node in node_list:
if isinstance(node, ParameterServerSparsePullOp):
node.forward_hook(config)
def fetch_dense_parameter_value(node_list, config):
assert config.comm_mode in ('PS', 'Hybrid')
topo_order = find_topo_sort(node_list)
val_list = []
# get var list
for node in topo_order:
if isinstance(node, PlaceholderOp) and node.trainable:
val_list.append(node)
for node in val_list:
if config.use_sparse_pull and node.is_embed:
continue
else:
pull_val = ndarray.empty(node.shape, ctx=ndarray.cpu(0))
config.ps_comm.Pull(node.id, pull_val.handle)
config.ps_map[node] = pull_val
node.tensor_value = pull_val
node.event.update()
def sum_node_list(node_list):
"""Custom sum func to avoid creating redundant nodes in Python sum func."""
node_list = [n for n in node_list if n is not None]
if node_list == []:
return None
return reduce(add, node_list)
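# e.g. sum_node_list([g1, None, g2]) drops the None and reduces with '+', yielding a
# single g1 + g2 node; it returns None if every entry is None.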
def broadcast_rule(shape_a, shape_b):
"""Return output shape of broadcast shape_a, shape_b.
e.g. broadcast_rule((3,2), (4,3,2))
returns output_shape = (4,3,2)
Check out explanations and more examples at
https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html
http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/
"""
assert(isinstance(shape_a, tuple))
assert(isinstance(shape_b, tuple))
if len(shape_a) > len(shape_b):
longer_shape, shorter_shape = shape_a, shape_b
else:
longer_shape, shorter_shape = shape_b, shape_a
len_diff = len(longer_shape) - len(shorter_shape)
for i in range(len_diff):
# pad with leading 1s
shorter_shape = (1,) + shorter_shape
assert len(shorter_shape) == len(longer_shape)
output_shape = list(longer_shape)
for i in range(len(output_shape)):
assert (shorter_shape[i] == longer_shape[i]) \
or (shorter_shape[i] == 1) \
or (longer_shape[i] == 1)
output_shape[i] = max(shorter_shape[i], longer_shape[i])
return tuple(output_shape)
|
#!/usr/bin/env python
'''
Gamma point Hartree-Fock/DFT for all-electron calculation
The default FFT-based 2-electron integrals may not be accurate enough for
all-electron calculations. It is recommended to use the MDF (mixed density
fitting) technique to improve the accuracy.
See also
examples/df/00-with_df.py
examples/df/01-auxbasis.py
examples/df/40-precomupte_df_ints.py
'''
import numpy
from pyscf.pbc import gto, scf, dft
cell = gto.M(
a = numpy.eye(3)*3.5668,
atom = '''C 0. 0. 0.
C 0.8917 0.8917 0.8917
C 1.7834 1.7834 0.
C 2.6751 2.6751 0.8917
C 1.7834 0. 1.7834
C 2.6751 0.8917 2.6751
C 0. 1.7834 1.7834
C 0.8917 2.6751 2.6751''',
basis = '6-31g',
verbose = 4,
)
mf = scf.RHF(cell).density_fit()
mf.kernel()
# Mixed density fitting is another option for all-electron calculations
mf = scf.RHF(cell).mix_density_fit()
mf.with_df.gs = [5]*3 # Tune #PWs in MDF for performance/accuracy balance
mf.kernel()
# Or use even-tempered Gaussian basis as auxiliary fitting functions.
# The following auxbasis is generated based on the expression
# alpha = a * 1.7^i i = 0..N
# where a and N are determined by the smallest and largest exponents of the AO basis.
import pyscf.df
auxbasis = pyscf.df.aug_etb(cell, beta=1.7)
mf = scf.RHF(cell).density_fit(auxbasis=auxbasis)
mf.kernel()
#
# Second order SCF solver can be used in the PBC SCF code the same way as in
# molecular calculations
#
mf = dft.RKS(cell).density_fit(auxbasis='weigend')
mf.xc = 'bp86'
mf = scf.newton(mf)
mf.kernel()
|
from django.shortcuts import render, get_object_or_404
from .models import Listing
from django.core.paginator import Paginator, EmptyPage
from .choices import _price, _states, _bedroom
# Create your views here.
def index(request):
# return render(request, 'listings/listings.jinja2', {
# 'name': 'Machado'
# })
# listings = paginator_order_by('-list_date') # fixme :)
listings = paginator_order_by_with_filter('-list_date', is_published=True) # fixme :)
# added paginator...
paginator = Paginator(listings, 2)
page = request.GET.get('page')
paged_listings = paginator.get_page(page)
context = {
"listings": paged_listings
}
return render(request, 'listings/listings.jinja2', context)
def listing(request, listing_id):
print(listing_id)
listing = get_object_or_404(Listing, pk=listing_id)
context = {
"listing": listing
}
return render(request, 'listings/listing.jinja2', context)
def search(request):
queryset_list = Listing.objects.order_by('-list_date')
## todo - refactoring this here :*)
# keywords
if 'keywords' in request.GET:
keywords = request.GET["keywords"]
if keywords:
queryset_list = queryset_list.filter(description__icontains=keywords)
# city
if 'city' in request.GET:
city = request.GET["city"]
if city:
queryset_list = queryset_list.filter(city__iexact=city)
# state
if 'state' in request.GET:
state = request.GET["state"]
if state:
queryset_list = queryset_list.filter(state__iexact=state) # fixme, attributes;
# bedrooms
if 'bedrooms' in request.GET:
bedrooms = request.GET["bedrooms"]
if bedrooms:
queryset_list = queryset_list.filter(bedrooms__lte=bedrooms) # fixme, less than or equal
# price
if 'price' in request.GET:
price = request.GET["price"]
if price:
queryset_list = queryset_list.filter(price__lte=price) # fixme, less than or equal
context = {
"state_choice": _states,
"price_choice": _price,
"bedroom_choice": _bedroom,
"listings": queryset_list,
"values": request.GET
}
return render(request, 'listings/search.jinja2', context)
### paginator - filters
def paginator_order_by(order: str):
# result data's in postgresql
return Listing.objects.order_by(order)
def paginator_order_by_with_filter(order: str, **filters):
# result data's in postgresql with filter
data = Listing.objects.order_by(order).filter(**filters)
return data
|
import argparse
import errno
import time
from io import StringIO
from pavilion import arguments
from pavilion import commands
from pavilion import plugins
from pavilion.unittest import PavTestCase
class CancelCmdTests(PavTestCase):
def setUp(self):
plugins.initialize_plugins(self.pav_cfg)
def tearDown(self):
plugins._reset_plugins()
def test_clean(self):
"""Test clean command with no arguments."""
arg_parser = arguments.get_parser()
args = arg_parser.parse_args([
'run',
'-H', 'this',
'clean_test'
])
run_cmd = commands.get_command(args.command_name)
run_cmd.silence()
run_cmd.run(self.pav_cfg, args)
args = arg_parser.parse_args([
'clean'
])
clean_cmd = commands.get_command(args.command_name)
clean_cmd.silence()
self.assertEqual(clean_cmd.run(self.pav_cfg, args), 0)
def test_clean_wait(self):
"""Test clean command after waiting for tests to finish."""
arg_parser = arguments.get_parser()
args = arg_parser.parse_args([
'run',
'-H', 'this',
'clean_test'
])
run_cmd = commands.get_command(args.command_name)
run_cmd.silence()
run_cmd.run(self.pav_cfg, args)
time.sleep(1)
args = arg_parser.parse_args([
'clean'
])
clean_cmd = commands.get_command(args.command_name)
clean_cmd.silence()
self.assertEqual(clean_cmd.run(self.pav_cfg, args), 0)
def test_clean_with_older_than_flag(self):
"""Test clean command with multiple date formats."""
arg_parser = arguments.get_parser()
args = arg_parser.parse_args([
'run',
'-H', 'this',
'clean_test'
])
run_cmd = commands.get_command(args.command_name)
run_cmd.silence()
run_cmd.run(self.pav_cfg, args)
args = arg_parser.parse_args([
'clean',
'--older-than', '5 weeks'
])
clean_cmd = commands.get_command(args.command_name)
clean_cmd.silence()
self.assertEqual(clean_cmd.run(self.pav_cfg, args), 0)
args = arg_parser.parse_args([
'run',
'-H', 'this',
'clean_test'
])
run_cmd = commands.get_command(args.command_name)
run_cmd.silence()
run_cmd.run(self.pav_cfg, args)
|
__author__ = "MetaCarta"
__copyright__ = "Copyright (c) 2006-2008 MetaCarta"
__license__ = "Clear BSD"
__version__ = "$Id: SQLite.py 606 2009-04-24 16:25:41Z brentp $"
import re
import copy
from FeatureServer.DataSource import DataSource
from vectorformats.Feature import Feature
from vectorformats.Formats import WKT
import sys
try:
import sqlite3
except ImportError:
from pysqlite2 import dbapi2 as sqlite3
class SQLite (DataSource):
"""Similar to the PostGIS datasource. Works with the
    built-in sqlite3 module in Python 2.5+, or with pysqlite2."""
wkt_linestring_match = re.compile(r'\(([^()]+)\)')
query_action_types = ['lt', 'gt', 'like', 'gte', 'lte']
query_action_sql = {'lt': '<', 'gt': '>' , 'like':'like'
, 'gte': '>=', 'lte': '<='}
def __init__(self, name, srid = 4326, srid_out = 4326, order=None, writable = True, **args):
DataSource.__init__(self, name, **args)
self.table = args.get("layer") or name
self.fid_col = 'feature_id'
self.geom_col = 'wkt_geometry'
self.order = order
self.srid = srid # not used now...
self.srid_out = srid_out # not used now...
self.db = None
self.dsn = args.get("dsn") or args.get("file")
self.writable = writable
def begin (self):
self.db = sqlite3.connect(self.dsn)
# allow both dictionary and integer index lookups.
self.db.row_factory = sqlite3.Row
        # create the table if it doesn't exist.
        if self.table not in self.tables():
c = self.db.cursor()
c.executescript(self.schema())
self.db.commit()
def tables(self):
c = self.db.cursor()
res = c.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()
return [r[0] for r in res]
def schema(self):
return """\
CREATE TABLE '%s' (
feature_id INTEGER PRIMARY KEY,
xmin INTEGER,
ymin INTEGER,
xmax INTEGER,
ymax INTEGER,
date_created DATETIME,
date_modified DATETIME,
%s VARCHAR
);
CREATE TABLE '%s_attrs' (
id INTEGER PRIMARY KEY,
feature_id INTEGER,
key VARCHAR(256),
value TEXT
);
CREATE INDEX %s_xy_idx ON %s (xmin, xmax, ymin, ymax);
CREATE INDEX %s_attrs_feature_id on %s_attrs (feature_id);
CREATE INDEX %s_attrs_%s_key on %s_attrs (key);
/* automatic timestamp, but dont override if one is sent in */
CREATE TRIGGER %s_insert_date_trigger
AFTER INSERT ON %s
BEGIN
UPDATE %s SET date_created = datetime('now', 'localtime')
WHERE feature_id = NEW.feature_id AND
NEW.date_created IS NULL;
UPDATE %s SET date_modified = datetime('now', 'localtime')
WHERE feature_id = NEW.feature_id;
END;
CREATE TRIGGER %s_update_date_trigger
/* update the main table when attrs are modified */
AFTER UPDATE ON %s_attrs
BEGIN
UPDATE %s SET date_modified = datetime('now', 'localtime')
WHERE feature_id = NEW.feature_id;
END;
""" % tuple([self.table, self.geom_col] + list((self.table,) * 15))
def commit (self):
if self.writable:
self.db.commit()
self.db.close()
def rollback (self):
if self.writable:
self.db.rollback()
self.db.close()
def column_names (self, feature):
return feature.properties.keys()
def value_formats (self, feature):
#values = ["%%(%s)s" % self.geom_col]
values = []
for key, val in feature.properties.items():
values.append(":%s" % key)
return values
def feature_predicates (self, feature):
columns = self.column_names(feature)
values = self.value_formats(feature)
predicates = []
for pair in zip(columns, values):
if pair[0] != self.geom_col:
predicates.append(" %s = %s" % pair)
else:
predicates.append(" %s = %s " % (self.geom_col, WKT.to_wkt(feature.geometry)))
return predicates
def feature_values (self, feature):
return copy.deepcopy(feature.properties)
def insert (self, action):
feature = action.feature
bbox = feature.get_bbox()
columns = ", ".join([self.geom_col,'xmin,ymin,xmax,ymax'])
values = [WKT.to_wkt(feature.geometry)] + list(bbox)
sql = "INSERT INTO \"%s\" (%s) VALUES (?,?,?,?,?)" % ( self.table, columns)
cursor = self.db.cursor()
res = cursor.execute(str(sql), values)
action.id = res.lastrowid
#self.db.commit()
insert_tuples = [(res.lastrowid, k, v) for k,v in feature.properties.items()]
sql = "INSERT INTO \"%s_attrs\" (feature_id, key, value) VALUES (?, ?, ?)" % (self.table,)
cursor.executemany(sql,insert_tuples)
#self.db.commit()
return self.select(action)
def update (self, action):
feature = action.feature
bbox = feature.get_bbox()
predicates = self.feature_predicates(feature)
# this assumes updates can not introduce new attrs.... fix?
sql = "UPDATE \"%s_attrs\" SET value = :value WHERE key = :key AND %s = %d" % (
self.table, self.fid_col, action.id )
cursor = self.db.cursor()
predicate_list = []
for i in range(0, len(predicates) - 1, 2):
predicate_list.append( dict(key=predicates[i], value=predicates[i+1]) )
cursor.executemany(str(sql), predicate_list)
# should check if changed before doing this ...
geom_sql = "UPDATE %s SET %s = ?, xmin = ?, ymin = ?, xmax = ?, ymax = ? WHERE %s = %d" \
% (self.table, self.geom_col, self.fid_col, action.id)
cursor.execute(geom_sql, [WKT.to_wkt(feature.geometry)] + list(bbox))
#self.db.commit()
return self.select(action)
def delete (self, action):
sql = "DELETE FROM \"%s\" WHERE %s = :%s" % (
self.table, self.fid_col, self.fid_col )
cursor = self.db.cursor()
cursor.execute(str(sql), {self.fid_col: action.id})
sql = "DELETE FROM \"%s_attrs\" WHERE %s = :%s" % (
self.table, self.fid_col, self.fid_col )
cursor.execute(str(sql), {self.fid_col: action.id})
#self.db.commit()
return []
def select (self, action):
cursor = self.db.cursor()
features = []
sql_attrs = "SELECT key, value FROM \"%s_attrs\" WHERE feature_id = :feature_id" % (self.table,)
selection_dict = {}
if action.id is not None:
sql = "SELECT * FROM \"%s\" WHERE %s = ?" % ( self.table, self.fid_col)
cursor.execute(str(sql), (action.id,))
results = [ cursor.fetchone() ]
else:
match = Feature(props = action.attributes)
filters = match.properties.items()
sql = "SELECT DISTINCT(t.feature_id) as feature_id, t.%s as %s,\
t.%s as %s FROM \"%s\" t LEFT JOIN \"%s_attrs\" a ON a.feature_id =\
t.feature_id " % ( self.geom_col, self.geom_col, self.fid_col, self.fid_col, self.table, self.table )
select_dict = {}
if filters:
sql += "WHERE 1 "
for ii, (key, value) in enumerate(filters):
if isinstance(value, dict):
select_dict['key%i' % ii] = value['column']
select_dict['value%i' % ii] = value['value']
sql += (" AND a.key = :key%i AND a.value " + self.query_action_sql[value['type']] + " :value%i") % (ii, ii)
else:
select_dict['key%i' % ii] = key
select_dict['value%i' % ii] = value
sql += " AND a.key = :key%i AND a.value = :value%i" % (ii, ii)
bbox = ''
if action.bbox:
# skip sql interpolation as these are from calculation.
bbox = " AND %f > t.xmin \
AND t.xmax > %f \
AND %f > t.ymin \
AND t.ymax > %f "\
% (action.bbox[2], action.bbox[0], action.bbox[3], action.bbox[1])
sql += bbox
sql += self.order or ''
sql += " LIMIT %d" % (action.maxfeatures or 1000, )
if action.startfeature:
sql += " OFFSET %d" % action.startfeature
cursor.execute(str(sql), select_dict)
results = cursor.fetchall()
for row in results:
attrs = cursor.execute(sql_attrs, dict(feature_id=row['feature_id']) ).fetchall()
d = {}
#if attrs == []: continue
for attr in attrs:
d[attr[0]] = attr[1]
geom = WKT.from_wkt(row[self.geom_col])
id = row[self.fid_col]
if (geom):
features.append( Feature( id, geom, self.geom_col, self.srid_out, d ) )
return features
|
import os
import shutil
from pathlib import Path
from timeit import default_timer as timer
import h5py
import librosa
import numpy as np
import pandas as pd
import torch
from methods.data import BaseDataset, collate_fn
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils.common import float_samples_to_int16, find_key_from_value
from utils.config import get_afextractor
class Preprocessor_task2:
"""Preprocess the audio data.
1. Extract wav file and store to hdf5 file
2. Extract meta file and store to hdf5 file
"""
def __init__(self, args, cfg, dataset):
"""
Args:
args: parsed args
cfg: configurations
dataset: dataset class
"""
self.args = args
self.cfg = cfg
self.dataset = dataset
self.cfg_logmelIV = cfg['data']['logmelIV']
# Path for dataset
self.hdf5_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset']).joinpath('task2')
# Path for extraction of wav
self.data_dir_list = [
dataset.dataset_dir['task2']['dev'].joinpath('data'),
dataset.dataset_dir['task2']['train'].joinpath('data'),
dataset.dataset_dir['task2']['test'].joinpath('data')
]
data_h5_dir = self.hdf5_dir.joinpath('data').joinpath('{}fs'.format(self.cfg_logmelIV['sample_rate']))
self.data_h5_dir_list = [
data_h5_dir.joinpath('dev'),
data_h5_dir.joinpath('train'),
data_h5_dir.joinpath('test')
]
# Path for extraction of scalar
self.scalar_h5_dir = self.hdf5_dir.joinpath('scalar')
fn_scalar = '{}_sr{}_nfft{}_hop{}_mel{}.h5'.format(cfg['data']['audio_feature'],
self.cfg_logmelIV['sample_rate'], self.cfg_logmelIV['n_fft'], self.cfg_logmelIV['hop_length'], self.cfg_logmelIV['n_mels'])
self.scalar_path = self.scalar_h5_dir.joinpath(fn_scalar)
# Path for extraction of meta
self.label_dir_list = [
dataset.dataset_dir['task2']['dev'].joinpath('labels'),
dataset.dataset_dir['task2']['train'].joinpath('labels'),
dataset.dataset_dir['task2']['test'].joinpath('labels'),
]
# Path for extraction of frame label
self.meta_frame_csv_dir_list = [
self.hdf5_dir.joinpath('meta').joinpath('frame').joinpath('dev'),
self.hdf5_dir.joinpath('meta').joinpath('frame').joinpath('train'),
self.hdf5_dir.joinpath('meta').joinpath('frame').joinpath('test')
]
# Path for extraction of track label
self.meta_track_h5_dir_list = [
self.hdf5_dir.joinpath('meta').joinpath('track').joinpath('dev'),
self.hdf5_dir.joinpath('meta').joinpath('track').joinpath('train'),
]
if args.dataset_type == 'train':
self.data_dir_list = self.data_dir_list[:2]
self.data_h5_dir_list = self.data_h5_dir_list[:2]
self.label_dir_list = self.label_dir_list[:2]
self.meta_frame_csv_dir_list = self.meta_frame_csv_dir_list[:2]
elif args.dataset_type == 'test':
self.data_dir_list = self.data_dir_list[2:]
self.data_h5_dir_list = self.data_h5_dir_list[2:]
self.label_dir_list = self.label_dir_list[2:]
self.meta_frame_csv_dir_list = self.meta_frame_csv_dir_list[2:]
def extract_data(self):
""" Extract wave and store to hdf5 file
"""
        print('Converting wav files to hdf5 files......\n')
for h5_dir in self.data_h5_dir_list:
if h5_dir.is_dir():
flag = input("HDF5 folder {} is already existed, delete it? (y/n)".format(h5_dir)).lower()
if flag == 'y':
shutil.rmtree(h5_dir)
elif flag == 'n':
print("User select not to remove the HDF5 folder {}. The process will quit.\n".format(h5_dir))
return
h5_dir.mkdir(parents=True)
for idx, data_dir in enumerate(self.data_dir_list):
h5_dir = self.data_h5_dir_list[idx]
data_path = os.listdir(data_dir)
data_path_A = [i for i in data_path if i.split('.')[0].split('_')[-1]=='A']
audio_count = 0
for wav_file_A in data_path_A:
wav_file_B = wav_file_A[:-5] + 'B' + wav_file_A[-4:] #change A with B
wav_path_A = data_dir.joinpath(wav_file_A)
wav_path_B = data_dir.joinpath(wav_file_B)
data_A, _ = librosa.load(wav_path_A, sr=self.cfg_logmelIV['sample_rate'], mono=False)
data_B, _ = librosa.load(wav_path_B, sr=self.cfg_logmelIV['sample_rate'], mono=False)
# stack two ambisonics data
data = np.concatenate((data_A, data_B), axis=0)
# save to h5py
h5_file = wav_file_A.replace('_A','').replace('.wav','.h5')
h5_path = h5_dir.joinpath(h5_file)
with h5py.File(h5_path, 'w') as hf:
hf.create_dataset(name='waveform', data=float_samples_to_int16(data), dtype=np.int16)
audio_count += 1
print('{}, {}, {}'.format(audio_count, h5_path, data.shape))
def extract_frame_label(self):
""" Extract frame label for evaluating. Store to csv file.
"""
num_frames = int(self.dataset.clip_length / self.dataset.label_resolution)
        print('Converting meta files to frame label files......\n')
for meta_frame_dir in self.meta_frame_csv_dir_list:
if meta_frame_dir.is_dir():
flag = input("frame label folder {} is already existed, delete it? (y/n)".format(meta_frame_dir)).lower()
if flag == 'y':
shutil.rmtree(meta_frame_dir)
elif flag == 'n':
print("User select not to remove the frame label folder {}. The process will quit.\n".format(meta_frame_dir))
return
#quantize time stamp to step resolution
quantize = lambda x: round(float(x) / self.dataset.label_resolution)
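        # e.g. assuming label_resolution = 0.1 s, a 'Start' of 1.23 s quantizes to frame 12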
for idx, label_dir in enumerate(self.label_dir_list): # label dir
label_list = os.listdir(label_dir)
self.meta_frame_csv_dir_list[idx].mkdir(parents=True, exist_ok=True)
iterator = tqdm(enumerate(label_list), total=len(label_list), unit='it')
for idy, path in iterator: # label path
frame_label = {}
for i in range(num_frames):
frame_label[i] = []
path = label_dir.joinpath(path)
df = pd.read_csv(path)
meta_path = self.meta_frame_csv_dir_list[idx].joinpath(path.stem + '.csv')
for idz, row in df.iterrows():
#compute start and end frame position (quantizing)
start = quantize(row['Start'])
end = quantize(row['End'])
start_frame = int(start)
end_frame = int(end)
class_id = self.dataset.label_dic_task2[row['Class']] #int ID of sound class name
sound_frames = np.arange(start_frame, end_frame)
for f in sound_frames:
local_frame_label = [class_id, row['X'], row['Y'],row['Z'], idz]
frame_label[f].append(local_frame_label)
for frame in range(num_frames):
if frame_label[frame]:
for event in frame_label[frame]:
event[0] = find_key_from_value(self.dataset.label_dic_task2, event[0])[0]
with meta_path.open('a') as f:
f.write('{},{},{},{},{},{}\n'.format(frame, event[0], event[1], event[2], event[3], event[4]))
tqdm.write('{}, {}'.format(idy, meta_path))
def extract_track_label(self):
""" Extract track label for permutation invariant training. Store to h5 file
"""
num_tracks = self.dataset.max_ov
num_frames = int(self.dataset.clip_length / self.dataset.label_resolution)
num_classes = self.dataset.num_classes
#quantize time stamp to step resolution
quantize = lambda x: round(float(x) / self.dataset.label_resolution)
for idx, label_dir in enumerate(self.label_dir_list):
label_list = os.listdir(label_dir)
self.meta_track_h5_dir_list[idx].mkdir(parents=True, exist_ok=True)
iterator = tqdm(enumerate(label_list), total=len(label_list), unit='it')
for idy, path in iterator:
sed_label = np.zeros((num_frames, num_tracks, num_classes))
doa_label = np.zeros((num_frames, num_tracks, 3))
path = label_dir.joinpath(path)
df = pd.read_csv(path)
for idz, row in df.iterrows():
#compute start and end frame position (quantizing)
start = quantize(row['Start'])
end = quantize(row['End'])
start_frame = int(start)
end_frame = int(end)
class_id = self.dataset.label_dic_task2[row['Class']] #int ID of sound class name
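                    # assign this event to the first track that is still free at its start frame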
for track_idx in range(num_tracks):
if sed_label[start_frame][track_idx].sum() == 0:
sed_label[start_frame:end_frame, track_idx, class_id] = 1
doa_label[start_frame:end_frame, track_idx, 0] = row['X']
doa_label[start_frame:end_frame, track_idx, 1] = row['Y']
doa_label[start_frame:end_frame, track_idx, 2] = row['Z']
break
else:
track_idx += 1
meta_path = self.meta_track_h5_dir_list[idx].joinpath(path.stem + '.h5')
with h5py.File(meta_path, 'w') as hf:
hf.create_dataset(name='sed_label', data=sed_label, dtype=np.float32)
hf.create_dataset(name='doa_label', data=doa_label, dtype=np.float32)
tqdm.write('{}, {}'.format(idy, meta_path))
def extract_scalar(self):
""" Extract scalar and store to hdf5 file
"""
print('Extracting scalar......\n')
self.scalar_h5_dir.mkdir(parents=True, exist_ok=True)
cuda_enabled = not self.args.no_cuda and torch.cuda.is_available()
train_set = BaseDataset(self.args, self.cfg, self.dataset)
data_generator = DataLoader(
dataset=train_set,
batch_size=16,
shuffle=False,
num_workers=self.args.num_workers,
collate_fn=collate_fn,
pin_memory=True
)
af_extractor = get_afextractor(self.cfg, cuda_enabled).eval()
iterator = tqdm(enumerate(data_generator), total=len(data_generator), unit='it')
features_A = []
features_B = []
begin_time = timer()
for it, batch_sample in iterator:
if it == len(data_generator):
break
batch_x_A = batch_sample['waveform'][:,:4]
batch_x_B = batch_sample['waveform'][:,4:]
            batch_x_A.requires_grad = False
            batch_x_B.requires_grad = False
if cuda_enabled:
batch_x_A = batch_x_A.cuda(non_blocking=True)
batch_x_B = batch_x_B.cuda(non_blocking=True)
batch_y_A = af_extractor(batch_x_A).transpose(0, 1) # (C,N,T,F)
batch_y_B = af_extractor(batch_x_B).transpose(0, 1) # (C,N,T,F)
C, _, _, F = batch_y_A.shape
features_A.append(batch_y_A.reshape(C, -1, F).cpu().numpy()) # (C, N*T, F)
features_B.append(batch_y_B.reshape(C, -1, F).cpu().numpy()) # (C, N*T, F)
iterator.close()
features_A = np.concatenate(features_A, axis=1)
features_B = np.concatenate(features_B, axis=1)
mean_A = []
mean_B = []
std_A = []
std_B = []
for ch in range(C):
mean_A.append(np.mean(features_A[ch], axis=0, keepdims=True))
std_A.append(np.std(features_A[ch], axis=0, keepdims=True))
mean_B.append(np.mean(features_B[ch], axis=0, keepdims=True))
std_B.append(np.std(features_B[ch], axis=0, keepdims=True))
mean_A = np.stack(mean_A)[None, ...]
std_A = np.stack(std_A)[None, ...]
mean_B = np.stack(mean_B)[None, ...]
std_B = np.stack(std_B)[None, ...]
mean = np.concatenate((mean_A, mean_B), axis=1)
std = np.concatenate((std_A, std_B), axis=1)
# save to h5py
with h5py.File(self.scalar_path, 'w') as hf:
hf.create_dataset(name='mean', data=mean, dtype=np.float32)
hf.create_dataset(name='std', data=std, dtype=np.float32)
print("\nScalar saved to {}\n".format(str(self.scalar_path)))
print("Extacting scalar finished! Time spent: {:.3f} s\n".format(timer() - begin_time))
|
from __future__ import absolute_import
# development system imports
import datetime
import os
import random
import uuid
from datetime import date, timedelta
from decimal import Decimal
from hickup.users.managers import UsersManager
# from django.db.models.fields.related import ManyToManyField
# Third partie imports
from countries_plus.models import Country
from django_resized import ResizedImageField
from dateutil import relativedelta
# django imports
from django.contrib.auth.models import AbstractUser
from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator
from django.db.models import (
CASCADE,
SET_NULL,
BooleanField,
CharField,
DateField,
DateTimeField,
DecimalField,
EmailField,
FileField,
ForeignKey,
GenericIPAddressField,
ImageField,
OneToOneField,
SlugField,
TextChoices,
ManyToManyField,
TextField,
URLField,
UUIDField,
)
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from model_utils.models import TimeStampedModel
from tinymce.models import HTMLField
class User(AbstractUser):
"""Default user for hickup."""
MODERATOR = 'MODERATOR'
QUESTIONER = 'QUESTIONER'
HELPER = 'HELPER'
ROLE_CHOICES = (
('', 'Role'),
(MODERATOR, 'Moderator'),
(QUESTIONER, 'Questioner'),
(HELPER, 'Helper'),
)
#: First and last name do not cover name patterns around the globe
role = CharField(_("User Role"), choices=ROLE_CHOICES, default=QUESTIONER, blank=True, null=True, max_length=255)
objects = UsersManager()
def fullname(self):
if self.first_name and self.last_name:
return f"{self.first_name} {self.last_name}"
else:
return f"{self.username}"
def get_absolute_url(self):
"""Get url for user's detail view.
Returns:
str: URL for user detail.
"""
return reverse("users:detail", kwargs={"username": self.username})
# Image upload folders
def get_filename_ext(filepath):
base_name = os.path.basename(filepath)
name, ext = os.path.splitext(base_name)
return name, ext
def profile_image(instance, filename):
new_filename = random.randint(1, 3910209312)
name, ext = get_filename_ext(filename)
final_filename = "{new_filename}{ext}".format(new_filename=new_filename, ext=ext)
return "user-profile-photo/{new_filename}/{final_filename}".format(
new_filename=new_filename, final_filename=final_filename
)
class Profile(TimeStampedModel):
SEX = (
("", "Gender"),
("Male", "MALE"),
("Female", "FEMALE"),
)
MARITAL = (
("", "Marital"),
("Single", "Single"),
("Married", "Married"),
("Divorced", "Divorced"),
("Seperated", "Seperated"),
)
STATES = (
("", "States"),
("Abia", "Abia"),
("Adamawa", "Adamawa"),
("Akwa Ibom", "Akwa Ibom"),
("Anambra", "Anambra"),
("Bauchi", "Bauchi"),
("Bayelsa", "Bayelsa"),
("Benue", "Benue"),
("Borno", "Borno"),
("Cross River", "Cross River"),
("Delta", "Delta"),
("Ebonyi", "Ebonyi"),
("Enugu", "Enugu"),
("Edo", "Edo"),
("Ekiti", "Ekiti"),
("Gombe", "Gombe"),
("Imo", "Imo"),
("Jigawa", "Jigawa"),
("Kaduna", "Kaduna"),
("Kano", "Kano"),
("Katsina", "Katsina"),
("Kebbi", "Kebbi"),
("Kogi", "Kogi"),
("Kwara", "Kwara"),
("Lagos", "Lagos"),
("Nasarawa", "Nasarawa"),
("Niger", "Niger"),
("Ogun", "Ogun"),
("Ondo", "Ondo"),
("Osun", "Osun"),
("Oyo", "Oyo"),
("Plateau", "Plateau"),
("Rivers", "Rivers"),
("Sokoto", "Sokoto"),
("Taraba", "Taraba"),
("Yobe", "Yobe"),
("Zamfara", "Zamfara"),
)
# REGEX Expressions for validation
SSN_REGEX = "^(?!666|000|9\\d{2})\\d{3}-(?!00)\\d{2}-(?!0{4}\\d{4}$)"
NUM_REGEX = "^[0-9]*$"
ABC_REGEX = "^[A-Za-z]*$"
user = OneToOneField(User, on_delete=CASCADE, related_name='profile')
    # symmetrical=False: if I follow you, you don't automatically follow me back
follows = ManyToManyField('self', related_name='followed_by', symmetrical=False)
image = ResizedImageField(size=[500, 300], quality=75, crop=['middle', 'center'], upload_to=profile_image, force_format='JPEG')
gender = CharField(_("Gender"), max_length=7, blank=True, null=True, choices=SEX)
dob = DateField(_("Date of Birth"), blank=True, null=True)
marital = CharField(
_("Marital Status"), max_length=10, blank=True, null=True, choices=MARITAL
)
phone_no = CharField(_("Phone Number"), blank=True, null=True, max_length=13)
@property
def age(self):
TODAY = datetime.date.today()
if self.dob:
return "%s" % relativedelta.relativedelta(TODAY, self.dob).years
else:
return None
def __str__(self):
        return self.user.fullname()
class Meta:
managed = True
verbose_name = "Profile"
verbose_name_plural = "Profiles"
ordering = ["-created", "-modified"]
# this creates the user profile with a user and makes referencing the user.profile easier
User.profile = property(lambda u:Profile.objects.get_or_create(user=u)[0])
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import sys
import re
import json
import argparse
import traceback
import socket
# python 2 and 3 version compatibility
if sys.version_info.major == 2:
import urllib2
else:
import urllib.request as urllib2
# parse args
parser = argparse.ArgumentParser()
parser.add_argument('--host_name', default=False, help='host name which must be updated')
parser.add_argument('--config', default=sys.argv[0] + '.conf', help='config file location')
parser.add_argument('--address', help='use this address and skip ip detection from ssh')
parser.add_argument('--debug', default=False, help='enable debug mode')
args = parser.parse_args()
class Ddns:
    '''Class for DNS record changes via the namesilo.com API'''
def __init__(self, args):
# get config from json file
self.conf = json.load(open(args.config))
# add parsed args to config dictionary
for key in vars(args):
self.conf[key] = vars(args)[key]
self.file_name = self.conf['log_directory'] + '/' + self.conf['host_name']
def _get_last_data(self):
        '''Read data from the last run, if it exists'''
if not os.path.isdir(self.conf['log_directory']):
os.mkdir(self.conf['log_directory'], int('0755', base=8))
# read last ip of host if data exist
if not os.path.isfile(self.file_name):
self.last_ip = False
return
with open(self.file_name, 'r') as data_file:
self.last_ip = data_file.readline().strip()
def _get_own_ip(self):
'''Get own IP-address if updating own record'''
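        # A UDP connect() sends no packets; it only selects the outgoing interface,
        # so getsockname() returns this host's primary local IP address.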
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('10.255.255.255', 1))
self.ip = s.getsockname()[0]
s.close()
def _validate_ip(self):
'''Validate IP-address'''
nums = self.ip.split('.')
for n in nums:
num = int(n)
            if num < 0 or num > 255:
print('IP: {0} is not valid'.format(self.ip))
sys.exit(1)
def _get_data(self):
'''Get IP-address of remote client'''
if args.address:
if args.address == 'me':
self._get_own_ip()
else:
self.ip = args.address
self._validate_ip()
else:
client = os.environ['SSH_CONNECTION']
self.ip = client.split()[0]
def _write_data(self):
'''Write IP-address of remote client to file for future usage'''
with open(self.file_name, 'w') as data_file:
data_file.write(self.ip)
def _is_host_allowed(self):
        '''Check whether the host is allowed for DNS registration'''
if self.conf['host_name'] not in self.conf['allowed_hostnames']:
if self.conf['debug']:
print(self.conf['host_name'] + ' not in allowed hosts')
sys.exit(0)
def _prepare_url(self):
        '''Prepare the URL for the hosting API according to the config data'''
update_record_url = self.conf['api_url'] + 'dnsUpdateRecord?version=1&type=xml&key={0}&domain={1}&rrid={2}&rrtype={3}&rrhost={4}&rrvalue={5}&rrttl={6}'
add_record_url = self.conf['api_url'] + 'dnsAddRecord?version=1&type=xml&key={0}&domain={1}&rrtype={3}&rrhost={4}&rrvalue={5}&rrttl={6}'
if self.record_id:
base_url = update_record_url
else:
base_url = add_record_url
self.url = base_url.format(self.conf['user_key'], self.conf['root_domain'], self.record_id, self.conf['entry_type'], self.conf['host_name'], self.ip, self.conf['entry_ttl'])
def _use_api(self):
        '''Request the resulting URL and read the response'''
self.req = req = urllib2.Request(self.url, None, self.conf['http_headers'])
self.resp = resp = urllib2.urlopen(req)
self.resp_page = resp.read()
def _write_log(self, message):
'''Write message to log file'''
with open(self.file_name + '.log', 'w') as data_file:
data_file.write(message + '\n')
def is_address_changed(self):
'''Check if address changed from last run'''
if self.last_ip != self.ip:
return True
def _get_domain_list(self):
        '''Get the current domain list via the hosting API'''
domain_pat = re.compile(r'<resource_record><record_id>(?P<id>[a-zA-Z0-9]+)</record_id><type>[A-Z]+</type><host>' +
'.'.join((self.conf['host_name'], self.conf['root_domain'])) +
r'</host><value>[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}</value>' +
r'<ttl>[0-9]+</ttl><distance>[0-9]+</distance></resource_record>')
self.url = self.conf['api_url'] + 'dnsListRecords?version=1&type=xml&key={0}&domain={1}'.format(self.conf['user_key'], self.conf['root_domain'])
self.req = req = urllib2.Request(self.url, None, self.conf['http_headers'])
self.resp = resp = urllib2.urlopen(req)
for line in resp:
line = bytes.decode(line)
parsed_line = domain_pat.search(line)
if parsed_line:
self.record_id = parsed_line.groupdict().get('id')
if not self.record_id:
                    self._write_log('domain found but record id parsing failed\n')
sys.exit(0)
else:
self.record_id = False
def run(self):
'''Run all things together'''
self._get_last_data()
self._get_data()
if self.is_address_changed():
self._get_domain_list()
self._is_host_allowed()
self._prepare_url()
self._use_api()
self._write_log(bytes.decode(self.resp_page) + '\n')
self._write_data()
else:
            self._write_log("client's ip has not changed since last run\n")
sys.exit(0)
if __name__ == '__main__':
# run in debug mode if debug option is set
if args.debug:
C = Ddns(args)
C.run()
else:
try:
C = Ddns(args)
C.run()
except:
pass
|
##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import unittest
import random
import imath
import IECore
import IECoreScene
class SmoothSmoothSkinningWeightsOpTest( unittest.TestCase ) :
def mesh( self ) :
vertsPerFace = IECore.IntVectorData( [ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 ] )
vertexIds = IECore.IntVectorData( [
0, 1, 3, 2, 2, 3, 5, 4, 4, 5, 7, 6, 6, 7, 9, 8,
8, 9, 11, 10, 10, 11, 13, 12, 12, 13, 15, 14, 14, 15, 1, 0,
1, 15, 13, 3, 3, 13, 11, 5, 5, 11, 9, 7, 14, 0, 2, 12,
12, 2, 4, 10, 10, 4, 6, 8
] )
return IECoreScene.MeshPrimitive( vertsPerFace, vertexIds )
def createSSD( self, offsets, counts, indices, weights ) :
names = IECore.StringVectorData( [ "|joint1", "|joint1|joint2", "|joint1|joint2|joint3" ] )
poses = IECore.M44fVectorData( [
imath.M44f( 1, -0, 0, -0, -0, 1, -0, 0, 0, -0, 1, -0, -0, 2, -0, 1 ),
imath.M44f( 1, -0, 0, -0, -0, 1, -0, 0, 0, -0, 1, -0, -0, 0, -0, 1 ),
imath.M44f( 1, -0, 0, -0, -0, 1, -0, 0, 0, -0, 1, -0, -0, -2, -0, 1 )
] )
return IECoreScene.SmoothSkinningData( names, poses, offsets, counts, indices, weights )
def original( self ) :
offsets = IECore.IntVectorData( [ 0, 1, 2, 4, 6, 7, 8, 10, 12, 14, 16, 17, 18, 20, 22, 23 ] )
counts = IECore.IntVectorData( [ 1, 1, 2, 2, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2, 1, 1 ] )
indices = IECore.IntVectorData( [ 0, 0, 0, 1, 0, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 0, 1, 0, 1, 0, 0 ] )
weights = IECore.FloatVectorData( [
1, 1, 0.8, 0.2, 0.8, 0.2, 1, 1, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 1, 1, 0.8, 0.2, 0.8, 0.2, 1, 1
] )
return self.createSSD( offsets, counts, indices, weights )
def smooth1_50( self ) :
offsets = IECore.IntVectorData( [ 0, 2, 4, 6, 8, 11, 14, 16, 18, 20, 22, 25, 28, 30, 32, 34 ] )
counts = IECore.IntVectorData( [ 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2 ] )
indices = IECore.IntVectorData( [
0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2,
1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 1, 0, 1
] )
weights = IECore.FloatVectorData( [
0.966667, 0.0333333, 0.966667, 0.0333333, 0.725, 0.275, 0.725, 0.275,
0.1, 0.8375, 0.0625, 0.1, 0.8375, 0.0625, 0.583333, 0.416667,
0.583333, 0.416667, 0.583333, 0.416667, 0.583333, 0.416667, 0.1, 0.8375,
0.0625, 0.1, 0.8375, 0.0625, 0.725, 0.275, 0.725, 0.275,
0.966667, 0.0333333, 0.966667, 0.0333333
] )
return self.createSSD( offsets, counts, indices, weights )
def smooth1_100( self ) :
offsets = IECore.IntVectorData( [ 0, 2, 4, 6, 8, 11, 14, 16, 18, 20, 22, 25, 28, 30, 32, 34 ] )
counts = IECore.IntVectorData( [ 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2 ] )
indices = IECore.IntVectorData( [
0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2,
1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 1, 0, 1
] )
weights = IECore.FloatVectorData( [
0.933333, 0.0666667, 0.933333, 0.0666667, 0.65, 0.35, 0.65, 0.35,
0.2, 0.675, 0.125, 0.2, 0.675, 0.125, 0.666667, 0.333333,
0.666667, 0.333333, 0.666667, 0.333333, 0.666667, 0.333333, 0.2, 0.675,
0.125, 0.2, 0.675, 0.125, 0.65, 0.35, 0.65, 0.35,
0.933333, 0.0666667, 0.933333, 0.0666667
] )
return self.createSSD( offsets, counts, indices, weights )
def smooth3_30( self ) :
offsets = IECore.IntVectorData( [ 0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45 ] )
counts = IECore.IntVectorData( [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 ] )
indices = IECore.IntVectorData( [
0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2,
0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2
] )
weights = IECore.FloatVectorData( [
0.933725, 0.0659938, 0.00028125, 0.933725, 0.0659938, 0.00028125, 0.691672, 0.301016,
0.0073125, 0.691672, 0.301016, 0.0073125, 0.145912, 0.767439, 0.0866484, 0.145912,
0.767439, 0.0866484, 0.0161625, 0.6094, 0.374438, 0.0161625, 0.6094, 0.374438,
0.0161625, 0.6094, 0.374438, 0.0161625, 0.6094, 0.374438, 0.145912, 0.767439,
0.0866484, 0.145912, 0.767439, 0.0866484, 0.691672, 0.301016, 0.0073125, 0.691672,
0.301016, 0.0073125, 0.933725, 0.0659938, 0.00028125, 0.933725, 0.0659938, 0.00028125
] )
return self.createSSD( offsets, counts, indices, weights )
def smoothSelectVerts( self ) :
offsets = IECore.IntVectorData( [ 0, 1, 2, 4, 6, 9, 10, 12, 14, 16, 18, 21, 24, 26, 28, 29 ] )
counts = IECore.IntVectorData( [ 1, 1, 2, 2, 3, 1, 2, 2, 2, 2, 3, 3, 2, 2, 1, 1 ] )
indices = IECore.IntVectorData( [
0, 0, 0, 1, 0, 1, 0, 1, 2, 1, 1, 2, 1, 2, 1, 2,
1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 0
] )
weights = IECore.FloatVectorData( [
1, 1, 0.725, 0.275, 0.725, 0.275, 0.1, 0.8375, 0.0625,
1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.1,
0.8375, 0.0625, 0.1, 0.8375, 0.0625, 0.725, 0.275, 0.8, 0.2, 1, 1
] )
return self.createSSD( offsets, counts, indices, weights )
def smoothWithLocks( self ) :
offsets = IECore.IntVectorData( [ 0, 1, 2, 5, 8, 10, 12, 14, 16, 18, 20, 22, 24, 27, 30, 31 ] )
counts = IECore.IntVectorData( [ 1, 1, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 1, 1 ] )
indices = IECore.IntVectorData( [
0, 0, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2,
1, 2, 1, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 0
] )
weights = IECore.FloatVectorData( [
1, 1, 0.8, 0.193898, 0.00610161, 0.8, 0.193898, 0.00610161,
0.902086, 0.0979137, 0.902086, 0.0979137, 0.624712, 0.375288, 0.624712, 0.375288,
0.624712, 0.375288, 0.624712, 0.375288, 0.902086, 0.0979137, 0.902086, 0.0979137,
0.8, 0.193898, 0.00610161, 0.8, 0.193898, 0.00610161, 1, 1
] )
return self.createSSD( offsets, counts, indices, weights )
def testTypes( self ) :
""" Test SmoothSmoothSkinningWeightsOp types"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
self.assertEqual( type(op), IECoreScene.SmoothSmoothSkinningWeightsOp )
self.assertEqual( op.typeId(), IECoreScene.TypeId.SmoothSmoothSkinningWeightsOp )
op.parameters()['input'].setValue( IECore.IntData(1) )
self.assertRaises( RuntimeError, op.operate )
def testSmooth1_0( self ) :
""" Test SmoothSmoothSkinningWeightsOp with 1 iteration and 0.0 smooth-ratio"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mesh'].setValue( self.mesh() )
op.parameters()['smoothingRatio'].setValue( 0.0 )
op.parameters()['iterations'].setValue( 1 )
op.parameters()['applyLocks'].setValue( False )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertEqual( result, ssd )
def testSmooth1_100( self ) :
""" Test SmoothSmoothSkinningWeightsOp with 1 iteration and 1.0 smooth-ratio"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mesh'].setValue( self.mesh() )
op.parameters()['smoothingRatio'].setValue( 1.0 )
op.parameters()['iterations'].setValue( 1 )
op.parameters()['applyLocks'].setValue( False )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
smooth = self.smooth1_100()
self.assertEqual( result.influenceNames(), smooth.influenceNames() )
self.assertEqual( result.influencePose(), smooth.influencePose() )
self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
smoothWeights = smooth.pointInfluenceWeights()
for i in range( 0, resultWeights.size() ) :
self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )
def testSmooth1_50( self ) :
""" Test SmoothSmoothSkinningWeightsOp with 1 iteration and 0.5 smooth-ratio"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mesh'].setValue( self.mesh() )
op.parameters()['smoothingRatio'].setValue( 0.5 )
op.parameters()['iterations'].setValue( 1 )
op.parameters()['applyLocks'].setValue( False )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
smooth = self.smooth1_50()
self.assertEqual( result.influenceNames(), smooth.influenceNames() )
self.assertEqual( result.influencePose(), smooth.influencePose() )
self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
smoothWeights = smooth.pointInfluenceWeights()
for i in range( 0, resultWeights.size() ) :
self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )
def testSmooth3_30( self ) :
""" Test SmoothSmoothSkinningWeightsOp with 3 iterations and 0.3 smooth-ratio"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mesh'].setValue( self.mesh() )
op.parameters()['smoothingRatio'].setValue( 0.3 )
op.parameters()['iterations'].setValue( 3 )
op.parameters()['applyLocks'].setValue( False )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
smooth = self.smooth3_30()
self.assertEqual( result.influenceNames(), smooth.influenceNames() )
self.assertEqual( result.influencePose(), smooth.influencePose() )
self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
smoothWeights = smooth.pointInfluenceWeights()
for i in range( 0, resultWeights.size() ) :
self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )
def testLocks( self ) :
""" Test SmoothSmoothSkinningWeightsOp locking mechanism"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mesh'].setValue( self.mesh() )
op.parameters()['smoothingRatio'].setValue( 0.3 )
op.parameters()['iterations'].setValue( 3 )
op.parameters()['applyLocks'].setValue( True )
op.parameters()['influenceLocks'].setValue( IECore.BoolVectorData( [ True, False, False ] ) )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
smooth = self.smoothWithLocks()
self.assertEqual( result.influenceNames(), smooth.influenceNames() )
self.assertEqual( result.influencePose(), smooth.influencePose() )
self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
smoothWeights = smooth.pointInfluenceWeights()
for i in range( 0, resultWeights.size() ) :
self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )
# make sure locked weights did not change
dop = IECoreScene.DecompressSmoothSkinningDataOp()
dop.parameters()['input'].setValue( result )
decompressedResult = dop.operate()
dop.parameters()['input'].setValue( ssd )
decompressedOrig = dop.operate()
resultIndices = decompressedResult.pointInfluenceIndices()
resultWeights = decompressedResult.pointInfluenceWeights()
origWeights = decompressedOrig.pointInfluenceWeights()
for i in range( 0, resultWeights.size() ) :
if resultIndices[i] == 0 :
self.assertAlmostEqual( resultWeights[i], origWeights[i], 6 )
# make sure the result is normalized
nop = IECoreScene.NormalizeSmoothSkinningWeightsOp()
nop.parameters()['input'].setValue( result )
normalized = nop.operate()
self.assertEqual( result.influenceNames(), normalized.influenceNames() )
self.assertEqual( result.influencePose(), normalized.influencePose() )
self.assertEqual( result.pointIndexOffsets(), normalized.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), normalized.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), normalized.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
normalizedWeights = normalized.pointInfluenceWeights()
for i in range( 0, resultWeights.size() ) :
self.assertAlmostEqual( resultWeights[i], normalizedWeights[i], 6 )
def testVertexSelection( self ) :
""" Test SmoothSmoothSkinningWeightsOp using selected vertices"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['mesh'].setValue( self.mesh() )
op.parameters()['smoothingRatio'].setValue( 0.5 )
op.parameters()['iterations'].setValue( 1 )
op.parameters()['applyLocks'].setValue( False )
op.parameters()['vertexIndices'].setFrameListValue( IECore.FrameList.parse( "2-4,10-12" ) )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
smooth = self.smoothSelectVerts()
self.assertEqual( result.influenceNames(), smooth.influenceNames() )
self.assertEqual( result.influencePose(), smooth.influencePose() )
self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
smoothWeights = smooth.pointInfluenceWeights()
for i in range( 0, resultWeights.size() ) :
self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )
# make sure only selected vertices changed
dop = IECoreScene.DecompressSmoothSkinningDataOp()
dop.parameters()['input'].setValue( result )
decompressedResult = dop.operate()
dop.parameters()['input'].setValue( ssd )
decompressedOrig = dop.operate()
resultOffsets = decompressedResult.pointIndexOffsets()
resultCounts = decompressedResult.pointInfluenceCounts()
resultIndices = decompressedResult.pointInfluenceIndices()
resultWeights = decompressedResult.pointInfluenceWeights()
origWeights = decompressedOrig.pointInfluenceWeights()
nonSelectedVerts = [ x for x in range( 0, resultOffsets.size() ) if x not in op.parameters()['vertexIndices'].getFrameListValue().asList() ]
for i in nonSelectedVerts :
for j in range( 0, resultCounts[i] ) :
current = resultOffsets[i] + j
self.assertAlmostEqual( resultWeights[current], origWeights[current], 6 )
def testErrorStates( self ) :
""" Test SmoothSmoothSkinningWeightsOp with various error states"""
ssd = self.original()
op = IECoreScene.SmoothSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
# bad mesh
op.parameters()['mesh'].setValue( IECore.IntData(1) )
self.assertRaises( RuntimeError, op.operate )
# wrong number of verts
op.parameters()['mesh'].setValue( op.parameters()['mesh'].defaultValue )
self.assertRaises( RuntimeError, op.operate )
# wrong number of locks
op.parameters()['mesh'].setValue( self.mesh() )
op.parameters()['applyLocks'].setValue( True )
op.parameters()['influenceLocks'].setValue( IECore.BoolVectorData( [ True, False, True, False ] ) )
self.assertRaises( RuntimeError, op.operate )
# invalid vertex ids
op.parameters()['applyLocks'].setValue( False )
op.parameters()['vertexIndices'].setFrameListValue( IECore.FrameList.parse( "10-18" ) )
self.assertRaises( RuntimeError, op.operate )
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 12:47:46 2018
TODO: Convert this all to a class!
@author: amirbitran
"""
import numpy as np
import matplotlib.pyplot as plt
import joblib
class Kinetic_model:
def __init__(self, protein_name, lengths, rates=[], connectivity=[], Temperatures = [],Clusters = [],folding_rate_paths=[], tiebreak = 'Less folded', omit_transitions = []):
"""
Initialize a co-translational folding kinetic model for a given
protein_name with folding/unfolding rates at different lengths given by
list/array lengths
        There are two ways to initialize the rates:
1. One possibility is you input a list of rate matrices...this is a
list of array-types where each array is the rates matrix for a
particular length of the protein. Each array can potentially have
multiple pages due to multiple temperatures
Note: If you choose this option, you also need to provide a value for
        connectivity, which is a list of lists where element i,j tells you, if
        the protein is at cluster j at the ith length and you synthesize to the
        (i+1)st length, which cluster the protein now goes to.
Note, in this case, you must also input the clusters! And the temperatures!
2. Alternatively, you can input a list of paths, each of which
contains a file with folding info. The info in these files
is then used to construct the rates matrix.
Note: If you choose this option, you can either specify connectivity
yourself as in option 1, or leave that variable blank and the
        algorithm will infer connectivity by finding pairs of clusters at
        different lengths with maximal structural similarity
The optional argument tiebreak involves computing connectivity
If cluster i from length L is equally similar to clusters j and k
at length L+1, then if tiebreak = 'Less folded', you connect to
whichever of j or k is less folded
Otherwise, connect to the more folded one
        omit_transitions is a list of pairs giving transitions you want to keep
        out of the kinetic model, for instance if the folding rate prediction is poor
        For instance, you may set omit_transitions = [(3,0)],
        in which case transitions from 3 to 0, and vice versa, are both omitted
"""
self.protein_name=protein_name
self.lengths=lengths
min_len=min(lengths)
Nclusters=[] #A list, the ith element tells you how many clusters the ith length has
#Clusters=[] #A list of lists, each of which contains the clusters for a given length
        self.Infer_connectivity = (len(connectivity) == 0)  #set explicitly to False when connectivity is supplied, so the attribute always exists
if len(rates)!=0: #we use the provided list of folding rates -- just convert to arrays first
for r in range(len(rates)):
rates[r]=np.atleast_3d(np.array(rates[r])) #we make the arrays 3 dimensional (even if therr is only one page per array) to be consistent with the possibitliy that there can be multiple temperatures
#self.rates = rates
if len(connectivity)==0 and len(rates)>1:
print('Error! Connectivity must be specified if you provide a custom rate matrix with multiple lengths')
#Temperatures=[]
#self.Clusters = Clusters
for n in range(len(rates)):
Nclusters.append(len(Clusters[n]))
elif len(folding_rate_paths)!=0: #we construct the rate matrix using information in the directories
#print('moo')
Temperatures = [] #A list of lists, the ith element tells you the temperatures at the ith length
for n, path in enumerate(folding_rate_paths):
folding_info = joblib.load(path)
folding_rates = folding_info['folding rates']
unfolding_rates = folding_info['unfolding rates']
clusters = folding_info['clusters']
temperatures = folding_info['eq temperatures']
transmat = folding_rates.transpose((1,0,2))*lengths[n]/min_len +unfolding_rates.transpose((1,0,2))*lengths[n]/min_len #all rates are reweighed based on the shortest protein, essentially converting to monte carlo "sweeps"
Temperatures.append(temperatures)
if len(omit_transitions)>0:
for duple in omit_transitions:
i = duple[0]
j = duple[1]
transmat[i,j]=0
transmat[j,i]=0
                #Note: we transpose the folding_rates and unfolding_rates matrices because in those matrices, folding_rates[i,j] means the rate of transitioning from i to j
                #But in a typical transition matrix, element [i,j] actually means the rate of going from j to i
                #Enforce that each column of the rate matrix sums to 0 (the diagonal absorbs the total outflow from each state)
for t in range(np.shape(transmat)[2]):
for i in range(np.shape(transmat)[0]):
transmat[i,i,t] = -np.sum(transmat[:, i, t])
rates.append(transmat)
Nclusters.append(len(clusters))
Clusters.append(clusters)
else:
print('Error! Need to either specify a rate matrix, or input a list of directories with folding rate information')
self.folding_rate_paths=folding_rate_paths
self.clusters = Clusters
self.Nclusters = Nclusters
self.tiebreak = tiebreak
if self.Infer_connectivity:
self.compute_connectivity(self.tiebreak)
else:
self.connectivity=connectivity
self.rates=rates
self.Temperatures=Temperatures
self.Cleared_extraneous=False
def compute_connectivity(self, tiebreak):
connectivity=[]
for n, direc in enumerate(self.folding_rate_paths):
if n>0:
conn=[]
for i, cl in enumerate(old_clusters):
mean_distances=[] #mean distance from cluster i to each of the clusters j
for j, cu in enumerate(self.clusters[n]): #now, loop through all states in each cluster
dis=[]
for k in cl:
for l in cu:
dis.append( np.sum([ np.abs(int(k[vv]) - int(l[vv])) for vv in range(len(k)) ]))
mean_distances.append(np.mean(dis))
winners = np.where(mean_distances ==np.min(mean_distances))[0] #to which clusters in new length is cluster i from old length closest to
if tiebreak =='Less folded':
conn.append(winners[-1]) #if there is a tie, connect to the less folded cluster, to be conservative
else:
conn.append(winners[0])
connectivity.append(conn)
old_clusters=self.clusters[n]
self.connectivity=connectivity
def delete_extraneous_clusters(self):
"""
Deletes clusters that are completely isolated, and adjusts transition matrix accordingly
Learns this based on transition matrix at each length from the temperature whose index is given
by temp_index
        temp_index is currently hard-coded to 4 (T = 0.5); see the notes below
        Prior to 2/11/19: it learned at the lowest temperature at which the transition matrix
        has no Nans, since Nans are an indicator that the PMF was not trusted at a given temperature,
        so folding rates there aren't meaningful...but then I realized this isn't true since
        Nans are implemented after the fact by Compare_folding_Rates etc.
        Moreover, there was an issue with CMK that caused me to just default to temp_index = 4 (T = 0.5)
        I did not use 0 since for some reason, MarR had Nans at temp index of 0...not sure why
"""
rates=[]
clusters=[]
temp_index = 4
for n in range(len(self.lengths)):
#Deleted the following on 2/12/19 due to a weird case with CMk...see notes
#temp_index = 0 #find lowest temperature at whcih the transition matrix has no Nans
#while np.any( np.isnan(self.rates[n][:,:,temp_index])):
# temp_index+=1
TT = self.rates[n][:,:,temp_index]
keep=[]
for r in range(len(TT)):
if np.max(np.abs(TT[:,r]))>0 or np.max(np.abs(TT[r,:]))>0 : #not completely isolated
keep.append(r)
else:
print('Length {}: Deleting cluster {}'.format(self.lengths[n],self.clusters[n][r]))
keep = np.array(keep)
#keep only rows and columns for connected clusters...first keep rows, then keep columns
zz = self.rates[n][keep, :, :]
zz = zz[:, keep, :]
rates.append(zz)
self.Nclusters[n] = np.shape(zz)[0]
CC = [clust for c, clust in enumerate(self.clusters[n]) if c in keep ]
clusters.append(CC)
print('\t')
self.rates = rates
self.clusters=clusters
self.Cleared_extraneous=True
if self.Infer_connectivity:
self.compute_connectivity(self.tiebreak)
def diagonalize(self):
"""
        Diagonalize the transition matrices
"""
self.eigenvectors=[] #list of matrices containing eigenvectors for respective lengths
self.eigenvalues=[]
if not self.Cleared_extraneous: self.delete_extraneous_clusters()
for n, transmat in enumerate(self.rates):
v=np.zeros(np.shape(transmat))
w = np.zeros((np.shape(transmat)[0], np.shape(transmat)[2]))
for t in range(np.shape(transmat)[2]): #diagonalize each temperature (page) separately
TT = transmat[:,:,t]
#min_element = np.min(np.abs(TT[np.where(TT)]))
#w[:,t], v[:,:,t] = np.linalg.eig(TT/min_element)
if not np.any(np.isnan(TT)): #diagnoalize if there are no Nans, so all rates have been computed
w[:,t], v[:,:,t] = np.linalg.eig(TT)
else:
w[:,t] = np.nan
v[:,:,t] = np.nan
#w[:,t] = w[:,t]*min_element
                #Occasionally, eigenvalues end up positive due to numerical error...if they are positive and super small (like less than 10**-20), we just convert them to 0
#But if they are positive and appreciable, we print a warning
#if np.max(w[:,t])>10**(-20):
# print('Warning: Found a positive eigenvalue of magnitude {} at temperature {}'.format(np.max(w[:,t]), self.Temperatures[t]))
w[np.where(w>0)]=0
self.eigenvalues.append(w)
self.eigenvectors.append(v)
def MFPT(self, temp, Trans_times, folded_cluster=0, starting_state = -1):
"""
Compute MFPT to folded, fully synthesized state
Input a list of times that tells you how long you spend at each length
        If you have L lengths, then this list of times should only be L-1 long
        By default, you are assumed to start in the last cluster, which is generally the least folded one
        But for proteins such as MarR and CMK, you do not get reliable rates of transition from the last cluster
        to the penultimate one, since this involves folding of a simple antiparallel beta hairpin, which the algorithm struggles with
        Thus, it may be worthwhile to set starting_state = -2 for these
"""
self.delete_extraneous_clusters()
        if not hasattr(self, 'eigenvectors'):  #note: '~hasattr(...)' is always truthy; 'not' is the intended check
self.diagonalize()
#print('Length of rates list is {}'.format(len(self.rates)))
for n, transmat in enumerate(self.rates):
#print(n)
temperatures=self.Temperatures[n]
temp_index = np.where(temperatures==temp)[0][0]
if n==0: #At the initial length, you start in the unfolded state
#print('baahhh')
P0 = np.zeros(self.Nclusters[n])
P0[starting_state]=1
else: #propagate probability distribution from last timepoint of previous length to the corresponding clusters in the new length
#print('Kawkawwwww')
P0_old=P
P0=np.zeros(self.Nclusters[n])
for i, p in enumerate(P0_old):
P0[self.connectivity[n-1][i]]+=p
if n<len(self.rates)-1: #Unless you are in the final length, we compute the time evolution normally
#print('Broooo')
tau = Trans_times[n]
lambdas = self.eigenvalues[n][:, temp_index]
lambdatau=np.array(np.dot(lambdas, tau), dtype=np.float32) #convert to float 32 helps w numerics?
exp_lambdat = np.diag(np.exp(lambdatau))
v = self.eigenvectors[n][:,:, temp_index]
#Compute matrix exponential (time evolution operator)
M = np.linalg.multi_dot((v, exp_lambdat, np.linalg.inv(v)))
#Now time evolve the initial distribution
P = np.dot(M,P0)
#print(P)
else: #For the final length, we set an absorbing boundary at the folded cluster so that we can compute a MFPT
#print('MOO')
MM=transmat[folded_cluster+1:,folded_cluster+1:,temp_index] #get rid of row/column corresponding to folded cluster, so that it becomes an absorbing state outside the system
Nstates = np.shape(MM)[0]
MFPTs = np.dot( np.linalg.inv(MM.transpose()), -1*np.ones((Nstates, 1))) #MFPT as a function of starting state
MFPT = np.dot(P0[folded_cluster+1:], MFPTs)[0] #integrate over all states the product of the probability of starting in that state * MFPT given that state
return MFPT + np.sum(Trans_times)
def MFPT_and_yield(self, temp, Trans_times,folded_clusters=[0], starting_state = -1):
"""
Compute MFPT to folded, fully synthesized state, as well as how much protein ends up in folded state
after translation
Input a list of times that tells you how long you spend at each length
If you have L lengths, then this list of times should be L long,
since the last element of this list tells you how long you go
between the time where you acquire folding properties of full protein
and end of translation (ex. for MarR, it would be between 110 and 144)
        By default, you are assumed to start in the last cluster, which is generally the least folded one
        But for proteins such as MarR and CMK, you do not get reliable rates of transition from the last cluster
        to the penultimate one, since this involves folding of a simple antiparallel beta hairpin, which the algorithm struggles with
        Thus, it may be worthwhile to set starting_state = -2 for these
        folded_clusters is a list of clusters that count as folded...to compute yield, you compute the
        sum of the probabilities of being in one of these
        For the MFPT, we set an absorbing boundary at the first one listed
"""
folded_clusters = np.array(folded_clusters)
self.delete_extraneous_clusters()
        if not hasattr(self, 'eigenvectors'):  #note: '~hasattr(...)' is always truthy; 'not' is the intended check
self.diagonalize()
#print('Length of rates list is {}'.format(len(self.rates)))
for n, transmat in enumerate(self.rates):
#print(n)
temperatures=self.Temperatures[n]
temp_index = np.where(temperatures==temp)[0][0]
if n==0: #At the initial length, you start in the unfolded state
#print('baahhh')
P0 = np.zeros(self.Nclusters[n])
P0[starting_state]=1
else: #propagate probability distribution from last timepoint of previous length to the corresponding clusters in the new length
#print('Kawkawwwww')
P0_old=P
P0=np.zeros(self.Nclusters[n])
for i, p in enumerate(P0_old):
P0[self.connectivity[n-1][i]]+=p
#print('Broooo')
tau = Trans_times[n]
lambdas = self.eigenvalues[n][:, temp_index]
lambdatau=np.array(np.dot(lambdas, tau), dtype=np.float32) #convert to float 32 helps w numerics?
exp_lambdat = np.diag(np.exp(lambdatau))
v = self.eigenvectors[n][:,:, temp_index]
#Compute matrix exponential (time evolution operator)
M = np.linalg.multi_dot((v, exp_lambdat, np.linalg.inv(v)))
#Now time evolve the initial distribution
P = np.dot(M,P0)
#print(P)
if n==len(self.rates)-1: #For the final length, we set an absorbing boundary at the folded cluster so that we can compute a MFPT
#print('MOO')
MM=transmat[folded_clusters[0]+1:,folded_clusters[0]+1:,temp_index] #get rid of row/column corresponding to folded cluster, so that it becomes an absorbing state outside the system
Nstates = np.shape(MM)[0]
MFPTs = np.dot( np.linalg.inv(MM.transpose()), -1*np.ones((Nstates, 1))) #MFPT as a function of starting state
MFPT = np.dot(P0[folded_clusters[0]+1:], MFPTs)[0] #integrate over all states the product of the probability of starting in that state * MFPT given that state
return MFPT + np.sum(Trans_times), np.sum(P[folded_clusters])
def Yield(self, temp, Trans_times, folded_cluster=0, starting_state = -1):
"""
Compute amount of folded protein at the end of synthesis
        Trans_times should include the time to go from length i to i+1; in addition,
        the last entry should be the time from the final length to termination of synthesis
"""
self.delete_extraneous_clusters()
        if not hasattr(self, 'eigenvectors'):  #note: '~hasattr(...)' is always truthy; 'not' is the intended check
self.diagonalize()
#print('Length of rates list is {}'.format(len(self.rates)))
for n, transmat in enumerate(self.rates):
#print(n)
temperatures=self.Temperatures[n]
temp_index = np.where(temperatures==temp)[0][0]
if n==0: #At the initial length, you start in the unfolded state
#print('baahhh')
P0 = np.zeros(self.Nclusters[n])
P0[starting_state]=1
else: #propagate probability distribution from last timepoint of previous length to the corresponding clusters in the new length
#print('Kawkawwwww')
P0_old=P
P0=np.zeros(self.Nclusters[n])
for i, p in enumerate(P0_old):
P0[self.connectivity[n-1][i]]+=p
#print('Broooo')
tau = Trans_times[n]
lambdas = self.eigenvalues[n][:, temp_index]
lambdatau=np.array(np.dot(lambdas, tau), dtype=np.float32) #convert to float 32 helps w numerics?
exp_lambdat = np.diag(np.exp(lambdatau))
v = self.eigenvectors[n][:,:, temp_index]
#Compute matrix exponential (time evolution operator)
M = np.linalg.multi_dot((v, exp_lambdat, np.linalg.inv(v)))
#Now time evolve the distribution
P = np.dot(M,P0)
return P[folded_cluster]
def compute_time_evolution(t, eigenvalues, eigenvectors, C, state=3):
"""
Solves master equation and returns the time evolution of a desired state
Default is 3, which is completely synthesized, folded state
"""
if type(t)!=np.ndarray:
t = np.array([t])
lambda_t=np.dot(eigenvalues.reshape(len(eigenvalues),1), t.reshape(1, len(t)) ) #creates a matrix where horizontal dimension represents different time points in t. In each row of this matrix, the timepoints are all multiplied by a different eigenvalue
exp_lambda_t=np.exp(lambda_t) #exponential of the above matrix
coeff_times_exp=np.zeros(np.shape(exp_lambda_t)) #multiply each row of the above vector by the coefficient corresponding to it
for n, c in enumerate(C):
coeff_times_exp[n,:] = c*exp_lambda_t[n,:]
#at a given column t, the row i of coeff_times_exp corresponds to coefficient i multiplied by the exponential of the ith eigenvalue times time (t)
    #So now, to get the time evolution, we simply take the matrix product of our eigenvector matrix with coeff_times_exp
time_evolution=np.dot(eigenvectors,coeff_times_exp)
time_evolution=time_evolution[state,:] #time evolution of folded state
return time_evolution
def plot_timecourse(X, temp, Trans_times, clusters_to_plot, starting_state = -1, final_time = None, colors= ['r','b', 'g', 'y', 'k'], linewidth = 8,colorchange = (None, None, None), scientific = True, ylim = (), ntimes = 200, ax = None,fontsize=30, labelsize = 25, timenorm = 1, ylabel = True, xlabel= True):
"""
    Plot the time evolution of the cluster probabilities over the course of synthesis
    Trans_times should include the time to go from length i to i+1; in addition,
    the last entry should be the time from the final length to termination of synthesis
clusters_to_plot is a list of lists
the nth sublist tells you which cluster numbers you want to follow time evolution of
at the nth length
    But you may want to follow the time evolution of the summed probability of being in
    either of two clusters, in which case some element of the sublist can itself be a list
ntimes is the number of timepoints at which you compute probabilities at each length--200 by default
An important note involves color continuity...Note that the ith element of all
of the sublists of clusters_to_plot will be plotted with the same color,
so make sure to order the list clusters_to_plot accordingly (i.e. probably
you'll want to do it based on connectivity)
You can also make times on the x axis dimensionless by dividing by the slowest folding time, given by timenorm
By default, timenorm = 1, in which case you do not normalize
But if timenorm is set to something other than 1 (assumed to be slowest folding time), then everything is normalized by that slowest folding time
By default, stops plotting once synthesis is complete. But perhaps you want to keep following the timecourses after
synthesis completes. In that case, set final_time to something other than None. In that case, you will keep following time evolution
    for times up through final_time, assuming the same folding kinetics as in the final length regime.
    Note that final_time should be inputted in normalized units, so if timenorm is set to something other than 1, that final time should be divided by timenorm
Note that final_time should have a value GREATER than the time value after synthesis ends.
If it's less, the code won't do anything
scientific indicates whether you want x axis in scientific notation
A new fun feature: You can now indicate a COLORCHANGE! That is, for example in MarR, you may want unfolded cluster 1 (with only hairpin folded)
to appear gold until length regime 1, at which point it will appear red. Then you can enter the keyword colorchange = (1, 1,'red') beyond which point,
cluster 1 will appear red
    First element of the tuple is the length at which the change should occur, the second element is the cluster affected by the colorchange, and the third element is the new color
"""
#TODO: change so that if x label has like 10*(11), make that exponent appear with the same fontsize as the other text!
if ax == None: fig, ax = plt.subplots()
X.delete_extraneous_clusters()
    if not hasattr(X, 'eigenvectors'):  #note: '~hasattr(...)' is always truthy; 'not' is the intended check
X.diagonalize()
#plt.figure()
for n, transmat in enumerate(X.rates): #loop through lengths
#print(n)
if colorchange[0]!=None and colorchange[0]==n:
colors[colorchange[1]]=colorchange[2]
temperatures=X.Temperatures[n]
temp_index = np.where(temperatures==temp)[0][0]
if n==0: #At the initial length, you start in the unfolded state
#print('baahhh')
P0 = np.zeros(X.Nclusters[n])
P0[starting_state]=1
if Trans_times[n]==0:
taus = np.array([0])
else:
taus = np.arange(0, Trans_times[n],(Trans_times[n])/ntimes) #times at which you plot for this given length
taus_cum = taus
else: #propagate probability distribution from last timepoint of previous length to the corresponding clusters in the new length
#print('Kawkawwwww')
P0=np.zeros(X.Nclusters[n])
for i, p in enumerate(P_final):
P0[X.connectivity[n-1][i]]+=p
#taus = np.arange(0, Trans_times[n],(Trans_times[n])/ntimes)
if Trans_times[n]==0:
taus = np.array([0])
else:
taus = np.linspace(0, Trans_times[n],ntimes)
taus_cum = taus+np.sum(Trans_times[0:n])
lambdas = X.eigenvalues[n][:, temp_index]
v = np.array(X.eigenvectors[n][:,:,temp_index], dtype=np.float32)
C=np.linalg.solve(v, P0)
for z, clust in enumerate(clusters_to_plot[n]):
if type(clust)==list:
Pi = np.zeros((len(clust), ntimes))
for r, cc in enumerate(clust):
Pi[r, :] = compute_time_evolution(taus, lambdas, v, C, state=cc)
P_t = np.sum(Pi, axis = 0)
else:
P_t = compute_time_evolution(taus, lambdas, v, C, state=clust)
ax.plot(taus_cum/timenorm, P_t, color=colors[z], linewidth = linewidth)
#Figure out probability of each state at final time
P_final = np.zeros(np.shape(P0))
for k in range(len(P_final)):
P_final[k] = compute_time_evolution(Trans_times[n], lambdas, v, C, state=k)
if n!=0 and Trans_times[n]!=0:
ax.axvline(x=taus_cum[0]/timenorm, color = 'k', linestyle = ':', linewidth = linewidth/2) #draw a vertical line at the translation time
#Keep plotting beyond synthesis, if desired
if final_time!=None and final_time >np.max(taus_cum/timenorm):
taus = np.linspace(0, final_time*timenorm - np.max(taus_cum),ntimes)
taus_cum = taus + np.max(taus_cum)
ax.axvline(x=taus_cum[0]/timenorm, color = 'k', linestyle = ':', linewidth = linewidth/2)
P0 = P_final
C=np.linalg.solve(v, P0)
for z, clust in enumerate(clusters_to_plot[-1]): #plot the same clusters as at the final length regime
if type(clust)==list:
Pi = np.zeros((len(clust), ntimes))
for r, cc in enumerate(clust):
Pi[r, :] = compute_time_evolution(taus, lambdas, v, C, state=cc)
P_t = np.sum(Pi, axis = 0)
else:
P_t = compute_time_evolution(taus, lambdas, v, C, state=clust)
ax.plot(taus_cum/timenorm, P_t, color=colors[z], linewidth = linewidth)
ax.tick_params(labelsize = labelsize)
if scientific: ax.ticklabel_format(axis = 'x', style = 'scientific', scilimits = (0,0))
if timenorm ==1 and xlabel:
ax.set_xlabel('Time (MC sweeps)', fontsize=fontsize, labelpad = 20)
elif timenorm!=1 and xlabel:
ax.set_xlabel('Time / Slowest folding time', fontsize=fontsize, labelpad = 20)
if ylabel: ax.set_ylabel('Probability', fontsize=fontsize, labelpad = 20)
if len(ylim)>0:
ax.set_ylim(ylim)
def linemap( T_pause, T_posttrans, Total_transtimes_rare, Total_transtimes_opt, rare_slowdown_factors, slowest_folding_rate, slowdowns_to_plot = [1,3,6,9],ax = None, legend = False, label = True,labelsize = 35, fontsize = 40, slowdown_labelsize = 35, ylim = None):
"""
    As a function of the folding/synthesis rate, plots the ratio of the post-translational folding time to the co-translational folding time for various possible rare codon slowdowns
"""
if ax ==None: fig, ax = plt.subplots()
#linestyles = ['-','--', '-.', ':']
colors = ['rebeccapurple', 'darkorchid', 'darkviolet','purple', 'mediumorchid', 'violet', 'fuchsia', 'magenta' ]
linestyles = ['-', '--', '-.', ':', '-', '--', '-.', ':' ]
for zz, slowdown in enumerate(slowdowns_to_plot):
ind = np.where(rare_slowdown_factors==slowdown)[0][0]
xxx = Total_transtimes_opt[:, ind]*slowest_folding_rate
#label_value = np.round((Total_transtimes_rare[0,ind]/Total_transtimes_opt[0,ind] - 1)*100, 2)
yy = T_posttrans/T_pause[:,ind]
ax.plot(xxx, yy, linewidth = 4,label = 'Rare slowdown factor = {}'.format(slowdown-1), color = colors[zz], linestyle = linestyles[zz])
#index = int(7*len(xxx)/8) #index for position at which text will be added
index = np.argmax(yy)
if label:
if zz ==len(slowdowns_to_plot) - 1:
ax.text( xxx[index] - 0.002, T_posttrans/T_pause[index, ind]+0.8, '{}% slowdown'.format( (slowdown-1)*100 ), fontsize = slowdown_labelsize , color = colors[zz] )
elif slowdown ==1:
ax.text( xxx[index] - 0.002, T_posttrans/T_pause[index, ind]+0.8, 'No slowdown', fontsize = slowdown_labelsize , color = colors[zz] )
#elif zz ==1:
# ax.text( xxx[index] - 0.002, T_posttrans/T_pause[index, ind]+0.8, '{}% slowdown'.format( (slowdown-1)*100 ), fontsize = slowdown_labelsize , color = colors[zz] )
else:
ax.text( xxx[index] + 0.002, T_posttrans/T_pause[index, ind]+0.0, '{}%'.format( (slowdown-1)*100 ), fontsize = slowdown_labelsize , color = colors[zz] )
ax.plot(xxx, [1 for i in range(len(xxx))], linestyle = ':', color = 'k')
ax.set_xlabel('Slowest folding rate/Synthesis rate', fontsize = fontsize, labelpad = 20)
ax.set_ylabel("τ$_{post-translational}$ / τ$_{co-translational} $", fontsize = fontsize+5, labelpad = 20)
if ylim!=None:
ax.set_ylim(ylim)
ax.tick_params(labelsize = labelsize)
if legend: ax.legend(fontsize = fontsize)
plt.show()
#clusters_to_plot=[ [ [0,1]], [4, 3, 0], [5, 4, [1,2], 0 ] ]
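#A minimal usage sketch (hedged): the file names, lengths, temperature and translation times below are
#hypothetical placeholders, not values from this module. It only illustrates the call order that the
#methods above assume (construct from joblib rate files, clean up, diagonalize, then query the MFPT):
#
#    X = Kinetic_model('MarR', lengths=[75, 100, 144],
#                      folding_rate_paths=['rates_75.dat', 'rates_100.dat', 'rates_144.dat'])
#    X.delete_extraneous_clusters()
#    X.diagonalize()
#    mfpt = X.MFPT(temp=0.5, Trans_times=[1e7, 1e7])   #one translation time per length change (L-1 entries)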
|
from django.shortcuts import render
def index(request):
return render(request, 'homepage/home.html')
def about(request):
return render(request, 'homepage/about.html')
def signup(request):
return render(request, 'homepage/signup.html')
def signin(request):
return render(request, 'homepage/signin.html')
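# A minimal urls.py sketch for wiring up the views above; the module path 'homepage.views' and the
# route names are assumptions for illustration, not taken from this project:
#
#     from django.urls import path
#     from homepage import views
#
#     urlpatterns = [
#         path('', views.index, name='index'),
#         path('about/', views.about, name='about'),
#         path('signup/', views.signup, name='signup'),
#         path('signin/', views.signin, name='signin'),
#     ]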
|
from django.contrib import admin
from django.core.exceptions import ValidationError
from django.forms import ModelForm
from django.utils.translation import ugettext as _
from freq.models import *
admin.site.register(Client)
admin.site.register(ProductArea)
class FeatureRequestAdminForm(ModelForm):
def clean_priority(self):
priority = self.cleaned_data['priority']
if priority < 1:
raise ValidationError(
_('Priority values must be positive.'),
code='invalid')
client = self.cleaned_data['client']
req_count = FeatureRequest.objects.filter(client=client).count()
if not self.instance.pk:
req_count += 1
if priority > req_count:
raise ValidationError(
_('This client only has %(count)s feature request(s);'
' priority values may not exceed that number.'),
code='invalid',
params={'count': req_count})
return priority
@admin.register(FeatureRequest)
class FeatureRequestAdmin(admin.ModelAdmin):
date_hierarchy = 'target_date'
form = FeatureRequestAdminForm
list_display = ('title', 'client', 'priority', 'target_date',
'product_area')
list_filter = ('client', 'product_area')
search_fields = ('title', 'description', 'url')
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from Instanssi.admin_base.views import index
app_name = "admin_base"
urlpatterns = [
url(r'^$', index, name="index"),
]
|
import json
import argparse
def write_jsonl(data, path):
with open(path, 'w') as f:
for example in data:
json_data = json.dumps(example)
f.write(json_data + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path', required=True)
parser.add_argument('-o', '--output_path', required=True)
args = parser.parse_args()
print('Loading misinfo...')
with open(args.input_path, 'r') as f:
misinfo = json.load(f)
formatted_misinfo = []
for m_id, m in misinfo.items():
formatted_misinfo.append(
{
'id': m_id,
'contents': m['text'],
}
)
print('Writing jsonl misinfo...')
write_jsonl(formatted_misinfo, args.output_path)
print('Done!')
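# Illustration of the transformation this script performs (values are made up for the example):
#   input JSON:    {"m1": {"text": "claim text A"}, "m2": {"text": "claim text B"}}
#   output JSONL:  {"id": "m1", "contents": "claim text A"}
#                  {"id": "m2", "contents": "claim text B"}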
|
from dataclasses import dataclass
@dataclass(frozen=True)
class Item:
quantity: float
measure: str
name: str
price: float
def __str__(self):
# ' 2 grain rice @ $1.0...$2.0'
return f' {self.quantity} {self.measure} {self.name} @ ${self.price:.1f}...${self.price * self.quantity:.1f}'
class Cart:
def __init__(self):
self.items = list()
def add(self, item: Item):
self.items.append(item)
    def __format__(self, format_spec):
        if format_spec == "short":
            return ', '.join(sorted(item.name for item in self.items))
        elif format_spec == "long":
            return "\n".join(str(item) for item in self.items)
        # __format__ must return a str, so fall back to the long form for any other spec
        return "\n".join(str(item) for item in self.items)
|
from numpy.testing import assert_equal
from kernel_exp_family.tools.xvalidation import XVal
import numpy as np

def test_xval_execute_no_shuffle():
    x = XVal(N=10, num_folds=3, shuffle=False)
    for train, test in x:
        print(train, test)

def test_xval_execute_shuffle():
    x = XVal(N=10, num_folds=3, shuffle=True)
    for train, test in x:
        print(train, test)

def test_xval_result_no_shuffle():
    x = XVal(N=10, num_folds=3, shuffle=False)
    train, test = x.next()
    assert_equal(train, np.array([3, 4, 5, 6, 7, 8, 9]))
    assert_equal(test, np.array([0, 1, 2]))
    train, test = x.next()
    assert_equal(train, np.array([0, 1, 2, 6, 7, 8, 9]))
    assert_equal(test, np.array([3, 4, 5]))
    train, test = x.next()
    assert_equal(train, np.array([0, 1, 2, 3, 4, 5]))
    assert_equal(test, np.array([6, 7, 8, 9]))

def test_xval_result_shuffle():
    x = XVal(N=10, num_folds=3, shuffle=True)
    for train, test in x:
        sorted_all = np.sort(np.hstack((train, test)))
        assert_equal(sorted_all, np.arange(10))
        assert len(train) == len(np.unique(train))
        assert len(test) == len(np.unique(test))
        assert np.abs(len(test) - 10 / 3) <= 1
        assert np.abs(len(train) - (10 - 10 / 3)) <= 1
|
import random
import h5py
import numpy as np
from torch.utils.data import Dataset
class TrainDataset(Dataset):
def __init__(self, h5_file, patch_size, scale):
super(TrainDataset, self).__init__()
self.h5_file = h5_file
self.patch_size = patch_size
self.scale = scale
@staticmethod
def random_crop(lr, hr, size, scale):
lr_left = random.randint(0, lr.shape[1] - size)
lr_right = lr_left + size
lr_top = random.randint(0, lr.shape[0] - size)
lr_bottom = lr_top + size
hr_left = lr_left * scale
hr_right = lr_right * scale
hr_top = lr_top * scale
hr_bottom = lr_bottom * scale
lr = lr[lr_top:lr_bottom, lr_left:lr_right]
hr = hr[hr_top:hr_bottom, hr_left:hr_right]
return lr, hr
@staticmethod
def random_horizontal_flip(lr, hr):
if random.random() < 0.5:
lr = lr[:, ::-1, :].copy()
hr = hr[:, ::-1, :].copy()
return lr, hr
@staticmethod
def random_vertical_flip(lr, hr):
if random.random() < 0.5:
lr = lr[::-1, :, :].copy()
hr = hr[::-1, :, :].copy()
return lr, hr
@staticmethod
def random_rotate_90(lr, hr):
if random.random() < 0.5:
lr = np.rot90(lr, axes=(1, 0)).copy()
hr = np.rot90(hr, axes=(1, 0)).copy()
return lr, hr
def __getitem__(self, idx):
with h5py.File(self.h5_file, 'r') as f:
lr = f['lr'][str(idx)][::]
hr = f['hr'][str(idx)][::]
lr, hr = self.random_crop(lr, hr, self.patch_size, self.scale)
lr, hr = self.random_horizontal_flip(lr, hr)
lr, hr = self.random_vertical_flip(lr, hr)
lr, hr = self.random_rotate_90(lr, hr)
lr = lr.astype(np.float32).transpose([2, 0, 1]) / 255.0
hr = hr.astype(np.float32).transpose([2, 0, 1]) / 255.0
return lr, hr
def __len__(self):
with h5py.File(self.h5_file, 'r') as f:
return len(f['lr'])
class EvalDataset(Dataset):
def __init__(self, h5_file):
super(EvalDataset, self).__init__()
self.h5_file = h5_file
def __getitem__(self, idx):
with h5py.File(self.h5_file, 'r') as f:
lr = f['lr'][str(idx)][::].astype(np.float32).transpose([2, 0, 1]) / 255.0
hr = f['hr'][str(idx)][::].astype(np.float32).transpose([2, 0, 1]) / 255.0
return lr, hr
def __len__(self):
with h5py.File(self.h5_file, 'r') as f:
return len(f['lr'])
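# A minimal training-loop sketch, assuming a prepared HDF5 file (the path 'train_x2.h5' is a
# hypothetical placeholder) whose 'lr' and 'hr' groups are keyed by string indices as expected above:
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    train_dataset = TrainDataset('train_x2.h5', patch_size=48, scale=2)
    train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
    for lr_batch, hr_batch in train_loader:
        # with patch_size=48 and scale=2: lr_batch is (16, 3, 48, 48) and hr_batch is (16, 3, 96, 96)
        break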
|
import factory
from wallet.models import MINIMUM_ACCOUNT_BALANCE, Wallet
class WalletFactory(factory.django.DjangoModelFactory):
class Meta:
model = Wallet
django_get_or_create = ('name',)
name = factory.Sequence(lambda n: "Wallet_%03d" % n)
balance = MINIMUM_ACCOUNT_BALANCE
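# A brief usage sketch (assuming a configured Django test environment for the wallet app above):
#
#     wallet = WalletFactory.build()           # unsaved Wallet with a sequential name like 'Wallet_000'
#     wallets = WalletFactory.create_batch(3)  # three persisted wallets, each at MINIMUM_ACCOUNT_BALANCE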
|
# Create a module moeda.py containing the functions aumentar(), diminuir(), dobro() and metade().
# Also write a program that imports this module and uses some of these functions.
def aumentar(v, p):
    """Increase the value v by p percent."""
    res = v + (p * v) / 100
    return res

def diminuir(v, p):
    """Decrease the value v by p percent."""
    res = v - (p * v) / 100
    return res

def metade(v):
    """Return half of v."""
    res = v / 2
    return res

def dobro(v):
    """Return double v."""
    res = v * 2
    return res
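# A small demo of the functions above, as the exercise statement asks for (the price value is arbitrary):
if __name__ == '__main__':
    preco = 50.0
    print(f'Increasing {preco} by 10%: {aumentar(preco, 10)}')   # 55.0
    print(f'Decreasing {preco} by 10%: {diminuir(preco, 10)}')   # 45.0
    print(f'Double: {dobro(preco)}  Half: {metade(preco)}')      # 100.0 / 25.0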
|
"""
Main postgrez module
"""
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import psycopg2
from .utils import read_yaml, IteratorFile, build_copy_query
from .exceptions import (PostgrezConfigError, PostgrezConnectionError,
PostgrezExecuteError, PostgrezLoadError,
PostgrezExportError)
import os
import sys
import io
import logging
LOGGER = logging.getLogger(__name__)
## number of characters in query to display
QUERY_LENGTH = 50
## initialization defaults
DEFAULT_PORT = 5432
DEFAULT_SETUP = 'default'
DEFAULT_SETUP_PATH = '~'
class Connection(object):
"""Class which establishes connections to a PostgresSQL database. Users
have the option to provide the host, database, username, password and port
to connect to. Alternatively, they can utilize their .postgrez configuration
file (recommended).
Methods used internally by the class are prefixed with a `_`.
Attributes:
host (str): Database host address
port (int): Connection port number (defaults to 5432 if not provided)
database (str): Name of the database
user (str): Username used to authenticate
password (str, optional): Password used to authenticate
conn (psycopg2 connection): psycopg2 connection object
cursor (psycopg2 cursor): psycopg2 cursor object, associated with
the connection object
"""
def __init__(self, host=None, database=None, user=None, password=None,
port=DEFAULT_PORT, setup=DEFAULT_SETUP,
setup_path=DEFAULT_SETUP_PATH):
"""Initialize connection to postgres database. First, we look if a host,
database, username and password were provided. If they weren't, we try
and read credentials from the .postgrez config file.
Args:
host (str, optional): Database host url. Defaults to None.
database (str, optional): Database name. Defaults to None.
user (str, optional): Username. Defaults to None.
password (str, optional): Password. Defaults to None.
setup (str, optional): Name of the db setup to use in ~/.postgrez.
If no setup is provided, looks for the 'default' key in
~/.postgrez which specifies the default configuration to use.
setup_path (str, optional): Path to the .postgrez configuration
file. Defaults to '~', i.e. your home directory on Mac/Linux.
"""
self.host = host
self.database = database
self.user = user
self.password = password
self.port = port
self.setup = setup
self.setup_path = setup_path
self.conn = None
self.cursor = None
if host is None and database is None and user is None:
## Fetch attributes from file
self._get_attributes()
## Validate the parsed attributes
self._validate_attributes()
## If no errors are raised, connect to the database
self._connect()
def _get_attributes(self):
"""Read database connection parameters from ~/.postgrez.
Raises:
PostgrezConfigError: If the config file ~/.postgrez does not exist.
PostgrezConfigError: If the supplied setup variable is not in the
~/.postgrez file
"""
if self.setup_path == '~':
yaml_file = os.path.join(os.path.expanduser('~'), '.postgrez')
else:
yaml_file = os.path.join(self.setup_path, '.postgrez')
LOGGER.info('Fetching attributes from .postgrez file: %s' % yaml_file)
if os.path.isfile(yaml_file) == False:
raise PostgrezConfigError('Unable to find ~/.postgrez config file')
config = read_yaml(yaml_file)
if self.setup not in config.keys():
raise PostgrezConfigError('Setup variable %s not found in config '
'file' % self.setup)
if self.setup == 'default':
# grab the default setup key
self.setup = config[self.setup]
self.host = config[self.setup].get('host', None)
self.port = config[self.setup].get('port', 5432)
self.database = config[self.setup].get('database', None)
self.user = config[self.setup].get('user', None)
self.password = config[self.setup].get('password', None)
def _validate_attributes(self):
"""Validate that the minimum required fields were either supplied or
parsed from the .postgrez configuration file.
Raises:
PostgrezConfigError: If the minimum attributes were not supplied or
included in ~/.postgrez.
"""
if self.host is None or self.user is None or self.database is None:
raise PostgrezConfigError('Please provide a host, user and '
'database as a minimum. Please visit '
'https://github.com/ian-whitestone/postgrez for details')
def _connected(self):
"""Determine if a pscyopg2 connection or cursor has been created.
Returns:
connect_status (bool): True of a psycopg2 connection or cursor
object exists.
"""
return (True if self.conn.closed == 0 else False)
def _connect(self):
"""Create a connection to a PostgreSQL database.
"""
LOGGER.info('Establishing connection to %s database' % self.database)
self.conn = psycopg2.connect(
host=self.host,
port=self.port,
database=self.database,
user=self.user,
password=self.password
)
self.cursor = self.conn.cursor()
def _disconnect(self):
"""Close connection
"""
LOGGER.debug('Attempting to disconnect from database %s' % self.database)
self.cursor.close()
self.conn.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc, exc_tb):
"""Close the cursor and connection objects if they have been created.
This code is automatically executed after the with statement is
completed or if any error arises during the process.
Reference: https://stackoverflow.com/questions/1984325/explaining-pythons-enter-and-exit
"""
if self._connected():
self._disconnect()
class Cmd(Connection):
"""Class which handles execution of queries.
"""
def execute(self, query, query_vars=None, commit=True):
"""Execute the supplied query.
Args:
query (str): Query to be executed. Query can contain placeholders,
as long as query_vars are supplied.
query_vars (tuple, list or dict): Variables to be executed with query.
See http://initd.org/psycopg/docs/usage.html#query-parameters.
commit (bool): Commit any pending transaction to the database.
Defaults to True.
Raises:
PostgrezConnectionError: If the connection has been closed.
"""
if self._connected() == False:
raise PostgrezConnectionError('Connection has been closed')
LOGGER.info('Executing query %s...' % query[0:QUERY_LENGTH].strip())
self.cursor.execute(query, vars=query_vars)
if commit:
self.conn.commit()
def load_from_object(self, table_name, data, columns=None, null=None):
"""Load data into a Postgres table from a python list.
Args:
table_name (str): name of table to load data into.
data (list): list of tuples, where each row is a tuple
columns (list): iterable with name of the columns to import.
The length and types should match the content of the file to
read. If not specified, it is assumed that the entire table
matches the file structure. Defaults to None.
null (str): Format which nulls (or missing values) are represented.
Defaults to 'None'. If a row is passed in as
[None, 1, '2017-05-01', 25.321], it will treat the first
element as missing and inject a Null value into the database for
the corresponding column.
Raises:
PostgrezLoadError: If an error occurs while building the iterator
file.
"""
try:
LOGGER.info('Attempting to load %s records into table %s' %
(len(data), table_name))
if null is None:
null = 'None'
table_width = len(data[0])
template_string = "|".join(['{}'] * table_width)
f = IteratorFile((template_string.format(*x) for x in data))
except Exception as e:
raise PostgrezLoadError("Unable to load data to Postgres. "
"Error: %s" % e)
self.cursor.copy_from(f, table_name, sep="|", null=null,
columns=columns)
self.conn.commit()
def load_from_file(self, table_name, filename, header=True, delimiter=',',
columns=None, quote=None, null=None):
"""
Args:
table_name (str): name of table to load data into.
filename (str): name of the file
header (boolean): Specify True if the first row of the flat file
contains the column names. Defaults to True.
delimiter (str): delimiter with which the columns are separated.
Defaults to ','
columns (list): iterable with name of the columns to import.
The length and types should match the content of the file to
read. If not specified, it is assumed that the entire table
matches the file structure. Defaults to None.
quote (str): Specifies the quoting character to be used when a data
value is quoted. This must be a single one-byte character.
Defaults to None, which uses the postgres default of a single
double-quote.
null (str): Format which nulls (or missing values) are represented.
Defaults to None, which corresponds to an empty string.
If a CSV file contains a row like:
,1,2017-05-01,25.321
it will treat the first element as missing and inject a Null
value into the database for the corresponding column.
"""
LOGGER.info('Attempting to load file %s into table %s' %
(filename, table_name))
copy_query = build_copy_query('load', table_name, header=header,
columns=columns, delimiter=delimiter,
quote=quote, null=null)
with open(filename, 'r') as f:
LOGGER.info('Executing copy query\n%s' % copy_query)
self.cursor.copy_expert(copy_query, f)
self.conn.commit()
def export_to_file(self, query, filename, columns=None, delimiter=',',
header=True, null=None):
"""Export records from a table or query to a local file.
Args:
query (str): A select query or a table
columns (list): List of column names to export. columns should only
be provided if you are exporting a table
(i.e. query = 'table_name'). If query is a query to export, desired
columns should be specified in the select portion of that query
(i.e. query = 'select col1, col2 from ...'). Defaults to None.
filename (str): Filename to copy to.
delimiter (str): Delimiter to separate columns with. Defaults to ','.
header (boolean): Specify True to return the column names. Defaults
to True.
null (str): Specifies the string that represents a null value.
Defaults to None, which uses the postgres default of an
unquoted empty string.
"""
copy_query = build_copy_query('export',query, columns=columns,
delimiter=delimiter,
header=header, null=null)
LOGGER.info('Running copy_expert with\n%s\nOutputting results to %s' %
(copy_query, filename))
with open(filename, 'w') as f:
LOGGER.info('Executing copy query\n%s' % copy_query)
self.cursor.copy_expert(copy_query, f)
def export_to_object(self, query, columns=None, delimiter=',', header=True,
null=None):
"""Export records from a table or query and returns list of records.
Args:
query (str): A select query or a table_name
columns (list): List of column names to export. columns should only
be provided if you are exporting a table
(i.e. query = 'table_name'). If query is a query to export, desired
columns should be specified in the select portion of that query
(i.e. query = 'select col1, col2 from ...'). Defaults to None.
delimiter (str): Delimiter to separate columns with. Defaults to ','
header (boolean): Specify True to return the column names. Defaults
to True.
Returns:
data (list): If header is True, returns list of dicts where each
dict is in the format {col1: val1, col2:val2, ...}. Otherwise,
returns a list of lists where each list is [val1, val2, ...].
Raises:
PostgrezExportError: If an error occurs while exporting to an object.
"""
copy_query = build_copy_query('export',query, columns=columns,
delimiter=delimiter,
header=header, null=null)
data = None
try:
            LOGGER.info('Running copy_expert with\n%s\nOutputting results to '
                        'list.' % copy_query)
# stream output to local object
text_stream = io.StringIO()
self.cursor.copy_expert(copy_query, text_stream)
output = text_stream.getvalue()
# parse output
output = output.split('\n')
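            # when the copy query includes a header, output[0] holds the column
            # names; the trailing newline leaves an empty final element, which
            # the slices below drop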
cols = output[0].split(delimiter)
end_index = (-1 if len(output[1:]) > 1 else 2)
if header:
data = [{cols[i]:value for i, value in
enumerate(row.split(delimiter))}
for row in output[1:end_index]]
else:
data = [row.split(delimiter) for row in output[1:-1]]
except Exception as e:
raise PostgrezExportError('Unable to export to object. Error: %s'
% (e))
return data
|
import azure.cognitiveservices.speech as speechsdk
def get_text_from_input(input_audio_filename, speech_config):
# Creates an audio configuration that points to an audio file.
# Replace with your own audio filename.
audio_input = speechsdk.AudioConfig(filename=input_audio_filename)
# Creates a recognizer with the given settings
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_input)
print("Recognizing first result...")
# Starts speech recognition, and returns after a single utterance is recognized. The end of a
# single utterance is determined by listening for silence at the end or until a maximum of 15
# seconds of audio is processed. The task returns the recognition text as result.
# Note: Since recognize_once() returns only a single utterance, it is suitable only for single
# shot recognition like command or query.
# For long-running multi-utterance recognition, use start_continuous_recognition() instead.
result = speech_recognizer.recognize_once()
# result = speech_recognizer.start_continuous_recognition()
return result.text
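# Minimal usage sketch; the subscription key, region and audio filename below
# are placeholders, and speechsdk.SpeechConfig is the standard way to build the
# config object that get_text_from_input expects.
if __name__ == "__main__":
    example_config = speechsdk.SpeechConfig(subscription="YOUR_SUBSCRIPTION_KEY", region="westus")
    print(get_text_from_input("sample.wav", example_config))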
|
import contextlib
from collections.abc import Callable, Hashable, Iterable, Iterator
from functools import partial
from itertools import chain
from types import SimpleNamespace
from typing import Any, Optional, TypeVar, Union
import attr
from .lfu import LFU
from .lru import LRU
__all__ = [
"lru_cache_with_key",
"lfu_cache_with_key",
"apply",
"SingleThreadPoolExecutor",
]
T = TypeVar("T")
S = TypeVar("S")
# TODO Add more cache replacement policy implementation
def cache_with_key(
key: Callable[..., Hashable], maxsize: Optional[int] = 128, policy: str = "LRU"
) -> Callable[[Callable[..., T]], Callable[..., T]]:
"""
It's like the builtin `functools.lru_cache`, except that it provides customization
space for the key calculating method and the cache replacement policy.
"""
@attr.s(auto_attribs=True)
class CacheInfo:
hit: int = 0
miss: int = 0
maxsize: int = 0
currsize: int = 0
class decorator:
def __init__(self, func: Callable[..., T]) -> None:
self._func = func
if policy == "LRU":
self._cache = LRU(maxsize=maxsize)
elif policy == "LFU":
self._cache = LFU(maxsize=maxsize)
else:
raise NotImplementedError
self._hit = self._miss = 0
__slots__ = ("_func", "_cache", "_hit", "_miss")
def __call__(self, *args: Any, **kwargs: Any) -> T:
arg_key = key(*args, **kwargs)
if arg_key in self._cache:
self._hit += 1
return self._cache[arg_key]
else:
self._miss += 1
result = self._func(*args, **kwargs)
self._cache[arg_key] = result
return result
@property
def __cache__(self) -> Union[LRU, LFU]:
return self._cache
def cache_info(self) -> CacheInfo:
return CacheInfo(self._hit, self._miss, maxsize, self._cache.size) # type: ignore
def clear_cache(self) -> None:
self._cache.clear()
return decorator
lru_cache_with_key = partial(cache_with_key, policy="LRU")
lru_cache_with_key.__doc__ = "It's like the builtin `functools.lru_cache`, except that it provides customization space for the key calculating method."
lfu_cache_with_key = partial(cache_with_key, policy="LFU")
lfu_cache_with_key.__doc__ = "It's like the builtin `functools.lru_cache`, except that it provides customization space for the key calculating method, and it uses LFU, not LRU, as cache replacement policy."
def apply(fn: Callable[..., T], *args: Any, **kwargs: Any) -> T:
""" Equivalent to Haskell's $ operator """
return fn(*args, **kwargs)
def concat(lists: Iterable[list[T]]) -> list[T]:
""" Concatenate multiple lists into one list """
return list(chain(*lists))
@contextlib.contextmanager
def SingleThreadPoolExecutor() -> Iterator[SimpleNamespace]:
"Return an equivalent to ThreadPoolExecutor(max_workers=1)"
yield SimpleNamespace(map=map, submit=apply, shutdown=nullfunc)
class compose:
""" Equivalent to Haskell's . operator """
def __init__(self, fn1: Callable[[T], S], fn2: Callable[..., T]) -> None:
self._fn1 = fn1
self._fn2 = fn2
__slots__ = ("_fn1", "_fn2")
def __call__(self, *args: Any, **kwargs: Any) -> S:
return self._fn1(self._fn2(*args, **kwargs))
def nullfunc(*_: Any, **__: Any) -> None:
""" A function that does nothing """
pass
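# Usage sketch for the decorators above; the key function below is only an
# illustrative choice, and because of the relative imports this module must be
# run as a package module (python -m ...) for the demo to execute.
if __name__ == "__main__":
    @lru_cache_with_key(key=lambda n: n % 10, maxsize=4)
    def last_digit_squared(n: int) -> int:
        # arguments that share a last digit map to the same cache entry
        return (n % 10) ** 2
    print(last_digit_squared(3), last_digit_squared(13))  # the second call is a cache hit
    print(last_digit_squared.cache_info())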
|
"""
BOSH Client
-----------
Based on https://friendpaste.com/1R4PCcqaSWiBsveoiq3HSy
"""
import gzip
import socket
import base64
import httplib
import logging
import StringIO
from random import randint
from urlparse import urlparse
from xml.etree import ElementTree as ET
from puresasl.client import SASLClient
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
HTTPBIND_NS = 'http://jabber.org/protocol/httpbind'
BOSH_NS = 'urn:xmpp:xbosh'
XMPP_SASL_NS = 'urn:ietf:params:xml:ns:xmpp-sasl'
XMPP_BIND_NS = 'urn:ietf:params:xml:ns:xmpp-bind'
JABBER_CLIENT_NS = 'jabber:client'
JABBER_STREAMS_NS = 'http://etherx.jabber.org/streams'
XMPP_VERSION = '1.0'
BOSH_VERSION = '1.6'
BOSH_CONTENT = 'text/xml; charset=utf-8'
XML_LANG = 'en'
BOSH_WAIT = 60
BOSH_HOLD = 1
class BOSHClient(object):
def __init__(self, jid, password, bosh_service):
""" Initialize the client.
You must specify the Jabber ID, the corresponding password and the URL
of the BOSH service to connect to.
"""
self.log = logging.getLogger('conversejs.boshclient')
self.log.addHandler(NullHandler())
self._connection = None
self._sid = None
self.jid, self.to = jid.split('@')
self.password = password
self.bosh_service = urlparse(bosh_service)
self.rid = randint(0, 10000000)
self.log.debug('Init RID: %s' % self.rid)
self.headers = {
"Content-Type": "text/plain; charset=UTF-8",
"Accept": "text/xml",
"Accept-Encoding": "gzip, deflate"
}
self.server_auth = []
@property
def connection(self):
"""Returns an stablished connection"""
if self._connection:
return self._connection
self.log.debug('Initializing connection to %s' % (self.bosh_service.
netloc))
if self.bosh_service.scheme == 'http':
Connection = httplib.HTTPConnection
elif self.bosh_service.scheme == 'https':
Connection = httplib.HTTPSConnection
else:
# TODO: raise proper exception
raise Exception('Invalid URL scheme %s' % self.bosh_service.scheme)
self._connection = Connection(self.bosh_service.netloc, timeout=10)
self.log.debug('Connection initialized')
        # TODO: add an exceptions handler here (URL not found, etc.)
return self._connection
def close_connection(self):
if not self._connection:
self.log.debug('Trying to close connection before initializing it.')
return
self.log.debug('Closing connection')
self.connection.close()
self.log.debug('Connection closed')
        # TODO: add an exceptions handler here
def get_body(self, sid_request=False):
body = ET.Element('body')
body.set('xmlns', HTTPBIND_NS)
if sid_request:
body.set('xmlns:xmpp', BOSH_NS)
body.set('wait', unicode(BOSH_WAIT))
body.set('hold', unicode(BOSH_HOLD))
body.set('content', BOSH_CONTENT)
body.set('ver', unicode(BOSH_VERSION))
body.set('xmpp:version', unicode(XMPP_VERSION))
body.set('xml:lang', "en")
body.set('to', self.to)
if self._sid:
body.set('sid', self.sid)
body.set('rid', str(self.rid))
return body
def send_request(self, xml_stanza):
ElementType = getattr(ET, '_Element', ET.Element)
if isinstance(xml_stanza, ElementType):
xml_stanza = ET.tostring(xml_stanza)
self.log.debug('XML_STANZA: %s', xml_stanza)
self.log.debug('Sending the request')
self.connection.request("POST", self.bosh_service.path,
xml_stanza, self.headers)
response = self.connection.getresponse()
self.log.debug('Response status code: %s' % response.status)
# Increment request id:
# http://xmpp.org/extensions/xep-0124.html#rids-syntax
self.rid += 1
if response.status == 200:
data = response.read()
if response.getheader('content-encoding', '').lower() == 'gzip':
buf = StringIO.StringIO(data)
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
self.log.debug('Something wrong happened!')
return False
self.log.debug('DATA: %s', data)
return data
@property
def sid(self):
if self._sid:
return self._sid
return self.request_sid()
def request_sid(self):
""" Request a BOSH session according to
http://xmpp.org/extensions/xep-0124.html#session-request
Returns the new SID (str).
"""
if self._sid:
return self._sid
self.log.debug('Prepare to request BOSH session')
data = self.send_request(self.get_body(sid_request=True))
if not data:
return None
# This is XML. response_body contains the <body/> element of the
# response.
response_body = ET.fromstring(data)
# Get the remote Session ID
self._sid = response_body.get('sid')
self.log.debug('sid = %s' % self._sid)
# Get the longest time (s) that the XMPP server will wait before
# responding to any request.
self.server_wait = response_body.get('wait')
self.log.debug('wait = %s' % self.server_wait)
# Get the authid
self.authid = response_body.get('authid')
# Get the allowed authentication methods using xpath
search_for = '{{{0}}}features/{{{1}}}mechanisms/{{{2}}}mechanism'.format(
JABBER_STREAMS_NS, XMPP_SASL_NS, XMPP_SASL_NS
)
self.log.debug('Looking for "%s" into response body', search_for)
mechanisms = response_body.findall(search_for)
self.server_auth = []
for mechanism in mechanisms:
self.server_auth.append(mechanism.text)
self.log.debug('New AUTH method: %s' % mechanism.text)
if not self.server_auth:
self.log.debug(('The server didn\'t send the allowed '
'authentication methods'))
self._sid = None
return self._sid
def get_challenge(self, mechanism):
body = self.get_body()
auth = ET.SubElement(body, 'auth')
auth.set('xmlns', XMPP_SASL_NS)
auth.set('mechanism', mechanism)
resp_root = ET.fromstring(self.send_request(body))
challenge_node = resp_root.find('{{{0}}}challenge'.format(XMPP_SASL_NS))
if challenge_node is not None:
return challenge_node.text
return None
def send_challenge_response(self, response_plain):
"""Send a challenge response to server"""
# Get a basic stanza body
body = self.get_body()
# Create a response tag and add the response content on it
# using base64 encoding
response_node = ET.SubElement(body, 'response')
response_node.set('xmlns', XMPP_SASL_NS)
response_node.text = base64.b64encode(response_plain)
# Send the challenge response to server
resp_root = ET.fromstring(self.send_request(body))
return resp_root
def authenticate_xmpp(self):
"""Authenticate the user to the XMPP server via the BOSH connection."""
self.request_sid()
self.log.debug('Prepare the XMPP authentication')
# Instantiate a sasl object
sasl = SASLClient(
host=self.to,
service='xmpp',
username=self.jid,
password=self.password
)
# Choose an auth mechanism
sasl.choose_mechanism(self.server_auth, allow_anonymous=False)
# Request challenge
challenge = self.get_challenge(sasl.mechanism)
# Process challenge and generate response
response = sasl.process(base64.b64decode(challenge))
# Send response
resp_root = self.send_challenge_response(response)
success = self.check_authenticate_success(resp_root)
if success is None and\
resp_root.find('{{{0}}}challenge'.format(XMPP_SASL_NS)) is not None:
resp_root = self.send_challenge_response('')
return self.check_authenticate_success(resp_root)
return success
def check_authenticate_success(self, resp_root):
if resp_root.find('{{{0}}}success'.format(XMPP_SASL_NS)) is not None:
self.request_restart()
self.bind_resource()
return True
elif resp_root.find('{{{0}}}failure'.format(XMPP_SASL_NS)) is not None:
return False
return None
def bind_resource(self):
body = self.get_body()
iq = ET.SubElement(body, 'iq')
iq.set('id', 'bind_1')
iq.set('type', 'set')
iq.set('xmlns', JABBER_CLIENT_NS)
bind = ET.SubElement(iq, 'bind')
bind.set('xmlns', XMPP_BIND_NS)
self.send_request(body)
def request_restart(self):
body = self.get_body()
body.set('xmpp:restart', 'true')
body.set('xmlns:xmpp', BOSH_NS)
self.send_request(body)
def get_credentials(self):
try:
success = self.authenticate_xmpp()
except socket.error as error:
success = False
self.log.exception(error)
msg = 'Error trying to connect to bosh service: %s'
self.log.error(msg, self.bosh_service.netloc)
if not success:
return None, None, None
return u'{0}@{1}'.format(self.jid, self.to), self.sid, self.rid
if __name__ == '__main__':
import sys
if len(sys.argv) != 4:
print 'usage: {0} SERVICE_URL USERNAME PASSWORD'.format(sys.argv[0])
sys.exit(1)
c = BOSHClient(sys.argv[2], sys.argv[3], sys.argv[1])
print c.get_credentials()
c.close_connection()
|
from .group import Group
from .node import Node
__all__ = ["Node", "Group"]
|
# Find the product of the series of numbers from 1 to 10 and print the result to the screen
final_mlt = 1
for number in range(1,11):
final_mlt = final_mlt * number
    print('product of the first {:d} numbers is {:d}'.format(number, final_mlt))
print('\nThe product of all numbers from 1 to 10 is: {:d}'.format(final_mlt))
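# Cross-check with the standard library (Python 3.8+): math.prod computes the
# same product in a single call.
import math
print('Cross-check with math.prod:', math.prod(range(1, 11)))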
|
from django.contrib.auth import get_user_model
from django import forms
from django.template.defaultfilters import slugify
from djafforum.models import ForumCategory, Topic, Comment
User = get_user_model()
class CategoryForm(forms.ModelForm):
class Meta:
model = ForumCategory
exclude = ('slug', 'created_by')
def clean_title(self):
if ForumCategory.objects.filter(slug=slugify(self.cleaned_data['title'])).exclude(id=self.instance.id):
raise forms.ValidationError('Category with this Name already exists.')
return self.cleaned_data['title']
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(CategoryForm, self).__init__(*args, **kwargs)
def save(self, commit=True):
instance = super(CategoryForm, self).save(commit=False)
instance.created_by = self.user
instance.title = self.cleaned_data['title']
        instance.is_votable = str(self.cleaned_data['is_votable']) == 'True'
        instance.is_active = str(self.cleaned_data['is_active']) == 'True'
if not self.instance.id:
instance.slug = slugify(self.cleaned_data['title'])
if commit:
instance.save()
return instance
class TopicForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(TopicForm, self).__init__(*args, **kwargs)
self.fields["category"].widget.attrs = {"class": "form-control select2"}
self.fields["title"].widget.attrs = {"class": "form-control"}
self.fields["tags"].widget.attrs = {"class": "form-control tags"}
tags = forms.CharField(required=False)
class Meta:
model = Topic
fields = ("title", "category", "description", "tags")
def clean_title(self):
if Topic.objects.filter(slug=slugify(self.cleaned_data['title'])).exclude(id=self.instance.id):
raise forms.ValidationError('Topic with this Name already exists.')
return self.cleaned_data['title']
def save(self, commit=True):
instance = super(TopicForm, self).save(commit=False)
instance.title = self.cleaned_data['title']
instance.description = self.cleaned_data['description']
instance.category = self.cleaned_data['category']
if not self.instance.id:
instance.slug = slugify(self.cleaned_data['title'])
instance.created_by = self.user
instance.status = 'Published'
if commit:
instance.save()
return instance
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('comment', 'topic')
def clean_comment(self):
if self.cleaned_data['comment']:
return self.cleaned_data['comment']
raise forms.ValidationError('This field is required')
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(CommentForm, self).__init__(*args, **kwargs)
def save(self, commit=True):
instance = super(CommentForm, self).save(commit=False)
instance.comment = self.cleaned_data['comment']
instance.topic = self.cleaned_data['topic']
if not self.instance.id:
instance.commented_by = self.user
if 'parent' in self.cleaned_data.keys() and self.cleaned_data['parent']:
instance.parent = self.cleaned_data['parent']
if commit:
instance.save()
return instance
|
import argparse
import csv
import glob
import os
import sys
import random
import numpy
import torch
from bokeh import plotting
from PIL import Image
from scipy.cluster.vq import kmeans, whiten
from torchvision import transforms
def my_function(x):
t = [None] * 3
t[0] = x[0, :, :].add(10)
t[1] = x[1, :, :].mul(0.5)
t[2] = x[2, :, :].exp()
return torch.stack(t, dim=0)
def kmeans_anchors():
filenames = [os.path.splitext(f)[0] for f in glob.glob("data_train/*.jpg")]
jpg_files = [f + ".jpg" for f in filenames]
txt_files = [f + ".txt" for f in filenames]
fig = plotting.figure()
reso = 320
observation = [None] * len(jpg_files)
for i, (jpg, txt) in enumerate(zip(jpg_files, txt_files)):
# print(jpg)
image = Image.open(jpg)
ratio = reso / image.width
with open(txt, "r", encoding="utf-8", newline="") as csv_file:
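            # each annotation line is assumed to contain at least 8 integers (the
            # four corner points of a box); width/height is taken as the offset
            # between the first and third corners (columns 4,5 minus columns 0,1)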
lines = numpy.array([[int(x) for x in line[0:8]] for line in csv.reader(csv_file)])
wh = lines[:, [4, 5]] - lines[:, [0, 1]]
wh = wh * ratio
observation[i] = wh
observation = numpy.concatenate(observation, axis=0).astype(float)
centroids, distortion = kmeans(observation, 6, iter=100)
fig.cross(observation[:, 0], observation[:, 1], line_color="skyblue")
fig.circle(centroids[:, 0], centroids[:, 1], fill_color="orange")
print(centroids, distortion)
# plotting.show(fig)
def random_square():
filenames = [os.path.splitext(f)[0] for f in glob.glob("data_train/*.jpg")]
jpg_files = [f + ".jpg" for f in filenames]
txt_files = [f + ".txt" for f in filenames]
reso = 480
transform = transforms.Compose([transforms.Resize(reso), transforms.RandomCrop(reso)])
random.seed(0)
for jpg in random.sample(jpg_files, 10):
image = Image.open(jpg).convert("L")
if image.width < image.height:
new_size = (reso, int(image.height * reso / image.width))
crop_at = (0, random.randint(0, new_size[1] - reso))
else:
new_size = (int(image.width * reso / image.height), reso)
crop_at = (random.randint(0, new_size[0] - reso), 0)
crop_at = crop_at + (crop_at[0] + reso, crop_at[1] + reso)
image = image.resize(new_size).crop(crop_at)
image = transforms.ToTensor()(image)
print(image)
def move_to_data_valid():
poor_jpgs = random.sample(glob.glob("data_train/*.jpg"), 50)
for f in poor_jpgs:
new_name = "data_valid/" + os.path.basename(f)
os.rename(f, new_name)
if __name__ == "__main__":
move_to_data_valid()
|
# task 1 - "Fraction class"
# implement a "fraction" class, it should provide the following attributes:
# the class should offer suitable attributes to use the print and float functions. should ensure check that the values provided to _self_ are integers and respond with a suitable error message if they are not.
# inputs:
num = int(input("enter a number:"))
denom = int(input("enter a second number:"))
print("numerator value = ", num)
print("denome value = ", denom)
class Fraction: # implement a "fraction" class, it should provide the following attributes:
def __init__ (self, num, denom):
self.num = num # data - num(integer)
self.denom = denom # data - denom (integer)
defining math functions
def add(self, other):
return self.num + self.denom # addition (returns a new fraction representing the result of the addition)
def sub(self,other)
return self.num - self.denom # substraction (returns a new fraction representing the result of the substraction)
def multiply(self,other)
return self.num * self.denom # multiplication (returns a new fraction representing the result of the multiplication)
def divide(self,other)
return self.num / self.denom # division (returns a new fraction representing the result of the division)
def inverse(self,other)
inverse x!y
return self.num ! self.denom # inverse (returns a new fraction representing the result of the inverse)
try:
print(fraction)
except:
print("ValueError")
except:
print("NameError")
# instance of the fraction class
fraction = Fraction(2, 3)
fraction.add(Fraction(1,2))
# calling the math functions
add_fraction = fraction.add(self.num,self.denom)
minus_fraction =fraction.sub(self.num, self.denom)
multiply_fraction =fraction.multiply(self.num, self.denom)
division_fraction = fraction.divide(self.num, self.denom)
inverse_fraction = fraction.inverse(self.num, self.denom)
printing to output..
print("Sum of Addition:" .add())
print("Sum of Subtraction:" .sub())
print("Sum of Division:" .multiply())
print("Sum of Multiply:" .divide())
print("Sum of Inverse:" .inverse())
|
# coding:utf-8
from user import User
from privilege import Privileges
class Admin(User):
def __init__(self, first_name, last_name, **describes):
super().__init__(first_name, last_name, **describes)
self.privileges = ['add', 'delete', 'modify', 'inquire']
self.privilege = Privileges()
def show_privileges(self):
for privilege in self.privileges:
print("You have " + privilege + 'Authority' + '.')
new_user = Admin('liu', 'hanyu')
new_user.privilege.show_privileges()
|
import json
import unittest
from unittest import mock
from hexbytes.main import HexBytes
from web3.utils.datastructures import AttributeDict
from pyetheroll.constants import ChainID
from pyetheroll.transaction_debugger import (TransactionDebugger,
decode_contract_call)
class TestTransactionDebugger(unittest.TestCase):
def test_decode_method_log1(self):
"""
Trying to decode a `Log1()` event call.
"""
# simplified contract ABI for tests
contract_abi = [
{'inputs': [], 'type': 'constructor', 'payable': False},
{'payable': False, 'type': 'fallback'},
{'inputs': [
{'indexed': False, 'type': 'address', 'name': 'sender'},
{'indexed': False, 'type': 'bytes32', 'name': 'cid'},
{'indexed': False, 'type': 'uint256', 'name': 'timestamp'},
{'indexed': False, 'type': 'string', 'name': 'datasource'},
{'indexed': False, 'type': 'string', 'name': 'arg'},
{'indexed': False, 'type': 'uint256', 'name': 'gaslimit'},
{'indexed': False, 'type': 'bytes1', 'name': 'proofType'},
{'indexed': False, 'type': 'uint256', 'name': 'gasPrice'}],
'type': 'event', 'name': 'Log1', 'anonymous': False},
{'inputs': [
{'indexed': False, 'type': 'address', 'name': 'sender'},
{'indexed': False, 'type': 'bytes32', 'name': 'cid'},
{'indexed': False, 'type': 'uint256', 'name': 'timestamp'},
{'indexed': False, 'type': 'string', 'name': 'datasource'},
{'indexed': False, 'type': 'string', 'name': 'arg1'},
{'indexed': False, 'type': 'string', 'name': 'arg2'},
{'indexed': False, 'type': 'uint256', 'name': 'gaslimit'},
{'indexed': False, 'type': 'bytes1', 'name': 'proofType'},
{'indexed': False, 'type': 'uint256', 'name': 'gasPrice'}],
'type': 'event', 'name': 'Log2', 'anonymous': False},
{'inputs': [
{'indexed': False, 'type': 'address', 'name': 'sender'},
{'indexed': False, 'type': 'bytes32', 'name': 'cid'},
{'indexed': False, 'type': 'uint256', 'name': 'timestamp'},
{'indexed': False, 'type': 'string', 'name': 'datasource'},
{'indexed': False, 'type': 'bytes', 'name': 'args'},
{'indexed': False, 'type': 'uint256', 'name': 'gaslimit'},
{'indexed': False, 'type': 'bytes1', 'name': 'proofType'},
{'indexed': False, 'type': 'uint256', 'name': 'gasPrice'}],
'type': 'event', 'name': 'LogN', 'anonymous': False}]
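        # for a non-anonymous event, topics[0] is the keccak-256 hash of the event
        # signature, here Log1(address,bytes32,uint256,string,string,uint256,bytes1,uint256)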
topics = [HexBytes(
'b76d0edd90c6a07aa3ff7a222d7f5933e29c6acc660c059c97837f05c4ca1a84'
)]
log_data = (
"0x"
"000000000000000000000000fe8a5f3a7bb446e1cb4566717691cd3139289ed4"
"b0230ab70b78e47050766089ea333f2ff7ad41c6f31e8bed8c2acfcb8e911841"
"0000000000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000000100"
"0000000000000000000000000000000000000000000000000000000000000140"
"00000000000000000000000000000000000000000000000000000000000395f8"
"1100000000000000000000000000000000000000000000000000000000000000"
"00000000000000000000000000000000000000000000000000000004a817c800"
"0000000000000000000000000000000000000000000000000000000000000006"
"6e65737465640000000000000000000000000000000000000000000000000000"
"00000000000000000000000000000000000000000000000000000000000001b4"
"5b55524c5d205b276a736f6e2868747470733a2f2f6170692e72616e646f6d2e"
"6f72672f6a736f6e2d7270632f312f696e766f6b65292e726573756c742e7261"
"6e646f6d5b2273657269616c4e756d626572222c2264617461225d272c20275c"
"6e7b226a736f6e727063223a22322e30222c226d6574686f64223a2267656e65"
"726174655369676e6564496e746567657273222c22706172616d73223a7b2261"
"70694b6579223a247b5b646563727970745d20424b6733544373376c6b7a4e72"
"316b523670786a50434d32534f656a63466f6a55504d544f73426b432f343748"
"485066317350326f78564c546a4e42752b736c523953675a797144746a564f56"
"35597a67313269556b62756270304470636a434564654a54486e477743366744"
"3732394755566f47766f393668757877526f5a6c436a594f3830725771325747"
"596f522f4c433357616d704475767632426f3d7d2c226e223a312c226d696e22"
"3a312c226d6178223a3130302c227265706c6163656d656e74223a747275652c"
"2262617365223a3130247b5b6964656e746974795d20227d227d2c226964223a"
"31247b5b6964656e746974795d20227d227d275d000000000000000000000000")
transaction_debugger = TransactionDebugger(contract_abi)
decoded_method = transaction_debugger.decode_method(topics, log_data)
# TODO: simplify that arg call for unit testing
self.assertEqual(
decoded_method['call'],
{'arg': bytes(
'[URL] [\'json(https://api.random.org/json-rpc/1/invoke).resul'
't.random["serialNumber","data"]\', \'\\n{"jsonrpc":"2.0","met'
'hod":"generateSignedIntegers","params":{"apiKey":${[decrypt] '
'BKg3TCs7lkzNr1kR6pxjPCM2SOejcFojUPMTOsBkC/47HHPf1sP2oxVLTjNBu'
'+slR9SgZyqDtjVOV5Yzg12iUkbubp0DpcjCEdeJTHnGwC6gD729GUVoGvo96h'
'uxwRoZlCjYO80rWq2WGYoR/LC3WampDuvv2Bo=},"n":1,"min":1,"max":1'
'00,"replacement":true,"base":10${[identity] "}"},"id":1${[ide'
'ntity] "}"}\']', "utf8"),
'cid': (
b'\xb0#\n\xb7\x0bx\xe4pPv`\x89\xea3?/\xf7\xadA\xc6\xf3\x1e'
b'\x8b\xed\x8c*\xcf\xcb\x8e\x91\x18A'),
'datasource': b'nested',
'gasPrice': 20000000000,
'gaslimit': 235000,
'proofType': b'\x11',
'sender': '0xfe8a5f3a7bb446e1cb4566717691cd3139289ed4',
'timestamp': 0}
)
self.assertEqual(
decoded_method['method_info']['definition'],
'Log1(address,bytes32,uint256,string,string,uint256,bytes1,uint256)')
self.assertEqual(
decoded_method['method_info']['sha3'].hex(),
'0xb76d0edd90c6a07aa3ff7a222d7f5933e29c6acc660c059c97837f05c4ca1a84')
def test_decode_method_log_bet(self):
"""
Trying to decode a `LogBet()` event call.
"""
# simplified contract ABI
contract_abi = [
{
'inputs': [{'type': 'uint256', 'name': 'newMaxProfitAsPercent'}],
'constant': False, 'name': 'ownerSetMaxProfitAsPercentOfHouse',
'outputs': [], 'stateMutability': 'nonpayable',
'payable': False, 'type': 'function'},
{
'inputs': [], 'constant': True, 'name': 'treasury',
'outputs': [{'type': 'address', 'name': ''}],
'stateMutability': 'view', 'payable': False, 'type': 'function'},
{
'inputs': [], 'constant': True, 'name': 'totalWeiWagered',
'outputs': [{'type': 'uint256', 'name': ''}],
'stateMutability': 'view', 'payable': False, 'type': 'function'},
{
'inputs': [{'type': 'uint256', 'name': 'newMinimumBet'}],
'constant': False, 'name': 'ownerSetMinBet',
'outputs': [], 'stateMutability': 'nonpayable',
'payable': False, 'type': 'function'
},
{
'stateMutability': 'nonpayable',
'inputs': [],
'type': 'constructor',
'payable': False
},
{'stateMutability': 'payable', 'payable': True, 'type': 'fallback'},
{
'inputs': [
{'indexed': True, 'type': 'bytes32', 'name': 'BetID'},
{'indexed': True, 'type': 'address', 'name': 'PlayerAddress'},
{'indexed': True, 'type': 'uint256', 'name': 'RewardValue'},
{'indexed': False, 'type': 'uint256', 'name': 'ProfitValue'},
{'indexed': False, 'type': 'uint256', 'name': 'BetValue'},
{'indexed': False, 'type': 'uint256', 'name': 'PlayerNumber'}],
'type': 'event', 'name': 'LogBet', 'anonymous': False},
]
topics = [
HexBytes(
'1cb5bfc4e69cbacf65c8e05bdb84d7a327bd6bb4c034ff82359aefd7443775c4'
),
HexBytes(
'b0230ab70b78e47050766089ea333f2ff7ad41c6f31e8bed8c2acfcb8e911841'
),
HexBytes(
'00000000000000000000000066d4bacfe61df23be813089a7a6d1a749a5c936a'
),
HexBytes(
'000000000000000000000000000000000000000000000000016a98b78c556c34'
),
]
log_data = (
'0x'
'0000000000000000000000000000000000000000000000000007533f2ecb6c34'
'000000000000000000000000000000000000000000000000016345785d8a0000'
'0000000000000000000000000000000000000000000000000000000000000062')
transaction_debugger = TransactionDebugger(contract_abi)
decoded_method = transaction_debugger.decode_method(topics, log_data)
self.assertEqual(
decoded_method['call'],
{'BetID': (
b'\xb0#\n\xb7\x0bx\xe4pPv`\x89\xea3?/\xf7\xadA\xc6\xf3\x1e\x8b'
b'\xed\x8c*\xcf\xcb\x8e\x91\x18A'),
'BetValue': 100000000000000000,
'PlayerAddress':
'0x66d4bacfe61df23be813089a7a6d1a749a5c936a',
'PlayerNumber': 98,
'ProfitValue': 2061855670103092,
'RewardValue': 102061855670103092})
self.assertEqual(
decoded_method['method_info']['definition'],
'LogBet(bytes32,address,uint256,uint256,uint256,uint256)')
self.assertEqual(
decoded_method['method_info']['sha3'].hex(),
'0x1cb5bfc4e69cbacf65c8e05bdb84d7a327bd6bb4c034ff82359aefd7443775c4')
def test_decode_contract_call(self):
"""
Uses actual data from:
https://etherscan.io/tx/
0xf7b7196ca9eab6e4fb6e7bce81aeb25a4edf04330e57b3c15bece9d260577e2b
In its simplified form for tests.
"""
json_abi = (
'[{"constant":false,"inputs":[{"name":"_to","type":"address"},{"na'
'me":"_value","type":"uint256"}],"name":"transfer","outputs":[{"na'
'me":"success","type":"bool"}],"payable":false,"type":"function"}]'
)
contract_abi = json.loads(json_abi)
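        # call_data layout: a 4-byte function selector ('a9059cbb' is the selector
        # of transfer(address,uint256)) followed by the ABI-encoded arguments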
call_data = (
'a9059cbb00000000000000000000000067fa2c06c9c6d4332f330e14a66bdf18'
'73ef3d2b0000000000000000000000000000000000000000000000000de0b6b3'
'a7640000')
method_name, args = decode_contract_call(contract_abi, call_data)
self.assertEqual(method_name, 'transfer')
self.assertEqual(
args,
['0x67fa2c06c9c6d4332f330e14a66bdf1873ef3d2b', 1000000000000000000]
)
def test_decode_contract_call_callback(self):
"""
Decode `__callback()` method call.
Uses actual data from:
https://etherscan.io/tx/
0xf6d291b2de12db618aafc5fd9f40a37384b4a7ac41d14463a1d707a4f43137c3
In its simplified form for tests.
"""
contract_abi = [
{
"constant": False,
"inputs": [
{"name": "myid", "type": "bytes32"},
{"name": "result", "type": "string"}
],
"name": "__callback", "outputs": [], "payable": False,
"stateMutability": "nonpayable", "type": "function"
},
{
"constant": False,
"inputs": [
{"name": "myid", "type": "bytes32"},
{"name": "result", "type": "string"},
{"name": "proof", "type": "bytes"}
],
"name": "__callback", "outputs": [], "payable": False,
"stateMutability": "nonpayable", "type": "function"
}
]
call_data = (
'38bbfa5010369b11d06269122229ec4088d4bf42fbf629b0d40432ffc40cc638'
'd938f1e800000000000000000000000000000000000000000000000000000000'
'0000006000000000000000000000000000000000000000000000000000000000'
'0000008000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000'
'000000221220ba7237d9ed277fdd4bf2b358049b1c5e971b2bc5fa0edd47b334'
'5d3890e415fc0000000000000000000000000000000000000000000000000000'
'00000000')
method_name, args = decode_contract_call(contract_abi, call_data)
self.assertEqual(method_name, '__callback')
myid = bytes.fromhex(
'10369b11d06269122229ec4088d4bf42fbf629b0d40432ffc40cc638d938f1e8')
result = b''
proof = bytes.fromhex(
'1220ba7237d9ed277fdd4bf2b358049b1c5e971b2bc5fa0edd47b3345d3890e4'
'15fc')
self.assertEqual(
args,
[
myid,
result,
proof,
]
)
def m_get_abi(self, instance):
"""
Mocked version of `web3.contract.Contract.get_abi()`.
"""
# retrieves the original contract address
address = instance.url_dict[instance.ADDRESS]
abi1 = (
'[{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","t'
'ype":"address"},{"indexed":false,"name":"cid","type":"bytes32"},{'
'"indexed":false,"name":"timestamp","type":"uint256"},{"indexed":f'
'alse,"name":"datasource","type":"string"},{"indexed":false,"name"'
':"arg","type":"string"},{"indexed":false,"name":"gaslimit","type"'
':"uint256"},{"indexed":false,"name":"proofType","type":"bytes1"},'
'{"indexed":false,"name":"gasPrice","type":"uint256"}],"name":"Log'
'1","type":"event"}]')
abi2 = (
'[{"anonymous":false,"inputs":[{"indexed":true,"name":"BetID","typ'
'e":"bytes32"},{"indexed":true,"name":"PlayerAddress","type":"addr'
'ess"},{"indexed":true,"name":"RewardValue","type":"uint256"},{"in'
'dexed":false,"name":"ProfitValue","type":"uint256"},{"indexed":fa'
'lse,"name":"BetValue","type":"uint256"},{"indexed":false,"name":"'
'PlayerNumber","type":"uint256"}],"name":"LogBet","type":"event"}]'
)
if address.lower() == '0xcbf1735aad8c4b337903cd44b419efe6538aab40':
return abi1
elif address.lower() == '0xfe8a5f3a7bb446e1cb4566717691cd3139289ed4':
return abi2
return None
def test_decode_transaction_logs(self):
"""
Mocking `web3.eth.Eth.getTransactionReceipt()` response and verifies
decoding transaction works as expected.
"""
mocked_logs = [
AttributeDict({
'address': '0xCBf1735Aad8C4B337903cD44b419eFE6538aaB40',
'topics': [
HexBytes(
'b76d0edd90c6a07aa3ff7a222d7f5933'
'e29c6acc660c059c97837f05c4ca1a84'
)
],
'data':
'000000000000000000000000fe8a5f3a7bb446e1cb4566717691cd3139289ed4'
'b0230ab70b78e47050766089ea333f2ff7ad41c6f31e8bed8c2acfcb8e911841'
'0000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000100'
'0000000000000000000000000000000000000000000000000000000000000140'
'00000000000000000000000000000000000000000000000000000000000395f8'
'1100000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000004a817c800'
'0000000000000000000000000000000000000000000000000000000000000006'
'6e65737465640000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000001b4'
'5b55524c5d205b276a736f6e2868747470733a2f2f6170692e72616e646f6d2e'
'6f72672f6a736f6e2d7270632f312f696e766f6b65292e726573756c742e7261'
'6e646f6d5b2273657269616c4e756d626572222c2264617461225d272c20275c'
'6e7b226a736f6e727063223a22322e30222c226d6574686f64223a2267656e65'
'726174655369676e6564496e746567657273222c22706172616d73223a7b2261'
'70694b6579223a247b5b646563727970745d20424b6733544373376c6b7a4e72'
'316b523670786a50434d32534f656a63466f6a55504d544f73426b432f343748'
'485066317350326f78564c546a4e42752b736c523953675a797144746a564f56'
'35597a67313269556b62756270304470636a434564654a54486e477743366744'
'3732394755566f47766f393668757877526f5a6c436a594f3830725771325747'
'596f522f4c433357616d704475767632426f3d7d2c226e223a312c226d696e22'
'3a312c226d6178223a3130302c227265706c6163656d656e74223a747275652c'
'2262617365223a3130247b5b6964656e746974795d20227d227d2c226964223a'
'31247b5b6964656e746974795d20227d227d275d000000000000000000000000',
}),
AttributeDict({
'address': '0xFE8a5f3a7Bb446e1cB4566717691cD3139289ED4',
'topics': [
HexBytes(
'1cb5bfc4e69cbacf65c8e05bdb84d7a3'
'27bd6bb4c034ff82359aefd7443775c4'),
HexBytes(
'b0230ab70b78e47050766089ea333f2f'
'f7ad41c6f31e8bed8c2acfcb8e911841'),
HexBytes(
'00000000000000000000000066d4bacf'
'e61df23be813089a7a6d1a749a5c936a'),
HexBytes(
'00000000000000000000000000000000'
'0000000000000000016a98b78c556c34')
],
'data':
'0000000000000000000000000000000000000000000000000007533f2ecb6c34'
'000000000000000000000000000000000000000000000000016345785d8a0000'
'0000000000000000000000000000000000000000000000000000000000000062',
})
]
chain_id = ChainID.ROPSTEN
transaction_hash = (
"0x330df22df6543c9816d80e582a4213b1fc11992f317be71775f49c3d853ed5be")
with \
mock.patch('web3.eth.Eth.getTransactionReceipt') \
as m_getTransactionReceipt, \
mock.patch(
'etherscan.contracts.Contract.get_abi',
side_effect=self.m_get_abi, autospec=True):
m_getTransactionReceipt.return_value.logs = mocked_logs
decoded_methods = TransactionDebugger.decode_transaction_logs(
chain_id, transaction_hash)
self.assertEqual(len(decoded_methods), 2)
decoded_method = decoded_methods[0]
self.assertEqual(
decoded_method['method_info']['definition'],
'Log1(address,bytes32,uint256,string,string,uint256,bytes1,uint256)'
)
decoded_method = decoded_methods[1]
self.assertEqual(
decoded_method['method_info']['definition'],
'LogBet(bytes32,address,uint256,uint256,uint256,uint256)'
)
|
from matplotlib import pyplot as plt
import cv2
import matplotlib
import os
import random
import torch
from torch.autograd import Variable
import torchvision.transforms as standard_transforms
import misc.transforms as own_transforms
import pandas as pd
import glob
from models.CC import CrowdCounter
from config import cfg
from misc.utils import *
import scipy.io as sio
from PIL import Image, ImageOps
torch.cuda.set_device(0)
torch.backends.cudnn.benchmark = True
exp_name = '../SHHB_results'
if not os.path.exists(exp_name):
os.mkdir(exp_name)
if not os.path.exists(exp_name+'/pred'):
os.mkdir(exp_name+'/pred')
mean_std = ([0.452016860247, 0.447249650955, 0.431981861591],[0.23242045939, 0.224925786257, 0.221840232611])
img_transform = standard_transforms.Compose([
standard_transforms.ToTensor(),
standard_transforms.Normalize(*mean_std)
])
restore = standard_transforms.Compose([
own_transforms.DeNormalize(*mean_std),
standard_transforms.ToPILImage()
])
pil_to_tensor = standard_transforms.ToTensor()
dataRoot = 'test_data'
data_save_path = '/home/stone/ai/mldata/crowd_density_counting/output/SHHB'
model_path = '/home/stone/ai/mldata/crowd_density_counting/ckpt/05-ResNet-50_all_ep_35_mae_32.4_mse_76.1.pth'
def main():
file_list = glob.glob(os.path.join(dataRoot,"*.jpeg"))
test(file_list, model_path)
def test(file_list, model_path):
net = CrowdCounter(cfg.GPU_ID,cfg.NET)
net.load_state_dict(torch.load(model_path))
net.cuda()
net.eval()
f1 = plt.figure(1)
preds = []
for filename in file_list:
print(filename)
imgname = filename
filename_no_ext = os.path.splitext(os.path.basename(filename))[0]
img = Image.open(imgname)
if img.mode == 'L':
img = img.convert('RGB')
img = img_transform(img)
with torch.no_grad():
img = Variable(img[None,:,:,:]).cuda()
pred_map = net.test_forward(img)
data = pred_map.squeeze().cpu().numpy()
print(np.min(data),np.max(data))
cv2.imwrite(os.path.join(data_save_path,filename_no_ext+".jpg"),data)
pred_map = pred_map.cpu().data.numpy()[0,0,:,:]
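        # the division by 100.0 below assumes the density maps were scaled by a
        # factor of 100 during training, so the sum is rescaled back to a head count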
pred = np.sum(pred_map)/100.0
print('{}:pred={}, pred_map={}'.format(filename_no_ext, pred, pred_map))
pred_map = pred_map/np.max(pred_map+1e-20)
pred_frame = plt.gca()
plt.imshow(pred_map, 'jet')
pred_frame.axes.get_yaxis().set_visible(False)
pred_frame.axes.get_xaxis().set_visible(False)
pred_frame.spines['top'].set_visible(False)
pred_frame.spines['bottom'].set_visible(False)
pred_frame.spines['left'].set_visible(False)
pred_frame.spines['right'].set_visible(False)
plt.savefig(data_save_path+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.png',\
bbox_inches='tight',pad_inches=0,dpi=150)
plt.close()
if __name__ == '__main__':
main()
|
"""Subpackage logs."""
from . import log_operator
|
topic_model.visualize_barchart(topics=[0,1,2,3,4,5])
|
'''program to estimate the generalization error from a variety of AVMs
Determine accuracy on validation set YYYYMM of various hyperparameter setting
for elastic net.
INVOCATION
 python val.py YYYYMM [--test]
INPUT FILE:
WORKING/samples-train-validate.csv
OUTPUT FILE:
WORKING/linval/YYYYMM.pickle
'''
from __future__ import division
import collections
import cPickle as pickle
import numpy as np
import os
import pandas as pd
import pdb
from pprint import pprint
import random
import sys
import AVM
from Bunch import Bunch
from columns_contain import columns_contain
import layout_transactions
from Logger import Logger
from ParseCommandLine import ParseCommandLine
from Path import Path
# from TimeSeriesCV import TimeSeriesCV
cc = columns_contain
def usage(msg=None):
print __doc__
if msg is not None:
print msg
sys.exit(1)
def make_control(argv):
# return a Bunch
print argv
if not(2 <= len(argv) <= 3):
usage('invalid number of arguments')
pcl = ParseCommandLine(argv)
arg = Bunch(
base_name='linval',
yyyymm=argv[1],
test=pcl.has_arg('--test'),
)
try:
arg.yyyymm = int(arg.yyyymm)
except:
usage('YYYYMM not an integer')
random_seed = 123
random.seed(random_seed)
dir_working = Path().dir_working()
debug = False
out_file_name = (
('test-' if arg.test else '') +
'%s.pickle' % arg.yyyymm
)
# assure output directory exists
dir_path = dir_working + arg.base_name + '/'
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return Bunch(
arg=arg,
debug=debug,
path_in=dir_working + 'samples-train-validate.csv',
path_out=dir_path + out_file_name,
random_seed=random_seed,
test=arg.test,
)
ResultKey = collections.namedtuple('ResultKey',
'n_months_back alpha l1_ratio units_X units_y yyyymm',
)
ResultValue = collections.namedtuple('ResultValue',
'actuals predictions',
)
def do_linval(control, samples):
'run grid search on elastic net and random forest models'
pdb.set_trace()
# HP settings to test
# common across models
n_months_back_seq = (1, 2, 3, 4, 5, 6)
# for ElasticNet
    alpha_seq = (0.0, 0.1, 0.3, 1.0, 3.0)  # multiplies the penalty term
    l1_ratio_seq = (0.0, 0.25, 0.50, 0.75, 1.0)  # 0 ==> L2 penalty, 1 ==> L1 penalty
units_X_seq = ('natural', 'log')
units_y_seq = ('natural', 'log')
result = {}
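    # the nested loops at the end of this function evaluate the full grid:
    # 6 * 5 * 5 * 2 * 2 = 600 hyperparameter combinations for this validation month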
    def run(n_months_back, alpha, l1_ratio, units_X, units_y):
pdb.set_trace()
avm = AVM.AVM(
model_name='ElasticNet',
forecast_time_period=control.arg.yyyymm,
n_months_back=n_months_back,
random_state=control.random_seed,
alpha=alpha,
l1_ratio=l1_ratio,
units_X=units_X,
units_y=units_y,
)
avm.fit(samples)
mask = samples[layout_transactions.yyyymm] == control.arg.yyyymm
samples_yyyymm = samples[mask]
predictions = avm.predict(samples_yyyymm)
actuals = samples_yyyymm[layout_transactions.price]
result_key = ResultKey(n_months_back, alpha, l1_ratio, units_X, units_y, control.arg.yyyymm)
print result_key
result[result_key] = ResultValue(actuals, predictions)
for n_months_back in n_months_back_seq:
for alpha in alpha_seq:
for l1_ratio in l1_ratio_seq:
for units_X in units_X_seq:
for units_y in units_y_seq:
run(n_months_back, alpha, l1_ratio, units_X, units_y)
return result
def main(argv):
control = make_control(argv)
if False:
# avoid error in sklearn that requires flush to have no arguments
sys.stdout = Logger(base_name=control.arg.base_name)
print control
samples = pd.read_csv(
control.path_in,
nrows=1000 if control.test else None,
)
print 'samples.shape', samples.shape
result = do_linval(control, samples)
with open(control.path_out, 'wb') as f:
pickle.dump((result, control), f)
print control
if control.test:
print 'DISCARD OUTPUT: test'
print 'done'
return
if __name__ == '__main__':
if False:
# avoid pyflakes warnings
pdb.set_trace()
pprint()
pd.DataFrame()
np.array()
main(sys.argv)
|
from django.contrib import admin
from reports.models import Machine, MunkiReport
class MachineAdmin(admin.ModelAdmin):
list_display = ('hostname', 'mac', 'username', 'last_munki_update',
'last_inventory_update')
class MunkiReportAdmin(admin.ModelAdmin):
list_display = ('hostname', 'mac', 'timestamp', 'errors', 'warnings')
admin.site.register(Machine, MachineAdmin)
admin.site.register(MunkiReport, MunkiReportAdmin)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import syft as sy
# Set everything up
hook = sy.TorchHook(torch)
alice = sy.VirtualWorker(id="alice", hook=hook)
bob = sy.VirtualWorker(id="bob", hook=hook)
james = sy.VirtualWorker(id="james", hook=hook)
# A Toy Dataset
data = torch.tensor([[0,0],[0,1],[1,0],[1,1.]])
target = torch.tensor([[0],[0],[1],[1.]])
# A Toy Model
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 2)
self.fc2 = nn.Linear(2, 1)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
model = Net()
# We encode everything
data = data.fix_precision().share(bob, alice, crypto_provider=james, requires_grad=True)
target = target.fix_precision().share(bob, alice, crypto_provider=james, requires_grad=True)
model = model.fix_precision().share(bob, alice, crypto_provider=james, requires_grad=True)
print(data)
opt = optim.SGD(params=model.parameters(),lr=0.1).fix_precision()
for iteration in range(20):
# 1) erase previous gradients (if they exist)
opt.zero_grad()
# 2) make a prediction
pred = model(data)
# 3) calculate how much we missed
loss = ((pred - target)**2).sum()
# 4) figure out which weights caused us to miss
loss.backward()
# 5) change those weights
opt.step()
# 6) print our progress
print(loss.get().float_precision())
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
#os.environ["PYOPENGL_PLATFORM"] = "egl"
import numpy as np
from scipy.spatial.transform import Rotation as R
import pyrender
import trimesh
import pterotactyl.objects as objects
from pterotactyl.utility import utils
from random import randrange
HAND_COLOUR = [119, 136, 153, 255]
DIGIT_COLOUR = [119, 225, 153, 175]
class Renderer:
def __init__(self, hand, pb, cameraResolution=[256, 256]):
self.scene = self.init_scene()
self.hand = hand
self.pb = pb
self.hand_nodes = []
self.object_nodes = []
self.init_camera()
self.init_hand()
self.update_hand()
self.r = pyrender.OffscreenRenderer(cameraResolution[0], cameraResolution[1])
    # the scene is initialized with fixed lights; this can easily be changed to match the desired environment
def init_scene(self):
scene = pyrender.Scene(ambient_light=[0.3, 0.3, 0.3])
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[0, -0.8, 0.3], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.0)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[0, 0.8, 0.3], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.0)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[-1, 0, 1], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.0)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[1, 0, 1], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.0)
scene.add(light, pose=light_pose)
return scene
def init_camera(self):
# initializes the camera parameters
camera = pyrender.PerspectiveCamera(
yfov=60.0 / 180.0 * np.pi, znear=0.01, zfar=10.0, aspectRatio=1.0
)
camera_pose = utils.euler2matrix(
xyz="xyz", angles=[0, 0, 0], translation=[0, 0, 0], degrees=True
)
camera_node = pyrender.Node(camera=camera, matrix=camera_pose)
self.scene.add_node(camera_node)
self.scene.main_camera_node = camera_node
self.camera = camera_node
# this viewpoint is used in the paper
        # if you change this, you will need to update the camera parameter matrix in the reconstruction model as well
initial_matrix = R.from_euler("xyz", [45.0, 0, 270.0], degrees=True).as_matrix()
self.update_camera_pose([-0.3, 0, 0.3], initial_matrix)
def add_object(
self,
mesh,
position=[0, 0, 0],
orientation=[0, 0, 0],
colour=[228, 217, 111, 255],
):
mesh.visual.vertex_colors = colour
mesh = pyrender.Mesh.from_trimesh(mesh)
pose = utils.euler2matrix(angles=orientation, translation=position)
obj_node = pyrender.Node(mesh=mesh, matrix=pose)
self.scene.add_node(obj_node)
self.object_nodes.append(obj_node)
# defines the hand in the scene
def init_hand(self):
hand_location = os.path.join(
os.path.dirname(objects.__file__), "hand/meshes_obj/"
)
base_obj = trimesh.load(hand_location + "0_base.obj")
base_obj = trimesh.Trimesh(vertices=base_obj.vertices, faces=base_obj.faces)
base_obj.visual.vertex_colors = HAND_COLOUR
self.add_hand_obj(base_obj)
for _ in range(3):
for i in range(1, 5):
element = trimesh.load(hand_location + f"{i}_finger.obj")
element = trimesh.Trimesh(
vertices=element.vertices, faces=element.faces
)
element.visual.vertex_colors = HAND_COLOUR
self.add_hand_obj(element)
element = trimesh.load(hand_location + "5_digit.obj")
element = trimesh.Trimesh(vertices=element.vertices, faces=element.faces)
element.visual.vertex_colors = DIGIT_COLOUR
self.add_hand_obj(element)
for i in range(6, 10):
element = trimesh.load(hand_location + f"{i}_thumb.obj")
element = trimesh.Trimesh(vertices=element.vertices, faces=element.faces)
element.visual.vertex_colors = HAND_COLOUR
self.add_hand_obj(element)
element = trimesh.load(hand_location + "5_digit.obj")
element = trimesh.Trimesh(vertices=element.vertices, faces=element.faces)
element.visual.vertex_colors = DIGIT_COLOUR
self.add_hand_obj(element)
    def add_hand_obj(self, tri_mesh):
        mesh = pyrender.Mesh.from_trimesh(tri_mesh)
pose = utils.euler2matrix(angles=[0, 0, 0], translation=[0, 0, 0])
obj_node = pyrender.Node(mesh=mesh, matrix=pose)
self.scene.add_node(obj_node)
self.hand_nodes.append(obj_node)
# gets the various hand element's position and orientation and uses them to update the hand in the scene
def update_hand(self):
# base of the hand
position, orientation = self.pb.getBasePositionAndOrientation(self.hand)
orientation = self.pb.getEulerFromQuaternion(orientation)
pose = utils.euler2matrix(angles=orientation, translation=position)
self.scene.set_pose(self.hand_nodes[0], pose=pose)
indices = [
0,
1,
2,
3,
4,
7,
8,
9,
10,
11,
14,
15,
16,
17,
18,
21,
22,
23,
24,
25,
]
# all other elements
for node, index in zip(self.hand_nodes[1:], indices):
position, orientation = self.pb.getLinkState(self.hand, index)[:2]
orientation = self.pb.getEulerFromQuaternion(orientation)
pose = utils.euler2matrix(angles=orientation, translation=position)
self.scene.set_pose(node, pose=pose)
    # moves the hand out of the view of the camera
def remove_hand(self):
for node in self.hand_nodes:
pose = utils.euler2matrix(angles=[0, 0, 0], translation=[0, 0, -10.0])
self.scene.set_pose(node, pose=pose)
def remove_objects(self):
for obj in self.object_nodes:
self.scene.remove_node(obj)
self.object_nodes = []
def update_camera_pose(self, position, orientation):
pose = np.eye(4)
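        # build a 4x4 homogeneous transform: the top-left 3x3 block holds the
        # rotation, the last column holds the translation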
if np.array(orientation).shape == (3,):
orientation = R.from_euler("xyz", orientation, degrees=True).as_matrix()
pose[:3, 3] = position
pose[:3, :3] = orientation
self.camera.matrix = pose
def render(self, get_depth=False):
colour, depth = self.r.render(self.scene)
if get_depth:
return colour, depth
return colour
|
import docker
import os, sys, yaml, copy, string, StringIO
import maestro, template, utils
from requests.exceptions import HTTPError
from .container import Container
class ContainerError(Exception):
pass
class Service:
def __init__(self, conf_file=None, environment=None):
self.log = utils.setupLogging()
self.containers = {}
self.templates = {}
self.state = 'live'
if environment:
self.load(environment)
else:
# If we didn't get an absolute path to a file, look for it in the current directory.
if not conf_file.startswith('/'):
conf_file = os.path.join(os.path.dirname(sys.argv[0]), conf_file)
data = open(conf_file, 'r')
self.config = yaml.load(data)
# On load, order templates into the proper startup sequence
self.start_order = utils.order(self.config['templates'])
def get(self, container):
return self.containers[container]
def build(self, wait_time=60):
# Setup and build all the templates
for tmpl in self.start_order:
if not self.config['templates'][tmpl]:
sys.stderr.write('Error: no configuration found for template: ' + tmpl + '\n')
exit(1)
config = self.config['templates'][tmpl]
# Create the template. The service name and version will be dynamic once the new config format is implemented
utils.status('Building template %s' % (tmpl))
tmpl_instance = template.Template(tmpl, config, 'service', '0.1')
tmpl_instance.build()
self.templates[tmpl] = tmpl_instance
# We'll store the running instances as a dict under the template
self.containers[tmpl] = {}
        # Start the environment
for tmpl in self.start_order:
self._handleRequire(tmpl, wait_time)
tmpl_instance = self.templates[tmpl]
config = self.config['templates'][tmpl]
# If count is defined in the config then we're launching multiple instances of the same thing
# and they'll need to be tagged accordingly. Count only applies on build.
count = tag_name = 1
if 'count' in config:
count = tag_name = config['count']
while count > 0:
name = tmpl
if tag_name > 1:
name = name + '__' + str(count)
utils.status('Launching instance of template %s named %s' % (tmpl, name))
instance = tmpl_instance.instantiate(name)
instance.run()
self.containers[tmpl][name] = instance
count = count - 1
def destroy(self, timeout=None):
for tmpl in reversed(self.start_order):
for container in self.containers[tmpl]:
self.log.info('Destroying container: %s', container)
self.containers[tmpl][container].destroy(timeout)
self.state = 'destroyed'
return True
def start(self, container=None, wait_time=60):
if not self._live():
utils.status('Environment has been destroyed and can\'t be started')
return False
# If a container is provided we just start that container
# TODO: may need an abstraction here to handle names of multi-container groups
if container:
tmpl = self._getTemplate(container)
rerun = self._handleRequire(tmpl, wait_time)
# We need to see if env has changed and then commit and run a new container.
# This rerun functionality should only be a temporary solution as each time the
# container is restarted this way it will consume a layer.
# This is only necessary because docker start won't take a new set of env vars
if rerun:
self.containers[tmpl][container].rerun()
else:
self.containers[tmpl][container].start()
else:
for tmpl in self.start_order:
rerun = self._handleRequire(tmpl, wait_time)
for container in self.containers[tmpl]:
if rerun:
self.containers[tmpl][container].rerun()
else:
self.containers[tmpl][container].start()
return True
def stop(self, container=None, timeout=None):
if not self._live():
utils.status('Environment has been destroyed and can\'t be stopped.')
return False
if container:
self.containers[self._getTemplate(container)][container].stop(timeout)
else:
for tmpl in reversed(self.start_order):
for container in self.containers[tmpl]:
self.containers[tmpl][container].stop(timeout)
return True
    def load(self, filename='environment.yml'):
self.log.info('Loading environment from: %s', filename)
with open(filename, 'r') as input_file:
self.config = yaml.load(input_file)
self.state = self.config['state']
for tmpl in self.config['templates']:
# TODO fix hardcoded service name and version
self.templates[tmpl] = template.Template(tmpl, self.config['templates'][tmpl], 'service', '0.1')
self.containers[tmpl] = {}
self.start_order = utils.order(self.config['templates'])
for container in self.config['containers']:
tmpl = self.config['containers'][container]['template']
self.containers[tmpl][container] = Container(container, self.config['containers'][container],
self.config['templates'][tmpl]['config'])
def save(self, filename='environment.yml'):
self.log.info('Saving environment state to: %s', filename)
with open(filename, 'w') as output_file:
output_file.write(self.dump())
def run(self, template, commandline=None, wait_time=60, attach=False, dont_add=False):
if template in self.templates:
self._handleRequire(template, wait_time)
name = template + "-" + str(os.getpid())
            # TODO: the name needs to be dynamic here. Need to handle static and temporary cases.
container = self.templates[template].instantiate(name, commandline)
container.run()
# For temporary containers we may not want to save it in the environment
if not dont_add:
self.containers[template][name] = container
# for dynamic runs there needs to be a way to display the output of the command.
if attach:
container.attach()
return container
else:
# Should handle arbitrary containers
raise ContainerError('Unknown template')
def ps(self):
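        # Build a docker ps-style table (ID, NODE, COMMAND, STATUS, PORTS) covering every known container.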
columns = '{0:<14}{1:<19}{2:<44}{3:<11}{4:<15}\n'
result = columns.format('ID', 'NODE', 'COMMAND', 'STATUS', 'PORTS')
for tmpl in self.templates:
for container in self.containers[tmpl]:
container_id = self.containers[tmpl][container].state['container_id']
node_name = (container[:15] + '..') if len(container) > 17 else container
command = ''
status = 'Stopped'
ports = ''
try:
state = docker.Client().inspect_container(container_id)
                    command = ' '.join([state['Path']] + state['Args'])
command = (command[:40] + '..') if len(command) > 42 else command
p = []
if state['NetworkSettings']['PortMapping']:
p = state['NetworkSettings']['PortMapping']['Tcp']
for port in p:
if ports:
ports += ', '
ports += p[port] + '->' + port
if state['State']['Running']:
status = 'Running'
except HTTPError:
status = 'Destroyed'
result += columns.format(container_id, node_name, command, status, ports)
return result.rstrip('\n')
def dump(self):
result = {}
result['state'] = self.state
result['templates'] = {}
result['containers'] = {}
for template in self.templates:
result['templates'][template] = self.templates[template].config
for container in self.containers[template]:
result['containers'][container] = self.containers[template][container].state
return yaml.dump(result, Dumper=yaml.SafeDumper)
def _getTemplate(self, container):
# Find the template for this container
for tmpl in self.containers:
if container in self.containers[tmpl]:
return tmpl
def _live(self):
return self.state == 'live'
def _pollService(self, container, service, name, port, wait_time):
# Based on start_order the service should already be running
service_ip = self.containers[service][name].get_ip_address()
utils.status('Starting %s: waiting for service %s on ip %s and port %s' % (container, service, service_ip, port))
result = utils.waitForService(service_ip, int(port), wait_time)
if result < 0:
utils.status('Never found service %s on port %s' % (service, port))
            raise ContainerError('Couldn\'t find required services, aborting')
utils.status('Found service %s on ip %s and port %s' % (service, service_ip, port))
#return service_ip + ":" + str(port)
return service_ip
def _handleRequire(self, tmpl, wait_time):
env = []
# Wait for any required services to finish registering
config = self.config['templates'][tmpl]
if 'require' in config:
try:
                # Containers can depend on multiple services
for service in config['require']:
service_env = []
port = config['require'][service]['port']
if port:
# If count is defined then we need to wait for all instances to start
count = config['require'][service].get('count', 1)
if count > 1:
while count > 0:
name = service + '__' + str(count)
service_env.append(self._pollService(tmpl, service, name, port, wait_time))
count = count - 1
else:
service_env.append(self._pollService(tmpl, service, service, port, wait_time))
env.append(service.upper() + '=' + ' '.join(service_env))
except:
utils.status('Failure on require. Shutting down the environment')
self.destroy()
raise
# If the environment changes then dependent containers will need to be re-run not just restarted
rerun = False
# Setup the env for dependent services
if 'environment' in config['config']:
for entry in env:
name, value = entry.split('=')
result = []
replaced = False
# See if an existing variable exists and needs to be updated
for var in config['config']['environment']:
var_name, var_value = var.split('=')
if var_name == name and var_value != value:
replaced = True
rerun = True
result.append(entry)
elif var_name == name and var_value == value:
# Just drop any full matches. We'll add it back later
pass
else:
result.append(var)
if not replaced:
result.append(entry)
config['config']['environment'] = result
else:
config['config']['environment'] = env
# Determines whether or not a container can simply be restarted
return rerun
|
from collections import namedtuple
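# Flat snapshot of DB2 instance/database monitoring metrics, one value per field.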
DB2Feature = namedtuple('DB2Feature', [
"dbCapacity",
"dbVersion",
"instanceName",
"productName",
"dbName",
"serviceLevel",
"instanceConn",
"instanceUsedMem",
"dbConn",
"usedLog",
"transcationInDoubt",
"xlocksEscalation",
"locksEscalation",
"locksTimeOut",
"deadLock",
"lastBackupTime",
"dbStatus",
"instanceStatus",
"bpIndexHitRatio",
"bpDatahitRatio",
"sortsInOverflow",
"agetnsWait",
"updateRows",
"insertRows",
"selectedRows",
"deleteRows",
"selects",
"selectSQLs",
"dynamicSQLs",
"rollbacks",
"commits",
"bpTempIndexHitRatio",
"bpTempDataHitRatio"
])
|
from __future__ import absolute_import
import logging
import itertools
from typing import List, Dict, Optional, Union
from overrides import overrides
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data import Instance
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers import TokenIndexer
from allennlp.data.fields import TextField, SequenceLabelField, Field, MetadataField, \
MultiLabelField, ListField
from ccm_model.reader.utils import get_sentence_markers_from_tokens
logger = logging.getLogger(__name__)
def _is_divider(line: str) -> bool:
empty_line = line.strip() == ''
return empty_line
@DatasetReader.register("handcrafted_feature_reader")
class HandCraftedFeatureReader(DatasetReader):
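    # Reads whitespace-separated rows of the form "token feature_1 ... feature_n tag", with blank lines
    # separating sequences; handcrafted features are mapped through features_index_map into per-token MultiLabelFields.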
def __init__(self,
token_indexers: Dict[str, TokenIndexer],
features_index_map: Union[Dict[str, int], str],
feature_label_namespace: str = "feature_labels",
lazy: bool = False,
coding_scheme: str = "IOB1",
label_namespace: str = "labels",
use_sentence_markers: bool = False) -> None:
super(HandCraftedFeatureReader, self).__init__(lazy=lazy)
self._token_indexers = token_indexers
self.label_namespace = label_namespace
if isinstance(features_index_map, str):
with open(features_index_map, "r") as fil:
_features_index_map: Dict[str, int] = {}
for index, line in enumerate(fil):
line = line.strip()
assert line not in _features_index_map
_features_index_map[line] = index
self._features_index_map = _features_index_map
else:
self._features_index_map = features_index_map
self._coding_scheme = coding_scheme
self.feature_label_namespace = feature_label_namespace
self._use_sentence_markers = use_sentence_markers
self._train = True
def eval(self):
self._train = False
def train(self):
self._train = True
@overrides
def _read(self, file_path: str) -> List[Instance]:
instances: List[Instance] = []
with open(file_path, "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
for is_divider, lines in itertools.groupby(data_file, _is_divider):
if not is_divider:
fields = [line.strip().split() for line in lines]
tokens: List[str] = []
tags: List[str] = []
features: List[List[str]] = []
for field in fields:
tokens.append(field[0])
tags.append(field[-1])
features.append(field[1:-1])
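                    # In eval mode the gold tags are withheld so the produced instances carry no supervision.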
tags = tags if self._train else None
instances.append(self.text_to_instance(
tokens=tokens, features=features, tags=tags))
return instances
@overrides
def text_to_instance(self,
tokens: List[str],
features: List[List[str]],
tags: Optional[List[str]] = None,
tag_label_namespace: Optional[str] = None):
# pylint: disable=arguments-differ
tokens: List[Token] = [Token(x) for x in tokens]
sequence = TextField(tokens, self._token_indexers)
instance_fields: Dict[str, Field] = {"tokens": sequence}
metadata = {"words": [x.text for x in tokens]}
if self._use_sentence_markers:
sentence_markers = get_sentence_markers_from_tokens(tokens)
metadata["sentence_markers"] = sentence_markers
instance_fields["metadata"] = MetadataField(metadata)
# now encode the features
feature_list: List[MultiLabelField] = []
for feature in features:
indexed_feature: List[int] = [
self._features_index_map[x] for x in feature if x in self._features_index_map
]
feature_list.append(MultiLabelField(indexed_feature, label_namespace=self.feature_label_namespace,
skip_indexing=True, num_labels=len(self._features_index_map)))
instance_fields["features"] = ListField(feature_list)
if tags:
tag_label_namespace = tag_label_namespace or self.label_namespace
converted_tags: List[str] = self.convert_tags(tags)
instance_fields["tags"] = SequenceLabelField(converted_tags,
sequence, tag_label_namespace)
return Instance(instance_fields)
@staticmethod
def convert_tags(tags: List[str]) -> List[str]:
"""Converts tags into an IOB1 formatted tag structure
"""
new_tags = []
for tag in tags:
new_tags.append(f"I-{tag}" if tag != "O" else tag)
return new_tags
def read_partial(self, file_path: str):
instances: List[Instance] = []
with open(file_path, "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
for is_divider, lines in itertools.groupby(data_file, _is_divider):
if not is_divider:
fields = [line.strip().split() for line in lines]
tokens: List[str] = []
tags: List[str] = []
features: List[List[str]] = []
for field in fields:
tokens.append(field[0])
tags.append(field[-1])
features.append(field[1:-1])
instances.append(self.text_to_instance(
tokens=tokens, features=features, tags=tags,
tag_label_namespace="partial_labels"))
return instances
|
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from . import views
from rest_framework import routers
router = routers.DefaultRouter()
router.register('problem', views.ProblemView)
router.register('problemdata', views.ProblemDataView)
router.register('problemtag', views.ProblemTagView)
urlpatterns = [
    url('', include(router.urls)),
url(r'^uploadfile', views.UploadFileAPIView.as_view()),
]
|
from __future__ import print_function
from pylearn2.devtools.run_pyflakes import run_pyflakes
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
def test_via_pyflakes():
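    # Run pyflakes across the code base and fail the test if any file reports problems.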
d = run_pyflakes(True)
if len(d.keys()) != 0:
print('Errors detected by pyflakes')
for key in d.keys():
print(key+':')
for l in d[key].split('\n'):
print('\t',l)
raise AssertionError("You have errors detected by pyflakes")
|
import nagisa
from seqeval.metrics import classification_report
def main():
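    # Load the trained KWDLC NER model, decode every test sentence, and print a seqeval classification report.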
ner_tagger = nagisa.Tagger(
vocabs='data/kwdlc_ner_model.vocabs',
params='data/kwdlc_ner_model.params',
hp='data/kwdlc_ner_model.hp'
)
fn_in_test = "data/kwdlc.test"
test_X, test_Y = nagisa.utils.load_file(fn_in_test)
true_Y = []
pred_Y = []
for x, true_y in zip(test_X, test_Y):
pred_y = ner_tagger.decode(x)
true_Y += true_y
pred_Y += pred_y
report = classification_report(true_Y, pred_Y)
print(report)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-11-22 05:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parkstay', '0012_campsiterate_update_level'),
]
operations = [
migrations.CreateModel(
name='CampgroundPriceHistory',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('date_start', models.DateField()),
('date_end', models.DateField()),
('rate_id', models.IntegerField()),
('adult', models.DecimalField(decimal_places=2, max_digits=8)),
('concession', models.DecimalField(decimal_places=2, max_digits=8)),
('child', models.DecimalField(decimal_places=2, max_digits=8)),
],
options={
'db_table': 'parkstay_campground_pricehistory_v',
'managed': False,
},
),
]
|
"""Test the frigate binary sensor."""
from __future__ import annotations
import logging
from typing import Any
from unittest.mock import AsyncMock
import pytest
from pytest_homeassistant_custom_component.common import async_fire_mqtt_message
from custom_components.frigate.api import FrigateApiClientError
from custom_components.frigate.const import DOMAIN, NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from . import (
TEST_BINARY_SENSOR_FRONT_DOOR_PERSON_MOTION_ENTITY_ID,
TEST_BINARY_SENSOR_STEPS_PERSON_MOTION_ENTITY_ID,
TEST_CONFIG_ENTRY_ID,
TEST_SERVER_VERSION,
create_mock_frigate_client,
setup_mock_frigate_config_entry,
)
_LOGGER = logging.getLogger(__name__)
async def test_binary_sensor_setup(hass: HomeAssistant) -> None:
"""Verify a successful binary sensor setup."""
await setup_mock_frigate_config_entry(hass)
entity_state = hass.states.get(
TEST_BINARY_SENSOR_FRONT_DOOR_PERSON_MOTION_ENTITY_ID
)
assert entity_state
assert entity_state.state == "unavailable"
async_fire_mqtt_message(hass, "frigate/available", "online")
await hass.async_block_till_done()
entity_state = hass.states.get(
TEST_BINARY_SENSOR_FRONT_DOOR_PERSON_MOTION_ENTITY_ID
)
assert entity_state
assert entity_state.state == "off"
async_fire_mqtt_message(hass, "frigate/front_door/person", "1")
await hass.async_block_till_done()
entity_state = hass.states.get(
TEST_BINARY_SENSOR_FRONT_DOOR_PERSON_MOTION_ENTITY_ID
)
assert entity_state
assert entity_state.state == "on"
# Verify the steps (zone) motion sensor works.
async_fire_mqtt_message(hass, "frigate/steps/person", "1")
await hass.async_block_till_done()
entity_state = hass.states.get(TEST_BINARY_SENSOR_STEPS_PERSON_MOTION_ENTITY_ID)
assert entity_state
assert entity_state.state == "on"
async_fire_mqtt_message(hass, "frigate/front_door/person", "not_an_int")
await hass.async_block_till_done()
entity_state = hass.states.get(
TEST_BINARY_SENSOR_FRONT_DOOR_PERSON_MOTION_ENTITY_ID
)
assert entity_state
assert entity_state.state == "off"
async_fire_mqtt_message(hass, "frigate/available", "offline")
entity_state = hass.states.get(
TEST_BINARY_SENSOR_FRONT_DOOR_PERSON_MOTION_ENTITY_ID
)
assert entity_state
assert entity_state.state == "unavailable"
async def test_binary_sensor_api_call_failed(hass: HomeAssistant) -> None:
"""Verify a failed API call results in unsuccessful setup."""
client = create_mock_frigate_client()
client.async_get_stats = AsyncMock(side_effect=FrigateApiClientError)
await setup_mock_frigate_config_entry(hass, client=client)
assert not hass.states.get(TEST_BINARY_SENSOR_FRONT_DOOR_PERSON_MOTION_ENTITY_ID)
@pytest.mark.parametrize(
"camerazone_entity",
[
("front_door", TEST_BINARY_SENSOR_FRONT_DOOR_PERSON_MOTION_ENTITY_ID),
("steps", TEST_BINARY_SENSOR_STEPS_PERSON_MOTION_ENTITY_ID),
],
)
async def test_binary_sensor_device_info(
camerazone_entity: Any, hass: HomeAssistant
) -> None:
"""Verify switch device information."""
camerazone, entity = camerazone_entity
config_entry = await setup_mock_frigate_config_entry(hass)
device_registry = dr.async_get(hass)
entity_registry = er.async_get(hass)
device = device_registry.async_get_device(
identifiers={(DOMAIN, f"{config_entry.entry_id}:{camerazone}")}
)
assert device
assert device.manufacturer == NAME
assert device.model.endswith(f"/{TEST_SERVER_VERSION}")
entities_from_device = [
entry.entity_id
for entry in er.async_entries_for_device(entity_registry, device.id)
]
assert entity in entities_from_device
async def test_binary_sensor_unique_id(hass: HomeAssistant) -> None:
"""Verify entity unique_id(s)."""
await setup_mock_frigate_config_entry(hass)
registry_entry = er.async_get(hass).async_get(
TEST_BINARY_SENSOR_FRONT_DOOR_PERSON_MOTION_ENTITY_ID
)
assert registry_entry
assert (
registry_entry.unique_id
== f"{TEST_CONFIG_ENTRY_ID}:motion_sensor:front_door_person"
)
|
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
# def do_canny(frame):
# gray = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)
# blur = cv.GaussianBlur(gray, (5, 5), 0)
# canny = cv.Canny(blur, 50, 150)
# return canny
def do_segment(frame):
    # Since an image is a multi-dimensional array containing the relative intensities of each pixel in the image, we can use frame.shape to return a tuple: [number of rows, number of columns, number of channels] of the dimensions of the frame
    # frame.shape[0] gives us the number of rows of pixels the frame has. Since height begins from 0 at the top, the y-coordinate of the bottom of the frame is its height
height = frame.shape[0]
# Creates a triangular polygon for the mask defined by three (x, y) coordinates
polygons = np.array([
[(0, height), (800, height), (380, 290)]
])
# Creates an image filled with zero intensities with the same dimensions as the frame
mask = np.zeros_like(frame)
    # Fills the triangular polygon area of the mask with 255 and leaves the other areas at 0
cv.fillPoly(mask, polygons, 255)
# A bitwise and operation between the mask and frame keeps only the triangular area of the frame
segment = cv.bitwise_and(frame, mask)
return segment
# cap = cv.VideoCapture("input.mp4")
# while (cap.isOpened()):
# ret, frame = cap.read()
# canny = do_canny(frame)
# First, visualize the frame to figure out the three coordinates defining the triangular mask
#     plt.imshow(frame)
#     plt.show()
#     segment = do_segment(canny)
# if cv.waitKey(10) & 0xFF == ord('q'):
# break
# cap.release()
# cv.destroyAllWindows()
|
from nose.tools import eq_
from mozillians.common.tests import TestCase
from mozillians.users.models import IdpProfile, UserProfile
from mozillians.users.tests import UserFactory
from mozillians.phonebook.utils import get_profile_link_by_email
class UtilsTests(TestCase):
def test_link_email_by_not_found(self):
user = UserFactory.create()
link = get_profile_link_by_email(user.email)
eq_(link, "")
def test_link_by_email(self):
user = UserFactory.create()
IdpProfile.objects.create(
profile=user.userprofile,
auth0_user_id="email|",
email=user.email,
primary=True,
primary_contact_identity=True,
)
profile = UserProfile.objects.get(pk=user.userprofile.pk)
link = get_profile_link_by_email(user.email)
eq_(link, profile.get_absolute_url())
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import logging
from enum import Enum
import attr
from metadata.exc import ERPError
from metadata.runtime import rt_context, rt_local
from metadata.type_system.core import MetaData, parse_list_type
from metadata.util.i18n import lazy_selfish as _
module_logger = logging.getLogger(__name__)
class LineageDirection(Enum):
BOTH = 'BOTH'
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
@attr.s(frozen=True)
class LineageDataNode(object):
uid = attr.ib()
data_set_type = attr.ib()
data_set_id = attr.ib(converter=str)
qualified_name = attr.ib(init=False)
type = attr.ib(init=False)
generate_type = attr.ib(default='user')
extra = attr.ib(factory=dict)
def __attrs_post_init__(self):
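        # The class is frozen, so use object.__setattr__ to mirror the id/type into the generic qualified_name/type fields.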
object.__setattr__(self, str("qualified_name"), self.data_set_id)
object.__setattr__(self, str("type"), self.data_set_type)
@attr.s(frozen=True)
class LineageProcessNode(object):
uid = attr.ib()
processing_id = attr.ib()
qualified_name = attr.ib(init=False)
type = attr.ib(default='data_processing')
generate_type = attr.ib(default='user')
extra = attr.ib(factory=dict)
def __attrs_post_init__(self):
object.__setattr__(self, str("qualified_name"), self.processing_id)
@attr.s(frozen=True)
class LineageEdge(object):
start = attr.ib(default=None)
end = attr.ib(default=None)
class DgraphLineageMixin(object):
"""
    Dgraph lineage computation support.
"""
def __init__(self, *args, **kwargs):
super(DgraphLineageMixin, self).__init__(*args, **kwargs)
self.lineage_with_extra_template = self.jinja_env.get_template('lineage_with_extra.jinja2')
self.lineage_template = self.jinja_env.get_template('lineage.jinja2')
self.lineage_check_template = self.jinja_env.get_template('check_lineage.jinja2')
def query_lineage(
self,
entity_type,
data_set_id,
depth=3,
raw_direction=LineageDirection.BOTH,
extra_retrieve=None,
only_user_entity=False,
):
"""
        Query the lineage interface. Currently this refers specifically to data-source lineage, and only
        result_table and raw_data are supported as the search root node.
        :param raw_direction: retrieval direction
        :param entity_type: data set type, currently only ResultTable / RawData are supported
        :param data_set_id: data set ID
        :param depth: lineage depth, measured by the depth of the data link; see the example below
        :param extra_retrieve: ERP retrieval expression
        :param only_user_entity: whether to retrieve only user-generated entities
        A(DataNode) -> B^(ProcessNode) -> B(DataNode) -> C^(ProcessNode) -> C(DataNode)
        Taking A as the starting point, the lineage above gives
        depth(A) = 0
        depth(B^) = 1
        depth(B) = 1
        depth(C^) = 2
        depth(C) = 2
        :param {LineageDirection} raw_direction lineage tracing direction, OUTPUT is downstream and INPUT is upstream
        TODO: in user mode the starting entity must be validated to be a user-created entity.
"""
m_dgraph_entity_field = {'RawData': 'AccessRawData.id', 'ResultTable': 'ResultTable.result_table_id'}
lineage_nodes = []
lineage_edges = []
user_lineage_nodes = []
user_lineage_edges = []
        # The lineage depth definition differs from Dgraph's notion of depth, so it has to be converted
dgraph_depth = (depth + 1) * 2 - 1
m_direction_query_template_args = {
LineageDirection.OUTPUT.value: [
dict(
relation='children',
relation_predicate='lineage.descend',
dgraph_entity_id_field=m_dgraph_entity_field[entity_type],
dgraph_entity_id=data_set_id,
depth=dgraph_depth,
)
],
LineageDirection.INPUT.value: [
dict(
relation='parents',
relation_predicate='~lineage.descend',
dgraph_entity_id_field=m_dgraph_entity_field[entity_type],
dgraph_entity_id=data_set_id,
depth=dgraph_depth,
)
],
LineageDirection.BOTH.value: [
dict(
relation='children',
relation_predicate='lineage.descend',
dgraph_entity_id_field=m_dgraph_entity_field[entity_type],
dgraph_entity_id=data_set_id,
depth=dgraph_depth,
),
dict(
relation='parents',
relation_predicate='~lineage.descend',
dgraph_entity_id_field=m_dgraph_entity_field[entity_type],
dgraph_entity_id=data_set_id,
depth=dgraph_depth,
),
],
}
max_depth = 0
for template_args in m_direction_query_template_args[raw_direction.value]:
if extra_retrieve:
                # Perform the additional ERP protocol retrieval
parsed_retrieve_args = {}
for k, per_retrieve_args in extra_retrieve.items():
per_parsed_retrieve_args = {}
md_type = rt_context.md_types_registry.get(k, None)
if not md_type:
raise ERPError(_('Fail to get md type in extra retrieve protocol content.'))
self._parse_per_retrieve_expression(per_retrieve_args, per_parsed_retrieve_args, md_type)
parsed_retrieve_args[k] = per_parsed_retrieve_args
parsed_retrieve_args.update(template_args)
for md_type_name in ['ResultTable', 'AccessRawData', 'DataProcessing']:
if md_type_name not in parsed_retrieve_args:
parsed_retrieve_args[md_type_name] = {}
statement = self.lineage_with_extra_template.render(
is_dict=lambda x: isinstance(x, dict), **parsed_retrieve_args
)
else:
statement = self.lineage_template.render(template_args)
module_logger.info('[Dgraph Lineage] statement={}'.format(statement))
response = self.query(statement)
if extra_retrieve:
nodes_extra = {
'ResultTable': {item['identifier_value']: item for item in response['data']['ResultTable']},
'AccessRawData': {item['identifier_value']: item for item in response['data']['AccessRawData']},
'DataProcessing': {item['identifier_value']: item for item in response['data']['DataProcessing']},
}
else:
nodes_extra = None
dgraph_nodes = response['data']['target']
_max_depth = self._traverse_nodes(
None,
None,
dgraph_nodes,
lineage_nodes,
user_lineage_nodes,
lineage_edges,
user_lineage_edges,
nodes_extra=nodes_extra,
only_user_entity=only_user_entity,
)
if _max_depth > max_depth:
max_depth = _max_depth
        # Looking at the data structure returned by Dgraph, the following situation exists:
# A -> B -> C -> D
# ^
# |
# E
        # When B.children contains the relations (B -> C, C -> D), E.children may contain either (E->C, C->D) or just (E->C),
        # so the final traversal result needs deduplication to guarantee uniqueness.
lineage_nodes = self._remove_dup_nodes(lineage_nodes)
lineage_edges = self._remove_dup_edges(lineage_edges)
        # RT -> DP -> RT -> DP -> RT contains three RTs, but the depth only counts as 2
max_depth = max_depth - 1
if only_user_entity:
return_lineage_nodes = user_lineage_nodes
return_lineage_edges = user_lineage_edges
else:
return_lineage_nodes = lineage_nodes
return_lineage_edges = lineage_edges
return {
'criteria': {
'entity_type': entity_type,
'data_set_id': data_set_id,
'depth': max_depth,
'direction': raw_direction.value,
},
'nodes': [attr.asdict(node) for node in return_lineage_nodes],
'relations': [self._purify_relation(attr.asdict(relation)) for relation in return_lineage_edges],
}
def check_lineage_integrity(self, check_list, session_ins=None):
"""
        Check the integrity of the data-flow lineage associated with data_processing_relation.
        :param check_list: list, data records carrying the primary-key info used to look up the target result tables
        :param session_ins: obj, session instance; if there is no session context the dgraph backend itself is used instead
        :return: boolean, complete (True) or incomplete (False)
"""
if check_list is None:
return False
if session_ins is None:
session_ins = self
lineage_nodes = []
lineage_edges = []
user_lineage_nodes = []
user_lineage_edges = []
identifier_set = set()
for check_data in check_list:
if check_data.get('method', None) != 'CREATE':
continue
changed_dict = json.loads(check_data['changed_data'])
if (
changed_dict.get('data_directing', None) == 'input'
and changed_dict.get('data_set_type', None) == 'raw_data'
):
continue
identifier_val = changed_dict.get('data_set_id', None)
if identifier_val:
identifier_set.add(str(identifier_val))
if not identifier_set:
return True
identifier_list_str = '["{}"]'.format('", "'.join(identifier_set))
statement = self.lineage_check_template.render(
dict(dgraph_entity_id_field='ResultTable.result_table_id', dgraph_entity_id=identifier_list_str, depth=201)
)
response = session_ins.query(statement)
dgraph_nodes = response['data']['target']
        # The DPR-to-DS relation is one-to-one, so check whether the query result is complete
if len(identifier_set) != len(dgraph_nodes):
return False
        # Get the nodes exempted from the check
check_exclude_nodes = self._get_check_exclude_nodes(response)
        # Only check result tables whose platform is bkdata, and skip those carrying the exemption tag
need_check_nodes = [
item
for item in dgraph_nodes
if item['ResultTable.platform'] == 'bkdata' and item['uid'] not in check_exclude_nodes
]
if not need_check_nodes:
return True
        # Trace each queried data_set back to its lineage root; if the root is not raw_data, the lineage is broken
for check_node in need_check_nodes:
self._traverse_nodes(
None, None, [check_node], lineage_nodes, user_lineage_nodes, lineage_edges, user_lineage_edges
)
lineage_nodes_list = [attr.asdict(node) for node in self._remove_dup_nodes(lineage_nodes)]
if not lineage_nodes_list:
return False
root_node = lineage_nodes_list.pop()
if root_node.get('type', None) != 'raw_data':
return False
return True
@staticmethod
def _get_check_exclude_nodes(check_data):
        # Check whether the result data carries a tag exempting it from the lineage check
check_exclude_nodes = set()
check_tags = check_data.get('data', {}).get('check_tags', [])
for check_tag in check_tags:
for tag_item in check_tag.get('tags', []):
if tag_item['Tag.code'] == 'lineage_check_exclude':
check_exclude_nodes.add(str(check_tag['uid']))
break
return check_exclude_nodes
def _traverse_nodes(
self,
from_lineage_node,
direction,
dgraph_nodes,
lineage_nodes,
user_lineage_nodes,
lineage_edges,
user_lineage_edges,
nodes_extra=None,
only_user_entity=False,
):
"""
        Traverse the query result from the Dgraph engine: extract -> convert -> store the lineage results.
        :param {LineageNode} from_lineage_node traversal starting point, may be None
        :param {LineageDirection} direction direction between the starting point and its child nodes
        :param {DgraphNode[]} dgraph_nodes result nodes returned by the Dgraph engine
        :param {LineageNode[]} lineage_nodes lineage node list, collecting the nodes seen while traversing the Dgraph result
        :param {LineageEdge[]} lineage_edges lineage edge list, collecting the node relations seen while traversing the Dgraph result
        :paramExample dgraph_nodes example
[
{
"AccessRawData.id": 1,
"AccessRawData.raw_data_nane": "rawdata1",
"children": [
{
"DataProcessing.processing_id": "591_rt1",
"DataProcessing.processing_alias": "591_rt1"
}
]
}
]
        :return {Int} the maximum depth reached when traversing downward from all nodes in dgraph_nodes
        :note make sure the dgraph_nodes data never contains both children and parents at the same time
"""
if nodes_extra is None:
nodes_extra = {}
        # Current depth
max_depth = 0
for _node in dgraph_nodes:
_lineage_node = generate_lineage_node(_node, nodes_extra)
if getattr(rt_local, 'last_user_status', None) is None:
rt_local.last_user_status = {}
            # Skip unexpected lineage nodes
if _lineage_node is None:
continue
            # Add the node relation
if from_lineage_node is not None and direction is not None:
edge = None
if direction == LineageDirection.OUTPUT:
edge = LineageEdge(from_lineage_node, _lineage_node)
if direction == LineageDirection.INPUT:
edge = LineageEdge(_lineage_node, from_lineage_node)
lineage_edges.append(edge)
if only_user_entity:
edge_into_user = attr.evolve(edge)
self._process_user_edge(
direction, from_lineage_node, _lineage_node, user_lineage_edges, edge_into_user
)
            # Depth of the current node, filled in by the traversal below; defaults to 0
depth = 0
lineage_nodes.append(_lineage_node)
if _lineage_node.generate_type == 'user':
user_lineage_nodes.append(_lineage_node)
            # Forward (downstream) traversal
if 'children' in _node:
depth = self._traverse_nodes(
_lineage_node,
LineageDirection.OUTPUT,
_node['children'],
lineage_nodes,
user_lineage_nodes,
lineage_edges,
user_lineage_edges,
nodes_extra=nodes_extra,
only_user_entity=only_user_entity,
)
            # Backward (upstream) traversal
if 'parents' in _node:
depth = self._traverse_nodes(
_lineage_node,
LineageDirection.INPUT,
_node['parents'],
lineage_nodes,
user_lineage_nodes,
lineage_edges,
user_lineage_edges,
nodes_extra=nodes_extra,
only_user_entity=only_user_entity,
)
            # If the current node is a data node, increase the depth by 1
depth += 1 if isinstance(_lineage_node, LineageDataNode) else 0
if depth > max_depth:
max_depth = depth
return max_depth
def _parse_per_retrieve_expression(self, retrieve_expression, parsed_retrieve_expression, md_type):
"""
        Parse the additional ERP protocol expression (temporary).
        :param retrieve_expression: protocol expression
        :param parsed_retrieve_expression: parsed expression adapted to the GraphQL template
        :param md_type: metadata type
:return:
"""
attr_defs = attr.fields_dict(md_type)
if retrieve_expression is True:
retrieve_expression = {
attr_def.name: True
for attr_def in attr.fields(md_type)
if not issubclass(parse_list_type(attr_def.type)[1], MetaData)
}
wildcard = retrieve_expression.pop('*', False)
if wildcard:
retrieve_expression.update(
{
attr_def.name: True
for attr_def in attr.fields(md_type)
if not issubclass(parse_list_type(attr_def.type)[1], MetaData)
}
)
for k, v in retrieve_expression.items():
if k in attr_defs:
is_list, ib_primitive_type = parse_list_type(attr_defs[k].type)
if not issubclass(ib_primitive_type, MetaData):
parsed_retrieve_expression[
''.join(
[
k,
':',
md_type.__name__,
'.',
k,
]
)
if k not in md_type.__metadata__.get('dgraph').get('common_predicates')
else k
] = v
else:
k_parsed_retrieve_expression = {}
self._parse_per_retrieve_expression(v, k_parsed_retrieve_expression, ib_primitive_type.agent)
parsed_retrieve_expression[
''.join(
[
k,
':',
md_type.__name__,
'.',
k,
]
)
] = k_parsed_retrieve_expression
elif k.startswith('~') and k.split('~')[1] in md_type.metadata['dgraph']['reverse_edges']:
k_parsed_retrieve_expression = {}
self._parse_per_retrieve_expression(
v, k_parsed_retrieve_expression, md_type.metadata['dgraph']['reverse_edges'][k.split('~')[1]]
)
parsed_retrieve_expression[k] = k_parsed_retrieve_expression
else:
raise ERPError(_('Not existed attr name {}'.format(k)))
parsed_retrieve_expression[
''.join(
[
'identifier_value',
':',
md_type.__name__,
'.',
md_type.metadata['identifier'].name,
]
)
] = True
@staticmethod
def _process_user_edge(direction, from_lineage_node, _lineage_node, user_lineage_edges, edge_into_user):
"""
        Record only the relations between user-created entity nodes in the lineage, and generate relations
        that span virtual (system-generated) nodes.
        :param direction: direction
        :param from_lineage_node: starting node
        :param _lineage_node: pointed-to node
        :param user_lineage_edges: user lineage relations
        :param edge_into_user: the current relation that may be recorded into the user lineage
:return:
"""
if from_lineage_node.generate_type == 'user':
rt_local.last_user_status[_lineage_node.qualified_name] = edge_into_user
if _lineage_node.generate_type == 'system':
return
user_lineage_edges.append(edge_into_user)
elif from_lineage_node.generate_type == 'system' and _lineage_node.generate_type == 'system':
previous_user_edge = rt_local.last_user_status[from_lineage_node.qualified_name]
rt_local.last_user_status[_lineage_node.qualified_name] = previous_user_edge
elif from_lineage_node.generate_type == 'system' and _lineage_node.generate_type == 'user':
previous_user_edge = rt_local.last_user_status[from_lineage_node.qualified_name]
edge = attr.evolve(
previous_user_edge, **{'end' if direction == LineageDirection.OUTPUT else 'start': _lineage_node}
)
user_lineage_edges.append(edge)
@staticmethod
def _remove_dup_nodes(lineage_nodes):
"""
        Remove duplicate nodes.
        :todo when lineage_nodes gets very large this incurs excessive memory overhead
"""
verified_lineage_nodes = []
        # A set membership check is more efficient here
indexs = set()
for _node in lineage_nodes:
if _node.uid not in indexs:
verified_lineage_nodes.append(_node)
indexs.add(_node.uid)
return verified_lineage_nodes
@staticmethod
def _remove_dup_edges(lineage_edges):
"""
        Remove duplicate relations.
"""
verified_lineage_edges = []
        # A set membership check is more efficient here
indexs = set()
for _edge in lineage_edges:
_index = '{}::{}'.format(_edge.start.uid, _edge.end.uid)
if _index not in indexs:
verified_lineage_edges.append(_edge)
indexs.add(_index)
return verified_lineage_edges
@staticmethod
def _purify_relation(relation):
"""
        Keep only the target fields of the relation.
"""
        target_node_fields = ['type', 'qualified_name']
        return {
            'from': {k: v for k, v in relation['start'].items() if k in target_node_fields},
            'to': {k: v for k, v in relation['end'].items() if k in target_node_fields},
}
def generate_lineage_node(node, nodes_extra):
"""
    Identify the Dgraph node data and convert it into the corresponding lineage node object.
"""
if 'AccessRawData.id' in node:
extra = get_node_extra(nodes_extra, node['AccessRawData.id'], 'AccessRawData')
return LineageDataNode(node['uid'], 'raw_data', node['AccessRawData.id'], extra=extra)
if 'ResultTable.result_table_id' in node:
extra = get_node_extra(nodes_extra, node['ResultTable.result_table_id'], 'ResultTable')
return LineageDataNode(
node['uid'],
'result_table',
node['ResultTable.result_table_id'],
extra=extra,
generate_type=node['ResultTable.generate_type'],
)
if 'DataProcessing.processing_id' in node:
extra = get_node_extra(nodes_extra, node['DataProcessing.processing_id'], 'DataProcessing')
return LineageProcessNode(
node['uid'],
node['DataProcessing.processing_id'],
extra=extra,
generate_type=node['DataProcessing.generate_type'],
)
return None
def get_node_extra(nodes_extra, identifier_value, md_type_name):
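    # Pull this node's ERP "extra" attributes (if any were fetched) keyed by metadata type and identifier.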
extra = {}
if nodes_extra and identifier_value in nodes_extra[md_type_name]:
extra = nodes_extra[md_type_name][identifier_value]
    # The raw lineage traversal can visit duplicate nodes
if 'identifier_value' in extra:
extra.pop('identifier_value')
return extra
|
# coding: utf-8
"""
3Di API
3Di simulation API (latest version: 3.0) Framework release: 1.0.16 3Di core release: 2.0.11 deployed on: 07:33AM (UTC) on September 04, 2020 # noqa: E501
The version of the OpenAPI document: 3.0
Contact: info@nelen-schuurmans.nl
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class File(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'url': 'str',
'storage_name': 'str',
'filename': 'str',
'bucket': 'str',
'prefix': 'str',
'etag': 'str',
'size': 'int',
'expiry_date': 'date',
'related_object': 'str',
'type': 'str',
'state': 'str',
'state_description': 'str',
'meta': 'object',
'id': 'int'
}
attribute_map = {
'url': 'url',
'storage_name': 'storage_name',
'filename': 'filename',
'bucket': 'bucket',
'prefix': 'prefix',
'etag': 'etag',
'size': 'size',
'expiry_date': 'expiry_date',
'related_object': 'related_object',
'type': 'type',
'state': 'state',
'state_description': 'state_description',
'meta': 'meta',
'id': 'id'
}
def __init__(self, url=None, storage_name=None, filename=None, bucket=None, prefix=None, etag=None, size=None, expiry_date=None, related_object=None, type=None, state=None, state_description=None, meta=None, id=None, local_vars_configuration=None): # noqa: E501
"""File - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._url = None
self._storage_name = None
self._filename = None
self._bucket = None
self._prefix = None
self._etag = None
self._size = None
self._expiry_date = None
self._related_object = None
self._type = None
self._state = None
self._state_description = None
self._meta = None
self._id = None
self.discriminator = None
if url is not None:
self.url = url
if storage_name is not None:
self.storage_name = storage_name
self.filename = filename
self.bucket = bucket
self.prefix = prefix
self.etag = etag
self.size = size
if expiry_date is not None:
self.expiry_date = expiry_date
if related_object is not None:
self.related_object = related_object
self.type = type
self.state = state
self.state_description = state_description
self.meta = meta
if id is not None:
self.id = id
@property
def url(self):
"""Gets the url of this File. # noqa: E501
:return: The url of this File. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this File.
:param url: The url of this File. # noqa: E501
:type: str
"""
self._url = url
@property
def storage_name(self):
"""Gets the storage_name of this File. # noqa: E501
:return: The storage_name of this File. # noqa: E501
:rtype: str
"""
return self._storage_name
@storage_name.setter
def storage_name(self, storage_name):
"""Sets the storage_name of this File.
:param storage_name: The storage_name of this File. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
storage_name is not None and len(storage_name) < 1):
raise ValueError("Invalid value for `storage_name`, length must be greater than or equal to `1`") # noqa: E501
self._storage_name = storage_name
@property
def filename(self):
"""Gets the filename of this File. # noqa: E501
:return: The filename of this File. # noqa: E501
:rtype: str
"""
return self._filename
@filename.setter
def filename(self, filename):
"""Sets the filename of this File.
:param filename: The filename of this File. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and filename is None: # noqa: E501
raise ValueError("Invalid value for `filename`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
filename is not None and len(filename) > 256):
raise ValueError("Invalid value for `filename`, length must be less than or equal to `256`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
filename is not None and len(filename) < 1):
raise ValueError("Invalid value for `filename`, length must be greater than or equal to `1`") # noqa: E501
self._filename = filename
@property
def bucket(self):
"""Gets the bucket of this File. # noqa: E501
:return: The bucket of this File. # noqa: E501
:rtype: str
"""
return self._bucket
@bucket.setter
def bucket(self, bucket):
"""Sets the bucket of this File.
:param bucket: The bucket of this File. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and bucket is None: # noqa: E501
raise ValueError("Invalid value for `bucket`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
bucket is not None and len(bucket) > 256):
raise ValueError("Invalid value for `bucket`, length must be less than or equal to `256`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
bucket is not None and len(bucket) < 1):
raise ValueError("Invalid value for `bucket`, length must be greater than or equal to `1`") # noqa: E501
self._bucket = bucket
@property
def prefix(self):
"""Gets the prefix of this File. # noqa: E501
:return: The prefix of this File. # noqa: E501
:rtype: str
"""
return self._prefix
@prefix.setter
def prefix(self, prefix):
"""Sets the prefix of this File.
:param prefix: The prefix of this File. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
prefix is not None and len(prefix) > 256):
raise ValueError("Invalid value for `prefix`, length must be less than or equal to `256`") # noqa: E501
self._prefix = prefix
@property
def etag(self):
"""Gets the etag of this File. # noqa: E501
Optional eTag (md5sum) # noqa: E501
:return: The etag of this File. # noqa: E501
:rtype: str
"""
return self._etag
@etag.setter
def etag(self, etag):
"""Sets the etag of this File.
Optional eTag (md5sum) # noqa: E501
:param etag: The etag of this File. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
etag is not None and len(etag) > 256):
raise ValueError("Invalid value for `etag`, length must be less than or equal to `256`") # noqa: E501
self._etag = etag
@property
def size(self):
"""Gets the size of this File. # noqa: E501
Filesize in bytes # noqa: E501
:return: The size of this File. # noqa: E501
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this File.
Filesize in bytes # noqa: E501
:param size: The size of this File. # noqa: E501
:type: int
"""
if (self.local_vars_configuration.client_side_validation and
size is not None and size > 9223372036854775807): # noqa: E501
raise ValueError("Invalid value for `size`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
size is not None and size < -9223372036854775808): # noqa: E501
raise ValueError("Invalid value for `size`, must be a value greater than or equal to `-9223372036854775808`") # noqa: E501
self._size = size
@property
def expiry_date(self):
"""Gets the expiry_date of this File. # noqa: E501
:return: The expiry_date of this File. # noqa: E501
:rtype: date
"""
return self._expiry_date
@expiry_date.setter
def expiry_date(self, expiry_date):
"""Sets the expiry_date of this File.
:param expiry_date: The expiry_date of this File. # noqa: E501
:type: date
"""
self._expiry_date = expiry_date
@property
def related_object(self):
"""Gets the related_object of this File. # noqa: E501
:return: The related_object of this File. # noqa: E501
:rtype: str
"""
return self._related_object
@related_object.setter
def related_object(self, related_object):
"""Sets the related_object of this File.
:param related_object: The related_object of this File. # noqa: E501
:type: str
"""
self._related_object = related_object
@property
def type(self):
"""Gets the type of this File. # noqa: E501
:return: The type of this File. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this File.
:param type: The type of this File. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
allowed_values = ["timeseries", "rastertimeseries", "savedstate", "results", "rasters", "gridadmin", "geojson", "initialwaterlevel", "bulklateral", "bulk_boundaryconditions"] # noqa: E501
if self.local_vars_configuration.client_side_validation and type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def state(self):
"""Gets the state of this File. # noqa: E501
:return: The state of this File. # noqa: E501
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this File.
:param state: The state of this File. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and state is None: # noqa: E501
raise ValueError("Invalid value for `state`, must not be `None`") # noqa: E501
allowed_values = ["created", "uploaded", "processed", "error", "removed"] # noqa: E501
if self.local_vars_configuration.client_side_validation and state not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}" # noqa: E501
.format(state, allowed_values)
)
self._state = state
@property
def state_description(self):
"""Gets the state_description of this File. # noqa: E501
:return: The state_description of this File. # noqa: E501
:rtype: str
"""
return self._state_description
@state_description.setter
def state_description(self, state_description):
"""Sets the state_description of this File.
:param state_description: The state_description of this File. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
state_description is not None and len(state_description) > 512):
raise ValueError("Invalid value for `state_description`, length must be less than or equal to `512`") # noqa: E501
self._state_description = state_description
@property
def meta(self):
"""Gets the meta of this File. # noqa: E501
:return: The meta of this File. # noqa: E501
:rtype: object
"""
return self._meta
@meta.setter
def meta(self, meta):
"""Sets the meta of this File.
:param meta: The meta of this File. # noqa: E501
:type: object
"""
self._meta = meta
@property
def id(self):
"""Gets the id of this File. # noqa: E501
:return: The id of this File. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this File.
:param id: The id of this File. # noqa: E501
:type: int
"""
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, File):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, File):
return True
return self.to_dict() != other.to_dict()
|
import os
from datetime import datetime
from werkzeug.utils import secure_filename
import app
from app.base.models import FileInputStorage
from app.SendStatements import blueprint
from flask import render_template, request, redirect, flash
from flask_login import login_required
from config import *
@blueprint.route('/<template>')
@login_required
def route_template(template):
return render_template(template + '.html')
ALLOWED_EXTENSIONS = {'xml'}
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@blueprint.route('/save_demand_file', methods=["GET", "POST"])
@login_required
def save():
if request.method == "POST":
if 'file' not in request.files:
flash('No file part', "warning")
return render_template("demand.html")
file = request.files['file']
if file.filename == '':
flash('No file selected for uploading')
return render_template("demand.html")
if file and allowed_file(file.filename):
filename = secure_filename(datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p") + "_" + file.filename)
file.save(os.path.join('app/base/static/files/Demand', filename))
            file_Path = os.path.join('app/base/static/files/Demand', filename)
if os.path.exists('app/base/static/files/Demand/' + filename):
save_file = FileInputStorage(fileName=filename,
filePath=file_Path, fileType="DemandFile", save_at=datetime.now(),
status="Pending")
app.db.session.add(save_file)
app.db.session.commit()
flash('File successfully uploaded', 'success')
else:
flash("File was not found check this folder to confirm :" + file_Path, "warning")
return render_template("demand.html")
else:
flash('Not saved Allowed file types are xml', 'danger')
return render_template("demand.html")
return render_template("demand.html")
@blueprint.route('/Dormancy_Notification', methods=["GET", "POST"])
@login_required
def Dormancy_Notification():
if request.method == "POST":
if 'file' not in request.files:
flash('No file part', "warning")
return render_template("Dormancy_Notification.html")
file = request.files['file']
if file.filename == '':
flash('No file selected for uploading')
return render_template("Dormancy_Notification.html")
if file and allowed_file(file.filename):
filename = secure_filename(datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p") + "_" + file.filename)
file.save(os.path.join('app/base/static/files/dormancyNotification', filename))
            file_Path = os.path.join('app/base/static/files/dormancyNotification', filename)
if os.path.exists('app/base/static/files/dormancyNotification/' + filename):
save_file = FileInputStorage(fileName=filename,
filePath=file_Path, fileType="dormancyNotification", save_at=datetime.now(),
status="Pending")
app.db.session.add(save_file)
app.db.session.commit()
flash('File successfully uploaded', 'success')
else:
flash("File was not found check this folder to confirm :" + file_Path, "warning")
return render_template("Dormancy_Notification.html")
else:
flash('Not saved Allowed file types are xml', 'danger')
return render_template("Dormancy_Notification.html")
return render_template("Dormancy_Notification.html")
@blueprint.route('/ufaclaimletter', methods=["GET", "POST"])
@login_required
def ufaclaimletter():
if request.method == "POST":
if 'file' not in request.files:
flash('No file part', "warning")
return render_template("ufaclaimletter.html")
file = request.files['file']
if file.filename == '':
flash('No file selected for uploading')
return render_template("ufaclaimletter.html")
if file and allowed_file(file.filename):
filename = secure_filename(datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p") + "_" + file.filename)
file.save(os.path.join('app/base/static/files/Ufaa', filename))
            file_Path = os.path.join('app/base/static/files/Ufaa', filename)
if os.path.exists('app/base/static/files/Ufaa/' + filename):
save_file = FileInputStorage(fileName=filename,
filePath=file_Path, fileType="Ufaa", save_at=datetime.now(),
status="Pending")
app.db.session.add(save_file)
app.db.session.commit()
flash('File successfully uploaded', 'success')
else:
flash("File was not found check this folder to confirm :" + file_Path, "warning")
return render_template("ufaclaimletter.html")
else:
flash('Not saved Allowed file types are xml', 'danger')
return render_template("ufaclaimletter.html")
return render_template("ufaclaimletter.html")
@blueprint.route('/bankStatement', methods=["GET", "POST"])
@login_required
def bankStatement():
if request.method == "POST":
if 'file' not in request.files:
flash('No file part', "warning")
return render_template("bankstatement.html")
file = request.files['file']
if file.filename == '':
flash('No file selected for uploading')
return render_template("bankstatement.html")
if file and allowed_file(file.filename):
filename = secure_filename(datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p") + "_" + file.filename)
file.save(os.path.join('app/base/static/files/bankStatements', filename))
            file_Path = os.path.join('app/base/static/files/bankStatements', filename)
if os.path.exists('app/base/static/files/bankStatements/' + filename):
save_file = FileInputStorage(fileName=filename,
filePath=file_Path, fileType="bank_statement", save_at=datetime.now(),
status="Pending")
app.db.session.add(save_file)
app.db.session.commit()
flash('File successfully uploaded', 'success')
else:
flash("File was not found check this folder to confirm :" + file_Path, "warning")
return render_template("bankstatement.html")
else:
flash('Not saved Allowed file types are xml', 'danger')
return render_template("bankstatement.html")
return render_template("bankstatement.html")
|
#
# HELLO ROUTES
#
from main import app
@app.route('/hello', methods=['GET'])
def get_hello():
return "Hello"
@app.route('/goodbye', methods=['GET'])
def get_goodbye():
return "Goodbye"
|
#!/usr/bin/python
# coding=utf-8
from __future__ import with_statement
try:
import json
json # workaround for pyflakes issue #13
except ImportError:
import simplejson as json
import subprocess
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch, call
import ceph
def run_only(func, predicate):
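    # Return the test function unchanged when the predicate holds; otherwise return a no-op so the test is effectively skipped.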
if predicate():
return func
else:
def f(arg):
pass
return f
def run_only_if_assertSequenceEqual_is_available(func):
pred = lambda: 'assertSequenceEqual' in dir(unittest.TestCase)
return run_only(func, pred)
def run_only_if_subprocess_check_output_is_available(func):
pred = lambda: 'check_output' in dir(subprocess)
return run_only(func, pred)
class TestCounterIterator(unittest.TestCase):
@run_only_if_assertSequenceEqual_is_available
def test_empty(self):
data = {}
expected = []
actual = list(ceph.flatten_dictionary(data))
self.assertSequenceEqual(actual, expected)
@run_only_if_assertSequenceEqual_is_available
def test_simple(self):
data = {'a': 1, 'b': 2}
expected = [('a', 1), ('b', 2)]
actual = list(ceph.flatten_dictionary(data))
self.assertSequenceEqual(actual, expected)
@run_only_if_assertSequenceEqual_is_available
def test_prefix(self):
data = {'a': 1, 'b': 2}
expected = [('Z.a', 1), ('Z.b', 2)]
actual = list(ceph.flatten_dictionary(data, prefix='Z'))
self.assertSequenceEqual(actual, expected)
@run_only_if_assertSequenceEqual_is_available
def test_sep(self):
data = {'a': 1, 'b': 2}
expected = [('Z:a', 1), ('Z:b', 2)]
actual = list(ceph.flatten_dictionary(data, prefix='Z', sep=':'))
self.assertSequenceEqual(actual, expected)
@run_only_if_assertSequenceEqual_is_available
def test_nested(self):
data = {'a': 1, 'b': 2, 'c': {'d': 3}}
expected = [('a', 1), ('b', 2), ('c.d', 3)]
actual = list(ceph.flatten_dictionary(data))
self.assertSequenceEqual(actual, expected)
@run_only_if_assertSequenceEqual_is_available
def test_doubly_nested(self):
data = {'a': 1, 'b': 2, 'c': {'d': 3}, 'e': {'f': {'g': 1}}}
expected = [('a', 1), ('b', 2), ('c.d', 3), ('e.f.g', 1)]
actual = list(ceph.flatten_dictionary(data))
self.assertSequenceEqual(actual, expected)
@run_only_if_assertSequenceEqual_is_available
def test_complex(self):
data = {"val": 0,
"max": 524288000,
"get": 60910,
"wait": {"avgcount": 0,
"sum": 0},
}
expected = [
('get', 60910),
('max', 524288000),
('val', 0),
('wait.avgcount', 0),
('wait.sum', 0),
]
actual = list(ceph.flatten_dictionary(data))
self.assertSequenceEqual(actual, expected)
class TestCephCollectorSocketNameHandling(CollectorTestCase):
def setUp(self):
config = get_collector_config('CephCollector', {
'interval': 10,
})
self.collector = ceph.CephCollector(config, None)
def test_counter_default_prefix(self):
expected = 'ceph.osd.325'
sock = '/var/run/ceph/ceph-osd.325.asok'
actual = self.collector._get_counter_prefix_from_socket_name(sock)
self.assertEquals(actual, expected)
def test_counter_alternate_prefix(self):
expected = 'ceph.keep-osd.325'
sock = '/var/run/ceph/keep-osd.325.asok'
actual = self.collector._get_counter_prefix_from_socket_name(sock)
self.assertEquals(actual, expected)
def test_get_socket_paths(self):
config = get_collector_config('CephCollector', {
'socket_path': '/path/',
'socket_prefix': 'prefix-',
'socket_ext': 'ext',
})
collector = ceph.CephCollector(config, None)
with patch('glob.glob') as glob:
collector._get_socket_paths()
glob.assert_called_with('/path/prefix-*.ext')
class TestCephCollectorGettingStats(CollectorTestCase):
def setUp(self):
config = get_collector_config('CephCollector', {
'interval': 10,
})
self.collector = ceph.CephCollector(config, None)
@run_only_if_subprocess_check_output_is_available
def test_load_works(self):
expected = {'a': 1,
'b': 2,
}
with patch('subprocess.check_output') as check_output:
check_output.return_value = json.dumps(expected)
actual = self.collector._get_stats_from_socket('a_socket_name')
check_output.assert_called_with(['/usr/bin/ceph',
'--admin-daemon',
'a_socket_name',
'perf',
'dump',
])
self.assertEqual(actual, expected)
@run_only_if_subprocess_check_output_is_available
def test_ceph_command_fails(self):
with patch('subprocess.check_output') as check_output:
check_output.side_effect = subprocess.CalledProcessError(
255, ['/usr/bin/ceph'], 'error!',
)
actual = self.collector._get_stats_from_socket('a_socket_name')
check_output.assert_called_with(['/usr/bin/ceph',
'--admin-daemon',
'a_socket_name',
'perf',
'dump',
])
self.assertEqual(actual, {})
@run_only_if_subprocess_check_output_is_available
def test_json_decode_fails(self):
input = {'a': 1,
'b': 2,
}
with patch('subprocess.check_output') as check_output:
check_output.return_value = json.dumps(input)
with patch('json.loads') as loads:
loads.side_effect = ValueError('bad data')
actual = self.collector._get_stats_from_socket('a_socket_name')
check_output.assert_called_with(['/usr/bin/ceph',
'--admin-daemon',
'a_socket_name',
'perf',
'dump',
])
loads.assert_called_with(json.dumps(input))
self.assertEqual(actual, {})
class TestCephCollectorPublish(CollectorTestCase):
def setUp(self):
config = get_collector_config('CephCollector', {
'interval': 10,
})
self.collector = ceph.CephCollector(config, None)
def test_simple(self):
with patch.object(self.collector, 'publish') as publish:
self.collector._publish_stats('prefix', {'a': 1})
publish.assert_called_with('prefix.a', 1)
def test_multiple(self):
with patch.object(self.collector, 'publish') as publish:
self.collector._publish_stats('prefix', {'a': 1, 'b': 2})
publish.assert_has_calls([call('prefix.a', 1),
call('prefix.b', 2),
])
if __name__ == "__main__":
unittest.main()
|
from django.urls import path
from crash_course import views
from django.contrib.auth.decorators import login_required
urlpatterns = [
path('courses', views.CrashCourseList.as_view()),
path('course/<str:course_slug>/chapters', views.CourseChapterList.as_view()),
path('course/<str:course_slug>/chapter/<str:chapter_slug>/sections', views.ChapterSectionList.as_view()),
path('course/<str:course_slug>/chapter/<str:chapter_slug>/section/<str:section_slug>', views.SectionContentList.as_view()),
path('section/<str:section_slug>', login_required(views.BookMarkUpdate.as_view())),
path('course/<str:course_slug>', login_required(views.StarMarkUpdate.as_view())),
]
|
from app import socketio
from flask_socketio import emit
from models import vehicles, tokens
# Clean up / replace with Victor's "Garage" stuff.
def find_user_vehicles(email):
for id, token in tokens.items():
if token['email'] == email:
print('found user ', email)
try:
my_vehicles = token['vehicles']
except KeyError:
my_vehicles = vehicles.find_vehicles(email)
token['vehicles'] = my_vehicles
print('find_user_vehicles: ', my_vehicles)
return my_vehicles
return None
def find_all_vehicles():
return vehicles.find_all_vehicles()
@socketio.on('list_vehicle_ids')
def list_vehicle_ids(params):
print('list_vehicle_ids ', params)
message = {}
if params and params['email']:
message['email'] = params['email']
my_vehicles = find_user_vehicles(params['email'])
else:
my_vehicles = find_all_vehicles()
if my_vehicles is None:
return
vehicle_ids = []
for vehicle in my_vehicles:
vehicle_ids.append(vehicle)
message['response'] = {
"vehicle_ids": vehicle_ids
}
print('list_vehicle_ids result', message)
emit('list_vehicle_ids', message)
@socketio.on('connect')
def connect():
print('Client connected')
@socketio.on('disconnect')
def disconnect():
print('Client disconnected')
|
# This example checks the current criteria of the active LOD object 'op'.
# If it is "User LOD Level" the current level is set to the maximum level.
import c4d
# get current criteria from active LOD object
criteria = op[c4d.LOD_CRITERIA]
# check if User LOD Level
if criteria == c4d.LOD_CRITERIA_MANUAL:
# get maximum level
maxLevel = op.GetLevelCount() - 1
# set current level to max level
op[c4d.LOD_CURRENT_LEVEL] = maxLevel
c4d.EventAdd()
|
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render
import os
from .models import *
def home(request, game_name=None):
filtered_game = None
context = {}
if settings.DEBUG:
context["debug"] = (
f'< DB_HOST="{settings.DATABASES["default"]["HOST"]}"'
f'| PROXY="{os.environ.get("USE_CLOUD_SQL_AUTH_PROXY", "False")}"'
f'| BUCKET="{settings.GS_BUCKET_NAME}" >'
)
context["game_name"] = game_name
context["games"] = Game.objects.all().order_by("name")
if game_name:
search = Game.objects.filter(name__iexact=game_name)
if len(search) == 1:
filtered_game = search.get()
else:
context["error"] = f"Invalid game: {game_name}"
return render(request, "index.html", context)
context["matches"] = Match.objects.filter(game=filtered_game).order_by(
"-datetime"
)
else:
context["matches"] = Match.objects.all().order_by("-datetime")
context["players"] = Player.objects.all().order_by("name")
context["filtered_game"] = filtered_game
winrates = []
for player in context["players"]:
if filtered_game:
    wins = player.winning_matches.filter(game=filtered_game).count()
    played = player.matches.filter(game=filtered_game).count()
else:
    wins = player.winning_matches.count()
    played = player.matches.count()
# Avoid a ZeroDivisionError for players with no recorded matches.
rate = wins / played if played else 0
winrates.append({"player": player.name, "rate": f"{rate * 100:.1f}%"})
context["winrates"] = winrates
return render(request, "index.html", context)
|
import discord
import json
import os
import requests
from discord.ext import commands, tasks
from dotenv import load_dotenv
from itertools import cycle
from function import *
# Earthquake reporting
load_dotenv()
bot = commands.Bot(command_prefix = '-')
status = cycle(['請使用:-help 查看指令','Python好難QQ','努力學習Python中'])
bot.remove_command('help')
data = sets(
os.getenv("token"), APIToken=os.getenv("APIToken"))
def setup():
try:
open(data.checkFile)
except:
with open(data.checkFile, "w") as outfile:
json.dump({}, outfile, ensure_ascii=False, indent=4)
print("建立 check.json 完成")
@bot.event
async def on_ready():
print("-"*15)
print(bot.user.name)
print(bot.user.id)
print(bot.user)
print("-"*15)
change_status.start()
setup()
if data.APIToken:
earthquake.start()
print("地震報告啟動")
else:
print("請至 https://opendata.cwb.gov.tw/userLogin 獲取中央氣象局TOKEN並放置於 .env 檔案中")
@tasks.loop(seconds=10)
async def earthquake():
# Large earthquakes
API = f"https://opendata.cwb.gov.tw/api/v1/rest/datastore/E-A0015-001?Authorization={data.APIToken}&format=JSON&areaName="
# Small earthquakes
API2 = f"https://opendata.cwb.gov.tw/api/v1/rest/datastore/E-A0016-001?Authorization={data.APIToken}&format=JSON"
b = requests.get(API).json()
s = requests.get(API2).json()
_API = b["records"]["earthquake"][0]["earthquakeInfo"]["originTime"]
_API2 = s["records"]["earthquake"][0]["earthquakeInfo"]["originTime"]
async def goTo(how, now):
for ch in data.channels:
await sosIn(bot.get_channel(ch), ({API: b, API2: s}[how]), data)
with open(data.checkFile, 'w') as outfile:
json.dump(now, outfile, ensure_ascii=False, indent=4)
with open(data.checkFile, "r") as file:
file = json.load(file)
for i in [API, API2]:
if not file.get(i):
file[i] = ""
if file[API] != _API:
file[API] = _API
await goTo(API, file)
if file[API2] != _API2:
file[API2] = _API2
await goTo(API2, file)
# End of earthquake reporting
# Commands
@bot.command()
async def load(ctx, extension):
bot.load_extension(f'cogs.{extension}')
@bot.command()
async def unload(ctx, extension):
bot.unload_extension(f'cogs.{extension}')
@bot.command()
async def reload(ctx, extension):
bot.unload_extension(f'cogs.{extension}')
bot.load_extension(f'cogs.{extension}')
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
bot.load_extension(f'cogs.{filename[:-3]}')
@bot.command()
async def clear(ctx, num:int):
await ctx.channel.purge(limit = num+1)
await ctx.send('已清理 {} 則訊息'.format(num))
@bot.command()
async def join(ctx):
    # Connect to the caller's current voice channel (discord.py 1.x API,
    # matching the rest of this file).
    channel = ctx.author.voice.channel
    await channel.connect()
@bot.command()
async def leave(ctx):
    await ctx.voice_client.disconnect()
# Events
@tasks.loop(seconds=5)
async def change_status():
await bot.change_presence(activity=discord.Game(next(status)))
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.CommandNotFound):
await ctx.send('請使用-help來查詢目前有的指令!')
bot.run(data.token)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous utilities."""
import json
import math
def unique(array):
"""Return a sorted array of the unique elements in `array`.
No element may be a floating-point NaN. If your data set includes
NaNs, omit them before passing them here.
"""
for x in array:
assert not (isinstance(x, float) and math.isnan(x))
if len(array) < 2:
return array
array_sorted = sorted(array)
array_unique = [array_sorted[0]]
for x in array_sorted[1:]:
assert array_unique[-1] <= x
if array_unique[-1] != x:
array_unique.append(x)
return array_unique
def unique_indices(array):
"""Return an array of the indices of the unique elements in `array`.
No element may be a floating-point NaN. If your data set includes
NaNs, omit them before passing them here.
"""
for x in array:
assert not (isinstance(x, float) and math.isnan(x))
if len(array) == 0:
return []
if len(array) == 1:
return [0]
array_sorted = sorted((x, i) for i, x in enumerate(array))
array_unique = [array_sorted[0][1]]
for x, i in array_sorted[1:]:
assert array[array_unique[-1]] <= x
if array[array_unique[-1]] != x:
array_unique.append(i)
return sorted(array_unique)
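# Illustrative traces of the two helpers above (not part of the original module):
#   unique([3, 1, 3, 2])          => [1, 2, 3]
#   unique_indices([3, 1, 3, 2])  => [0, 1, 3]   (indices of the first 3, the 1, and the 2)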
def float_sum(iterable):
"""Return the sum of elements of `iterable` in floating-point.
This implementation uses Kahan-Babuška summation.
"""
s = 0.0
c = 0.0
for x in iterable:
xf = float(x)
s1 = s + xf
if abs(x) < abs(s):
c += ((s - s1) + xf)
else:
c += ((xf - s1) + s)
s = s1
return s + c
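# Quick sanity check of the compensated sum (illustrative, not from the original
# module): plain floating-point addition loses the 1.0 below to rounding, while
# the Kahan-Babuska compensation recovers it.
#   sum([1e16, 1.0, -1e16])        => 0.0
#   float_sum([1e16, 1.0, -1e16])  => 1.0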
def casefold(string):
# XXX Not really right, but it'll do for now.
return string.upper().lower()
def cursor_row(cursor, nullok=None):
if nullok is None:
nullok = False
try:
row = cursor.next()
except StopIteration:
if nullok:
return None
raise ValueError('Empty cursor')
else:
try:
cursor.next()
except StopIteration:
pass
else:
raise ValueError('Multiple-result cursor')
return row
def cursor_value(cursor, nullok=None):
row = cursor_row(cursor, nullok)
if row is None:
assert nullok
return None
if len(row) != 1:
raise ValueError('Non-unit cursor')
return row[0]
def json_dumps(obj):
"""Return a JSON string of obj, compactly and deterministically."""
return json.dumps(obj, sort_keys=True)
def override(interface):
def wrap(method):
assert method.__name__ in dir(interface)
return method
return wrap
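# Hypothetical usage of the override marker (the interface and method names are
# made up for illustration):
#   class Backend(object):
#       def query(self): ...
#   class SQLiteBackend(Backend):
#       @override(Backend)
#       def query(self): ...
# The assert fires at class-definition time if the decorated name is not
# defined on the given interface.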
|
from django.apps import AppConfig
class FHIRAppConfig(AppConfig):
name = 'corehq.motech.fhir'
def ready(self):
from . import signals # noqa # pylint: disable=unused-import
from . import serializers # noqa # pylint: disable=unused-import
|
# this one is like your scripts with argv
def print_two(*args):
arg1, arg2 = args
print(f"arg1: {arg1}, arg2: {arg2}")
# ok, that *args is actually pointless, we can just do this
def print_two_again(arg1, arg2):
print(f"arg1: {arg1}, arg2: {arg2}")
# this just takes one argument
def print_one(arg1):
    print(f"arg1: {arg1}")
# this one takes no arguments
def print_none():
print("I got nothin'.")
print_two("CoRy", "Wyszynski")
print_two_again("CoRy", "Wyszynski")
print_one("First!")
print_none()
|
#!/usr/bin/env python3
'''
Developed with <3 by the Bishop Fox Continuous Attack Surface Testing (CAST) team.
https://www.bishopfox.com/continuous-attack-surface-testing/how-cast-works/
Author: @noperator
Purpose: Determine the software version of a remote PAN-OS target.
Notes: - Requires version-table.txt in the same directory.
- Usage of this tool for attacking targets without prior mutual
consent is illegal. It is the end user's responsibility to obey
all applicable local, state, and federal laws. Developers assume
no liability and are not responsible for any misuse or damage
caused by this program.
Usage: python3 panos-scanner.py [-h] [-v] [-s] -t TARGET
'''
from argparse import ArgumentParser
from datetime import datetime, timedelta
from requests import get
from requests.exceptions import HTTPError, ConnectTimeout, SSLError, ConnectionError, ReadTimeout
from sys import argv, stderr, exit
from urllib3 import disable_warnings
from urllib3.exceptions import InsecureRequestWarning
disable_warnings(InsecureRequestWarning)
verbose = False
def etag_to_datetime(etag):
epoch_hex = etag[-8:]
return datetime.fromtimestamp(
int(epoch_hex, 16)
).date()
def last_modified_to_datetime(last_modified):
return datetime.strptime(
last_modified[:-4],
'%a, %d %b %Y %X'
).date()
def get_resource(target, resource, date_headers, errors):
try:
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0',
'Connection': 'close',
'Accept-Language': 'en-US,en;q=0.5',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Upgrade-Insecure-Requests': '1'
}
resp = get(
'%s/%s' % (target, resource),
headers=headers,
timeout=5,
verify=False
)
resp.raise_for_status()
if verbose:
print('[+]', resource, file=stderr)
return {h: resp.headers[h].strip('"') for h in date_headers
if h in resp.headers}
except (HTTPError, ReadTimeout) as e:
if verbose:
print('[-]', resource, '({})'.format(type(e).__name__), file=stderr)
return None
except errors as e:
raise e
def load_version_table(version_table):
with open(version_table, 'r') as f:
entries = [line.strip().split() for line in f.readlines()]
return {e[0]: datetime.strptime(' '.join(e[1:]), '%b %d %Y').date()
for e in entries}
def check_date(version_table, date):
matches = {}
for n in [0, 1, -1, 2, -2]:
nearby_date = date + timedelta(n)
versions = [version for version, date in version_table.items()
if date == nearby_date]
if n == 0:
key = 'exact'
else:
key = 'approximate'
if key not in matches:
matches[key] = {'date': nearby_date, 'versions': versions}
return matches
def get_matches(date_headers, resp_headers, version_table):
matches = {}
for header in date_headers.keys():
if header in resp_headers:
date = globals()[date_headers[header]](resp_headers[header])
date_matches = check_date(version_table, date)
for precision, match in date_matches.items():
if match['versions']:
if precision not in matches.keys():
matches[precision] = []
matches[precision].append(match)
if verbose:
print(
'[*]',
'%s ~ %s' % (date, match['date']) if date != match['date'] else date,
'=>',
','.join(match['versions']),
file=stderr
)
return matches
def main():
parser = ArgumentParser(description='Determine the software version of a remote PAN-OS target. Requires version-table.txt in the same directory.')
parser.add_argument('-v', dest='verbose', action='store_true', help='verbose output')
parser.add_argument('-s', dest='stop', action='store_true', help='stop after one exact match')
parser.add_argument('-c', dest='link_cve_url', action='store_true', help='link to PAN-OS CVE URL for discovered versions')
parser.add_argument('-t', dest='target', required=True, help='https://example.com')
args = parser.parse_args()
static_resources = [
'global-protect/login.esp',
'php/login.php',
'global-protect/portal/css/login.css',
'js/Pan.js',
'global-protect/portal/images/favicon.ico',
'login/images/favicon.ico',
'global-protect/portal/images/logo-pan-48525a.svg',
]
version_table = load_version_table('version-table.txt')
# The keys in "date_headers" represent HTTP response headers that we're
# looking for. Each of those headers maps to a function in this namespace
# that knows how to decode that header value into a datetime.
date_headers = {
'ETag': 'etag_to_datetime',
'Last-Modified': 'last_modified_to_datetime'
}
# A match is a dictionary containing a date/version pair. When populated,
# each precision key (i.e., "exact" and "approximate") in this
# "total_matches" data structure will map to a single list of possibly
# several match dictionaries.
total_matches = {
'exact': [],
'approximate': []
}
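# For illustration only (hypothetical date/version values), a populated
# structure might look like:
#   {'exact': [{'date': date(2019, 7, 30), 'versions': ['9.0.3']}],
#    'approximate': []}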
# These errors are indicative of target-level issues. Don't continue
# requesting other resources when encountering these; instead, bail.
target_errors = (ConnectTimeout, SSLError, ConnectionError)
if args.verbose:
print('[*]', args.target, file=stderr)
global verbose
verbose = True
# Check for the presence of each static resource.
for resource in static_resources:
try:
resp_headers = get_resource(
args.target,
resource,
date_headers.keys(),
target_errors,
)
except target_errors as e:
print(type(e).__name__, file=stderr)
exit(1)
if resp_headers is None:
continue
# Convert date-related HTTP headers to a standardized format, and
# store any matching version strings.
total_matches.update(get_matches(date_headers, resp_headers, version_table))
if args.stop and len(total_matches['exact']):
break
# Print results.
if not len(sum(total_matches.values(), [])):
print('no matches found')
else:
printed = []
for precision, matches in total_matches.items():
for match in matches:
if match['versions'] and match not in printed:
printed.append(match)
if args.link_cve_url:
cve_url = 'https://security.paloaltonetworks.com/?product=PAN-OS&version=PAN-OS+'
for version in match['versions']:
major, minor = version.split('.')[:2]
print('[*]', 'CVEs for PAN-OS v{}.{}:\n[*] {}{}.{}'.format(major, minor, cve_url, major, minor))
print(','.join(match['versions']), match['date'], '(%s)' % precision)
if __name__ == '__main__':
main()
|
#!/usr/bin/python
# DQN implementation of https://github.com/matthiasplappert/keras-rl for Keras
# was used with epsilon-greedy per-episode decay policy.
import numpy as np
import gym
from gym import wrappers
from tfinterface.utils import get_run
from tfinterface.reinforcement import DQN, ExpandedStateEnv
import random
import tensorflow as tf
ENV_NAME = 'LunarLander-v2'
run = get_run()
#env
env = gym.make(ENV_NAME)
env = wrappers.Monitor(env, "monitor/{run}".format(run = run), video_callable=lambda step: step % 50 == 0)
env = ExpandedStateEnv(env, 3)
# To get repeatable results.
sd = 16
np.random.seed(sd)
random.seed(sd)
env.seed(sd)
#parameters
state_temporal_augmentation = 3
nb_actions = env.action_space.n
nb_states = env.observation_space.shape[0] * state_temporal_augmentation
class Network(object):
def __init__(self, inputs, nb_actions):
ops = dict(
kernel_initializer=tf.random_uniform_initializer(minval=0.0, maxval=0.01),
bias_initializer=tf.random_uniform_initializer(minval=0.0, maxval=0.01)
)
net = inputs.s
net = tf.layers.dense(net, 64, activation=tf.nn.elu, name="elu_layer")
net = tf.nn.dropout(net, inputs.keep_prob)
self.Qs = tf.layers.dense(net, nb_actions)
with tf.device("cpu:0"):
dqn = DQN(
lambda inputs: Network(inputs, nb_actions),
nb_states,
seed = sd,
eps = 0.1,
target_update = 0.001
)
dqn.fit(env)
# TODO: tf.train.exponential_decay could be used to schedule the epsilon decay.
|
import numpy as np
print("Hoi","lekkkkahh")
def surface_area(r):
return np.pi*r**2
def circumference(r):
return 2*np.pi*r
#print("Circumference of circle with radius 1: ", circumference(1))
#print("Surface area of circle with radius 1: ", surface_area(1))
|
import numpy as np
import pylab
import mahotas as mh
import search
import image_split as split
import UI
import time
"""
f = open('sample.txt','w')
for i in range(len(label)):
f.write(' '.join(str(x) for x in label[i])+'\n')
pylab.show()
"""
count = 0
while count <6:
count+=1
RECT =UI.getOrigin()
pic = UI.getPic(RECT)
#pic.save("screenshot.png")
#image = mh.imread('screenshot.png')
image = np.array(pic)
#mg stands for "matching game"
position,info = split.locate(image)
print(position)
print(info)
label = split.split(image,position,info)
[click,board,success] = search.solve(label)
num_click = 0
for (x,y) in click:
UI.click((RECT[0] + position[2]+(2*y-1)*info[1]/2,RECT[1] + position[0]+(2*x-1)*info[0]/2))
num_click+=1
time.sleep(0.05)
if num_click%2==0:
time.sleep(0.2)
if success:
break
for i in range(3):
time.sleep(0.5)
UI.click((RECT[0] + (position[2]+position[3])/2,RECT[1] + (position[0]+position[1]-info[0])/2))
|
from .SdsBoundaryType import SdsBoundaryType
from .SdsNamespace import SdsNamespace
from .SdsSearchMode import SdsSearchMode
from .SdsStream import SdsStream
from .SdsExtrapolationMode import SdsExtrapolationMode
from .SdsInterpolationMode import SdsInterpolationMode
from .SdsStreamIndex import SdsStreamIndex
from .SdsStreamMode import SdsStreamMode
from .SdsStreamPropertyOverride import SdsStreamPropertyOverride
from .SdsStreamView import SdsStreamView
from .SdsStreamViewMap import SdsStreamViewMap
from .SdsStreamViewProperty import SdsStreamViewProperty
from .SdsType import SdsType
from .SdsTypeCode import SdsTypeCode
from .SdsTypeProperty import SdsTypeProperty
|
from sys import stdin
def island(m):
cont = 0
for i in range(len(m)):
for j in range(len(m[0])):
if m[i][j] == 1:
cont += 4
if j > 0 and m[i][j - 1]:
cont -= 2
if i > 0 and m[i - 1][j]:
cont -= 2
return cont
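# Worked example (illustrative): three land cells contribute 3 * 4 = 12 edges
# and share two borders, so the perimeter is 12 - 2 * 2 = 8.
#   island([[1, 1],
#           [1, 0]])  => 8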
if __name__ == '__main__':
isl = []
while True:
a = [int(c) for c in stdin.readline().strip().split()]
if not a:
break
isl.append(a)
print(island(isl))
|
""" Testing package info
"""
import nibabel as nib
def test_pkg_info():
"""Simple smoke test
Hits:
- nibabel.get_info
- nibabel.pkg_info.get_pkg_info
- nibabel.pkg_info.pkg_commit_hash
"""
info = nib.get_info()
|
from django.db import models
class KeyValueManager(models.Manager):
def create_batch(self, **keys):
return [
self.create(key=key, value=value)
for key, value in keys.items()
]
class KeyValue(models.Model):
key = models.CharField(max_length=32, unique=True)
value = models.CharField(max_length=32)
objects = KeyValueManager()
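# Usage sketch (illustrative keys, not from the original project): create_batch
# saves one row per keyword argument and returns the created instances, e.g.
#   KeyValue.objects.create_batch(color="red", size="L")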
|
import numpy as np
class PointFeatureEncoder(object):
def __init__(self, config, point_cloud_range=None):
super().__init__()
self.point_encoding_config = config
assert list(self.point_encoding_config.src_feature_list[0:3]) == ['x', 'y', 'z']
self.used_feature_list = self.point_encoding_config.used_feature_list
self.src_feature_list = self.point_encoding_config.src_feature_list
self.point_cloud_range = point_cloud_range
@property
def num_point_features(self):
return getattr(self, self.point_encoding_config.encoding_type)(points=None)
def forward(self, data_dict):
"""
Args:
data_dict:
points: (N, 3 + C_in)
...
Returns:
data_dict:
points: (N, 3 + C_out),
use_lead_xyz: whether to use xyz as point-wise features
...
"""
# data_dict['points'], use_lead_xyz = getattr(self, self.point_encoding_config.encoding_type)(
# data_dict['points']
# )
if self.point_encoding_config.encoding_type == "absolute_coordinates_encoding":
data_dict['use_lead_xyz'] = True
else:
data_dict['use_lead_xyz'] = False
# print("points_intensity", np.min(data_dict['points'][:,3]), np.max(data_dict['points'][:,3]))
return data_dict
def absolute_coordinates_encoding(self, points=None):
if points is None:
num_output_features = len(self.used_feature_list)
return num_output_features
point_feature_list = [points[:, 0:3]]
for x in self.used_feature_list:
if x in ['x', 'y', 'z']:
continue
idx = self.src_feature_list.index(x)
point_feature_list.append(points[:, idx:idx+1])
function = self.point_encoding_config.get("function", None)
if function is not None:
pos = int(function[0]) - 2
if function[1] == "tanh":
point_feature_list[pos] = np.tanh(point_feature_list[pos])
elif function[1].startswith("clip"):
min, max = function[1][5:].split("-")
min, max = float(min), float(max)
point_feature_list[pos] = np.tanh(np.clip(point_feature_list[pos], min, max))
point_features = np.concatenate(point_feature_list, axis=1)
return point_features, True
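# Note on the optional "function" post-processing above (the config values below
# are assumed, not from the original project): with function = ["4", "clip_0-1"],
# pos = 4 - 2 = 2, so the second non-xyz channel is clipped to [0, 1] and then
# squashed with tanh; with function = ["3", "tanh"], the first non-xyz channel
# is passed through tanh only.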
|
import h5py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import argparse
import os
parser = argparse.ArgumentParser(description='Plot matching evaluation results')
parser.add_argument('results_path', help='matching results file, source bag file, or working directory')
args = parser.parse_args()
motion_map = {'yaw': 'Yaw', 'strafe_side': 'Strafe', 'strafe_back': 'Strafe'}
style_map = {'yaw': False, 'strafe_side': 'Sideways', 'strafe_back': 'Backwards'}
unit_map = {'yaw': 'degrees', 'strafe_side': 'meters', 'strafe_back': 'meters'}
scale_map = {'yaw': 1, 'strafe_side': 0.2, 'strafe_back': 0.5}
fovs = []
for yaml in os.listdir(args.results_path):
if not os.path.isdir(os.path.join(args.results_path, yaml)) and yaml.endswith('.yaml'):
fov = os.path.splitext(os.path.basename(yaml))[0]
fovs.append(fov)
fovs.sort(key=int)
rep = dict()
framediff = dict()
detdesclist = []
for motion in os.listdir(args.results_path):
if os.path.isdir(os.path.join(args.results_path, motion)):
if motion not in motion_map:
continue
bag_dir = os.path.join(args.results_path, motion)
for fovstr in fovs:
for filename in os.listdir(bag_dir):
if filename.split('.')[1] == fovstr and filename.endswith('.matching.hdf5') and "+LR" not in filename:
results_file = os.path.join(bag_dir, filename)
with h5py.File(results_file, 'r') as f:
attrs = dict(f['attributes'].attrs.items())
detdesc = (attrs['detector_type'], int(fovstr), motion)
detdesclist.append(detdesc)
stats = f['match_stats'][:]
df = pd.DataFrame(stats)
statsavg = df.groupby(0).mean().to_records()
statsavg = statsavg.view(np.float64).reshape(len(statsavg), -1)
framediff[detdesc], _, _, _, rep[detdesc] = statsavg.T
if len(detdesclist) > 0:
sns.set()
detdesclist = sorted(list(set(detdesclist)))
df = pd.DataFrame()
for detdesc in detdesclist:
for i in range(int(framediff[detdesc].max())):
if i not in framediff[detdesc] and detdesc[2] == 'yaw' and i > int(detdesc[1]) and i < 360 - int(detdesc[1]) and int(detdesc[1]) < 180:
df = df.append(pd.DataFrame({'Detector': detdesc[0], 'FOV': int(detdesc[1]), 'Repeatability': [0], 'Baseline ({})'.format(unit_map[detdesc[2]]): i * scale_map[detdesc[2]], 'Motion': motion_map[detdesc[2]], 'Direction': style_map[detdesc[2]]}))
continue
if style_map[detdesc[2]] is not False:
df = df.append(pd.DataFrame({'Detector': detdesc[0], 'FOV': int(detdesc[1]), 'Repeatability': rep[detdesc], 'Baseline ({})'.format(unit_map[detdesc[2]]): framediff[detdesc] * scale_map[detdesc[2]], 'Motion': motion_map[detdesc[2]], 'Direction': style_map[detdesc[2]]}))
else:
df = df.append(pd.DataFrame({'Detector': detdesc[0], 'FOV': int(detdesc[1]), 'Repeatability': rep[detdesc], 'Baseline ({})'.format(unit_map[detdesc[2]]): framediff[detdesc] * scale_map[detdesc[2]], 'Motion': motion_map[detdesc[2]]}))
for motion in motion_map:
if style_map[motion] is not False:
g = sns.relplot(y='Repeatability', x='Baseline ({})'.format(unit_map[motion]), hue='FOV', row='Detector', kind='line', data=df.loc[df['Motion'] == motion_map[motion]], estimator=None, legend='full', palette=sns.color_palette('muted', n_colors=df.FOV.unique().shape[0]), aspect=3, height=1.8, style='Direction')
g.fig.subplots_adjust(hspace=0.25, right=0.77)
else:
g = sns.relplot(y='Repeatability', x='Baseline ({})'.format(unit_map[motion]), hue='FOV', row='Detector', kind='line', data=df.loc[df['Motion'] == motion_map[motion]], estimator=None, legend='full', palette=sns.color_palette('muted', n_colors=df.FOV.unique().shape[0]), aspect=3, height=1.8)
g.fig.subplots_adjust(hspace=0.25, right=0.84)
[plt.setp(ax.texts, text="") for ax in g.axes.flat]
g.set_titles(row_template='{row_name}', col_template='FOV={col_name}')
g.savefig('repeatability_{}.png'.format(motion))
plt.show()
|
# Copyright (c) 2019 The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from testlib import *
# Note: This isn't going to work because MSI caches won't be built. Need to
# think more about this. Maybe we should have another parameter to
# gem5_verify_config...
config_path = joinpath(config.base_dir, 'configs', 'learning_gem5', 'part3')
ref_path = joinpath(getcwd(), 'ref')
gem5_verify_config(
name='simple_ruby_test',
verifiers = (verifier.MatchStdoutNoPerf(joinpath(ref_path, 'threads')),),
config=joinpath(config_path, 'simple_ruby.py'),
config_args = [],
protocol = 'MSI',
valid_isas=("X86",), # Currently only x86 has the threads test
valid_hosts=constants.target_host["X86"], # dynamically linked
)
gem5_verify_config(
name='ruby_test_test',
verifiers = (verifier.MatchStdout(joinpath(ref_path, 'test')),),
config=joinpath(config_path, 'ruby_test.py'),
config_args = [],
protocol = 'MSI',
valid_isas=("X86",), # Currently only x86 has the threads test
)
|
def foo(a: float, b: float, c: float, d: float, cache=set()):
cache.update([a, b, c, d])
return sum([a, b, c, d])/4, max(cache)
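# Note the shared mutable default: `cache=set()` is evaluated once at function
# definition, so the running maximum persists across calls (illustrative trace):
#   foo(1.0, 2.0, 3.0, 4.0)  => (2.5, 4.0)
#   foo(0.0, 0.0, 0.0, 0.0)  => (0.0, 4.0)   # max still reflects the earlier call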
|
"""EnvSpec class."""
from garage import InOutSpec
class EnvSpec(InOutSpec):
"""Describes the action and observation spaces of an environment.
Args:
observation_space (akro.Space): The observation space of the env.
action_space (akro.Space): The action space of the env.
"""
def __init__(self, observation_space, action_space):
super().__init__(action_space, observation_space)
@property
def action_space(self):
"""Get action space.
Returns:
akro.Space: Action space of the env.
"""
return self.input_space
@property
def observation_space(self):
"""Get observation space of the env.
Returns:
akro.Space: Observation space.
"""
return self.output_space
@action_space.setter
def action_space(self, action_space):
"""Set action space of the env.
Args:
action_space (akro.Space): Action space.
"""
self._input_space = action_space
@observation_space.setter
def observation_space(self, observation_space):
"""Set observation space of the env.
Args:
observation_space (akro.Space): Observation space.
"""
self._output_space = observation_space
def __eq__(self, other):
"""See :meth:`object.__eq__`.
Args:
other (EnvSpec): :class:`~EnvSpec` to compare with.
Returns:
bool: Whether these :class:`~EnvSpec` instances are equal.
"""
return (self.observation_space == other.observation_space
and self.action_space == other.action_space)
|