# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for rich-text components."""
__author__ = 'Sean Lip'
import os
from extensions import domain
import feconf
import utils
class BaseRichTextComponent(object):
"""Base class for definitions of rich-text components.
This class is not meant to be user-editable. The only methods on it should
be get()-type methods.
"""
# The human-readable name of the rich-text component. Overridden in
# subclasses.
name = ''
# The category the rich-text component falls under in the repository.
# Overridden in subclasses.
category = ''
# A description of the rich-text component. Overridden in subclasses.
description = ''
# The HTML tag name for the component. Overridden in subclasses.
frontend_name = ''
# The tooltip for the icon in the rich-text editor. Overridden in
# subclasses.
tooltip = ''
# Customization arg specifications for the component, including their
# descriptions, schemas and default values. Overridden in subclasses.
_customization_arg_specs = []
# The icon to show in the rich-text editor. This is a representation of the
# .png file in this rich-text component folder, generated with the
# utils.convert_png_to_data_url() function. Overridden in subclasses.
icon_data_url = ''
@property
def id(self):
return self.__class__.__name__
@property
def customization_arg_specs(self):
return [
domain.CustomizationArgSpec(**cas)
for cas in self._customization_arg_specs]
@property
def html_body(self):
"""The HTML code containing directives and templates for the component.
This contains everything needed to display the component once the
necessary attributes are supplied. For rich-text components, this
consists of a single directive/template pair.
"""
js_directives = utils.get_file_contents(os.path.join(
feconf.RTE_EXTENSIONS_DIR, self.id, '%s.js' % self.id))
html_templates = utils.get_file_contents(os.path.join(
feconf.RTE_EXTENSIONS_DIR, self.id, '%s.html' % self.id))
return '<script>%s</script>\n%s' % (js_directives, html_templates)
def to_dict(self):
"""Gets a dict representing this component. Only the default values for
customization args are provided.
"""
return {
'backend_name': self.name,
'customization_arg_specs': [{
'name': ca_spec.name,
'description': ca_spec.description,
'default_value': ca_spec.default_value,
'schema': ca_spec.schema,
} for ca_spec in self.customization_arg_specs],
'frontend_name': self.frontend_name,
'icon_data_url': self.icon_data_url,
'tooltip': self.tooltip,
}
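# A minimal illustration (not part of the original file) of how a concrete component is
# expected to subclass BaseRichTextComponent; the names and the customization arg spec
# below are hypothetical placeholders, not a real Oppia component definition.
class SampleLink(BaseRichTextComponent):
    name = 'SampleLink'
    category = 'Basic Input'
    description = 'A hyperlink to a URL.'
    frontend_name = 'samplelink'
    tooltip = 'Insert a link'
    _customization_arg_specs = [{
        'name': 'url',
        'description': 'The URL the link points to.',
        'schema': {'type': 'unicode'},
        'default_value': 'https://www.example.com',
    }]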
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for the code-blocks in the skill-guide.md file."""
import filecmp
import json
import logging
import os
import shutil
import signal
import subprocess # nosec
import sys
import tempfile
import time
from pathlib import Path
import jsonschema
from jsonschema import Draft4Validator
import pytest
from aea import AEA_DIR
from aea.cli import cli
from aea.configurations.base import DEFAULT_VERSION
from ..helper import extract_code_blocks
from ...common.click_testing import CliRunner
from ...conftest import (
AUTHOR,
CLI_LOG_OPTION,
CONFIGURATION_SCHEMA_DIR,
ROOT_DIR,
SKILL_CONFIGURATION_SCHEMA,
)
MD_FILE = "docs/skill-guide.md"
logger = logging.getLogger(__name__)
class TestBuildSkill:
"""This class contains the tests for the code-blocks in the skill-guide.md file."""
@pytest.fixture(autouse=True)
def _start_oef_node(self, network_node):
"""Start an oef node."""
@classmethod
def setup_class(cls):
"""Setup the test class."""
cls.path = os.path.join(ROOT_DIR, MD_FILE)
cls.code_blocks = extract_code_blocks(filepath=cls.path, filter="python")
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.resource_name = "my_search"
cls.skill_id = AUTHOR + "/" + cls.resource_name + ":" + DEFAULT_VERSION
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
# add packages folder
packages_src = os.path.join(cls.cwd, "packages")
packages_dst = os.path.join(cls.t, "packages")
shutil.copytree(packages_src, packages_dst)
cls.schema = json.load(open(SKILL_CONFIGURATION_SCHEMA))
cls.resolver = jsonschema.RefResolver(
"file://{}/".format(Path(CONFIGURATION_SCHEMA_DIR).absolute()), cls.schema
)
cls.validator = Draft4Validator(cls.schema, resolver=cls.resolver)
os.chdir(cls.t)
cls.init_result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "init", "--local", "--author", AUTHOR],
standalone_mode=False,
)
cls.fetch_result = cls.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"fetch",
"--local",
"fetchai/simple_service_registration:0.1.0",
],
standalone_mode=False,
)
cls.create_result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "create", "--local", cls.agent_name],
standalone_mode=False,
)
if cls.create_result.exit_code == 0:
os.chdir(Path(cls.t, cls.agent_name))
# scaffold skill
cls.result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "scaffold", "skill", cls.resource_name],
standalone_mode=False,
)
# add oef connection
cls.result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "add", "--local", "connection", "fetchai/oef:0.1.0"],
standalone_mode=False,
)
def test_agent_is_fetched(self):
"""Test that the setup was successful."""
assert self.fetch_result.exit_code == 0, "Agent not fetched, setup incomplete"
def test_agent_is_created(self):
"""Test that the setup was successful."""
assert self.create_result.exit_code == 0, "Agent not created, setup incomplete"
def test_read_md_file(self):
"""Teat that the md file is not empty."""
assert self.code_blocks != [], "File must not be empty."
def test_update_skill_and_run(self, pytestconfig):
"""Test that the resource folder contains scaffold handlers.py module."""
if pytestconfig.getoption("ci"):
pytest.skip("Skipping the test since it doesn't work in CI.")
# add packages folder
packages_src = os.path.join(self.cwd, "packages")
packages_dst = os.path.join(os.getcwd(), "packages")
shutil.copytree(packages_src, packages_dst)
path = Path(
self.t, self.agent_name, "skills", self.resource_name, "behaviours.py"
)
original = Path(AEA_DIR, "skills", "scaffold", "behaviours.py")
assert filecmp.cmp(path, original)
with open(path, "w") as file:
file.write(self.code_blocks[0])
path = Path(
self.t, self.agent_name, "skills", self.resource_name, "handlers.py"
)
original = Path(AEA_DIR, "skills", "scaffold", "handlers.py")
assert filecmp.cmp(path, original)
with open(path, "w") as file:
file.write(self.code_blocks[1])
path = Path(
self.t, self.agent_name, "skills", self.resource_name, "my_model.py"
)
os.remove(path)
# Update the yaml file.
path = Path(self.t, self.agent_name, "skills", self.resource_name, "skill.yaml")
yaml_code_block = extract_code_blocks(self.path, filter="yaml")
with open(path, "w") as file:
file.write(yaml_code_block[0])
# update fingerprint
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "fingerprint", "skill", self.skill_id],
standalone_mode=False,
)
assert result.exit_code == 0, "Fingerprinting not successful"
os.chdir(Path(self.t, "simple_service_registration"))
try:
# run service agent
process_one = subprocess.Popen( # nosec
[
sys.executable,
"-m",
"aea.cli",
"run",
"--connections",
"fetchai/oef:0.1.0",
],
stdout=subprocess.PIPE,
env=os.environ.copy(),
)
# run the agent
os.chdir(Path(self.t, self.agent_name))
process_two = subprocess.Popen( # nosec
[
sys.executable,
"-m",
"aea.cli",
"run",
"--connections",
"fetchai/oef:0.1.0",
],
stdout=subprocess.PIPE,
env=os.environ.copy(),
)
time.sleep(7.0)
process_one.send_signal(signal.SIGINT)
process_two.send_signal(signal.SIGINT)
process_one.wait(timeout=5)
process_two.wait(timeout=5)
assert process_one.returncode == 0
assert process_two.returncode == 0
finally:
poll_one = process_one.poll()
if poll_one is None:
process_one.terminate()
process_one.wait(2)
poll_two = process_two.poll()
if poll_two is None:
process_two.terminate()
process_two.wait(2)
os.chdir(self.t)
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "delete", self.agent_name], standalone_mode=False
)
assert result.exit_code == 0
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "delete", "simple_service_registration"],
standalone_mode=False,
)
assert result.exit_code == 0
@classmethod
def teardown_class(cls):
"""Teardowm the test."""
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
|
import os
tg_token = os.environ['TG_TOKEN']
lastfm_api_key = os.environ['LASTFM_API_KEY']
lastfm_secret_key = os.environ['LASTFM_SECRET_KEY']
db = {"user": os.environ['DB_USER'],
"password": os.environ['DB_PASS'],
"address": os.environ['DB_ADDRESS'],
"port": os.environ['DB_PORT'],
"db_name": os.environ['DB_NAME']}
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
# from flask_bootstrap import Bootstrap
from config import app_config
db = SQLAlchemy()
migrate = Migrate()
bcrypt = Bcrypt()
login_manager = LoginManager()
def create_app(config_name):
app = Flask(__name__, instance_relative_config=True)
app.config.from_object(app_config[config_name])
app.config.from_pyfile('config.py')
# Bootstrap(app)
db.init_app(app)
bcrypt.init_app(app)
login_manager.init_app(app)
login_manager.login_message = "You must be logged in to access this page"
login_manager.login_view = "auth.login"
with app.app_context():
if db.engine.url.drivername == 'sqlite':
migrate.init_app(app, db, compare_type=True, render_as_batch=True)
else:
migrate.init_app(app, db)
from tt_scheduler import models
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint)
from .admin import admin as admin_blueprint
app.register_blueprint(admin_blueprint, url_prefix='/admin')
from .user import user as user_blueprint
app.register_blueprint(user_blueprint)
return app
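# A minimal usage sketch (assumptions: "development" is a key of app_config and the
# instance folder holds the config.py loaded by from_pyfile above).
if __name__ == "__main__":
    app = create_app("development")
    app.run(debug=True)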
|
import os
def options():
ops = ["op1: Normal", "op2: Repair"]
showops = input(ops)
if showops == "op1":
os.system("python3 Op1.py")
elif showops == "op2":
os.system("python3 Op2.py")
else:
print("Oops! Try Again")
options()
|
"""A collection of examples for CARLAEnvironment"""
import pygame
from tensorforce import Agent
from tensorforce.environments import CARLAEnvironment
def training_example(num_episodes: int, max_episode_timesteps: int):
# Instantiate the environment (run the CARLA simulator before doing this!)
env = CARLAEnvironment(debug=True)
# Create your own agent (here is just an example)
agent = Agent.create(agent='ppo',
environment=env,
max_episode_timesteps=max_episode_timesteps,
batch_size=1)
# Training loop (you could also use a Runner instead)
# `weights_dir` and `record_dir` are `None` to prevent saving and recording
env.train(agent=agent,
num_episodes=num_episodes, max_episode_timesteps=max_episode_timesteps,
weights_dir=None, record_dir=None)
pygame.quit()
def custom_env_example(num_episodes: int, max_episode_timesteps: int):
# import some libs
import carla
import numpy as np
from tensorforce.environments.carla_environment import CARLAEnvironment, SensorSpecs, env_utils
# Subclass `CARLAEnvironment` to customize it:
class MyCARLAEnvironment(CARLAEnvironment):
# Change actions space: (throttle, steer, brake, reverse)
ACTIONS_SPEC = dict(type='float', shape=(4,), min_value=-1.0, max_value=1.0)
DEFAULT_ACTIONS = np.array([0.0, 0.0, 0.0, 0.0])
# Define your own mapping: actions -> carla.VehicleControl
def actions_to_control(self, actions):
self.control.throttle = float((actions[0] + 1) / 2.0)
self.control.steer = float(actions[1])
self.control.brake = float((actions[2] + 1) / 2.0)
self.control.reverse = bool(actions[3] > 0)
self.control.hand_brake = False
# Define which sensors to use:
def default_sensors(self) -> dict:
sensors = super().default_sensors()
# Substitute the default rgb camera with a semantic segmentation camera
sensors['camera'] = SensorSpecs.segmentation_camera(position='front', attachment_type='Rigid',
image_size_x=self.window_size[0],
image_size_y=self.window_size[1],
sensor_tick=self.tick_time)
# Add a radar sensor
sensors['radar'] = SensorSpecs.radar(position='radar', sensor_tick=self.tick_time)
return sensors
# Define a default agent (only used if env.train(agent=None, ...))
def default_agent(self, **kwargs) -> Agent:
return Agent.create(agent='ppo',
environment=self,
max_episode_timesteps=kwargs.get('max_episode_timesteps'),
batch_size=1)
# Define your own reward function:
def reward(self, actions, time_cost=-2.0):
speed = env_utils.speed(self.vehicle)
speed_limit = self.vehicle.get_speed_limit()
if speed <= speed_limit:
speed_penalty = -1.0 if speed < speed_limit / 2 else 0.0
else:
speed_penalty = speed_limit - speed
return time_cost - self.collision_penalty * 2.0 + speed_penalty
def render(self, sensors_data: dict):
super().render(sensors_data)
env_utils.draw_radar_measurement(debug_helper=self.world.debug, data=sensors_data['radar'])
# Training:
env = MyCARLAEnvironment(debug=True)
env.train(agent=None, # pass None to use the default_agent
num_episodes=num_episodes, max_episode_timesteps=max_episode_timesteps,
weights_dir=None, record_dir=None)
pygame.quit()
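# A minimal entry point sketch (assumption: the CARLA simulator is already running, as
# noted above; the episode counts are placeholders).
if __name__ == '__main__':
    training_example(num_episodes=5, max_episode_timesteps=256)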
|
#!/usr/bin/python
# pickle: with this module you can store any object in a file
# and later read it back in full.
# This is called persistent storage of an object.
import pickle
shoplistfile = 'shoplist.data'
shoplist = ['apple', 'mango', 'carrot']
print(shoplist)
# write to the file
f = open(shoplistfile, 'wb')
pickle.dump(shoplist, f) # dump the object to a file
f.close()
del shoplist # destroy the shoplist variable
# print(shoplist)
# Read back from the storage
f = open(shoplistfile, 'rb')
storedlist = pickle.load(f)
print(storedlist)
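# The same round trip written with context managers (a sketch; the behaviour is the
# same, but the files are closed automatically even if an error occurs).
with open(shoplistfile, 'wb') as f:
    pickle.dump(storedlist, f)
with open(shoplistfile, 'rb') as f:
    print(pickle.load(f))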
|
# Licensed under an MIT open source license - see LICENSE
import os
from astropy.table import Table
import numpy as np
import matplotlib.pyplot as p
import sys
import matplotlib.cm as cm
from matplotlib import rc
# rc("font", **{"family": "sans-serif", "size": 16})
# rc("text", usetex=True)
from scipy.stats import scoreatpercentile
import itertools
widths = {}
lengths = {}
curvature = {}
amplitude = {}
orientation = {}
background = {}
median_bright = {}
branches = {}
# Setup to work on noise. If using from repo, use ["."]
# folders = ["."]
folders = [f for f in os.listdir(".") if os.path.isdir(f) and f[-3:] == '350']
# Proper names to show in plots
labels = {"pipeCenterB59-350": "Pipe",
"polaris-350": "Polaris",
"ic5146-350": "IC-5146",
"orionA-S-350": "Orion-A South",
"lupusI-350": "Lupus",
"orionB-350": "Orion-B",
"taurusN3-350": "Taurus",
"aquilaM2-350": "Aquila",
"orionA-C-350": "Orion-A Center",
"perseus04-350": "Perseus",
"california_cntr-350": "California Center",
"california_east-350": "California East",
"california_west-350": "California West",
"chamaeleonI-350": "Chamaeleon"}
offsets = {"pipeCenterB59-350": 31.697,
"polaris-350": 9.330,
"ic5146-350": 20.728,
"orionA-S-350": 35.219,
"lupusI-350": 14.437,
"orionB-350": 26.216,
"taurusN3-350": 21.273,
"aquilaM2-350": 85.452,
"orionA-C-350": 32.616,
"perseus04-350": 23.698,
"california_cntr-350": 9.005,
"california_east-350": 10.124,
"california_west-350": 14.678,
"chamaeleonI-350": -879.063}
for folder in folders:
# csv = [f for f in os.listdir(folder) if f[-3:] == "csv" and not "rht" in f
# and not "deg" in f]
csv = [f for f in os.listdir(folder) if f[-4:] == "fits" and 'table' in f]
if csv == []:
print "No csv file in %s" % (folder)
else:
for fil in csv:
data = Table.read(folder + "/" + fil)
name = fil[:-11]
widths[name] = data["FWHM"][np.isfinite(data["FWHM"])]
amplitude[name] = data["Amplitude"][np.isfinite(data["Amplitude"])]
lengths[name] = data["Lengths"][np.isfinite(data["FWHM"])]
curvature[name] = data["Curvature"][np.isfinite(data["FWHM"])]
orientation[name] = data["Orientation"][np.isfinite(data["FWHM"])]
background[name] = data["Background"][np.isfinite(data["FWHM"])]
median_bright[name] = data["Median Brightness"][
np.isfinite(data["FWHM"])]
# branches[name] = data['Branch Length']
# Make scatter plots
scatter = sys.argv[1]
if scatter == "T":
scatter = True
else:
scatter = False
# Make triangle plot
triangle_plot = sys.argv[2]
if triangle_plot == "T":
triangle_plot = True
else:
triangle_plot = False
# Create KS Test tables
ks_tests = sys.argv[3]
if ks_tests == "T":
ks_tests = True
else:
ks_tests = False
# Compute the covering fraction
covering_frac = sys.argv[4]
if covering_frac == "T":
covering_frac = True
else:
covering_frac = False
# Examine branch lengths
bran_len = sys.argv[5]
if bran_len == "T":
bran_len = True
else:
bran_len = False
# Return numbers of unresolved widths and non param usage
width_stats = sys.argv[6]
if width_stats == "T":
width_stats = True
else:
width_stats = False
# Scatter plots
if scatter:
# print labels
symb_col = ["bD", "gD", "rD", "kD", "b^", "g^", "r^",
"k^", "bo", "go", "ro", "ko", "bv", "gh", "rh", "kh"]
# for i, key in enumerate(widths.keys()):
# p.plot(np.log10(widths[key][(widths[key] > 0.0)]), np.log10(amplitude[key][(widths[key] > 0.0)]), symb_col[i], label=labels[key],
# markersize=6)
# p.legend()
# p.grid(True)
# p.xlabel("Widths (pc)")
# p.ylabel("Surface Brightness (MJy/sr)")
# # p.xlim([0.0, 1.0])
# # p.ylim([0.0, 2500.])
# p.show()
# p.clf()
# Amplitude vs. Length
lenmed = []
lenstd = []
ampmed = []
ampstd = []
fig = p.figure() # figsize=(12, 9), dpi=100)
for i, key in enumerate(np.sort(widths.keys())):
loglength = np.log10(lengths[key])
lenmed = scoreatpercentile(loglength, 50)
len75 = scoreatpercentile(loglength, 75)
len25 = scoreatpercentile(loglength, 25)
print labels[key]+": "+str([len25, lenmed, len75])
logamp = np.log10(amplitude[key] - background[key])
ampmed = scoreatpercentile(logamp, 50)
amp75 = scoreatpercentile(logamp, 75)
amp25 = scoreatpercentile(logamp, 25)
p.errorbar(lenmed, ampmed, fmt=symb_col[i], xerr=[[np.abs(lenmed - len25)], [np.abs(lenmed - len75)]],
yerr=[[np.abs(ampmed - amp25)], [np.abs(ampmed - amp75)]], label=labels[key], markersize=10,
alpha=0.6)
p.xlabel("log$_{10}($L/ pc)", fontsize=18)
p.ylabel("log$_{10}(I$/ MJy/sr)", fontsize=18)
p.grid(True)
p.legend(loc="lower right", ncol=2, prop={"size": 12}, markerscale=0.75,
numpoints=1)
p.ylim([-0.3, 1.55])
p.xlim([0.1, 1.7])
# fig.savefig("length_vs_amp_centroids.eps", format="eps", dpi=1000)
# p.clf()
p.tight_layout()
p.show()
# Length vs Width
lenmed = []
lenstd = []
widthmed = []
widthstd = []
fig = p.figure() # figsize=(12, 9), dpi=100)
for i, key in enumerate(widths.keys()):
loglength = np.log10(lengths[key][(widths[key] > 0.0)])
lenmed = scoreatpercentile(loglength, 50)
len75 = scoreatpercentile(loglength, 75)
len25 = scoreatpercentile(loglength, 25)
# print labels[i]+": "+str([len25, lenmed, len75])
logwidth = np.log10(widths[key][(widths[key] > 0.0)])
widthmed = scoreatpercentile(logwidth, 50)
width75 = scoreatpercentile(logwidth, 75)
width25 = scoreatpercentile(logwidth, 25)
p.errorbar(lenmed, widthmed, fmt=symb_col[i], xerr=[[np.abs(lenmed - len25)], [np.abs(lenmed - len75)]],
yerr=[
[np.abs(widthmed - width25)], [np.abs(widthmed - width75)]],
label=labels[key], markersize=10, alpha=0.6)
p.xlabel("log$_{10}($L/ pc)", fontsize=18)
p.ylabel("log$_{10}(W$/ pc)", fontsize=18)
p.grid(True)
p.legend(loc="lower right", ncol=2, prop={"size": 12}, markerscale=0.75)
# p.ylim([-1.0, -0.05])
# p.xlim([0.18, 1.6])
# fig.savefig("length_vs_width_centroids.eps", format="eps", dpi=1000)
# p.clf()
p.show()
# Triangle plot
if triangle_plot:
import triangle
for i, key in enumerate(widths.keys()):
if i == 0:
data = np.asarray([np.log10(widths[key][(widths[key] > 0.0)]), np.log10((amplitude[key]-background[key])[(widths[key] > 0.0)]),
np.log10(lengths[key][(widths[key] > 0.0)]), curvature[key][(widths[key] > 0.0)]])
else:
data = np.hstack([data, np.asarray([np.log10(widths[key][(widths[key] > 0.0)]), np.log10((amplitude[key] - background[key])[(widths[key] > 0.0)]),
np.log10(lengths[key][(widths[key] > 0.0)]), curvature[key][(widths[key] > 0.0)]])])
truths = np.array([-1.26, np.NaN, np.NaN, np.NaN])
# Plot it.
figure = triangle.corner(data.T, labels=["log$_{10}$(W/ pc)",
"log$_{10}$($I$/ MJy/sr)",
"log$_{10}$(L/ pc)", r"$\delta$$\theta$", "$\theta$"],
quantiles=[0.15, 0.50, 0.85, 0.995], bins=7,
show_titles=False, title_args={"fontsize": 18},
truths=truths, truth_color='r')
# figure.savefig('hgbs_scatter_hists.pdf', format='pdf', dpi=1000)
p.show()
if ks_tests:
from scipy.stats import ks_2samp
from pandas import DataFrame
import warnings
# Because we have non-continuous distributions, we use a
# bootstrap to create a p-value for the KS test
try:
execfile("/Users/ekoch/Dropbox/code_development/misc/R_ksboot.py")
boot = True
except: # You need R and the Matching package for this to work
warnings.warn("Using scipy ks_2samp, not the bootstrap method.")
boot = False
boot = False
def ks_table(param_dict, boot=boot):
'''
Compute pairwise KS statistics and p-values between the regions in param_dict.
'''
pvals = np.zeros((len(param_dict.keys()), len(param_dict.keys())))
stats = np.zeros((len(param_dict.keys()), len(param_dict.keys())))
for i, key in enumerate(np.sort(param_dict.keys())[::-1]):
print key
for j, key2 in enumerate(np.sort(param_dict.keys())[::-1]):
if i == j:
pvals[i, j] = 0
stats[i, j] = 0
else:
if boot:
values = ks_boot(param_dict[key], param_dict[key2])
else:
values = ks_2samp(param_dict[key], param_dict[key2])
pvals[i, j] = values[1]
stats[i, j] = values[0]
return stats, pvals
ordered_labels = []
for key in np.sort(widths.keys())[::-1]:
ordered_labels.append(labels[key])
# Widths
width_tables = ks_table(widths, boot=boot)
# width_kd_table = DataFrame(
# width_tables[0], index=ordered_labels, columns=ordered_labels)
# # width_kd_table.to_latex("width_ks_table.tex")
# width_kd_table.to_csv("width_ks_table.csv")
width_kd_table = DataFrame(
width_tables[1], index=ordered_labels, columns=ordered_labels)
# width_kd_table.to_latex("width_ks_table_pvals.tex")
width_kd_table.to_csv("width_ks_table_pvals.csv")
# Lengths
# length_tables = ks_table(lengths, boot=boot)
# length_kd_table = DataFrame(
# length_tables[0], index=ordered_labels, columns=ordered_labels)
# # length_kd_table.to_latex("length_ks_table.tex")
# length_kd_table.to_csv("length_ks_table.csv")
# length_kd_table = DataFrame(
# length_tables[1], index=ordered_labels, columns=ordered_labels)
# # length_kd_table.to_latex("length_ks_table_pvals.tex")
# length_kd_table.to_csv("length_ks_table_pvals.csv")
# Orientations
# Convert to sin(2*phi) to deal with continuity issues
# for key in orientation.keys():
# orientation[key] = np.sin(2 * orientation[key])
# orientation_tables = ks_table(orientation, boot=boot)
# orientation_kd_table = DataFrame(
# orientation_tables[0], index=ordered_labels, columns=ordered_labels)
# # orientation_kd_table.to_latex("orientation_ks_table.tex")
# orientation_kd_table.to_csv("orientation_ks_table.csv")
# orientation_kd_table = DataFrame(
# orientation_tables[1], index=ordered_labels, columns=ordered_labels)
# # orientation_kd_table.to_latex("orientation_ks_table_pvals.tex")
# orientation_kd_table.to_csv("orientation_ks_table_pvals.csv")
# Curvature
curvature_tables = ks_table(curvature, boot=boot)
# curvature_kd_table = DataFrame(
# curvature_tables[0], index=ordered_labels, columns=ordered_labels)
# # curvature_kd_table.to_latex("curvature_ks_table.tex")
# curvature_kd_table.to_csv("curvature_ks_table.csv")
curvature_kd_table = DataFrame(
curvature_tables[1], index=ordered_labels, columns=ordered_labels)
# curvature_kd_table.to_latex("curvature_ks_table_pvals.tex")
curvature_kd_table.to_csv("curvature_ks_table_pvals.csv")
# Amplitudes
# amplitude_tables = ks_table(amplitude, boot=boot)
# amplitude_kd_table = DataFrame(
# amplitude_tables[0], index=ordered_labels, columns=ordered_labels)
# # amplitude_kd_table.to_latex("amplitude_ks_table.tex")
# amplitude_kd_table.to_csv("amplitude_ks_table.csv")
# amplitude_kd_table = DataFrame(
# amplitude_tables[1], index=ordered_labels, columns=ordered_labels)
# # amplitude_kd_table.to_latex("amplitude_ks_table_pvals.tex")
# amplitude_kd_table.to_csv("amplitude_ks_table_pvals.csv")
if covering_frac:
from pandas import DataFrame
from astropy.io.fits import getdata
cf = dict.fromkeys(widths.keys())
for i, name in enumerate(np.sort(widths.keys())):
# Load the image in
img = getdata(name + "/" + name + "_regrid_convolved.fits")
model = getdata(name + "/" + name + "_filament_model.fits")
cf[name] = np.nansum(model) / np.nansum(img)
df = DataFrame(cf.values(), index=cf.keys(), columns=["Covering Fraction"])
df = df.sort()
print(df)
df.to_csv("covering_fracs.csv")
if bran_len:
new_branches = {}
for key in branches.keys():
per_branch = []
for lis in branches[key]:
# Split out parts
str_list = lis[1:-1].split(',')
float_list = []
for string in str_list:
float_list.append(float(string))
per_branch.append(float_list)
new_branches[key] = per_branch
for i, key in enumerate(new_branches.keys()):
all_branches = list(itertools.chain(*new_branches[key]))
num_bin = int(np.sqrt(len(all_branches)))
p.subplot(2, 7, i+1)
p.title(labels[key])
p.hist(all_branches, bins=num_bin)
print labels[key], np.percentile(all_branches, [15, 50, 85])
p.show()
if width_stats:
from astropy.table import Table
csv = []
for folder in folders:
test = [f for f in os.listdir(folder) if f[-4:] == 'fits' and 'table' in f]
try:
csv.append(test[0])
except:
print "Not found for " + folder
fail_frac = np.empty((len(csv), ))
unres_frac = np.empty((len(csv), ))
nonparam_frac = np.empty((len(csv), ))
nonparam_success = np.empty((len(csv), ))
num_fils = np.empty((len(csv), ))
for i, (fil, fold) in enumerate(zip(csv, folders)):
t = Table.read(fold+"/"+fil)
# Failed fits
fail_frac[i, ] = sum(np.isnan(t['FWHM'])) #/ float(t['FWHM'].shape[0])
# Unresolved widths
fwhm = t['FWHM']
fwhm = fwhm[np.isfinite(fwhm)]
unres_frac[i, ] = sum(fwhm > 0) #/ float(t['FWHM'].shape[0])
# Number that use non-param fits
nonparam_frac[i, ] = sum(t['Fit Type'] == 'n') #/ float(t['FWHM'].shape[0])
# Number of successful nonparam fits
nonparam_success[i, ] = sum(np.logical_and(t['Fit Type'] == 'n', ~np.isnan(t['FWHM'])))
# Number of filaments
num_fils[i, ] = t['FWHM'].shape[0]
df = Table(np.vstack([csv, num_fils, fail_frac, unres_frac,
nonparam_frac, nonparam_success]).T,
names=['Names', "Number", 'Fail', 'Resolved',
'Nonparam', 'Nonparam Success'])
print(df)
print sum(num_fils)
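# A pure-Python sketch of the bootstrap KS p-value mentioned in the ks_tests section
# above. The original analysis loads ks_boot from an external R helper; this
# permutation-based version is only an assumption of how such a helper could look,
# not the script's actual method.
def ks_boot_sketch(sample1, sample2, nboot=1000):
    from scipy.stats import ks_2samp
    stat = ks_2samp(sample1, sample2)[0]
    pooled = np.concatenate([sample1, sample2])
    n1 = len(sample1)
    count = 0
    for _ in range(nboot):
        perm = np.random.permutation(pooled)
        # Count how often a random relabelling gives a KS statistic at least as large.
        if ks_2samp(perm[:n1], perm[n1:])[0] >= stat:
            count += 1
    return stat, count / float(nboot)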
|
# Copyright (c) 2009 - 2015 Tropo, now part of Cisco
# Released under the MIT license. See the file LICENSE
# for the complete license
# --------------------------------------------
# python app that says a random number between 1 and 100
# --------------------------------------------
from random import *
# The arguments to 'randint' define the start and end of the range.
number = randint(1,100)
answer()
say("Hello. Your magic number today is %s. Goodbye." % number)
hangup()
|
import gmpy2
n = 2739699434633097765008468371124644741923408864896396205946954196101304653772173210372608955799251139999322976228678445908704975780068946332615022064030241384638601426716056067126300711933438732265846838735860353259574129074615298010047322960704972157930663061480726566962254887144927753449042590678730779046154516549667611603792754880414526688217305247008627664864637891883902537649625488225238118503996674292057904635593729208703096877231276911845233833770015093213639131244386867600956112884383105437861665666273910566732634878464610789895607273567372933766243229798663389032807187003756226177111720510187664096691560511459141773632683383938152396711991246874813205614169161561906148974478519987935950318569760474249427787310865749167740917232799538099494710964837536211535351200520324575676987080484141561336505103872809932354748531675934527453231255132361489570816639925234935907741385330442961877410196615649696508210921
e = 65537
c = 2082926013138674164997791605512226759362824531322433048281306983526001801581956788909408046338065370689701410862433705395338736589120086871506362760060657440410056869674907314204346790554619655855805666327905912762300412323371126871463045993946331927129882715778396764969311565407104426500284824495461252591576672989633930916837016411523983491364869137945678029616541477271287052575817523864089061675401543733151180624855361245733039022140321494471318934716652758163593956711915212195328671373739342124211743835858897895276513396783328942978903764790088495033176253777832808572717335076829539988337505582696026111326821783912902713222712310343791755341823415393931813610365987465739339849380173805882522026704474308541271732478035913770922189429089852921985416202844838873352090355685075965831663443962706473737852392107876993485163981653038588544562512597409585410384189546449890975409183661424334789750460016306977673969147
estimate = gmpy2.iroot(n,3)[0] - 10000 # rough guess for the smallest prime
prime = gmpy2.next_prime(estimate)
for _ in range(512):
prime = gmpy2.next_prime(prime)
if n%prime ==0:
print(prime)
p = 139926822890670655977195962770726941986198973494425759476822219188316377933161673759394901805855617939978281385708941597117531007973713846772205166659227214187622925135931456526921198848312215276630974951050306344412865900075089120689559331322162952820292429725303619113876104177529039691490258588465409397803
q = 139926822890670655977195962770726941986198973494425759476822219188316377933161673759394901805855617939978281385708941597117531007973713846772205166659227214187622925135931456526921198848312215276630974951050306344412865900075089120689559331322162952820292429725303619113876104177529039691490258588465409494847
r = n//(p*q)
phi = (p-1)*(q-1)*(r-1)
d = pow(e, -1, phi)
m = pow(c, d, n)
print(bytes.fromhex(hex(m)[2:]).decode())
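# Optional sanity check (a sketch): re-encrypting the recovered plaintext must give back
# the original ciphertext if the factorisation and private exponent are correct.
assert pow(m, e, n) == c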
|
import numpy as np
import matplotlib.pyplot as plt
from astropy.cosmology import FlatLambdaCDM
#import pysynphot as S
from abspecphot import abspecphot
cosmo = FlatLambdaCDM(H0=73, Om0=0.3)
# testing
#vega_wave,vega_flux = np.loadtxt('spectra/vega.dat',dtype=float,usecols=(0,1),unpack=True)
#vega_u=abspecphot(vega_wave,vega_flux, 'filters/U_UVOT.txt')
#print(vega_u)
# Here is the input spectrum and the corresponding distance
input_wave,input_flux = np.loadtxt('spectra/Gaia16apd_uv.dat', dtype=float,usecols=(0,1),unpack=True)
# distance in Megaparsecs, here calculated from redshift for Gaia16apd
distance_sn=cosmo.luminosity_distance(0.102).value
wave11fe, flux11fe=np.loadtxt('spectra/SN2011fe_uv.dat', dtype=float,usecols=(0,1),unpack=True)
distance_11fe=6.7
wave16ccj, flux16ccj=np.loadtxt('spectra/SN2016ccj_uv.dat', dtype=float,usecols=(0,1),unpack=True)
distance_16ccj=cosmo.luminosity_distance(0.041).value
wave06aj, flux06aj=np.loadtxt('/Users/pbrown/Desktop/SN/localtemplates/ANT-SN2006aj.20A.sed.restframe.dat_upeakspectrum.dat', dtype=float,usecols=(0,1),unpack=True)
distance_06aj=0.000010
# in megaparsecs
#set redshift array and initialize other arrays which will have the same length
redshifts=[0.1,0.2,0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,5.0,5.0,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
redshiftmags=[]
redshiftmags2=[]
redshift11femags=[]
redshift16ccjmags=[]
redshift11femags2=[]
redshift16ccjmags2=[]
redshift06ajmags=[]
redshift06ajmags2=[]
distances=[]
lightyears=[]
for counter in range(0,len(redshifts),1):
z=redshifts[counter]
# calculate distance in Megaparsecs
lumidist = cosmo.luminosity_distance(z).value
distances.append(lumidist)
# distances[counter]=lumidist
# lightyears[counter]=lumidist*3.26*10.0**6.0
lightyears.append(lumidist*3.26*10.0**6.0)
# print(lightyears[counter])
# correct for the effects of distance and flux dilution
redshiftedflux = np.multiply(distance_sn**2.0,input_flux)
redshiftedflux = np.divide(redshiftedflux, lumidist**2.0)
redshiftedflux = np.divide(redshiftedflux, 1.0+z)
redshifted11feflux = np.multiply(distance_11fe**2.0,flux11fe)
redshifted11feflux = np.divide(redshifted11feflux, lumidist**2.0)
redshifted11feflux = np.divide(redshifted11feflux, 1.0+z)
redshifted16ccjflux = np.multiply(distance_16ccj**2.0,flux16ccj)
redshifted16ccjflux = np.divide(redshifted16ccjflux, lumidist**2.0)
redshifted16ccjflux = np.divide(redshifted16ccjflux, 1.0+z)
redshifted06ajflux = np.multiply(distance_06aj**2.0,flux06aj)
redshifted06ajflux = np.divide(redshifted06ajflux, lumidist**2.0)
redshifted06ajflux = np.divide(redshifted06ajflux, 1.0+z)
filter2='filters/F444W_NRC_and_OTE_ModAB_mean.txt'
filter='filters/F200W_NRC_and_OTE_ModAB_mean.txt'
# print(z)
# abmag=[]
abmag=abspecphot(wave16ccj*(1.0+z),redshifted16ccjflux,filter )
ab16ccjmag=abmag
ab16ccjmag2=abspecphot(wave16ccj*(1.0+z),redshifted16ccjflux,filter2 )
# abmag=[]
abmag=abspecphot(wave11fe*(1.0+z),redshifted11feflux,filter )
ab11femag=abmag
ab11femag2=abspecphot(wave11fe*(1.0+z),redshifted11feflux,filter2 )
ab06ajmag=abspecphot(wave06aj*(1.0+z),redshifted06ajflux,filter )
ab06ajmag2=abspecphot(wave06aj*(1.0+z),redshifted06ajflux,filter2 )
# abmag=[]
abmag=abspecphot(input_wave*(1.0+z),redshiftedflux,filter )
abmag2=abspecphot(input_wave*(1.0+z),redshiftedflux,filter2 )
# print(mag_array[5])
redshiftmags.append(abmag)
redshiftmags2.append(abmag2)
redshift11femags.append(ab11femag)
redshift11femags2.append(ab11femag2)
redshift16ccjmags.append(ab16ccjmag)
redshift16ccjmags2.append(ab16ccjmag2)
redshift06ajmags.append(ab06ajmag)
redshift06ajmags2.append(ab06ajmag2)
# print(abmag)
goodmags, = np.where(np.asarray(redshiftmags) > 0)
color=np.subtract(redshiftmags,redshiftmags2)
color11fe=np.subtract(redshift11femags,redshift11femags2)
#color16ccj=np.subtract(redshift16ccjmags,redshift16ccjmags2)
#best fit line and plotting
#plt.ylabel(filter)
plt.ylabel('JWST F200 AB Mag')
#plt.ylabel('JWST F200-F444 AB Mag')
plt.xlabel('Redshift')
# plt.title('Gaia16apd Spectrum')
#plt.plot(redshifts[goodmags[0][:]],redshiftmags[goodmags[0][:]],'b*')
#plt.plot(redshifts,color, 'b*',color='black')
#plt.plot(redshifts,color11fe, 'b*',color='blue')
#plt.plot(redshifts,color16ccj, 'b*',color='purple')
plt.plot(redshifts,redshiftmags, 'b*',color='black')
plt.plot(redshifts,redshift11femags,'b*',color='blue')
plt.plot(redshifts,redshift06ajmags,'b*',color='green')
# 16ccj has [] in mag array
#plt.plot(redshifts,redshift16ccjmags,'b*',color='green')
#plt.plot([4.5, 5.5], [28, 28], color='k', linestyle='--', linewidth=2)
## invert y axis makes the brighter magnitude higher
plt.gca().invert_yaxis()
#plt.ylim([29,24])
plt.show()
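# A small helper (a refactoring sketch, not used above) capturing the dimming applied in
# the loop: scale the rest-frame flux by the ratio of distances squared and by (1+z) to
# account for flux dilution.
def redshift_dim(flux, source_distance_mpc, lumidist_mpc, z):
    return flux * (source_distance_mpc ** 2.0) / (lumidist_mpc ** 2.0) / (1.0 + z)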
|
import enum
class State(enum.Enum):
HEALTHY = "Healthy"
INFECTED = "Infected"
RISKY = "Risky"
EXPOSED = "Exposed"
|
# pylint:disable=missing-module-docstring,missing-class-docstring,missing-function-docstring
from .base import compare_template, SimpleTestCase
class DatePickerHtmlTest(SimpleTestCase):
maxDiff = None
def test_basic_short(self):
template = """
{% load carbondesign %}
{% DatePicker form.started_at_empty mode="basic" short=True label="Date Picker label" %}
"""
expected = r"""
<div class="bx--form-item">
<div class="bx--date-picker bx--date-picker--simple bx--date-picker--short">
<div class="bx--date-picker-container">
<label for="id_started_at_empty" class="bx--label">
Date Picker label
</label>
<input type="text" name="started_at_empty" class="bx--date-picker__input" data-date-picker-input="" pattern="\d{1,2}/\d{4,4}" placeholder="mm/yyyy" id="id_started_at_empty">
</div>
</div>
</div>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
def test_basic_invalid(self):
template = """
{% load carbondesign %}
{% DatePicker form.started_at_missing mode="basic" label="Date Picker label" %}
"""
expected = r"""
<div class="bx--form-item">
<div class="bx--date-picker bx--date-picker--simple">
<div class="bx--date-picker-container">
<label for="id_started_at_missing" class="bx--label">
Date Picker label
</label>
<input type="text" name="started_at_missing" value="" class="bx--date-picker__input" data-date-picker-input="" pattern="\d{1,2}/\d{1,2}/\d{4,4}" placeholder="mm/dd/yyyy" data-invalid="" required id="id_started_at_missing">
<div class="bx--form-requirement">
<div class="bx--form-requirement__title">This field is required.</div>
</div>
</div>
</div>
</div>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
def test_basic_short_light(self):
template = """
{% load carbondesign %}
{% DatePicker form.started_at_empty mode="basic" short=True label="Date Picker label" light=True %}
"""
expected = r"""
<div class="bx--form-item">
<div class="bx--date-picker bx--date-picker--simple bx--date-picker--light bx--date-picker--short">
<div class="bx--date-picker-container">
<label for="id_started_at_empty" class="bx--label">
Date Picker label
</label>
<input type="text" name="started_at_empty" class="bx--date-picker__input" data-date-picker-input="" pattern="\d{1,2}/\d{4,4}" placeholder="mm/yyyy" id="id_started_at_empty">
</div>
</div>
</div>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
def test_basic_invalid_light(self):
template = """
{% load carbondesign %}
{% DatePicker form.started_at_missing mode="basic" label="Date Picker label" light=True %}
"""
expected = r"""
<div class="bx--form-item">
<div class="bx--date-picker bx--date-picker--simple bx--date-picker--light">
<div class="bx--date-picker-container">
<label for="id_started_at_missing" class="bx--label">
Date Picker label
</label>
<input type="text" name="started_at_missing" value="" class="bx--date-picker__input" data-date-picker-input="" pattern="\d{1,2}/\d{1,2}/\d{4,4}" placeholder="mm/dd/yyyy" data-invalid="" required id="id_started_at_missing">
<div class="bx--form-requirement">
<div class="bx--form-requirement__title">This field is required.</div>
</div>
</div>
</div>
</div>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
def test_range(self):
template = """
{% load carbondesign %}
{% RangeDatePicker form.started_at form.stopped_at %}
"""
expected = r"""
<div class="bx--form-item">
<div data-date-picker data-date-picker-type="range"
class="bx--date-picker bx--date-picker--range">
<div class="bx--date-picker-container">
<label for="id_started_at" class="bx--label">
Started at
</label>
<div class="bx--date-picker-input__wrapper">
<input type="text" name="started_at" value="2022-02-03 01:02:03" class="bx--date-picker__input" pattern="\d{1,2}/\d{1,2}/\d{4,4}" placeholder="mm/dd/yyyy" data-date-picker-input-from="" required id="id_started_at">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
data-date-picker-icon="true" class="bx--date-picker__icon"
width="16" height="16" viewBox="0 0 32 32" aria-hidden="true">
<path d="M26,4h-4V2h-2v2h-8V2h-2v2H6C4.9,4,4,4.9,4,6v20c0,1.1,0.9,2,2,2h20c1.1,0,2-0.9,2-2V6C28,4.9,27.1,4,26,4z M26,26H6V12h20 V26z M26,10H6V6h4v2h2V6h8v2h2V6h4V10z"></path>
</svg>
</div>
</div>
<div class="bx--date-picker-container">
<label for="id_stopped_at" class="bx--label">
Stopped at
</label>
<div class="bx--date-picker-input__wrapper">
<input type="text" name="stopped_at" value="2022-10-04 11:30:40" class="bx--date-picker__input" pattern="\d{1,2}/\d{1,2}/\d{4,4}" placeholder="mm/dd/yyyy" data-date-picker-input-to="" required id="id_stopped_at">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
data-date-picker-icon="true" class="bx--date-picker__icon"
width="16" height="16" viewBox="0 0 32 32" aria-hidden="true">
<path d="M26,4h-4V2h-2v2h-8V2h-2v2H6C4.9,4,4,4.9,4,6v20c0,1.1,0.9,2,2,2h20c1.1,0,2-0.9,2-2V6C28,4.9,27.1,4,26,4z M26,26H6V12h20 V26z M26,10H6V6h4v2h2V6h8v2h2V6h4V10z"></path>
</svg>
</div>
</div>
</div>
</div>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
def test_range_light(self):
template = """
{% load carbondesign %}
{% RangeDatePicker form.started_at form.stopped_at light=True %}
"""
expected = r"""
<div class="bx--form-item">
<div data-date-picker data-date-picker-type="range"
class="bx--date-picker bx--date-picker--range bx--date-picker--light">
<div class="bx--date-picker-container">
<label for="id_started_at" class="bx--label">
Started at
</label>
<div class="bx--date-picker-input__wrapper">
<input type="text" name="started_at" value="2022-02-03 01:02:03" class="bx--date-picker__input" pattern="\d{1,2}/\d{1,2}/\d{4,4}" placeholder="mm/dd/yyyy" data-date-picker-input-from="" required id="id_started_at">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
data-date-picker-icon="true" class="bx--date-picker__icon"
width="16" height="16" viewBox="0 0 32 32" aria-hidden="true">
<path d="M26,4h-4V2h-2v2h-8V2h-2v2H6C4.9,4,4,4.9,4,6v20c0,1.1,0.9,2,2,2h20c1.1,0,2-0.9,2-2V6C28,4.9,27.1,4,26,4z M26,26H6V12h20 V26z M26,10H6V6h4v2h2V6h8v2h2V6h4V10z"></path>
</svg>
</div>
</div>
<div class="bx--date-picker-container">
<label for="id_stopped_at" class="bx--label">
Stopped at
</label>
<div class="bx--date-picker-input__wrapper">
<input type="text" name="stopped_at" value="2022-10-04 11:30:40" class="bx--date-picker__input" pattern="\d{1,2}/\d{1,2}/\d{4,4}" placeholder="mm/dd/yyyy" data-date-picker-input-to="" required id="id_stopped_at">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
data-date-picker-icon="true" class="bx--date-picker__icon"
width="16" height="16" viewBox="0 0 32 32" aria-hidden="true">
<path d="M26,4h-4V2h-2v2h-8V2h-2v2H6C4.9,4,4,4.9,4,6v20c0,1.1,0.9,2,2,2h20c1.1,0,2-0.9,2-2V6C28,4.9,27.1,4,26,4z M26,26H6V12h20 V26z M26,10H6V6h4v2h2V6h8v2h2V6h4V10z"></path>
</svg>
</div>
</div>
</div>
</div>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
def test_nolabel_light(self):
template = """
{% load carbondesign %}
{% DatePicker form.started_at_empty mode="nolabel" label="Date Picker label" light=True %}
"""
expected = r"""
<div class="bx--form-item">
<div data-date-picker data-date-picker-type="single"
class="bx--date-picker bx--date-picker--single bx--date-picker--nolabel bx--date-picker--light">
<div class="bx--date-picker-container">
<div class="bx--date-picker-input__wrapper">
<input type="text" name="started_at_empty" class="bx--date-picker__input" data-date-picker-input="" pattern="\d{1,2}/\d{1,2}/\d{4,4}" placeholder="mm/dd/yyyy" id="id_started_at_empty">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
data-date-picker-icon="true" class="bx--date-picker__icon"
width="16" height="16" viewBox="0 0 32 32" aria-hidden="true">
<path d="M26,4h-4V2h-2v2h-8V2h-2v2H6C4.9,4,4,4.9,4,6v20c0,1.1,0.9,2,2,2h20c1.1,0,2-0.9,2-2V6C28,4.9,27.1,4,26,4z M26,26H6V12h20 V26z M26,10H6V6h4v2h2V6h8v2h2V6h4V10z"></path>
</svg>
</div>
</div>
</div>
</div>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
def test_nolabel_invalid_light(self):
template = """
{% load carbondesign %}
{% DatePicker form.started_at_missing mode="nolabel" label="Date Picker label" light=True %}
"""
expected = r"""
<div class="bx--form-item">
<div data-date-picker data-date-picker-type="single"
class="bx--date-picker bx--date-picker--single bx--date-picker--nolabel bx--date-picker--light">
<div class="bx--date-picker-container">
<div class="bx--date-picker-input__wrapper">
<input type="text" name="started_at_missing" value="" class="bx--date-picker__input" data-date-picker-input="" pattern="\d{1,2}/\d{1,2}/\d{4,4}" placeholder="mm/dd/yyyy" data-invalid="" required id="id_started_at_missing">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
data-date-picker-icon="true" class="bx--date-picker__icon"
width="16" height="16" viewBox="0 0 32 32" aria-hidden="true">
<path d="M26,4h-4V2h-2v2h-8V2h-2v2H6C4.9,4,4,4.9,4,6v20c0,1.1,0.9,2,2,2h20c1.1,0,2-0.9,2-2V6C28,4.9,27.1,4,26,4z M26,26H6V12h20 V26z M26,10H6V6h4v2h2V6h8v2h2V6h4V10z"></path>
</svg>
</div>
<div class="bx--form-requirement">
<div class="bx--form-requirement__title">This field is required.</div>
</div>
</div>
</div>
</div>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
def test_single_light(self):
template = """
{% load carbondesign %}
{% DatePicker form.started_at_empty label="Date Picker label" light=True %}
"""
expected = r"""
<div class="bx--form-item">
<div data-date-picker data-date-picker-type="single"
class="bx--date-picker bx--date-picker--single bx--date-picker--light">
<div class="bx--date-picker-container">
<label for="id_started_at_empty" class="bx--label">
Date Picker label
</label>
<div class="bx--date-picker-input__wrapper">
<input type="text" name="started_at_empty" class="bx--date-picker__input" data-date-picker-input="" pattern="\d{1,2}/\d{1,2}/\d{4,4}" placeholder="mm/dd/yyyy" id="id_started_at_empty">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
data-date-picker-icon="true" class="bx--date-picker__icon"
width="16" height="16" viewBox="0 0 32 32" aria-hidden="true">
<path d="M26,4h-4V2h-2v2h-8V2h-2v2H6C4.9,4,4,4.9,4,6v20c0,1.1,0.9,2,2,2h20c1.1,0,2-0.9,2-2V6C28,4.9,27.1,4,26,4z M26,26H6V12h20 V26z M26,10H6V6h4v2h2V6h8v2h2V6h4V10z"></path>
</svg>
</div>
</div>
</div>
</div>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
def test_single_invalid_light(self):
template = """
{% load carbondesign %}
{% DatePicker form.started_at_missing label="Date Picker label" light=True %}
"""
expected = r"""
<div class="bx--form-item">
<div data-date-picker data-date-picker-type="single"
class="bx--date-picker bx--date-picker--single bx--date-picker--light">
<div class="bx--date-picker-container">
<label for="id_started_at_missing" class="bx--label">
Date Picker label
</label>
<div class="bx--date-picker-input__wrapper">
<input type="text" name="started_at_missing" value="" class="bx--date-picker__input" data-date-picker-input="" pattern="\d{1,2}/\d{1,2}/\d{4,4}" placeholder="mm/dd/yyyy" data-invalid="" required id="id_started_at_missing">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
data-date-picker-icon="true" class="bx--date-picker__icon"
width="16" height="16" viewBox="0 0 32 32" aria-hidden="true">
<path d="M26,4h-4V2h-2v2h-8V2h-2v2H6C4.9,4,4,4.9,4,6v20c0,1.1,0.9,2,2,2h20c1.1,0,2-0.9,2-2V6C28,4.9,27.1,4,26,4z M26,26H6V12h20 V26z M26,10H6V6h4v2h2V6h8v2h2V6h4V10z"></path>
</svg>
</div>
<div class="bx--form-requirement">
<div class="bx--form-requirement__title">This field is required.</div>
</div>
</div>
</div>
</div>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
def test_nolabel(self):
template = """
{% load carbondesign %}
{% DatePicker form.started_at_empty mode="nolabel" label="Date Picker label" %}
"""
expected = r"""
<div class="bx--form-item">
<div data-date-picker data-date-picker-type="single"
class="bx--date-picker bx--date-picker--single bx--date-picker--nolabel">
<div class="bx--date-picker-container">
<div class="bx--date-picker-input__wrapper">
<input type="text" name="started_at_empty" class="bx--date-picker__input" data-date-picker-input="" pattern="\d{1,2}/\d{1,2}/\d{4,4}" placeholder="mm/dd/yyyy" id="id_started_at_empty">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
data-date-picker-icon="true" class="bx--date-picker__icon"
width="16" height="16" viewBox="0 0 32 32" aria-hidden="true">
<path d="M26,4h-4V2h-2v2h-8V2h-2v2H6C4.9,4,4,4.9,4,6v20c0,1.1,0.9,2,2,2h20c1.1,0,2-0.9,2-2V6C28,4.9,27.1,4,26,4z M26,26H6V12h20 V26z M26,10H6V6h4v2h2V6h8v2h2V6h4V10z"></path>
</svg>
</div>
</div>
</div>
</div>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
def test_nolabel_invalid(self):
template = """
{% load carbondesign %}
{% DatePicker form.started_at_missing mode="nolabel" label="Date Picker label" %}
"""
expected = r"""
<div class="bx--form-item">
<div data-date-picker data-date-picker-type="single"
class="bx--date-picker bx--date-picker--single bx--date-picker--nolabel">
<div class="bx--date-picker-container">
<div class="bx--date-picker-input__wrapper">
<input type="text" name="started_at_missing" value="" class="bx--date-picker__input" data-date-picker-input="" pattern="\d{1,2}/\d{1,2}/\d{4,4}" placeholder="mm/dd/yyyy" data-invalid="" required id="id_started_at_missing">
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
data-date-picker-icon="true" class="bx--date-picker__icon"
width="16" height="16" viewBox="0 0 32 32" aria-hidden="true">
<path d="M26,4h-4V2h-2v2h-8V2h-2v2H6C4.9,4,4,4.9,4,6v20c0,1.1,0.9,2,2,2h20c1.1,0,2-0.9,2-2V6C28,4.9,27.1,4,26,4z M26,26H6V12h20 V26z M26,10H6V6h4v2h2V6h8v2h2V6h4V10z"></path>
</svg>
</div>
<div class="bx--form-requirement">
<div class="bx--form-requirement__title">This field is required.</div>
</div>
</div>
</div>
</div>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
|
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.lib.packet.ether_types import ETH_TYPE_IP
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet, ether_types
from ryu.lib.packet import ethernet
from ryu.lib.packet import tcp, arp, ipv4, icmp
from ryu.topology.api import get_all_host
class LoadBalancer(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
VIRTUAL_IP = '10.0.1.100'
VIRTUAL_MAC = '00:00:00:00:01:00'
SERVER_NUMBER = 2
# Enable consistent hashing on source port
HASH_ON_PORT = 1 # True = 1, False = 0
# CONFIG_DISPATCHER, Handle Features Reply
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# Send all packet to the controller
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, ofproto.OFPCML_NO_BUFFER)]
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
mod = parser.OFPFlowMod(
datapath=datapath,
priority=1,
match=match,
instructions=inst
)
datapath.send_msg(mod)
# MAIN_DISPATCHER, handle packet-In
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
# Obtain packet headers
pkt = packet.Packet(msg.data)
pkt_eth = pkt.get_protocol(ethernet.ethernet)
pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)
pkt_tcp = pkt.get_protocol(tcp.tcp)
pkt_icmp = pkt.get_protocol(icmp.icmp)
macsrc = pkt_eth.src
# Handle ARP packets
if pkt_eth.ethertype == ether_types.ETH_TYPE_ARP:
pkt_arp = pkt.get_protocol(arp.arp)
if pkt_arp.opcode == arp.ARP_REQUEST:
if pkt_arp.dst_ip == self.VIRTUAL_IP:
mac_dst_arp = self.VIRTUAL_MAC
else:
hosts = get_all_host(self)
for host in hosts:
if pkt_arp.dst_ip in host.ipv4:
mac_dst_arp = host.mac
break
else:
self.logger.info("[ARP] MAC address not found")
return
self.logger.info("[ARP] Request received")
self.logger.info("[ARP] MAC destination is: " + mac_dst_arp)
reply_packet = packet.Packet()
reply_packet.add_protocol(
ethernet.ethernet(
dst=pkt_arp.src_mac,
src=mac_dst_arp,
ethertype=ether_types.ETH_TYPE_ARP
)
)
reply_packet.add_protocol(
arp.arp(
opcode=arp.ARP_REPLY,
src_mac=mac_dst_arp,
src_ip=pkt_arp.dst_ip,
dst_mac=pkt_arp.src_mac,
dst_ip=pkt_arp.src_ip
)
)
reply_packet.serialize()
actions = [parser.OFPActionOutput(in_port)]
packet_out = parser.OFPPacketOut(
datapath=datapath,
buffer_id=ofproto.OFP_NO_BUFFER,
in_port=ofproto.OFPP_CONTROLLER,
data=reply_packet.data,
actions=actions
)
datapath.send_msg(packet_out)
self.logger.info("[ARP] Reply sent!")
return
# Handle IPv4 packets
if pkt_ipv4 is not None:
# Handle TCP packets
if pkt_tcp is not None:
if self.HASH_ON_PORT == 1: # Consistent hashing on IPv4 source and TCP source port
server = hash((pkt_ipv4.src, pkt_tcp.src_port)) % self.SERVER_NUMBER
else: # Deterministic routing via consistent hashing only on IPv4 source
server = hash((pkt_ipv4.src)) % self.SERVER_NUMBER
server = server + 1
ipdst = "10.0.1." + str(server)
macdst = "00:00:00:00:01:0" + str(server)
out_port = server # IMPORTANT: Servers must be connected to port 1 and 2
# Inbound FlowMod
match = parser.OFPMatch(
eth_type=ETH_TYPE_IP,
ip_proto=pkt_ipv4.proto,
eth_src=macsrc,
tcp_src=pkt_tcp.src_port,
eth_dst=self.VIRTUAL_MAC
)
self.logger.info("[HASH] Chosen server is: SRV" + str(server))
actions = [parser.OFPActionSetField(eth_dst=macdst),
parser.OFPActionSetField(ipv4_dst=ipdst),
parser.OFPActionOutput(out_port)]
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
ofmsg = parser.OFPFlowMod(
datapath=datapath,
hard_timeout=120,
priority=50,
match=match,
instructions=inst,
)
datapath.send_msg(ofmsg)
# Outbound FlowMod
match = parser.OFPMatch(
eth_type=ETH_TYPE_IP,
ip_proto=pkt_ipv4.proto,
eth_src=macdst,
tcp_dst=pkt_tcp.src_port,
eth_dst=macsrc
)
actions = [
parser.OFPActionSetField(eth_src=self.VIRTUAL_MAC),
parser.OFPActionSetField(ipv4_src=self.VIRTUAL_IP),
parser.OFPActionOutput(in_port)
]
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
ofmsg = parser.OFPFlowMod(
datapath=datapath,
hard_timeout=120,
priority=50,
match=match,
instructions=inst,
)
datapath.send_msg(ofmsg)
# Change packet data
pkt_eth.dst = macdst
pkt_ipv4.dst = ipdst
pkt_tcp.csum = 0
pkt.serialize()
# Packet-Out
actions = [parser.OFPActionOutput(out_port)]
out = parser.OFPPacketOut(
datapath=datapath,
buffer_id=ofproto.OFP_NO_BUFFER,
in_port=in_port,
actions=actions,
data=msg.data
)
datapath.send_msg(out)
# Handle ICMP packets
elif pkt_icmp is not None:
if pkt_icmp.type == icmp.ICMP_ECHO_REQUEST:
if pkt_ipv4.dst == self.VIRTUAL_IP:
mac_icmp = self.VIRTUAL_MAC
self.logger.info("[ICMP] Request received")
reply_icmp = packet.Packet()
reply_icmp.add_protocol(
ethernet.ethernet(
ethertype=pkt_eth.ethertype,
dst=pkt_eth.src,
src=mac_icmp
)
)
reply_icmp.add_protocol(
ipv4.ipv4(
dst=pkt_ipv4.src,
src=pkt_ipv4.dst,
proto=pkt_ipv4.proto
)
)
reply_icmp.add_protocol(
icmp.icmp(
type_=icmp.ICMP_ECHO_REPLY,
code=icmp.ICMP_ECHO_REPLY_CODE,
csum=0,
data=pkt_icmp.data
)
)
reply_icmp.serialize()
actions = [parser.OFPActionOutput(in_port)]
packet_out = parser.OFPPacketOut(
datapath=datapath,
buffer_id=ofproto.OFP_NO_BUFFER,
in_port=ofproto.OFPP_CONTROLLER,
data=reply_icmp.data,
actions=actions
)
datapath.send_msg(packet_out)
self.logger.info("[ICMP] Reply sent!")
return
else:
return
else:
return
# Drop packet types not in the specifications
else:
return
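# A standalone sketch (hypothetical addresses and ports) of the server-selection logic
# used above: within a single controller run, the same (source IP, source port) pair
# always hashes to the same backend server.
if __name__ == '__main__':
    for src_ip, src_port in [('10.0.1.10', 40001), ('10.0.1.10', 40002), ('10.0.1.11', 40001)]:
        server = hash((src_ip, src_port)) % LoadBalancer.SERVER_NUMBER + 1
        print('%s:%d -> SRV%d' % (src_ip, src_port, server))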
|
from .abstract import make_design
def make_ascii(tiling):
return "\n".join(gen_ascii(tiling))
def gen_ascii(tiling):
faces, vlines, hlines, nodes = make_design(tiling)
for i in range(0, len(vlines)):
node_chars = ["+" if node else " " for node in nodes[i]]
h_chars = ["-" if h else " " for h in hlines[i]]
row = alternate(node_chars, h_chars)
yield "".join(row).rstrip()
face_chars = ["X" if face == -1 else " " for face in faces[i]]
v_chars = ["|" if v else " " for v in vlines[i]]
row = alternate(v_chars, face_chars)
yield "".join(row).rstrip()
i = len(vlines)
node_chars = ["+" if node else " " for node in nodes[i]]
h_chars = ["-" if h else " " for h in hlines[i]]
row = alternate(node_chars, h_chars)
yield "".join(row).rstrip()
def alternate(first, second):
assert len(second) in [len(first) - 1, len(first)]
n = len(first) + len(second)
for i in range(0, n):
if i % 2 == 0:
yield first[i // 2]
else:
yield second[i // 2]
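# A tiny demonstration (hypothetical inputs) of the row interleaving done by alternate():
# node/horizontal-edge characters and vertical-edge/face characters are merged into a
# single row of the ASCII picture.
if __name__ == "__main__":
    print("".join(alternate(["+", "+", "+"], ["-", " "])))  # prints "+-+ +"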
|
from flask import Flask, request, redirect
from datetime import datetime
import os
import json
from google.cloud import datastore
app = Flask(__name__)
dataclient = datastore.Client()
def addVisitor():
ent = dataclient.key('data','visitors')
total = dataclient.get(key=ent)
if total:
total['total'] += 1
dataclient.put(total)
else:
total = datastore.Entity(key=ent)
total['total'] = 1
dataclient.put(total)
@app.route('/')
def main_page():
ent = dataclient.key('data', 'posts')
posts = dataclient.get(key=ent)
article = ""
with open('articles.html','r') as page:
article = page.read()
html = ""
if posts:
for post in posts['posts']:
array = json.loads(post)
raw = article.replace("!content!", array[0])
raw = raw.replace("!title!", array[1])
raw = raw.replace("!time!", array[2])
html += raw
else:
return 'No Posts!'
return html
@app.route('/version')
def vers():
return 'This is app version B'
@app.route('/instance')
def getid():
instanceid = os.getenv('GAE_INSTANCE')
return str(instanceid)
@app.route('/version-id')
def getversionid():
addVisitor()
versionid = os.getenv('GAE_VERSION')
return str(versionid)
@app.route('/visitors')
def getVisitor():
addVisitor()
ent = dataclient.key('data','visitors')
total = dataclient.get(key=ent)
if total:
return 'Total visitors: ' + str(total['total'])
else:
return 'Total Broke!'
@app.route('/editor')
def edit_page():
with open('editor.html','r') as page:
return page.read()
@app.route('/submit', methods = ['POST'])
def submit_post():
password = request.form['pass']
if password == "mySuperAwesomePassword":
content = request.form['content']
title = request.form['title']
time = str(datetime.utcnow())
post = json.dumps([content, title, time])
ent = dataclient.key('data', 'posts')
posts = dataclient.get(key=ent)
if posts:
posts['posts'] = [post] + posts['posts']
dataclient.put(posts)
else:
posts = datastore.Entity(key=ent)
posts['posts'] = [post]
dataclient.put(posts)
return redirect('/')
else:
return redirect('/')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
#=============================================================================
#
# FileName: flask_util_js.py
# Desc: provide flask_util.js
# The following can be configured in app.config:
# FLASK_UTIL_JS_PATH: URL path for flask_util.js
# FLASK_UTIL_JS_ENDPOINT: endpoint name for flask_util.js
#
# Author: dantezhu
# Email: zny2008@gmail.com
# HomePage: http://www.vimer.cn
#
# Created: 2012-07-09 17:23:51
# History:
# 0.0.1 | dantezhu | 2012-07-09 17:23:51 | initialization
# 0.1 | dantezhu | 2012-08-30 22:54:33 | official release
# 0.2.0 | dantezhu | 2012-10-22 21:53:14 | reworked to an instance-based design
# 0.2.3 | dantezhu | 2012-11-20 11:13:22 | added no-cache header
# 0.2.4 | dantezhu | 2012-11-30 10:58:13 | content-type
# 0.2.5 | dantezhu | 2012-12-04 11:41:15 | defaults no longer needed; raise an exception when params are missing
#
#=============================================================================
'''
__version__ = (0, 2, 5)
from flask import Response
from flask import render_template_string, json
FLASK_UTIL_JS_PATH = '/flask_util.js'
FLASK_UTIL_JS_TPL_STRING = '''
{% autoescape false %}
var flask_util = function() {
var url_map = {{ json_url_map }};
function url_for(endpoint, params) {
if (!params) {
params = {};
}
if (!url_map[endpoint]) {
return '';
}
var rule = url_map[endpoint]['rule'];
var used_params = {};
var rex = /\<\s*(\w+:)*(\w+)\s*\>/ig;
var path = rule.replace(rex, function(_i, _0, _1) {
if (params[_1]) {
used_params[_1] = params[_1];
return encodeURIComponent(params[_1]);
} else {
throw(_1 + ' does not exist in params');
}
});
var query_string = '';
for(var k in params) {
if (used_params[k]) {
continue;
}
var v = params[k];
if(query_string.length > 0) {
query_string += '&';
}
query_string += encodeURIComponent(k)+'='+encodeURIComponent(v);
}
var url = path;
if (query_string.length > 0) {
url += '?'+query_string;
}
return url;
}
return {
url_for: url_for
}
}();
{% endautoescape %}
'''
class FlaskUtilJs(object):
"""FlaskUtilJs"""
def __init__(self, app=None):
"""init with app
:app: Flask instance
"""
self._app = None
if app:
self.init_app(app)
def init_app(self, app):
"""
Install this extension onto the given Flask app.
"""
if self._app is not None:
raise Exception('FlaskUtilJs is already associated with an application.')
self._app = app
path = app.config.get('FLASK_UTIL_JS_PATH', FLASK_UTIL_JS_PATH)
endpoint = app.config.get('FLASK_UTIL_JS_ENDPOINT', None)
@app.route(path, endpoint=endpoint)
def flask_util_js():
org_url_map = app.url_map._rules_by_endpoint
# Keep the heavy lifting in the Python code
url_map = dict()
for k,v in org_url_map.items():
url_map[k] = dict(
rule=app.config["WEB_ROOT"] + (v[0].rule)[1:],
)
json_url_map = json.dumps(url_map, indent=4, ensure_ascii=False)
rv = render_template_string(
FLASK_UTIL_JS_TPL_STRING,
json_url_map=json_url_map
)
return Response(
rv,
content_type='text/javascript; charset=UTF-8',
headers={
'Cache-Control':'no-cache',
}
)
# Finally, store the resolved values on the instance
self._path = path
self._endpoint = endpoint or flask_util_js.__name__
@property
def path(self):
return self._path
@property
def endpoint(self):
return self._endpoint
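# Minimal usage sketch (illustrative, not part of the original module). Note that
# init_app reads app.config["WEB_ROOT"] when building the rule map, so the host
# application must define it:
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config['WEB_ROOT'] = '/'
#   fujs = FlaskUtilJs(app)
#   # In a template: <script src="{{ fujs.path }}"></script>
#   # In JavaScript: flask_util.url_for('endpoint_name', {param: 'value'})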
|
# -*- coding: utf-8 -*-
#
# This file is part of the RefTelState project
#
#
#
""" RefTelState
Ref (Reference Element) device of Type TelState
"""
# PyTango imports
import PyTango
from PyTango import DebugIt
from PyTango.server import run
from PyTango.server import Device, DeviceMeta
from PyTango.server import attribute, command
from PyTango.server import device_property
from PyTango import AttrQuality, DispLevel, DevState
from PyTango import AttrWriteType, PipeWriteType
from SKATelState import SKATelState
# Additional import
# PROTECTED REGION ID(RefTelState.additionnal_import) ENABLED START #
# PROTECTED REGION END # // RefTelState.additionnal_import
__all__ = ["RefTelState", "main"]
class RefTelState(SKATelState):
"""
Ref (Reference Element) device of Type TelState
"""
__metaclass__ = DeviceMeta
# PROTECTED REGION ID(RefTelState.class_variable) ENABLED START #
# PROTECTED REGION END # // RefTelState.class_variable
# -----------------
# Device Properties
# -----------------
# ----------
# Attributes
# ----------
# ---------------
# General methods
# ---------------
def init_device(self):
SKATelState.init_device(self)
# PROTECTED REGION ID(RefTelState.init_device) ENABLED START #
# PROTECTED REGION END # // RefTelState.init_device
def always_executed_hook(self):
# PROTECTED REGION ID(RefTelState.always_executed_hook) ENABLED START #
pass
# PROTECTED REGION END # // RefTelState.always_executed_hook
def delete_device(self):
# PROTECTED REGION ID(RefTelState.delete_device) ENABLED START #
pass
# PROTECTED REGION END # // RefTelState.delete_device
# ------------------
# Attributes methods
# ------------------
# --------
# Commands
# --------
# ----------
# Run server
# ----------
def main(args=None, **kwargs):
# PROTECTED REGION ID(RefTelState.main) ENABLED START #
return run((RefTelState,), args=args, **kwargs)
# PROTECTED REGION END # // RefTelState.main
if __name__ == '__main__':
main()
|
"""Batch Rename Program"""
__all__ = ["BatchRenamer"]
import re
import sys
from argparse import ArgumentError, Namespace
from copy import deepcopy
from shlex import split
try:
# pylint: disable=unused-import
import readline
except ModuleNotFoundError:
print("History not available")
from .filehistory import FileHistory
from .parser import generate_parser
CONFIRM = [True, "y", "yes"]
DENY = [False, "n", "no"]
BACK = ["b", "back", "q", "quit"]
class BatchRenamer:
"""Renaming thing"""
def __init__(self, *filenames, autofiles=None):
self.parser, help_list = generate_parser(self)
_help_dic = {}
for _help in help_list:
_help_dic.update(_help.cmds)
self._help_dic = _help_dic
self._help_text = "\n".join([h.help for h in help_list])
self._help_small = "\n".join([f" {h.usage}" for h in help_list])
self.files = [FileHistory(filename) for filename in filenames]
self.autofiles = autofiles or []
def __call__(self):
"""Go thru renaming things"""
if self.autofiles:
self.automate(*self.autofiles)
while True:
response = input("Action: ")
args = split(response)
args[0] = args[0].lower()
try:
resp_args = self.parser.parse_args(args)
except ArgumentError as e:
error_args = Namespace()
if e.message.startswith("unrecognized arguments"):
print("ERROR: Invalid argument\n")
setattr(error_args, "subparsers", [args[0]])
elif e.message.startswith("invalid choice"):
print("ERROR: Unknown command")
setattr(error_args, "small", True)
else:
print(e.message)
self.print_help(error_args)
else:
resp_args.func(resp_args)
@staticmethod
def _low_input(message):
"""Get user input and lower it"""
return input(message).lower()
def _print_file_changes(self, args=None):
if getattr(args, "automated", False):
return
for file_ in self.files:
file_.print_changes()
print("-" * 20)
def automate_manual(self, args):
"""Pass in manual automation filenames"""
filenames = args.filenames or split(input("Filepath(s): "))
self.automate(*filenames)
def automate(self, *autofiles):
"""Take file with list of commands and make those changes"""
for autofile in autofiles:
try:
with open(autofile, "r", encoding="utf-8") as fp:
lines = fp.readlines()
except FileNotFoundError:
print(
f"Unable to open {autofile}; moving to next file provided (if any)"
)
else:
for line in lines:
split_args = self.parser.convert_arg_line_to_args(line)
if split_args:
args = self.parser.parse_args(split_args)
setattr(args, "automated", True)
args.func(args)
def change_case(self, args):
"""Change the case of the filenames"""
styles = args.styles or split(input("Styles?: "))
for file_ in self.files:
file_.change_case(styles)
self._print_file_changes(args)
def append(self, args):
"""Append value to filenames either from a file or manually provided"""
setattr(args, "side", r"$")
setattr(args, "replace", args.append)
setattr(args, "value", "{pad}{repl}")
self._pend(args, "Append")
def prepend(self, args):
"""Prepend value to filenames either from a file or manually provided"""
setattr(args, "side", r"^")
setattr(args, "replace", args.prepend)
setattr(args, "value", "{repl}{pad}")
self._pend(args, "Prepend")
def _pend(self, args, msg="Pend"):
"""Add value to begining or end of filename from a file or manaully provided"""
if not args.filenames and not args.find and not args.replace:
do_files = self._low_input("Load from files? Yes or No?: ")
if do_files in CONFIRM:
self._pend_file(args)
elif args.filenames:
self._pend_file(args)
if args.find or args.replace or not args.filenames:
self._pend_manual(args, msg)
self._print_file_changes(args)
def _pend_file(self, og_args):
"""Add value to begining or end of filename from file"""
args = deepcopy(og_args)
for filename in args.filenames:
try:
with open(filename, "r", encoding="utf-8") as fp:
lines = fp.readlines()
except FileNotFoundError:
print(f"Unable to open {filename}; moving to next file provided")
continue
for line in lines:
temp = self.parser.convert_arg_line_to_args(line)
try:
find, repl = temp[:2]
except ValueError:
continue
setattr(args, "find", find)
setattr(args, "replace", repl)
self._pend_manual(args)
def _pend_manual(self, args, msg=None):
"""Add value to begining or end of filename"""
if not args.find:
find = input("Find: ")
setattr(args, "find", find)
if not args.replace:
repl = args.replace or input(f"{msg}: ")
setattr(args, "replace", repl)
for file_ in self.files:
if re.search(args.find, file_.rename.name):
repl = args.value.format(pad=args.padding, repl=args.replace)
file_.replace(args.side, repl)
else:
file_.noop()
def change_ext(self, args):
"""Change file extension for files"""
repl = args.ext or input("New Ext: ")
pattern = args.pattern or input("Match Pattern (Leave blank for no pattern): ")
for file_ in self.files:
file_.change_ext(repl, pattern)
self._print_file_changes(args)
def print_help(self, args=None):
"""Display help message"""
if args is None or getattr(args, "small", False):
print(self._help_small)
return
commands = [s for s in getattr(args, "commands", []) if s in self._help_dic]
if not commands:
print(self._help_text)
return
for sub in args.commands:
print(self._help_dic[sub].help)
def insert_string(self, args):
"""Insert value in specific position"""
val = args.value or input("Insert: ")
test_file = self.files[0].rename.name
while True:
try:
num = args.index or int(input("Index: "))
except ValueError:
print("Please enter a positive or negative integer.")
else:
test_len = len(test_file)
if num >= 0:
idx = num if num < test_len else test_len
find = r"^(.{" f"{idx}" r"})(.*)$"
elif num < 0:
idx = (-1 * num) if num > (-1 * test_len) else 0
find = r"^(.*?)(.{" f"{idx}" r"})$"
repl = r"\1" f"{val}" r"\2"
test = re.sub(find, repl, test_file)
print(f"Example: {test}")
good = args.confirm or self._low_input("Right index? ")
if good in CONFIRM:
break
if good in BACK:
return
if good in DENY:
args.index = None
print()
setattr(args, "find", find)
setattr(args, "replace", repl)
self.find_and_replace(args)
def list_file_changes(self, _):
"""List the current changes to the files"""
self._print_file_changes()
def history(self, args):
"""Print history of file changes"""
if args.peak:
self.files[0].print_history()
return
for file_ in self.files:
file_.print_history()
print("-" * 20)
def reset(self, args):
"""Reset filenames"""
really = args.confirm or self._low_input("Really reset? No undoing this action. ")
while True:
if really in CONFIRM:
for file_ in self.files:
file_.reset()
break
if really in DENY:
break
really = self._low_input("Yes or No? ")
def quit_app(self, args):
"""Exit program"""
really = args.confirm or self._low_input("Are you sure you want to quit? ")
while True:
if really in CONFIRM:
print("Thanks for using!")
sys.exit()
if really in DENY:
break
really = self._low_input("Yes or No? ")
def find_and_replace(self, args):
"""Find pattern and replace with new pattern"""
find = args.find or input("Find: ")
repl = args.replace or input("Repl: ")
for file_ in self.files:
file_.replace(find, repl)
self._print_file_changes(args)
def save(self, args):
"""Save name changes"""
really = args.confirm or self._low_input(
"Are you sure you want to save new names? "
)
while True:
if really in CONFIRM:
for file_ in self.files:
file_.save()
print("Files renamed.")
break
if really in DENY:
print("No files renamed.")
break
really = self._low_input("Yes or No? ")
def undo(self, args):
"""Undo last changes"""
undone_all = True
for _ in range(args.number):
for file_ in self.files:
undone_all = file_.undo()
if not undone_all:
break
if not undone_all:
break
if undone_all and not getattr(args, "automated", False):
self._print_file_changes()
print(
("Last " if undone_all else "All ")
+ "change"
+ (" has" if undone_all else "s have")
+ " been undone."
)
def save_and_quit(self, args):
"""save changes, exit the program"""
really = args.confirm or ("Are you sure you want to save and quit? ")
while True:
if really in CONFIRM:
args.confirm = True
self.save(args)
self.quit_app(args)
if really in DENY:
return
really = self._low_input("Yes or No? ")
|
from fastapi import APIRouter
# from .endpoints import
router = APIRouter(prefix="/v1")
# router.include_router(healthcheck.router)
|
"""
We have a collection of rocks, each rock has a positive integer weight.
Each turn, we choose the two heaviest rocks and smash them together.
Suppose the stones have weights x and y with x <= y. The result of this smash is:
If x == y, both stones are totally destroyed;
If x != y, the stone of weight x is totally destroyed,
and the stone of weight y has new weight y-x.
At the end, there is at most 1 stone left.
Return the weight of this stone (or 0 if there are no stones left.)
"""
from typing import List
import heapq
class Solution:
def lastStoneWeight(self, stones: List[int]) -> int:
# Python's heapq implements a min-heap; to simulate the max-heap needed
# here, the stone weights are negated before heapifying.
neg_stones = [-stone for stone in stones]
heapq.heapify(neg_stones)
while len(neg_stones) > 1:
y = heapq.heappop(neg_stones)
x = heapq.heappop(neg_stones)
if x != y:
#new_weight = -((-y) - (-x))
new_weight = y - x
heapq.heappush(neg_stones, new_weight)
if neg_stones:
return - neg_stones[0]
else:
return 0
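# Illustrative check (hypothetical driver, not part of the original snippet):
# Solution().lastStoneWeight([2, 7, 4, 1, 8, 1]) returns 1; the smashes go
# 8&7 -> 1, 4&2 -> 2, 2&1 -> 1, 1&1 -> 0, leaving a single stone of weight 1.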
|
from dataclasses import dataclass
from typing import List
import os
import logging
from .config import get_config
@dataclass
class User:
"Class for storing information about the current user."
name: str
roles: List[str]
visitor: bool = False
_allowed_visitor_roles = set(('user', 'readdb', 'annotator', 'geodb', 'pgadmin', 'reporting', 'vis'))
_disallowed_visitor_roles = set(('writedb', 'dev', 'admin'))
def default_visitor_roles():
conf = get_config()
# get default roles for visitors
roles_in = conf.visitor_roles
if roles_in is None:
logging.getLogger('flask.error').info('Visitor handling is disabled.')
return None
roles = ['visitor']
for role in roles_in:
if role in _disallowed_visitor_roles:
logging.getLogger('flask.error').warning('Cannot assign role %s to visitors: Too many privileges. Skipping.', role)
elif role not in _allowed_visitor_roles:
logging.getLogger('flask.error').warning('Cannot assign role %s to visitors: Unknown role. Skipping.', role)
else:
roles.append(role)
logging.getLogger('flask.error').info('Visitors will have the following roles: %s', ', '.join(roles))
return roles
def visitor(roles):
if roles is None:
return None
return User(name='visitor', roles=roles, visitor=True)
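# Example of the filtering above: with conf.visitor_roles = ['readdb', 'writedb', 'foo'],
# default_visitor_roles() returns ['visitor', 'readdb']; 'writedb' is skipped as too
# privileged and 'foo' as an unknown role.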
|
from collections import namedtuple
Color = namedtuple("Color", "RED BLACK")
class Node:
def __init__(self):
self.color = None
self.height = None
self.lis = None
self.data = None
self.size = None
self.next = None
self.right = None
self.left = None
@staticmethod
def newNode(data):
n = Node()
n.data = data
n.lis = -1
n.height = 1
n.size = 1
n.color = Color.RED
return n
class BinaryTree:
def __init__(self):
pass
@staticmethod
def add_head(data, head):
temp_head = head
n = Node.newNode(data)
if head is None:
head = n
return head
prev = None
while head is not None:
prev = head
if head.data < data:
head = head.right
else:
head = head.left
if prev.data < data:
prev.right = n
else:
prev.left = n
return temp_head
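# Illustrative usage sketch (not part of the original module):
#   root = None
#   for value in [5, 3, 8]:
#       root = BinaryTree.add_head(value, root)
#   # root.data == 5, root.left.data == 3, root.right.data == 8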
|
"""
Build a simple mdlstm network with peepholes:
>>> n = buildSimpleMDLSTMNetwork(True)
>>> print(n)
simpleMDLstmNet
Modules:
[<BiasUnit 'bias'>, <LinearLayer 'i'>, <MDLSTMLayer 'MDlstm'>, <LinearLayer 'o'>]
Connections:
[<FullConnection 'f1': 'i' -> 'MDlstm'>, <FullConnection 'f2': 'bias' -> 'MDlstm'>, <FullConnection 'f3': 'MDlstm' -> 'o'>]
Recurrent Connections:
[<FullConnection 'r1': 'MDlstm' -> 'MDlstm'>, <IdentityConnection 'rstate': 'MDlstm' -> 'MDlstm'>]
Check its gradient:
>>> from pybrain.tests import gradientCheck
>>> gradientCheck(n)
Perfect gradient
True
Try writing it to an xml file, reread it and determine if it looks the same:
>>> from pybrain.tests import xmlInvariance
>>> xmlInvariance(n)
Same representation
Same function
Same class
"""
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.structure.networks.recurrent import RecurrentNetwork
from pybrain import LinearLayer, FullConnection, MDLSTMLayer, BiasUnit, IdentityConnection
from pybrain.tests import runModuleTestSuite
def buildSimpleMDLSTMNetwork(peepholes = False):
N = RecurrentNetwork('simpleMDLstmNet')
i = LinearLayer(1, name = 'i')
dim = 1
h = MDLSTMLayer(dim, peepholes = peepholes, name = 'MDlstm')
o = LinearLayer(1, name = 'o')
b = BiasUnit('bias')
N.addModule(b)
N.addOutputModule(o)
N.addInputModule(i)
N.addModule(h)
N.addConnection(FullConnection(i, h, outSliceTo = 4*dim, name = 'f1'))
N.addConnection(FullConnection(b, h, outSliceTo = 4*dim, name = 'f2'))
N.addRecurrentConnection(FullConnection(h, h, inSliceTo = dim, outSliceTo = 4*dim, name = 'r1'))
N.addRecurrentConnection(IdentityConnection(h, h, inSliceFrom = dim, outSliceFrom = 4*dim, name = 'rstate'))
N.addConnection(FullConnection(h, o, inSliceTo = dim, name = 'f3'))
N.sortModules()
return N
if __name__ == "__main__":
runModuleTestSuite(__import__('__main__'))
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 01 20:00:29 2020
@author: Ing. Daniel Villarreal
"""
from PyQt5.QtWidgets import QMainWindow
from PyQt5.uic import loadUi
import pandas as pd
from src_gui.generateDataframe import DataFrameModel
from Gui.View_dso import Ui_MainWindow
# Sub-window class
class Dso(QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
super(Dso,self).__init__(parent)
Ui_MainWindow.__init__(self)
self.setupUi(self)
# loadUi('Gui/View_dso.ui', self)
read = pd.ExcelFile('src_calc/onefile/DesignRestrictionResult.xlsx')
a = read.parse('CompressionDesign')
b = read.parse('FlexuralDesign_X')
c = read.parse('FlexuralDesign_Y')
d = read.parse('Ratio_DC')
model = DataFrameModel(a)
self.tableView_DComp.setModel(model)
model = DataFrameModel(b)
self.tableView_Dflexx.setModel(model)
model = DataFrameModel(c)
self.tableView_Dflexy.setModel(model)
model = DataFrameModel(d)
self.tableView_DSum.setModel(model)
class Dso_A(QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
super(Dso_A,self).__init__(parent)
Ui_MainWindow.__init__(self)
self.setupUi(self)
# loadUi('Gui/View_dso.ui', self)
read = pd.ExcelFile('src_calc/files/A_DesignRestrictionResult.xlsx')
a = read.parse('CompressionDesign')
b = read.parse('FlexuralDesign_X')
c = read.parse('FlexuralDesign_Y')
d = read.parse('Ratio_DC')
model = DataFrameModel(a)
self.tableView_DComp.setModel(model)
model = DataFrameModel(b)
self.tableView_Dflexx.setModel(model)
model = DataFrameModel(c)
self.tableView_Dflexy.setModel(model)
model = DataFrameModel(d)
self.tableView_DSum.setModel(model)
class Dso_B(QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
super(Dso_B,self).__init__(parent)
Ui_MainWindow.__init__(self)
self.setupUi(self)
# loadUi('Gui/View_dso.ui', self)
read = pd.ExcelFile('src_calc/files/B_DesignRestrictionResult.xlsx')
a = read.parse('CompressionDesign')
b = read.parse('FlexuralDesign_X')
c = read.parse('FlexuralDesign_Y')
d = read.parse('Ratio_DC')
model = DataFrameModel(a)
self.tableView_DComp.setModel(model)
model = DataFrameModel(b)
self.tableView_Dflexx.setModel(model)
model = DataFrameModel(c)
self.tableView_Dflexy.setModel(model)
model = DataFrameModel(d)
self.tableView_DSum.setModel(model)
|
from django.db import models
from django.utils import timezone
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import Max
from django.utils import timezone
# Create your models here.
class Board(models.Model):
#let's do the relational part after I've done the other fields
title = models.CharField(max_length=255)
Description = models.TextField(blank=True)
#where I got the image https://mixkit.co/blog/trello-backgrounds-awesome-free-illustrations-for-trello-boards/
ImageUrl = models.URLField(
default="https://mixkit.imgix.net/art/preview/mixkit-starry-night-sky-over-hills-and-water-85-original-large.png?q=80&auto=format%2Ccompress&h=700&q=80&dpr=1",
blank=True, null=False)
CreatedAt = models.DateTimeField(default=timezone.now)
#EndsAt = Board.return_date_time_in_one_year()
def __str__(self):
return self.title
# Need to figure out how to write helper methods
#@classmethod
#def return_date_time_in_one_year():
# now = timezone.now()
# return now + timedelta(years=1)
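# A possible sketch for the helper above (untested; timedelta has no 'years'
# argument, so 365 days is used as an approximation, and timedelta would need
# to be imported from datetime):
# @classmethod
# def return_date_time_in_one_year(cls):
#     return timezone.now() + timedelta(days=365)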
class Column(models.Model):
#add in relational fields
Name = models.CharField(max_length=255)
location = models.DecimalField(max_digits=1, decimal_places=0)
def __str__(self):
return self.Name
class Ticket(models.Model):
# add in relational fields
title = models.CharField(max_length=100)
description = models.TextField(blank=True)
TypeChoices = [('B','Bug'),('F','Feature'),('U','Urgent'),('S','Server Side'), ('C','Client Side')]
Role = models.CharField(max_length=1, choices=TypeChoices)
created_at = models.DateTimeField(default=timezone.now)
due_date = models.DateTimeField(blank=True, null=True)
# assigned_to = <-- also relational
order = models.DecimalField(max_digits=3,decimal_places=0)
def __str__(self):
return self.title
# TODO: add a Comment model
# TODO: add a Notification model
|
"""
ReflectiveLearning Application
Student Arbeit Universität Siegen
@author: Sukrut S. Sathaye
User Interface: Main Screen
"""
import os
import kivy
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty
import pickle
import pprint
from user_interface.global_variables import *
from user_interface.st_home_screens import StudentScreen, TeacherScreen
from source_code.ReflectiveLearningV7 import QuestionAnswerGenerator as qag
from source_code.ReflectiveLearningV7 import TestOptionGen as testoptn
from source_code.ReflectiveLearningV7 import NewTopicCreator as ntc
from source_code.ReflectiveLearningV7 import ExtractKeyWords as ekw
file_path = os.path.join(design_path, 'main_screen.kv')
Builder.load_file(file_path)
class MainScreen(Screen):
def __init__(self, **kwargs):
super(MainScreen, self).__init__(**kwargs)
login_id = ObjectProperty(None)
password = ObjectProperty(None)
status = ObjectProperty(None)
self.registered_users = {
# need to keep complete names in production
's' : '1',
't' : 'a',
'a' : 'q'
}
self.add_screens()
def add_screens(self):
student_screen = StudentScreen(name='student')
sm.add_widget(student_screen)
teacher_screen = TeacherScreen(name='teacher')
sm.add_widget(teacher_screen)
admin_screen = AdminScreen(name='admin')
sm.add_widget(admin_screen)
def user_login(self):
user = self.login_id.text
password = self.password.text
if user not in list(self.registered_users.keys()):
self.status.text = 'Unregistered User'
self.login_id.text = ''
self.password.text = ''
else:
if user == 's': # student
if password == self.registered_users[user]:
self.status.text = 'Logged in as Student'
sm.current = 'student'
else:
self.status.text = 'Invalid Password try again'
self.password.text = ''
elif user == 't': # teacher
if password == self.registered_users[user]:
self.status.text = 'Logged in as Teacher'
sm.current = 'teacher'
else:
self.status.text = 'Invalid Password try again'
self.password.text = ''
elif user == 'a':
if password == self.registered_users[user]:
sm.current = 'admin'
else:
self.status.text = 'Invalid Password try again'
self.password.text = ''
class AdminScreen(Screen):
def __init__(self, **kwargs):
super(AdminScreen, self).__init__(**kwargs)
topic = ObjectProperty(None)
status = ObjectProperty(None)
def add_new_topic(self):
# topic = self.new_topic.text
if not self.topic.text.isnumeric():
print("Existing Topics: ", topics)
if self.topic.text not in topics:
print("Adding new topic")
adder = ntc(self.topic.text)
self.status.text = 'Done'
else:
self.status.text = 'Topic already exists, add a different topic'
self.topic.text = ''
else:
self.status.text = 'Invalid topic, please re-enter'
self.topic.text = ''
def debug(self):
ds = DebugScreen(name='ds', topic=self.topic.text)
sm.add_widget(ds)
sm.current = 'ds'
class DebugScreen(Screen):
def __init__(self, topic, **kwargs):
super(DebugScreen, self).__init__(**kwargs)
self.topic = topic
def compare_keywords(self):
obj = ekw(self.topic)
comp_kw = obj.compare_keywords(per_parra=True)
pprint.pprint(comp_kw, indent=2,width= 100)
def back(self):
sm.current = 'admin'
|
import time
import json
import codecs
import sys
import multiprocessing as mp, os
import core
from argparse import ArgumentParser
from digsandpaper.elasticsearch_indexing.index_knowledge_graph import index_knowledge_graph_fields
# # from concurrent import futures
# from pathos.multiprocessing import ProcessingPool
# from pathos import multiprocessing as mpp
# import multiprocessing as mp
# import pathos
# # from pathos.helpers
import gzip
""" Process code begins here """
def output_write(output_path):
return codecs.open(output_path, 'w+')
def chunk_file(file_name, size=1024 * 1024):
""" Splitting data into chunks for parallel processing
:param file_name - name of the file to split
:param size - size of file to split
"""
file_end = os.path.getsize(file_name)
with open(file_name, 'r') as f:
chunk_end = f.tell()
while True:
chunk_start = chunk_end
f.seek(size, 1)
f.readline()
chunk_end = f.tell()
yield chunk_start, chunk_end - chunk_start
if chunk_end > file_end:
break
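# For example, a 3 MB input with the default 1 MB chunk size yields roughly three
# (chunk_start, chunk_size) pairs, each extended to a line boundary, which
# process_wrapper below can read and process independently.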
def process_wrapper(core, input, chunk_start, chunk_size, queue):
results = []
with open(input) as f:
f.seek(chunk_start)
lines = f.read(chunk_size).splitlines()
for i, line in enumerate(lines):
document = json.loads(line)
try:
document = core.process(document, create_knowledge_graph=True)
except Exception as e:
print "Failed - ", e
# queue.put(json.dumps(document))
# print "Processing chunk - ", str(chunk_start), " File - ", str(i)
def listener(queue, output):
f = open(output, 'wb')
while 1:
message = queue.get()
if message == 'kill':
print "Done writing to file......."
break
f.write(message + '\n')
f.flush()
f.close()
def run_parallel(input, output, core, processes=0):
processes = processes or mp.cpu_count()
processes += 2 # for writing
manager = mp.Manager()
queue = manager.Queue()
pool = mp.Pool(processes)
# put listener to work first
watcher = pool.apply_async(listener, (queue, output))
jobs = []
for chunk_start, chunk_size in chunk_file(input):
jobs.append(pool.apply_async(process_wrapper, (core, input, chunk_start, chunk_size, queue)))
for job in jobs:
job.get()
queue.put('kill')
pool.close()
def run_serial(input, output, core, prefix='', indexing=True):
output = codecs.open(output, 'w')
index = 1
for line in codecs.open(input):
print prefix, 'processing line number:', index
start_time_doc = time.time()
jl = json.loads(line)
jl.pop('knowledge_graph', None)
if 'content_extraction' in jl:
ce = jl['content_extraction']
if 'inferlink_extractions' in ce:
ce.pop('inferlink_extractions')
jl['content_extraction'] = ce
jl.pop('indexed', None)
result = core.process(jl, create_knowledge_graph=True)
if indexing:
result = index_knowledge_graph_fields(result)
if result:
output.write(json.dumps(result) + '\n')
time_taken_doc = time.time() - start_time_doc
# if time_taken_doc > 5:
# print prefix, "Took", str(time_taken_doc), " seconds"
else:
print 'Failed line number:', index
index += 1
output.close()
def process_one(x):
# output = "output-%d.gz" % pathos.helpers.mp.current_process().getPid()
output = c_options.outputPath + "/output-%d.jl" % mp.current_process().pid
with codecs.open(output, "a+") as out:
out.write('%s\n' % json.dumps(c.process(x)))
def run_parallel_2(input_path, output_path, core, processes=0):
lines = codecs.open(input_path, 'r').readlines()
inputs = list()
# pool = ProcessingPool(16)
pool = mpp.Pool(8)
for line in lines:
inputs.append(json.loads(line))
# pool = .ProcessPoolExecutor(max_workers=8)
# results = list(pool.map(process_one, inputs))
pool.map(process_one, inputs)
# output_f = codecs.open(output_path, 'w')
# for result in results:
# output_f.write(json.dumps(result))
# output_f.write('\n')
def run_parallel_3(input_path, output_path, config_path, processes):
if not os.path.exists(output_path) or not os.path.isdir(output_path):
raise Exception('output path is invalid')
# if len(os.listdir(temp_path)) != 0:
# raise Exception('temp path is not empty')
if processes < 1:
raise Exception('invalid process number')
# split input file into chunks
print 'splitting input file...'
with codecs.open(input_path, 'r') as input:
input_chunk_file_handlers = [
codecs.open(os.path.join(output_path, 'input_chunk_{}.json'.format(i)), 'w') for i in xrange(processes)]
idx = 0
for line in input:
if line == '\n':
continue
input_chunk_file_handlers[idx].write(line)
idx = (idx + 1) % processes
for f in input_chunk_file_handlers:
f.close()
# create processes
print 'creating workers...'
print '-------------------'
process_handlers = []
for i in xrange(processes):
input_chunk_path = os.path.join(output_path, 'input_chunk_{}.json'.format(i))
output_chunk_path = os.path.join(output_path, 'output_chunk_{}.json'.format(i))
p = mp.Process(target=run_parallel_worker,
args=(i, input_chunk_path, output_chunk_path, config_path))
process_handlers.append(p)
# start processes
for p in process_handlers:
p.start()
# wait till finish
for p in process_handlers:
p.join()
print '-------------------'
def run_parallel_worker(worker_id, input_chunk_path, output_chunk_path, config_path):
print 'start worker #{}'.format(worker_id)
c = core.Core(json.load(codecs.open(config_path, 'r')))
run_serial(input_chunk_path, output_chunk_path, c, prefix='worker #{}:'.format(worker_id))
print 'worker #{} finished'.format(worker_id)
def usage():
return """\
Usage: python run_core.py [args]
-i, --input <input_doc> Input file
-o, --output <output_doc> Output file
-c, --config <config> Config file
Optional
-m, --enable-multiprocessing
-t, --thread <processes_count> number of worker processes
(runs serially unless -m is given and the count is > 1)
"""
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-i", "--input", action="store", type=str, dest="inputPath")
parser.add_argument("-o", "--output", action="store", type=str, dest="outputPath")
parser.add_argument("-c", "--config", action="store", type=str, dest="configPath")
parser.add_argument("-m", "--enable-multiprocessing", action="store_true", dest="enableMP")
parser.add_argument("-t", "--thread", action="store",
type=int, dest="threadCount", default=mp.cpu_count())
c_options, args = parser.parse_known_args()
if not (c_options.inputPath and c_options.outputPath and c_options.configPath):
print usage()
sys.exit()
try:
start_time = time.time()
if c_options.enableMP and c_options.threadCount > 1:
print "processing parallelly"
run_parallel_3(
input_path=c_options.inputPath,
output_path=c_options.outputPath,
config_path=c_options.configPath,
processes=c_options.threadCount)
else:
# print "processing serially"
c = core.Core(json.load(codecs.open(c_options.configPath, 'r')))
run_serial(c_options.inputPath, c_options.outputPath, c)
print('The script took {0} second !'.format(time.time() - start_time))
except Exception as e:
print e
|
import glob
import os
import numpy as np
import csv
import cv2
import argparse
parser = argparse.ArgumentParser(description="", add_help='How to use', prog='python creating_numpy.py <options>')
parser.add_argument("-if", "--inputfile", default=None,
help='Path to the CSV file [DEFAULT: "data/tarin.csv"]')
parser.add_argument("-o", "--outputdir", default="../data/tarin_np/",
help='Path to save the files to[DEFAULT: "data/tarin_np/"]')
parser.add_argument("-in", "--inputdir", default="../data/tarin/",
help='Path to actual images[DEFAULT: "data/tarin/"]')
parser.add_argument("-s", "--skip", default="10000",
help='number of records per file')
parser.add_argument("-sz", "--size", default="64",
help='size of the prodused Images ( default = 64 )')
parser.add_argument("-gm", "--generatemapping", action='store_true',
help='generates mappings of hashes to records')
args = parser.parse_args()
train_path = args.inputdir
output_path = args.outputdir
# get a list of all files
file_list = sorted(glob.glob( os.path.join(train_path, '*.jpg')))
X_train = []
y_train = []
if args.inputfile is not None:
csv_file = csv.reader(open(args.inputfile), delimiter=',')
csv_file_1 = dict((rows[0], rows[2])for rows in csv_file)
csv_file = csv_file_1
counter = 0
records_per_file = int(args.skip)
image_size = (int(args.size), int(args.size))
files = []
for i in range (0,len(file_list)):
if i % 100000 == 0 and i > 0:
print("current=", i)
try:
temp_x = cv2.imread(file_list[i], 1)
try:
if args.inputfile is not None:
y_train.append(csv_file[os.path.splitext(os.path.basename(file_list[i]))[0]])
X_train.append(cv2.resize(temp_x, image_size))
files.append(os.path.splitext(os.path.basename(file_list[i]))[0])
except:
print("cant find entry, skipping!")
if i % records_per_file == 0 and i > 0:
np.save(output_path + '/X_' + str(counter) + '.npy', np.array(X_train))
if args.inputfile is not None:
np.save(output_path + '/y_' + str(counter) + '.npy', np.array(y_train))
with open(output_path + '/hashes_X_' + str(counter) + '.txt', 'w') as f:
for pick in files:
f.write(pick + "\n")
counter += 1
files = []
X_train = []
y_train = []
except:
print('error', i)
raise
# save the last section
if len(X_train) > 0:
with open(output_path + '/hashes_X_' + str(counter) + '.txt', 'w') as f:
for pick in files:
f.write(pick + "\n")
np.save(output_path + '/X_' + str(counter) + '.npy', np.array(X_train))
if args.inputfile is not None:
np.save(output_path + '/y_' + str(counter) + '.npy', np.array(y_train))
|
from mlpug.pytorch.trainers.callbacks.basic import *
from mlpug.pytorch.trainers.callbacks.callback import *
from .checkpoint_manager import CheckpointManager
from mlpug.pytorch.trainers.callbacks.lr_scheduler_wrapper import LRSchedulerWrapper
from mlpug.pytorch.trainers.callbacks.metrics_logger import MetricsLoggingMode, TrainingMetricsLogger, TestMetricsLogger
from mlpug.pytorch.trainers.callbacks.tensorboard import Tensorboard, AutoTensorboard
from mlpug.pytorch.trainers.callbacks.distributed import DistributedSamplerManager
from mlpug.pytorch.trainers.callbacks.cuda_memory import EmptyCudaCache, LogCudaMemory
|
"""
Choropleth Map
==============
A choropleth map of unemployment rate per county in the US
"""
# category: maps
import altair as alt
from vega_datasets import data
counties = alt.topo_feature(data.us_10m.url, 'counties')
source = data.unemployment.url
alt.Chart(counties).mark_geoshape().encode(
color='rate:Q'
).transform_lookup(
lookup='id',
from_=alt.LookupData(source, 'id', ['rate'])
).project(
type='albersUsa'
).properties(
width=500,
height=300
)
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import numpy as np
import pandas as pd
import pytest
import mars.oscar as mo
from mars.serialization import AioDeserializer, AioSerializer
from mars.services import start_services, stop_services, NodeRole
from mars.services.storage import StorageAPI
from mars.storage import StorageLevel
@pytest.fixture
async def actor_pools():
async def start_pool():
start_method = os.environ.get('POOL_START_METHOD', 'forkserver') \
if sys.platform != 'win32' else None
pool = await mo.create_actor_pool('127.0.0.1', n_process=2,
subprocess_start_method=start_method,
labels=['main', 'sub', 'io'])
await pool.start()
return pool
worker_pool = await start_pool()
yield worker_pool
await worker_pool.stop()
@pytest.mark.asyncio
async def test_storage_service(actor_pools):
worker_pool = actor_pools
if sys.platform == 'darwin':
plasma_dir = '/tmp'
else:
plasma_dir = '/dev/shm'
plasma_setup_params = dict(
store_memory=10 * 1024 * 1024,
plasma_directory=plasma_dir,
check_dir_size=False)
config = {
"services": ["storage"],
"storage": {
"backends": ["plasma"],
"plasma": plasma_setup_params,
}
}
await start_services(
NodeRole.WORKER, config, address=worker_pool.external_address)
api = await StorageAPI.create('mock_session', worker_pool.external_address)
value1 = np.random.rand(10, 10)
await api.put('data1', value1)
get_value1 = await api.get('data1')
np.testing.assert_array_equal(value1, get_value1)
# test api in subpool
subpool_address = list(worker_pool._sub_processes.keys())[0]
api2 = await StorageAPI.create('mock_session', subpool_address)
assert api2._storage_handler_ref.address == subpool_address
get_value1 = await api2.get('data1')
np.testing.assert_array_equal(value1, get_value1)
sliced_value = await api2.get('data1', conditions=[slice(None, None), slice(0, 4)])
np.testing.assert_array_equal(value1[:, :4], sliced_value)
value2 = pd.DataFrame(value1)
await api2.put('data2', value2)
get_value2 = await api.get('data2')
pd.testing.assert_frame_equal(value2, get_value2)
# test writer and read
buffers = await AioSerializer(value2).run()
size = sum(getattr(buf, 'nbytes', len(buf)) for buf in buffers)
# test open_reader and open_writer
writer = await api.open_writer('write_key', size,
StorageLevel.MEMORY)
async with writer:
for buf in buffers:
await writer.write(buf)
reader = await api.open_reader('write_key')
async with reader:
read_value = await AioDeserializer(reader).run()
pd.testing.assert_frame_equal(value2, read_value)
await stop_services(NodeRole.WORKER, address=worker_pool.external_address,
config=config)
|
import sys
sys.stdout.write("qwert\nyuiop...")
|
from . import views
from django.urls import path
# We specify an app name to reference the URL we need in our HTML templates
app_name = 'bookmyslot'
# The app(bookmyslot) specific URL's
urlpatterns = [
path('mybookings/',views.BookingList.as_view(),name='list'),
path('mybookings/<int:pk>/',views.BookingDetail.as_view(),name='detail'),
path('update/<int:pk>/',views.BookingUpdate.as_view(),name='update'),
path('delete/<int:pk>/',views.BookingDelete.as_view(),name='delete'),
path('new/',views.BookingCreate.as_view(),name='create'),
path('',views.HomeView.as_view(),name='home'),
path('welcome/',views.WelcomeView.as_view(),name='welcome'),
path('thanks/',views.ThanksView.as_view(),name='thanks'),
path('search/',views.search,name='search_bookings')
]
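# With the 'bookmyslot' namespace above, these routes are reversed as, e.g.,
# {% url 'bookmyslot:detail' booking.pk %} in templates or
# reverse('bookmyslot:list') in Python code (illustrative variable names).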
|
import os
import sys
# Read arguments
if len(sys.argv) != 2:
raise ValueError('Please provide a filename input')
filename = sys.argv[1]
# Read file
file_data = open(os.getcwd() + '/' + filename, 'r')
# Parse file
commands = []
for line in file_data.readlines():
commands.append(line.split(' '))
# Get answer
horizontal_pos = 0
depth_pos = 0
for command in commands:
direction = command[0]
value = int(command[1])
if direction == 'forward':
horizontal_pos += value
elif direction == 'down':
depth_pos += value
elif direction == 'up':
depth_pos -= value
answer = horizontal_pos * depth_pos
print(answer)
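# Sanity check: for a sample input of forward 5, down 5, forward 8, up 3,
# down 8, forward 2, the horizontal position becomes 15 and the depth 10,
# so the script prints 150.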
|
# coding=utf-8
import tensorflow as tf
from tfninja.utils import loggerfactory
logger = loggerfactory.get_logger(__name__)
hello = tf.constant('hello ninjas!!')
session = tf.Session()
logger.info(session.run(hello))
|
from pycg3d import cg3d_vector
from pycg3d import utils
class CG3dCordsBase(object):
def __init__(self):
pass
@property
def origin(self):
raise NotImplementedError
@property
def ex(self):
raise NotImplementedError
@property
def ey(self):
raise NotImplementedError
@property
def ez(self):
raise NotImplementedError
class CG3dRectCords(CG3dCordsBase):
def __init__(
self,
origin,
ex=None,
ey=None,
ez=None,
px=None,
py=None,
pz=None,
pxy=None,
pyz=None,
pzx=None
):
"""
:param origin: origin point of the axis
:param ex: vector of X-axis (from origin)
:param px: one point on X-axis
:param ey: vector of Y-axis
:param py: one point on Y-axis
:param ez: vector of Z-axis
:param pz: one point on Z-axis
:param pxy: one point on XY plane
:param pyz: one point on YZ plane
:param pzx: one point on ZX plane
"""
self._origin = origin
self._ex = self.fix_cords(origin, ex, ey, ez, px, py, pz, pxy, pyz, pzx)
self._ey = self.fix_cords(origin, ey, ez, self._ex, py, pz, px, pyz, pzx, pxy)
self._ez = self.fix_cords(origin, ez, self._ex, self._ey, pz, px, py, pzx, pxy, pyz)
self._ex.normalize()
self._ey.normalize()
self._ez.normalize()
def fix_cords(self, origin, e1, e2, e3, p1, p2, p3, p12, p23, p31):
"""
fix e1
:param origin:
:param e1: vector of axis-1 or None
:param e2: vector of axis-2 or None
:param e3: vector of axis-3 or None
:param p1: one point on axis-1 or None
:param p2: one point on axis-2 or None
:param p3: one point on axis-3 or None
:param p12: one point on plane 12 or None
:param p23: one point on plane 23 or None
:param p31: one point on plane 31 or None
:return:
"""
assert isinstance(origin, cg3d_vector.CG3dVector)
if e1:
assert isinstance(e1, cg3d_vector.CG3dVector)
return e1
if p1:
assert isinstance(p1, cg3d_vector.CG3dVector)
return p1-origin
# e1 is not defined
if e2:
if e3:
return utils.cross_product(e2, e3)
elif p3:
return utils.cross_product(e2, p3-origin)
elif p23:
return utils.cross_product(e2, p23-origin)
else:
raise ValueError
elif p2:
return self.fix_cords(origin, e1, p2-origin, e3, p1, p2, p3, p12, p23, p31)
else:
raise ValueError
@property
def origin(self):
return self._origin
@property
def ex(self):
return self._ex
@property
def ey(self):
return self._ey
@property
def ez(self):
return self._ez
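# Illustrative usage sketch (assumes CG3dVector can be constructed from three
# components; not part of the original module):
#   origin = cg3d_vector.CG3dVector(0.0, 0.0, 0.0)
#   ex = cg3d_vector.CG3dVector(1.0, 0.0, 0.0)
#   ey = cg3d_vector.CG3dVector(0.0, 1.0, 0.0)
#   cords = CG3dRectCords(origin, ex=ex, ey=ey)  # ez is derived via the cross product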
|
from threading import Thread
import asyncio
import random
import time
import pytest
import _thread
from pybeehive.asyn.socket import SocketListener, SocketStreamer
import pybeehive
def test_no_zmq(async_hive):
async_hive._socket_listener_class = None
async_hive._socket_streamer_class = None
with pytest.raises(RuntimeError):
async_hive.socket_listener(('', 0))(lambda: None)
with pytest.raises(RuntimeError):
async_hive.socket_streamer(('', 0))(lambda: None)
async_hive._socket_listener_class = SocketListener
async_hive._socket_streamer_class = SocketStreamer
# If run_in_new_loop is not the first argument, things break
def test_messaging(run_in_new_loop, async_client_server):
client, server = async_client_server
async def _test():
msg = b'data'
generator = server.iter_messages().__aiter__()
for _ in range(5):
await client.send(msg)
received = await generator.__anext__()
assert received == msg, 'Incorrect message sent to server'
await client.shutdown()
await server.shutdown()
run_in_new_loop(_test)
def test_socket_streamer_listener_loop(async_hive):
address = '127.0.0.1', random.randint(7000, 10000)
events = []
@async_hive.socket_listener(address)
async def parse_event(event):
event = pybeehive.Event(event.data + 1, created_at=event.created_at)
events.append(event)
return event
async_hive.add(SocketStreamer(address))
async_hive.submit_event(pybeehive.Event(-1))
async_hive.run(threaded=True)
start = time.time()
while len(events) < 5 and time.time() - start < 2:
time.sleep(1e-4)
async_hive.close()
assert len(events) >= 5, "Hive did not process all events"
for i, e in enumerate(events):
assert i == e.data, "Event data was not parsed by listener"
def test_multiple_listeners_single_streamer(async_hive):
address = '127.0.0.1', random.randint(7000, 10000)
events = []
class Listener(SocketListener):
async def parse_event(self, event):
event = pybeehive.Event(event.data + 1, created_at=event.created_at)
events.append(event)
return event
async_hive.add(SocketStreamer(address))
for _ in range(3):
async_hive.add(Listener(address))
async_hive.submit_event(pybeehive.Event(-1))
async_hive.run(threaded=True)
start = time.time()
while len(events) < 12 and time.time() - start < 2:
time.sleep(1e-4)
async_hive.close()
assert len(events) >= 12, "Hive did not process all events"
for i, e in enumerate(events[:12]):
# First three
if i < 3:
assert e.data == 0, "Multiple listeners sent incorrect events"
# Last nine (each cycle is 3x)
else:
assert e.data == 1, "Streamer did not propagate events to listeners"
def test_message_closed_server(async_hive):
address = '127.0.0.1', random.randint(7000, 10000)
events = []
@async_hive.socket_listener(address)
async def parse_event(event):
events.append(event)
return event
async_hive.submit_event(pybeehive.Event(-1))
async_hive.run(threaded=True)
start = time.time()
while len(events) < 1 and time.time() - start < 2:
time.sleep(1e-4)
async_hive.kill()
assert len(events) >= 1, "Hive did not process all events"
# This test can pass, but due to the inconsistent handling of
# KeyboardInterrupt in zmq.asyncio.Socket.send it does not
# pass consistently and is thus skipped in the normal tests.
# If pyzmq is updated to fix this the skip can be removed.
@pytest.mark.skip
def test_interrupted_streamer_listener_loop(async_hive):
address = '127.0.0.1', random.randint(7000, 10000)
events = []
def interrupt():
time.sleep(1e-2)
_thread.interrupt_main()
async def parse_event(event):
await asyncio.sleep(1e-4)
events.append(event)
return event
streamer = SocketStreamer(address)
listener = SocketListener(address)
listener.parse_event = parse_event
async_hive.add(streamer)
async_hive.add(listener)
async_hive.submit_event(pybeehive.Event(-1))
Thread(target=interrupt).start()
async_hive.run()
assert len(events) > 1, "Listener did not receive any events from streamer"
assert not async_hive.alive, 'KeyboardInterrupt did not kill hive'
assert not streamer.server.alive, 'KeyboardInterrupt did not kill server'
assert not listener.client.alive, 'KeyboardInterrupt did not kill client'
|
import mxnet as mx
import nnvm
import random
import itertools
import numpy as np
from nnvm.compiler import graph_attr, graph_util
from nnvm import graph as _graph
import math
import tvm
from mxnet import nd
import sym_utils as sutils
import utils
import mrt as _mrt
def random_shape(shp, min_dim=1, max_dim=64):
for i in range(len(shp)):
if shp[i] is None:
shp[i] = random.randint(min_dim, max_dim)
while np.product(shp) > (1 << 20):
rand_idx = random.randint(0, len(shp)-1)
shp[rand_idx] = max(shp[rand_idx] // 2, 1)
return shp
def random_bool():
return random.randint(0, 1) == 0
def random_select(arr):
return arr[random.randint(0, len(arr)-1)]
def random_int():
int_max = (2 ** 31) - 1
return random.randint(-int_max, int_max)
def get_cvm_op(op_name):
if op_name == 'null':
return nnvm.sym.Variable
op = getattr(nnvm.sym, op_name)
if not op:
raise OpNotImplemented(
'Operator {} is not supported.'.format(op_name))
return op
def infer_shape(symbol):
nnvm_graph = _graph.create(symbol)
return graph_util.infer_shape(nnvm_graph)
def to_json(symbol):
graph = _graph.create(symbol)
return graph.json()
def adjust_shape(sym, ishp, oshp):
isize, osize = np.product(ishp), np.product(oshp)
if ishp != (isize,):
print ("\tflatten", sym.attr('name'), ishp, "into (", isize, ",)")
sym = get_cvm_op("reshape")(sym, shape=(isize,))
if isize < osize:
repeats = int(math.ceil(osize / isize))
print ("\trepeat", sym.attr('name'), "times=", repeats)
sym = get_cvm_op("repeat")(sym, repeats=repeats)
_, shape = infer_shape(sym)
print ("\tstrided_slice", sym.attr('name'), "with (0, ", osize, ")", shape)
sym = get_cvm_op("strided_slice")(sym, begin=(0,), end=(osize,))
if oshp != (osize,):
print ("\treshape", sym.attr('name'), "into ", oshp)
sym = get_cvm_op("reshape")(sym, shape=oshp)
return sym
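# Example of the adjustment above: taking a (2,) input to an expected (3, 2)
# output leaves the 1-D shape as is, repeats it 3 times to (6,), slices the
# first 6 elements, and finally reshapes to (3, 2).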
class Attr():
bool_t = '__bool_type'
int_t = '__int_type'
list_10_t = '__list_10_type'
list_4_t = '__list_4_type'
list_2_t = '__list_2_type'
def __init__(self, in_len=1, out_len=1, param_candidate_rate=0, **kwargs):
assert out_len > 0
self.in_len = in_len
self.out_len = out_len
self.param_candidate_rate = param_candidate_rate
self.attrs = kwargs
def input_size(self):
return self.in_len
def output_size(self):
return self.out_len
def attr(self):
attrs = {}
for k, v in self.attrs.items():
if v == Attr.bool_t:
attrs[k] = random_bool()
elif v == Attr.int_t:
attrs[k] = random_int()
elif v == Attr.list_2_t:
attrs[k] = [random_int(), random_int()]
elif v == Attr.list_4_t:
attrs[k] = [random_int() for _ in range(4)]
elif v == Attr.list_10_t:
attrs[k] = [random_int() for _ in range(10)]
else:
attrs[k] = v
return attrs
NullAttr = Attr(0, 1)
class EntryId():
def __init__(self, node, entry=0):
assert entry < len(node)
self._node = node
self._entry = entry
def __call__(self, node_id=True):
if node_id:
return self._node.id()
return self._entry
def __repr__(self):
return "<%d, %d>" % (self._node.id(), self._entry)
class Node():
_eid = 0
def __init__(self, op_name="null", attr=NullAttr):
self._op_name = op_name
self._attr = attr
self._input_size = attr.input_size()
if self._input_size is None:
self._input_size = random.randint(1, 4)
elif isinstance(self._input_size, list):
length = len(self._input_size) - 1
self._input_size = self._input_size[random.randint(0, length)]
self._output_size = attr.output_size()
self._id = Node._eid
Node._eid += self._output_size
self._inputs = None
def attr(self, name=None):
if name is None:
return self._attr.attr()
return self._attr.__dict__[name]
def name(self):
return self._op_name
def set_inputs(self, inputs):
self._inputs = inputs
def get_children(self):
return self._inputs
def input_size(self):
return self._input_size
def id(self):
return self._id
def entry(self, entry=None):
if entry is None:
if self._op_name == 'get_valid_counts':
return EntryId(self, 1)
return EntryId(self, 0)
return EntryId(self, entry)
def __len__(self):
return self._output_size
def op(self):
return get_cvm_op(self._op_name)
def __repr__(self):
return "id=%3d op=%-20s inputs=%s" % (self._id, self._op_name, self._inputs)
class IndexGraph():
def __init__(self):
self.idx_graph = {}
self.nid = 0
self.last_node = None
def add_node(self, node):
print ("Index Graph add node nid", "%3d"%self.nid, "node", node)
self.idx_graph[self.nid] = node
self.last_node = node
self.nid = self.nid + 1
def random_entry(self, param_rate=0):
nid = random.randint(0, self.nid - 1)
if random.randint(0, 99) < param_rate:
self.add_node(Node())
nid = self.nid - 1
node = self.idx_graph[nid]
return node.entry()
def __iter__(self):
for i in sorted(self.idx_graph.keys()):
yield self.idx_graph[i]
def __getitem__(self, key):
assert key >= 0 and key < self.nid
return self.idx_graph[key]
CVM_OPS = {
# nn
'conv2d': Attr(in_len=[2, 3], param_candidate_rate=100,
channels=Attr.int_t, kernel_size=Attr.list_2_t,
strides=Attr.list_2_t, padding=Attr.list_2_t,
dilation=Attr.list_2_t, groups=Attr.int_t,
use_bias=Attr.bool_t),
'dense': Attr(in_len=[2, 3], param_candidate_rate=100,
units=Attr.int_t, use_bias=Attr.bool_t),
'relu': Attr(),
'upsampling':Attr(scale=Attr.int_t),
'max_pool2d': Attr(pool_size=Attr.list_2_t, strides=Attr.list_2_t,
padding=Attr.list_2_t),
# reduce
'max': Attr(axis=Attr.list_10_t, keepdims=Attr.bool_t, exclude=Attr.bool_t),
'sum': Attr(axis=Attr.list_10_t, keepdims=Attr.bool_t, exclude=Attr.bool_t),
# elemwise
'elemwise_add': Attr(in_len=2, param_candidate_rate=5),
'elemwise_sub': Attr(in_len=2, param_candidate_rate=5),
'abs': Attr(),
'log2': Attr(),
'negative': Attr(),
'clip': Attr(),
'cvm_clip': Attr(precision=Attr.int_t),
'cvm_left_shift': Attr(shift_bit=Attr.int_t, precision=Attr.int_t),
'cvm_right_shift': Attr(shift_bit=Attr.int_t, precision=Attr.int_t),
# broadcast
'broadcast_add': Attr(in_len=2, param_candidate_rate=10),
'broadcast_sub': Attr(in_len=2, param_candidate_rate=10),
'broadcast_mul': Attr(in_len=2, param_candidate_rate=70),
'broadcast_max': Attr(in_len=2, param_candidate_rate=50),
# vision
'get_valid_counts': Attr(out_len=2, score_threshold=Attr.int_t),
'non_max_suppression': Attr(in_len=2, param_candidate_rate=100,
iou_threshold=Attr.int_t, force_suppress=Attr.bool_t,
top_k=Attr.int_t, max_output_size=Attr.int_t),
# transform
'expand_dims': Attr(axis=Attr.int_t, num_newaxis=Attr.int_t),
'transpose': Attr(axes=Attr.list_10_t),
'reshape': Attr(shape=Attr.list_10_t),
'squeeze': Attr(axis=Attr.int_t),
'concatenate': Attr(in_len=None, param_candidate_rate=5,
axis=Attr.int_t),
'take': Attr(in_len=2, axis=Attr.int_t),
'strided_slice': Attr(begin=Attr.list_10_t, end=Attr.list_10_t, stride=Attr.list_10_t),
'repeat': Attr(repeats=Attr.int_t, axis=Attr.int_t),
'tile': Attr(reps=Attr.list_10_t),
'slice_like': Attr(in_len=2, param_candidate_rate=10, axis=Attr.list_10_t),
'cvm_lut': Attr(in_len=2),
'flatten': Attr(),
}
op_names = [
'conv2d', 'dense', 'expand_dims', 'transpose', 'reshape', 'squeeze',
'concatenate', 'take', 'strided_slice', 'repeat', 'tile',
'broadcast_add', 'broadcast_sub', 'broadcast_mul', 'broadcast_mul',
'elemwise_add', 'elemwise_sub',
]
name_count = {}
def uniq_name(name):
if name not in name_count:
name_count[name] = 0
uniq = "%s_%d" % (name, name_count[name])
name_count[name] += 1
return uniq
def sequence2symbol(idx_graph):
def constraint_attr(attr, name, a_min, a_max):
num = attr[name]
if isinstance(num, list):
for i, a in enumerate(num):
alpha = a_max[i] - a_min[i] + 1
attr[name][i] = a % alpha + a_min[i]
return
alpha = a_max - a_min + 1
attr[name] = num % alpha + a_min
graph, shapes = {}, {}
params = {}
inputs_ext = { 'data': {} }
for node in idx_graph:
input_eids = node.get_children()
if input_eids is not None:
childs = [graph[c()][c(False)] for c in input_eids]
else:
childs = None
op, op_name = node.op(), node.name()
nid, attr = node.id(), node.attr()
shpes = []
requantize = False
shift_bit = 0
if op_name == 'conv2d':
ishp = random_shape([None, None, None, None])
ishp[0] = random.randint(1, 16)
ishp[1] = random_select([3, 4, 16])
is_depthwise = random.randint(0, 100) > 80
attr['groups'] = ishp[1] if is_depthwise else 1
matrix_len = 1
if is_depthwise:
attr['channels'] = ishp[1]
else:
constraint_attr(attr, 'channels', a_min=1, a_max=256)
matrix_len = attr['channels']
h, w = ishp[2:]
dh, dw = random.randint(1, min(3, h)), random.randint(1, min(3, w))
kh, kw = random.randint(1, max(1, h // dh)), random.randint(1, max(1, w // dw))
while (kh * kw) > (32768 / matrix_len):
kh = kh // 2
kw = kw // 2
bit = math.ceil(math.log2(matrix_len * kh * kw))
attr['kernel_size'] = (kh, kw)
attr['dilation'] = (dh, dw)
constraint_attr(attr, 'strides', a_min=(1, 1), a_max=(5, 5))
constraint_attr(attr, 'padding', a_min=(1, 1), a_max=(7, 7))
attr['use_bias'] = False if len(childs) == 2 else True
if is_depthwise:
wshp = (ishp[1], 1, kh, kw)
else:
wshp = (attr['channels'], ishp[1], kh, kw)
shpes = [ishp, wshp]
if attr['use_bias']:
shpes.append((wshp[0],))
bit += 1
requantize = True
shift_bit = bit + 16 - 14
elif op_name == 'dense':
ishp = random_shape([None, None], 10, 64)
constraint_attr(attr, 'units', a_min=1, a_max=1000)
bit = math.ceil(math.log2(attr['units']))
attr['use_bias'] = False if len(childs) == 2 else True
wshp = (attr['units'], ishp[1])
shpes = [ishp, wshp]
if attr['use_bias']:
shpes.append((wshp[0],))
bit += 1
requantize = True
shift_bit = bit + 16 - 14
elif op_name == 'expand_dims':
ndim = random.randint(1, 3)
ishp = random_shape([None] * ndim)
constraint_attr(attr, 'axis', a_min=-ndim-1, a_max=ndim)
constraint_attr(attr, 'num_newaxis', a_min=1, a_max=6-ndim)
shpes = [ishp]
elif op_name == 'transpose':
ndim = random.randint(1, 6)
ishp = random_shape([None] * ndim, max_dim=32)
axes = list(range(0, ndim))
random.shuffle(axes)
attr['axes'] = axes
shpes = [ishp]
elif op_name == 'reshape':
ndim = random.randint(1, 4)
shape = random_shape([None] * ndim)
attr['shape'] = shape
size = np.prod(shape)
shpes = [(size,)]
elif op_name == 'squeeze':
ndim = random.randint(2, 5)
ishp = random_shape([None] * ndim)
constraint_attr(attr, 'axis', a_min=-ndim, a_max=ndim-1)
ishp[attr['axis']] = 1
shpes = [ishp]
elif op_name == 'concatenate':
ndim = random.randint(1, 4)
ishp = random_shape([None] * ndim)
constraint_attr(attr, 'axis', a_min=-ndim, a_max=ndim-1)
axis = attr['axis'] if attr['axis'] >= 0 else attr['axis']+ndim
for _ in range(len(childs)):
shp = [random.randint(1, 64) if i==axis else s for i,s in enumerate(ishp)]
shpes.append(shp)
elif op_name == 'take':
ndim = random.randint(1, 3)
ishp = random_shape([None] * ndim)
constraint_attr(attr, 'axis', a_min=-ndim, a_max=ndim)
attr['axis'] = None if attr['axis'] == ndim else attr['axis']
ndim = random.randint(1, 2)
wshp = random_shape([None] * ndim)
shpes = [ishp, wshp]
elif op_name == 'strided_slice':
ndim = random.randint(1, 4)
ishp = random_shape([None] * ndim)
begin, end, stride = [], [], []
for s in ishp:
st = random_select([-3, -2, -1, 1, 2, 3])
if s == 1:
b = 0
e = 1 if st > 0 else -s-1
else:
b = random.randint(0, s-1)
e = random.randint(0, s-2)
e = e if e < b else e+1
if st > 0:
b, e = (b, e) if b < e else (e, b)
else:
b, e = (e, b) if b < e else (b, e)
b = b-s if random_bool() else b
e = e-s if random_bool() else e
begin.append(b)
end.append(e)
stride.append(st)
attr['begin'], attr['end'], attr['stride'] = begin, end, stride
shpes = [ishp]
elif op_name == 'repeat':
ndim = random.randint(1, 4)
ishp = random_shape([None] * ndim)
constraint_attr(attr, 'repeats', 1, 10)
constraint_attr(attr, 'axis', -ndim, ndim-1)
shpes = [ishp]
elif op_name == 'tile':
ndim = random.randint(1, 4)
ishp = random_shape([None] * ndim)
rdim = random.randint(1, 5)
attr['reps'] = [random.randint(1, 4) for _ in range(rdim)]
shpes = [ishp]
elif op_name == 'flatten':
ndim = random.randint(1, 4)
ishp = random_shape([None] * ndim)
shpes = [ishp]
elif op_name in ['broadcast_add', 'broadcast_sub', 'broadcast_mul', 'broadcast_max']:
adim = random.randint(1, 4)
bdim = random.randint(1, 4)
max_dim = max(adim, bdim)
shp = random_shape([None] * max_dim)
ashp = [1 if random_bool() else shp[max_dim-adim+i] for i in range(adim)]
bshp = [1 if random_bool() else shp[max_dim-bdim+i] for i in range(bdim)]
shpes = [ashp, bshp]
elif op_name in ['elemwise_add', 'elemwise_sub']:
ndim = random.randint(1, 4)
ishp = random_shape([None] * ndim)
shpes = [ishp, ishp]
print (op_name, attr, "childs shape:", shpes)
if nid==0:
ndim = random.randint(1, 4)
oshape = [random_shape([None] * ndim)]
node = op("data", shape=oshape[0], precision=8)
inputs_ext["data"]["shape"] = oshape[0]
print ("data", oshape)
elif op_name == 'null':
node = op(uniq_name("param"))
oshape = None
elif childs is not None:
new_childs = []
for i, c in enumerate(childs):
cname, cop_name = c.attr('name'), c.attr('op_name')
if shapes[cname] is None:
new_name = uniq_name("parameter")
new_c = get_cvm_op("null")(new_name, shape=shpes[i],
precision=8)
shapes[new_name] = shpes[i]
param = np.random.randint(-127, 127, shpes[i], "int32")
params[new_name] = tvm.nd.array(param)
else:
new_c = adjust_shape(c, shapes[cname][input_eids[i](False)], shpes[i])
new_childs.append(new_c)
node = op(*new_childs, **attr)
if requantize:
# node = get_cvm_op("cvm_right_shift")(node, shift_bit=shift_bit, precision=8)
node = get_cvm_op("cvm_clip")(node, precision=8)
ishape, oshape = infer_shape(node)
print (op_name, "name:", node.attr('name'), "output shape:", oshape)
if len(oshape) == 1:
bias = get_cvm_op("null")(uniq_name("parameter"), shape=oshape[0],
precision=8)
params[bias.attr('name')] = tvm.nd.array(np.random.randint(-127, 127, oshape[0], "int32"))
node = get_cvm_op("elemwise_add")(node, bias)
node = get_cvm_op("cvm_clip")(node, precision=8)
else:
assert False
shapes[node.attr('name')] = oshape
graph[nid] = node
return graph[idx_graph.last_node.id()], params, inputs_ext
def gen_sequence():
graph_size = random.randint(1, 100)
print ("Graph Length", graph_size)
# op_names.extend(['conv2d' for _ in range(5)])
# op_names.extend(['dense' for _ in range(2)])
ops_count = len(op_names)
ops = [random.randint(0, ops_count-1) for _ in range(graph_size)]
ops = [op_names[idx] for idx in ops]
print ("Graph Ops", " -> ".join(ops))
idx_graph = IndexGraph()
Node._eid = 0
out = Node()
idx_graph.add_node(out)
for op_name in ops:
op = Node(op_name, CVM_OPS[op_name])
input_size = op.input_size()
param_rate = op.attr('param_candidate_rate')
inputs = [out.entry() if i==0 else idx_graph.random_entry(param_rate) \
for i in range(input_size)]
op.set_inputs(inputs)
idx_graph.add_node(op)
out = op
return idx_graph
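# gen_sequence produces a random linear chain of ops: each op's first input is the previous
# op's output, and any extra inputs come from earlier entries (or fresh parameters) chosen
# by idx_graph.random_entry(param_candidate_rate).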
def to_nnvm(sym, params, inputs_ext, model_name):
dshp = inputs_ext['data']['shape']
data = tvm.nd.array(np.random.randint(-127, 127, dshp, "int32"))
_mrt.std_dump(sym, params, inputs_ext, data, model_name,
is_mxnet=False, batch=True)
def cvm_model():
graph = gen_sequence()
symbol, params, inputs_ext = sequence2symbol(graph)
open("/tmp/cvm_model.json", "w").write(to_json(symbol))
param_bytes = nnvm.compiler.save_param_dict(params)
open("/tmp/cvm_model.params", "wb").write(param_bytes)
model_name = uniq_name("random_4")
to_nnvm(symbol, params, inputs_ext, model_name)
def load_model():
sym_json = open("/tmp/cvm_model.json", "r").read()
param_bytes = open("/tmp/cvm_model.params", "rb").read()
params = nnvm.compiler.load_param_dict(param_bytes)
nnvm_graph = _graph.load_json(sym_json)
for sym in sutils.topo_sort(nnvm_graph.symbol):
name, op_name = sym.attr('name'), sym.attr('op_name')
if op_name == 'repeat':
_, oshape = infer_shape(sym)
print (op_name, name, oshape)
# ishape, oshape = graph_util.infer_shape(nnvm_graph)
# print (ishape, oshape)
model_name = "random_test"
inputs_ext = { 'data': { 'shape': (25, 32, 28, 28) }}
to_nnvm(nnvm_graph.symbol, params, inputs_ext, model_name)
exit()
for i in range(10):
cvm_model()
|
############### Configuration file ###############
import math
start_epoch = 1
num_epochs = 60
batch_size = 8
optim_type = 'Adam'
lr = 0.00001
weight_decay = 0.0005
num_samples = 25
beta_type = "Blundell"
mean = {
'cifar10': (0.4914, 0.4822, 0.4465),
'cifar100': (0.5071, 0.4867, 0.4408),
'mnist': (0.1307,),
'stl10': (0.485, 0.456, 0.406),
'origa': (0.5, 0.5, 0.5),
}
std = {
'cifar10': (0.2023, 0.1994, 0.2010),
'cifar100': (0.2675, 0.2565, 0.2761),
'mnist': (0.3081,),
'stl10': (0.229, 0.224, 0.225),
'origa': (0.5, 0.5, 0.5),
}
# Only for cifar-10
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def learning_rate(init, epoch):
    optim_factor = 0
    if (epoch > 160):
        optim_factor = 3
    elif (epoch > 120):
        optim_factor = 2
    elif (epoch > 60):
        optim_factor = 1
    # Step decay; the 0.2-per-step factor is an assumption (the decay factor is not
    # specified elsewhere in this file).
    return init * math.pow(0.2, optim_factor)
def dynamic_lr(init, epoch):
optim_factor = 1
if (epoch > 60):
optim_factor = 500
elif (epoch > 30):
optim_factor = 100
elif (epoch > 10):
optim_factor = 10
return init/optim_factor
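# Illustration (using the assumed 0.2-per-step decay in learning_rate above):
# learning_rate(0.1, epoch=130) -> 0.1 * 0.2**2 = 0.004; dynamic_lr(0.1, epoch=130) -> 0.1 / 500 = 0.0002.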
def get_hms(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return h, m, s
|
import numpy as np
import matplotlib.pyplot as plt
from profit.sur.backend.gp_functions_old import invert, nll, predict_f, \
get_marginal_variance_BBQ, wld_get_marginal_variance
from profit.sur.backend.kernels import kern_sqexp
from profit.util.halton import halton
def f(x): return x*np.cos(10*x)
# Custom function to build GP matrix
def build_K(xa, xb, hyp, K):
for i in np.arange(len(xa)):
for j in np.arange(len(xb)):
K[i, j] = kern_sqexp(xa[i], xb[j], hyp[0])
noise_train = 0.01
#ntrain = 20
for ntrain in range(1, 31):
xtrain = halton(ntrain, 1)
ftrain = f(xtrain)
np.random.seed(0)
ytrain = ftrain + noise_train*np.random.randn(ntrain, 1)
# GP regression with fixed kernel hyperparameters
hyp = [0.1, 1e-4] # l and sig_noise**2
K = np.empty((ntrain, ntrain)) # train-train
build_K(xtrain, xtrain, hyp, K) # writes inside K
Ky = K + hyp[-1]*np.eye(ntrain)
Kyinv = invert(Ky, 4, 1e-6) # using gp_functions.invert
ntest = 300
xtest = np.linspace(0, 1, ntest)
ftest = f(xtest)
Ks = np.empty((ntrain, ntest)) # train-test
Kss = np.empty((ntest, ntest)) # test-test
build_K(xtrain, xtest, hyp, Ks)
build_K(xtest, xtest, hyp, Kss)
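# The predictive mean below is the standard GP formula m(x*) = K(x*, X) [K(X, X) + sigma_n^2 I]^{-1} y,
# with Kyinv standing in for the bracketed inverse.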
fmean = Ks.T.dot(Kyinv.dot(ytrain)) # predictive mean
Ef, varf = predict_f(hyp, xtrain.reshape(-1, 1),
ytrain.reshape(-1, 1), xtest.reshape(-1, 1), neig=8)# posterior
# Estimation and variance
varf = np.diag(varf)
# keep only the diagonal: those entries are the predictive variances; the off-diagonal terms are covariances
plt.figure()
plt.plot(xtrain, ytrain, 'kx')
plt.plot(xtest, ftest, 'm-')
plt.plot(xtest, fmean, 'r--')
axes = plt.gca()
axes.set_ylim([-1.5, 1])
plt.title('Random Gaussian Process with '+ str(ntrain) + ' observation(s) hyp = [0.1, 1e-4]')
plt.fill_between(xtest, # x
(fmean.flatten() + 2 * np.sqrt(varf)), # y1
(fmean.flatten() - 2 * np.sqrt(varf)), facecolor='blue', alpha=0.4) # y2
plt.legend(('training', 'reference', 'prediction', 'Posterior Variance'))
plt.savefig('Random_' + str(ntrain))
#plt.show()
|
# General course information
COURSE_NUMBER = "CS 106A"
COURSE_NAME = "Programming Methodology"
COURSE_TITLE = COURSE_NUMBER + ": " + COURSE_NAME
CS198_NUMBER = 1 # B = 2, X = 3
# General quarter information
QUARTER_NUMBER = 1188
QUARTER_NAME = "Summer"
QUARTER_YEAR = "2018"
QUARTER_FULL_NAME = QUARTER_NAME + " " + QUARTER_YEAR
QUARTER_OVER = True # Set to True to add banner at the top redirecting to newer version of webpage
# Lecture information (displayed at top of home page)
LECTURE_DAYS = ["Monday", "Tuesday", "Wednesday", "Thursday"]
LECTURE_TIME = "11:30AM-12:20PM PST"
LECTURE_LOCATION = "NVIDIA Auditorium"
LECTURE_LOCATION_LINK = "https://www.google.com/maps/place/NVIDIA+Auditorium/@37.4277284,-122.1763905,17z/data=!3m1!4b1!4m5!3m4!1s0x808fbb2ac97723cb:0xa6e8fc1a7a5f2c29!8m2!3d37.4277284!4d-122.1741965"
LECTURE_ASSIGNMENTS_LINK = "hashed-assignments.txt"
LECTURE_FEEDBACK_LINK = "https://docs.google.com/forms/d/e/1FAIpQLSeqfuZy6h7FI5Bbuck20kUOXjeR7pPEdj_DKPBJrHT-PXNHUQ/viewform?usp=sf_link" # Length > 0
# Course staff (displayed in left sidebar and on staff page)
STAFF_INFO = [
{
"POSITION": "Instructor",
"NAME": "Colin Kincaid",
"IMAGE": "ckincaid.jpg",
"EMAIL": "ckincaid@stanford.edu",
"OFFICE_HOURS_LOCATION": "Gates B02",
"OFFICE_HOURS_DATETIME": "Mon-Thu 12:30-1:30PM",
"DESCRIPTION": "The instructor teaches lectures and manages the overall course. Contact the instructor if you have a question that cannot be answered by your section leader; the SLs are your primary email contact for all questions about homework or other course issues. Please also cc your section leader and/or Head TA. In addition to at his office hours, please feel free to ask Colin questions before/after any lecture."
},
{
"POSITION": "Head TA",
"NAME": "Annie Hu",
"IMAGE": "anniehu.jpg",
"EMAIL": "anniehu@stanford.edu",
"OFFICE_HOURS_LOCATION": "Gates B02",
"OFFICE_HOURS_DATETIME": "Sun, Wed 5-7PM",
"DESCRIPTION": "The Head TA manages the section leaders and also oversees important course activities such as grading and section problems. Contact the Head TA if you need a regrade on an assignment, or have a question that cannot be answered by your section leader; the SLs are your primary email contact for all questions about homework or other course issues. Please also cc your section leader and/or instructor."
}
]
# Note: section leader names MUST be formatted "NAME (SUNET)" as the image for a given Section Leader is assumed to be SUNET.jpg.
SECTION_LEADERS = [
"Alex Mallery (amallery)",
"Arjun Sawhney (sawhneya)",
"Avery Wang (awvry952)",
"Belce Dogru (belce)",
"Diego Hernandez (diegoh)",
"Garrick Fernandez (gfaerr)",
"Jared Bitz (jbitz)",
"Jennie Yang (jenniey)",
"Jesse Doan (jdoan21)",
"Jonathan Gomes Selman (jgs8)",
"Matthew Katzman (mkatzman)",
"Meng Zhang (mz315)",
"Michelle McGhee (mmcghee)",
"Ruiqi Chen (rchensix)",
"Shanon Reckinger (mcintyrs)",
"Yoni Lerner (yonlern)",
"Allison Tielking (atielkin)",
"Connor Meany (cmeany)",
"Deanna Garcia (dgarcia5)",
"Greg Ramel (gramel)",
"Drew Bassilakis (abass20)"
]
SL_INDEX = range(len(SECTION_LEADERS))
# LaIR (listed in left sidebar)
LAIR_HOURS = "Sun-Wed, 7-11PM"
# SCPD (listed in left sidebar)
SCPD_OFFICE_HOURS = []
SCPD_OFFICE_HOURS_LINK = "http://www.queuestatus.com/queues/122"
SCPD_OFFICE_HOURS_DESCRIPTION = "for help during the times below (free QueueStatus account required)."
# Exams (displayed in left sidebar, but locations and review session are only displayed on exam subpages)
MIDTERM = {
"DATE": "Monday, July 23",
"TIME": "7-9PM PST",
"LOCATIONS": [ # A list of objects containing TITLE, LOCATION, MAP_LINK
{
"TITLE": "Exam",
"LOCATION": "Hewlett 200",
"MAP_LINK": "https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3168.345197243158!2d-122.1750263848679!3d37.42894923975373!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x808fbb2ae177cc91%3A0x5351ffed56456da8!2sWilliam+R.+Hewlett+Teaching+Center%2C+370+Serra+Mall%2C+Stanford%2C+CA+94305!5e0!3m2!1sen!2sus!4v1500400060838"
}
],
"REVIEW_SESSION": {
"DATE": "Friday, July 20",
"TIME": "1:30-2:50PM PST",
"LOCATION": "Gates B01 (basement)",
"MAP_LINK": "https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3168.2942783500653!2d-122.17568493444799!3d37.4301523298236!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x808fbb2b3f50f727%3A0xfd9cc3200ee97fda!2sGates+Computer+Science%2C+353+Serra+Mall%2C+Stanford%2C+CA+94305!5e0!3m2!1sen!2sus!4v1500425297459"
}
}
FINAL_EXAM = {
"DATE": "Friday, August 17",
"TIME": "12:15-3:15PM PST",
"LOCATIONS": [ # A list of objects containing TITLE, LOCATION, MAP_LINK
{
"TITLE": "Exam",
"LOCATION": "Dinkelspiel Auditorium",
"MAP_LINK": "https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3168.551960752374!2d-122.17208698447611!3d37.42406357982489!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x808fbad417d5a965%3A0xff6e238283d53800!2sDinkelspiel+Auditorium!5e0!3m2!1sen!2sus!4v1533930014794"
}
],
"REVIEW_SESSION": {
"DATE": "Mon/Tue, August 13/14",
"TIME": "11:30AM-12:20PM PST",
"LOCATION": "NVIDIA Auditorium",
"MAP_LINK": "https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3168.396866793715!2d-122.17639054916224!3d37.42772837972516!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x808fbb2ac97723cb%3A0xa6e8fc1a7a5f2c29!2sNVIDIA+Auditorium!5e0!3m2!1sen!2sus!4v1502919715852"
}
}
# Assignments (displayed in Assignments dropdown at the top)
STYLE_GUIDE = True
PAIR_PROGRAMMING = True
ASSIGNMENTS = [
{
"NAME": "Karel the Robot",
"LINK": "karel.html"
},
{
"NAME": "Intro to Java",
"LINK": "introToJava.html"
},
{
"NAME": "Snowman",
"LINK": "snowman.html"
},
{
"NAME": "Breakout",
"LINK": "breakout.html"
},
{
"NAME": "ImageShop",
"LINK": "imageshop.html"
},
{
"NAME": "BiasBars",
"LINK": "biasbars.html"
}
]
ASSIGNMENT_DUE_TIME = "11AM PST"
# Software
SOFTWARE = "Eclipse"
SOFTWARE_ICON = "eclipse-icon.png"
SOFTWARE_LINK = "eclipse"
# Stanford Library
LIBRARY_DOCS_LINK = "http://stanford.edu/~stepp/acmdoc/"
BLANK_PROJECTS = [
{
"NAME": "Blank Karel Project",
"LINK": "/resources/BlankKarelProject.zip"
},
{
"NAME": "Blank Java Project",
"LINK": "/resources/BlankJavaProject.zip"
}
]
|
import datetime
import pytest
import auth
try:
import mock
except ImportError:
from unittest import mock
class Test_purge_login_tokens:
@mock.patch('auth.datetime')
def test_not_yet_expired_tokens_are_kept(self, stub_datetime):
auth.login_tokens = {
('token', 'username'): datetime.datetime.max,
}
stub_datetime.datetime.now.return_value = datetime.datetime.min
auth.purge_login_tokens()
assert auth.login_tokens
@mock.patch('auth.datetime')
def test_expired_tokens_are_discarded(self, stub_datetime):
auth.login_tokens = {
('token', 'username'): datetime.datetime.min,
}
stub_datetime.datetime.now.return_value = datetime.datetime.max
auth.purge_login_tokens()
assert not auth.login_tokens
class Test_log_in_as_user:
@mock.patch('auth.config')
@mock.patch('auth.datetime')
def test_cookies_have_correct_lifetime(self, stub_datetime, stub_config):
stub_config.login_token_lifetime = 42
stub_datetime.datetime.now.return_value = datetime.datetime.min
stub_datetime.timedelta = datetime.timedelta
stub_request = mock.Mock()
auth.log_in_as_user(stub_request, 'user')
expires = list(auth.login_tokens.values())[0]
lifetime = expires - datetime.datetime.min
assert lifetime == datetime.timedelta(42)
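# Patching 'auth.datetime' replaces the datetime module referenced inside auth, so the stubbed
# now() fixes the issue time while the real timedelta is passed through for the lifetime arithmetic.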
class Test__parse_login_cookie:
@pytest.mark.parametrize("cookie", [
"",
" ",
" ",
" asdf",
"asdf",
"asdf ",
"asdf asdf",
"asdf asdf asdf",
])
def test_returns_none_for_wrong_format(self, cookie):
username, token = auth._parse_login_cookie(cookie)
assert token is None
@pytest.mark.parametrize("cookie", [
"abc 123",
])
def test_returns_username_and_token(self, cookie):
username, token = auth._parse_login_cookie(cookie)
assert username and token
|
import time
import requests
def compute_collateral_benefit(num_of_top_isp_rpki_adopters, rpki_adoption_propability_list):
print("### Collateral benefit ###")
for rpki_adopters_value in num_of_top_isp_rpki_adopters:
for rpki_adoption_propability in rpki_adoption_propability_list:
print("Top ISP RPKI Adopters: " + str(rpki_adopters_value) + " , RPKI Adoption Propability: " + str(rpki_adoption_propability))
sim_data = {
"simulation_type": "random",
"legitimate_AS": 0,
"legitimate_prefix": "x.y.z.w/m",
"hijacker_AS": 0,
"hijacker_prefix": "x.y.z.w/m",
"hijack_type": 0,
"hijack_prefix_type": "exact",
"anycast_ASes": [0],
"mitigation_prefix": "x.y.z.w/m",
"rpki_rov_mode": "manual",
"nb_of_sims": 20,
"nb_of_reps": 1,
"caida_as_graph_dataset": "20211001",
"caida_ixps_datasets": "202107",
"max_nb_anycast_ASes": 2,
"realistic_rpki_rov": False,
"num_of_top_isp_rpki_adopters": rpki_adopters_value,
"rpki_adoption_propability": rpki_adoption_propability
}
response = requests.post('http://127.0.0.1:5000/launch_simulation', json=sim_data)
print(response.json())
time.sleep(8 * 60)  # wait 8 minutes before making the next request
def compute_today_rov_status_other_random_prop(num_of_top_isp_rpki_adopters, rpki_adoption_propability, other_random_prop_list):
print("### Today ROV status + Other ASes ###")
for prop_value in other_random_prop_list:
print("Today ROV status: " + str(num_of_top_isp_rpki_adopters) + " , Deployment Probability of other ASes: " + str(prop_value))
sim_data = {
"simulation_type": "random",
"legitimate_AS": 0,
"legitimate_prefix": "x.y.z.w/m",
"hijacker_AS": 0,
"hijacker_prefix": "x.y.z.w/m",
"hijack_type": 0,
"hijack_prefix_type": "exact",
"anycast_ASes": [0],
"mitigation_prefix": "x.y.z.w/m",
"rpki_rov_mode": "today_rov_status+other_random_prop",
"nb_of_sims": 20,
"nb_of_reps": 1,
"caida_as_graph_dataset": "20211001",
"caida_ixps_datasets": "202107",
"max_nb_anycast_ASes": 2,
"realistic_rpki_rov": False,
"num_of_top_isp_rpki_adopters": num_of_top_isp_rpki_adopters,
"rpki_adoption_propability": rpki_adoption_propability,
"other_random_prop": prop_value
}
response = requests.post('http://127.0.0.1:5000/launch_simulation', json=sim_data)
print(response.json())
time.sleep(10 * 60) # after 10 mins do the next request
def compute_top_isps_rov_other_random_prop(num_of_top_isp_rpki_adopters, rpki_adoption_propability, other_random_prop_list):
print("### Top ISPs ROV + Other ASes ###")
for prop_value in other_random_prop_list:
print("Top ISPs ROV number: " + str(
num_of_top_isp_rpki_adopters) + " , Deployment Probability of other ASes: " + str(prop_value))
sim_data = {
"simulation_type": "random",
"legitimate_AS": 0,
"legitimate_prefix": "x.y.z.w/m",
"hijacker_AS": 0,
"hijacker_prefix": "x.y.z.w/m",
"hijack_type": 0,
"hijack_prefix_type": "exact",
"anycast_ASes": [0],
"mitigation_prefix": "x.y.z.w/m",
"rpki_rov_mode": "top_isps_rov+other_random_prop",
"nb_of_sims": 20,
"nb_of_reps": 1,
"caida_as_graph_dataset": "20211001",
"caida_ixps_datasets": "202107",
"max_nb_anycast_ASes": 2,
"realistic_rpki_rov": False,
"num_of_top_isp_rpki_adopters": num_of_top_isp_rpki_adopters,
"rpki_adoption_propability": rpki_adoption_propability,
"other_random_prop": prop_value
}
response = requests.post('http://127.0.0.1:5000/launch_simulation', json=sim_data)
print(response.json())
time.sleep(10 * 60) # after 10 mins do the next request
if __name__ == '__main__':
rpki_adoption_propability_list = [0.25, 0.50, 0.75, 1]
num_of_top_isp_rpki_adopters = list(range(0, 101, 10))
other_random_prop_list = [v * 0.1 for v in range(0, 11, 1)]
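# other_random_prop_list covers 0.0 .. 1.0 in steps of 0.1 (floating-point values, so entries
# such as 0.30000000000000004 are expected).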
print("EXPERIMENTS START ...")
#compute_collateral_benefit(num_of_top_isp_rpki_adopters, rpki_adoption_propability_list)
#compute_today_rov_status_other_random_prop(100, 1.0, other_random_prop_list)
compute_top_isps_rov_other_random_prop(100, 1.0, other_random_prop_list)
|
import sys
from .app import Application
def main():
app = Application(sys.argv)
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
# This file contains helper functions developed by the MITx 6.00.1x course
# The game can be played through ps4_6.py (and ps4_7.py with a computer player)
import random
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
HAND_SIZE = 7
SCRABBLE_LETTER_VALUES = {
'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
WORDLIST_FILENAME = "words.txt"
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# wordList: list of strings
wordList = []
for line in inFile:
wordList.append(line.strip().lower())
print(" ", len(wordList), "words loaded.")
return wordList
def getFrequencyDict(sequence):
"""
Returns a dictionary where the keys are elements of the sequence
and the values are integer counts, for the number of times that
an element is repeated in the sequence.
sequence: string or list
return: dictionary
"""
# freqs: dictionary (element_type -> int)
freq = {}
for x in sequence:
freq[x] = freq.get(x,0) + 1
return freq
def displayHand(hand):
"""
Displays the letters currently in the hand.
For example:
>>> displayHand({'a':1, 'x':2, 'l':3, 'e':1})
Should print out something like:
a x x l l l e
The order of the letters is unimportant.
hand: dictionary (string -> int)
"""
for letter in hand.keys():
for j in range(hand[letter]):
print(letter,end=" ") # print all on the same line
print() # print an empty line
def dealHand(n):
"""
Returns a random hand containing n lowercase letters.
At least n/3 of the letters in the hand should be VOWELS.
Hands are represented as dictionaries. The keys are
letters and the values are the number of times the
particular letter is repeated in that hand.
n: int >= 0
returns: dictionary (string -> int)
"""
hand={}
numVowels = n // 3
for i in range(numVowels):
x = VOWELS[random.randrange(0,len(VOWELS))]
hand[x] = hand.get(x, 0) + 1
for i in range(numVowels, n):
x = CONSONANTS[random.randrange(0,len(CONSONANTS))]
hand[x] = hand.get(x, 0) + 1
return hand
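# Illustrative (hypothetical) output: dealHand(7) could return
# {'e': 1, 'o': 1, 'r': 2, 't': 1, 's': 1, 'k': 1} -- 7 // 3 = 2 vowels plus 5 consonants.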
|
# https://gitlab.freedesktop.org/wayland/wayland/-/blob/main/protocol/wayland.xml
import struct
from .base import (
ArgUint32,
ArgString,
Interface
)
class ArgDisplayError:
def parse(data):
oid, code = struct.unpack('=II', data[:8])
_, msg = ArgString.parse(data[8:])
return (oid, code, msg)
class ArgRegistryGlobal:
def parse(data):
global_id = struct.unpack('=I', data[:4])[0]
data = data[4:]
consumed, interface = ArgString.parse(data)
data = data[consumed:]
version = struct.unpack('=I', data)[0]
return (interface, version, global_id)
class ArgRegistryBind:
def create(global_id, name, version, new_obj_id):
data = ArgUint32.create(global_id)
data += ArgString.create(name)
data += ArgUint32.create(version)
data += ArgUint32.create(new_obj_id)
return data
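# Assuming the Arg helpers pack 32-bit little-endian words, the bind payload built above is
# laid out as [global name][interface string][version][new object id], matching wl_registry.bind.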
class Display(Interface):
def __init__(self, connection):
super().__init__(connection)
self.obj_id = 1
self.set_name('wl_display')
self.set_version(1)
connection.add_event_handler(self)
self.add_event(self.on_error)
self.add_event(self.on_delete_id)
self.registry = self.get_registry()
# Wayland events
def on_error(self, data, fds):
obj_id, err_code, err_msg = ArgDisplayError.parse(data)
obj = self._connection.get_obj(obj_id)
self.log(f"Got Display error for {obj or obj_id}: [{err_code}] {err_msg}")
def on_delete_id(self, data, fds):
_, obj_id = ArgUint32.parse(data)
self._connection.free_obj_id(obj_id)
# Wayland methods
def do_sync(self, callback):
sync_id = self.get_new_obj_id()
self._connection.add_event_handler(sync_id, callback)
data = ArgUint32.create(sync_id)
self.send_command(0, data)
def get_registry(self):
registry = Registry(self._connection, self.get_new_obj_id())
self._connection.add_event_handler(registry)
data = ArgUint32.create(registry.obj_id)
self.send_command(1, data)
return registry
# Internal events
def on_initial_sync(self):
self.seat = Seat(self._connection)
class Registry(Interface):
def __init__(self, connection, obj_id):
super().__init__(connection, obj_id=obj_id)
self.set_name('wl_registry')
self.set_version(1)
self._registry = dict()
self._initial_sync = False
self.add_event(self.on_global)
self.add_event(self.on_global_remove)
# Wayland events
def on_global(self, data, fds):
name, version, global_id = ArgRegistryGlobal.parse(data)
self._registry[name] = (global_id, version)
def on_global_remove(self, data, fds):
    _, global_id = ArgUint32.parse(data)
    # FIXME: should destroy all object instances for this global_id
    # _registry is keyed by interface name, so find the name that owns this global id
    name = next((n for n, (gid, _) in self._registry.items() if gid == global_id), None)
    if name is None:
        self.log(f"Can't remove global id {global_id}: We have no idea about it")
        return
    del self._registry[name]
    self.log(f"Not destroying instances of global id {global_id}")
# Wayland methods
def do_bind(self, interface):
if not self._initial_sync:
raise RuntimeError("Bind without waiting for full sync. Please bind in on_initial_sync().")
if interface.name not in self._registry:
raise RuntimeError(f"Interface {interface.name} not supported by server")
global_id, version = self._registry[interface.name]
version = min(interface.version, version)
if version < interface.version:
interface.set_version(version)
interface.obj_id = self.get_new_obj_id()
data = ArgRegistryBind.create(global_id, interface.name, version, interface.obj_id)
self.send_command(0, data)
# Internal events
def on_initial_sync(self):
self._initial_sync = True
class Seat(Interface):
def __init__(self, connection):
super().__init__(connection)
self.set_name('wl_seat')
self.set_version(7)
self.add_event(self.on_capabilities)
self.add_event(self.on_name)
self.bind()
# Wayland events
def on_capabilities(self, data, fds):
pass
def on_name(self, data, fds):
pass
# Wayland methods
def get_keyboard(self):
pass
def get_touch(self):
pass
def release(self):
if self.version < 5:
return
pass
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Compute v2 API Library Tests"""
from keystoneauth1 import session
from osc_lib import exceptions as osc_lib_exceptions
from requests_mock.contrib import fixture
from openstackclient.api import compute_v2 as compute
from openstackclient.tests.unit import utils
FAKE_PROJECT = 'xyzpdq'
FAKE_URL = 'http://gopher.com/v2'
class TestComputeAPIv2(utils.TestCase):
def setUp(self):
super(TestComputeAPIv2, self).setUp()
sess = session.Session()
self.api = compute.APIv2(session=sess, endpoint=FAKE_URL)
self.requests_mock = self.useFixture(fixture.Fixture())
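# The requests_mock fixture intercepts HTTP calls made through the keystoneauth session, so each
# test registers the fake Nova endpoints it expects the API layer to hit.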
class TestFloatingIP(TestComputeAPIv2):
FAKE_FLOATING_IP_RESP = {
'id': 1,
'ip': '203.0.113.11', # TEST-NET-3
'fixed_ip': '198.51.100.11', # TEST-NET-2
'pool': 'nova',
'instance_id': None,
}
FAKE_FLOATING_IP_RESP_2 = {
'id': 2,
'ip': '203.0.113.12', # TEST-NET-3
'fixed_ip': '198.51.100.12', # TEST-NET-2
'pool': 'nova',
'instance_id': None,
}
LIST_FLOATING_IP_RESP = [
FAKE_FLOATING_IP_RESP,
FAKE_FLOATING_IP_RESP_2,
]
FAKE_SERVER_RESP_1 = {
'id': 1,
'name': 'server1',
}
def test_floating_ip_add_id(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/servers/1/action',
json={'server': {}},
status_code=200,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/servers/1',
json={'server': self.FAKE_SERVER_RESP_1},
status_code=200,
)
ret = self.api.floating_ip_add('1', '1.0.1.0')
self.assertEqual(200, ret.status_code)
def test_floating_ip_add_name(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/servers/1/action',
json={'server': {}},
status_code=200,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/servers/server1',
json={'server': self.FAKE_SERVER_RESP_1},
status_code=200,
)
ret = self.api.floating_ip_add('server1', '1.0.1.0')
self.assertEqual(200, ret.status_code)
def test_floating_ip_create(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-floating-ips',
json={'floating_ip': self.FAKE_FLOATING_IP_RESP},
status_code=200,
)
ret = self.api.floating_ip_create('nova')
self.assertEqual(self.FAKE_FLOATING_IP_RESP, ret)
def test_floating_ip_create_not_found(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-floating-ips',
status_code=404,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.floating_ip_create,
'not-nova',
)
def test_floating_ip_delete(self):
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-floating-ips/1',
status_code=202,
)
ret = self.api.floating_ip_delete('1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
def test_floating_ip_delete_none(self):
ret = self.api.floating_ip_delete()
self.assertIsNone(ret)
def test_floating_ip_find_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ips/1',
json={'floating_ip': self.FAKE_FLOATING_IP_RESP},
status_code=200,
)
ret = self.api.floating_ip_find('1')
self.assertEqual(self.FAKE_FLOATING_IP_RESP, ret)
def test_floating_ip_find_ip(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ips/' + self.FAKE_FLOATING_IP_RESP['ip'],
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ips',
json={'floating_ips': self.LIST_FLOATING_IP_RESP},
status_code=200,
)
ret = self.api.floating_ip_find(self.FAKE_FLOATING_IP_RESP['ip'])
self.assertEqual(self.FAKE_FLOATING_IP_RESP, ret)
def test_floating_ip_find_not_found(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ips/1.2.3.4',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ips',
json={'floating_ips': self.LIST_FLOATING_IP_RESP},
status_code=200,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.floating_ip_find,
'1.2.3.4',
)
def test_floating_ip_list(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ips',
json={'floating_ips': self.LIST_FLOATING_IP_RESP},
status_code=200,
)
ret = self.api.floating_ip_list()
self.assertEqual(self.LIST_FLOATING_IP_RESP, ret)
def test_floating_ip_remove_id(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/servers/1/action',
status_code=200,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/servers/1',
json={'server': self.FAKE_SERVER_RESP_1},
status_code=200,
)
ret = self.api.floating_ip_remove('1', '1.0.1.0')
self.assertEqual(200, ret.status_code)
def test_floating_ip_remove_name(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/servers/1/action',
status_code=200,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/servers/server1',
json={'server': self.FAKE_SERVER_RESP_1},
status_code=200,
)
ret = self.api.floating_ip_remove('server1', '1.0.1.0')
self.assertEqual(200, ret.status_code)
class TestFloatingIPPool(TestComputeAPIv2):
LIST_FLOATING_IP_POOL_RESP = [
{"name": "tide"},
{"name": "press"},
]
def test_floating_ip_pool_list(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ip-pools',
json={'floating_ip_pools': self.LIST_FLOATING_IP_POOL_RESP},
status_code=200,
)
ret = self.api.floating_ip_pool_list()
self.assertEqual(self.LIST_FLOATING_IP_POOL_RESP, ret)
class TestHost(TestComputeAPIv2):
FAKE_HOST_RESP_1 = {
"zone": "internal",
"host_name": "myhost",
"service": "conductor",
}
FAKE_HOST_RESP_2 = {
"zone": "internal",
"host_name": "myhost",
"service": "scheduler",
}
FAKE_HOST_RESP_3 = {
"zone": "nova",
"host_name": "myhost",
"service": "compute",
}
LIST_HOST_RESP = [
FAKE_HOST_RESP_1,
FAKE_HOST_RESP_2,
FAKE_HOST_RESP_3,
]
def test_host_list_no_options(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-hosts',
json={'hosts': self.LIST_HOST_RESP},
status_code=200,
)
ret = self.api.host_list()
self.assertEqual(self.LIST_HOST_RESP, ret)
def test_host_list_zone(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-hosts?zone=nova',
json={'hosts': [self.FAKE_HOST_RESP_3]},
status_code=200,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-hosts',
json={'hosts': [self.FAKE_HOST_RESP_3]},
status_code=200,
)
ret = self.api.host_list(zone='nova')
self.assertEqual([self.FAKE_HOST_RESP_3], ret)
def test_host_set_none(self):
ret = self.api.host_set(host='myhost')
self.assertIsNone(ret)
def test_host_set(self):
self.requests_mock.register_uri(
'PUT',
FAKE_URL + '/os-hosts/myhost',
json={},
status_code=200,
)
ret = self.api.host_set(host='myhost', status='enabled')
self.assertEqual({}, ret)
def test_host_show(self):
FAKE_RESOURCE_1 = {
"cpu": 2,
"disk_gb": 1028,
"host": "c1a7de0ac9d94e4baceae031d05caae3",
"memory_mb": 8192,
"project": "(total)",
}
FAKE_RESOURCE_2 = {
"cpu": 0,
"disk_gb": 0,
"host": "c1a7de0ac9d94e4baceae031d05caae3",
"memory_mb": 512,
"project": "(used_now)",
}
FAKE_RESOURCE_3 = {
"cpu": 0,
"disk_gb": 0,
"host": "c1a7de0ac9d94e4baceae031d05caae3",
"memory_mb": 0,
"project": "(used_max)",
}
FAKE_HOST_RESP = [
{'resource': FAKE_RESOURCE_1},
{'resource': FAKE_RESOURCE_2},
{'resource': FAKE_RESOURCE_3},
]
FAKE_HOST_LIST = [
FAKE_RESOURCE_1,
FAKE_RESOURCE_2,
FAKE_RESOURCE_3,
]
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-hosts/myhost',
json={'host': FAKE_HOST_RESP},
status_code=200,
)
ret = self.api.host_show(host='myhost')
self.assertEqual(FAKE_HOST_LIST, ret)
class TestNetwork(TestComputeAPIv2):
FAKE_NETWORK_RESP = {
'id': '1',
'label': 'label1',
'cidr': '1.2.3.0/24',
}
FAKE_NETWORK_RESP_2 = {
'id': '2',
'label': 'label2',
'cidr': '4.5.6.0/24',
}
LIST_NETWORK_RESP = [
FAKE_NETWORK_RESP,
FAKE_NETWORK_RESP_2,
]
def test_network_create_default(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-networks',
json={'network': self.FAKE_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_create('label1')
self.assertEqual(self.FAKE_NETWORK_RESP, ret)
def test_network_create_options(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-networks',
json={'network': self.FAKE_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_create(
name='label1',
subnet='1.2.3.0/24',
)
self.assertEqual(self.FAKE_NETWORK_RESP, ret)
def test_network_delete_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/1',
json={'network': self.FAKE_NETWORK_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-networks/1',
status_code=202,
)
ret = self.api.network_delete('1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
def test_network_delete_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/label1',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-networks/1',
status_code=202,
)
ret = self.api.network_delete('label1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
def test_network_delete_not_found(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/label3',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.network_delete,
'label3',
)
def test_network_find_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/1',
json={'network': self.FAKE_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_find('1')
self.assertEqual(self.FAKE_NETWORK_RESP, ret)
def test_network_find_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/label2',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_find('label2')
self.assertEqual(self.FAKE_NETWORK_RESP_2, ret)
def test_network_find_not_found(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/label3',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.network_find,
'label3',
)
def test_network_list_no_options(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_list()
self.assertEqual(self.LIST_NETWORK_RESP, ret)
class TestSecurityGroup(TestComputeAPIv2):
FAKE_SECURITY_GROUP_RESP = {
'id': '1',
'name': 'sg1',
'description': 'test security group',
'tenant_id': '0123456789',
'rules': []
}
FAKE_SECURITY_GROUP_RESP_2 = {
'id': '2',
'name': 'sg2',
'description': 'another test security group',
'tenant_id': '0123456789',
'rules': []
}
LIST_SECURITY_GROUP_RESP = [
FAKE_SECURITY_GROUP_RESP_2,
FAKE_SECURITY_GROUP_RESP,
]
def test_security_group_create_default(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-groups',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_create('sg1')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret)
def test_security_group_create_options(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-groups',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_create(
name='sg1',
description='desc',
)
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret)
def test_security_group_delete_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/1',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-security-groups/1',
status_code=202,
)
ret = self.api.security_group_delete('1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
def test_security_group_delete_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg1',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-security-groups/1',
status_code=202,
)
ret = self.api.security_group_delete('sg1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
def test_security_group_delete_not_found(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg3',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.security_group_delete,
'sg3',
)
def test_security_group_find_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/1',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_find('1')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret)
def test_security_group_find_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg2',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_find('sg2')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP_2, ret)
def test_security_group_find_not_found(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg3',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.security_group_find,
'sg3',
)
def test_security_group_list_no_options(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_list()
self.assertEqual(self.LIST_SECURITY_GROUP_RESP, ret)
def test_security_group_set_options_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/1',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'PUT',
FAKE_URL + '/os-security-groups/1',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_set(
security_group='1',
description='desc2')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret)
def test_security_group_set_options_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg2',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'PUT',
FAKE_URL + '/os-security-groups/2',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP_2},
status_code=200,
)
ret = self.api.security_group_set(
security_group='sg2',
description='desc2')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP_2, ret)
class TestSecurityGroupRule(TestComputeAPIv2):
FAKE_SECURITY_GROUP_RULE_RESP = {
'id': '1',
'name': 'sgr1',
'tenant_id': 'proj-1',
'ip_protocol': 'TCP',
'from_port': 1,
'to_port': 22,
'group': {},
# 'ip_range': ,
# 'cidr': ,
# 'parent_group_id': ,
}
def test_security_group_create_no_options(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-group-rules',
json={'security_group_rule': self.FAKE_SECURITY_GROUP_RULE_RESP},
status_code=200,
)
ret = self.api.security_group_rule_create(
security_group_id='1',
ip_protocol='tcp',
)
self.assertEqual(self.FAKE_SECURITY_GROUP_RULE_RESP, ret)
def test_security_group_create_options(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-group-rules',
json={'security_group_rule': self.FAKE_SECURITY_GROUP_RULE_RESP},
status_code=200,
)
ret = self.api.security_group_rule_create(
security_group_id='1',
ip_protocol='tcp',
from_port=22,
to_port=22,
remote_ip='1.2.3.4/24',
)
self.assertEqual(self.FAKE_SECURITY_GROUP_RULE_RESP, ret)
def test_security_group_create_port_errors(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-group-rules',
json={'security_group_rule': self.FAKE_SECURITY_GROUP_RULE_RESP},
status_code=200,
)
self.assertRaises(
compute.InvalidValue,
self.api.security_group_rule_create,
security_group_id='1',
ip_protocol='tcp',
from_port='',
to_port=22,
remote_ip='1.2.3.4/24',
)
self.assertRaises(
compute.InvalidValue,
self.api.security_group_rule_create,
security_group_id='1',
ip_protocol='tcp',
from_port=0,
to_port=[],
remote_ip='1.2.3.4/24',
)
def test_security_group_rule_delete(self):
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-security-group-rules/1',
status_code=202,
)
ret = self.api.security_group_rule_delete('1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
|
import logging
import os
import re
import sys
from configparser import RawConfigParser
from html import unescape
from html.parser import HTMLParser
from json import dumps
from logging.handlers import TimedRotatingFileHandler
from os.path import isfile
from shutil import copyfile
import gevent
from bottle import template, HTTPError, redirect
from gevent import sleep
from geventwebsocket import WebSocketError
# Taken from http://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
class StreamToLogger(object):
"""
Fake file-like stream object that redirects writes to a logger instance.
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
def setup_logging(name, log_level=None, capture_stderr=False):
cfg = load_config()
level = getattr(logging, cfg.get('Logging', 'level') if log_level is None else log_level)
logs_folder = './logs'
os.makedirs(logs_folder, exist_ok=True)
logger = logging.getLogger(name)
logger.setLevel(level)
handler = TimedRotatingFileHandler(os.path.join(logs_folder, name + ".log"), when="midnight")
formatter = logging.Formatter(cfg.get('Logging', 'format'), cfg.get('Logging', 'date format'))
handler.setFormatter(formatter)
logger.addHandler(handler)
if capture_stderr:
stderr_logger = logging.getLogger('STDERR')
stderr_logger.addHandler(handler)
sl = StreamToLogger(stderr_logger, logging.ERROR)
sys.stderr = sl
return logger
def load_config():
# Create config folder
config_folder = "."
os.makedirs(config_folder, exist_ok=True)
# Check for config file. If it doesn't exist, copy it over
config_path = os.path.join(config_folder, "config.ini")
if not os.path.isfile(config_path):
dist_config_path = "config.ini.dist"
copyfile(dist_config_path, config_path)
# Load config file and return it
cfg = RawConfigParser()
cfg.read(config_path)
return cfg
def delete_config():
config_path = "./config.ini"
os.remove(config_path)
print(config_path, "has been removed.")
# Taken from https://stackoverflow.com/questions/753052/strip-html-from-strings-in-python
class MLStripper(HTMLParser):
def __init__(self):
super().__init__()
self.reset()
self.strict = False
self.convert_charrefs = True
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_html(html):
s = MLStripper()
s.feed(html)
return s.get_data()
def ordinal(num: str):
if not num.isdigit():
return num.title()
suffix = {"1": "st", "2": "nd", "3": "rd"}
return num + suffix.get(num, "th") + " Level"
def str_to_bool(s):
return s and str(s).lower()[0] in ["t", "1", "y"]
def create_tooltip(text, tooltip_text=None):
if tooltip_text is not None:
return '''
<div class="tooltip">{}
<span class="tooltiptext">{}</span>
</div>'''.format(text, tooltip_text)
return text
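# Example (illustrative values): create_tooltip("DC", "Difficulty Class") wraps "DC" in a
# <div class="tooltip"> whose hover <span class="tooltiptext"> shows "Difficulty Class";
# without tooltip_text the text is returned unchanged.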
def title_to_page_name(title):
"""
Converts a title (e.g. "Beast Master (Revamped)") to a markdown filename (e.g. "beast-master-revamped")
:param title:
:return:
"""
return re.sub(r"\W+", "-", title.lower()).strip("-")
def page_name_to_title(page_name):
"""
Converts a markdown filename (e.g. "beast-master-revamped.md") to a title (e.g. "Beast Master Revamped")
Not a perfect solution.
:param page_name:
:return:
"""
return page_name.replace("-", " ").title()
def md_page(page_title, namespace, directory=None, build_toc=True, markdown_parser=None, **kwargs):
if markdown_parser is None:
# Avoiding circular dependencies
from src.common.markdown_parser import DEFAULT_MARKDOWN_PARSER
markdown_parser = DEFAULT_MARKDOWN_PARSER
path_name = title_to_page_name(page_title)
if directory:
path_name = os.path.join(directory, path_name)
if namespace:
path_name = os.path.join(namespace, path_name)
template_path = f"views/{path_name}.tpl"
md_path = f"data/{path_name}.md"
if isfile(template_path):
text = unescape(template(template_path, **kwargs))
elif isfile(md_path):
with open(md_path, encoding="utf-8") as f:
text = f.read()
else:
raise HTTPError(404, f"I couldn't find \"{path_name}\".")
try:
md = markdown_parser.parse_md(text, namespace)
except NameError:
print(f"Error when converting {path_name}")
raise
if md.startswith("<p>REDIRECT "):
redirect(md[12:-5])
else:
if "title" not in kwargs:
kwargs["title"] = page_title.title()
if build_toc and not md.startswith("<!-- no-toc -->"):
kwargs["toc"] = md.toc_html
kwargs["text"] = md
kwargs["accordion_text"] = markdown_parser.accordion_text
return template("common/page.tpl", kwargs)
def websocket_loop(ws, websocket_list):
print("Opening Websocket {}".format(ws), flush=True)
websocket_list.append(ws)
try:
while True:
sleep(60)
# Checking if websocket has been closed by the client
with gevent.Timeout(1.0, False):
ws.receive()
if ws.closed:
print("WebSocket was closed by the client: {}".format(ws), flush=True)
break
except Exception as e:
print("Error in WebSocket loop: {}".format(e), flush=True)
finally:
if not ws.closed:
print("Closing WebSocket: {}".format(ws), flush=True)
ws.close()
try:
websocket_list.remove(ws)
except ValueError as e:
print(e, ws)
def send_to_websockets(payload, websocket_list):
print(websocket_list, flush=True)
for ws in websocket_list[:]:
try:
print(f"Sending payload {payload} to {ws}", flush=True)
ws.send(dumps(payload))
except WebSocketError:
print(f"Failed to send message to {ws}. Removing from list", flush=True)
websocket_list.remove(ws)
except Exception as e:
print(f"Error when sending message to {ws}. {e}", flush=True)
websocket_list.remove(ws)
|
"""
An object to be imported.
"""
class InsightObject:
def __init__(self):
print("et print, ergo sum")
def fizz(self):
return "buzz"
|
import sys, os
from lxml import etree
import urllib
from edx_gen import _edx_consts
from edx_gen import _css_settings
from edx_gen import _util
import __SETTINGS__
#--------------------------------------------------------------------------------------------------
# Text strings
WARNING = " WARNING:"
INFO = " INFO:"
#--------------------------------------------------------------------------------------------------
# process a hrefs
def processHtmlTags(component_path, content_root_tag, unit_filename):
# process headings
h3_tags = list(content_root_tag.iter('h3'))
h4_tags = list(content_root_tag.iter('h4'))
h5_tags = list(content_root_tag.iter('h5'))
_processHeadingsTags(h3_tags, h4_tags, h5_tags)
# process pre
pre_tags = list(content_root_tag.iter('pre'))
_processPreTags(pre_tags)
# process code
code_tags = list(content_root_tag.iter('code'))
_processCodeTags(code_tags)
# process hrefs
a_tags = list(content_root_tag.iter('a'))
_processHtmlATags(component_path, a_tags, unit_filename)
# process images
img_tags = list(content_root_tag.iter('img'))
_processHtmlImgTags(component_path, img_tags, unit_filename)
#--------------------------------------------------------------------------------------------------
# process headings
def _processHeadingsTags(h3_tags, h4_tags, h5_tags):
for h3_tag in h3_tags:
h3_tag.set('style', _css_settings.H3_CSS)
for h4_tag in h4_tags:
h4_tag.set('style', _css_settings.H4_CSS)
for h5_tag in h5_tags:
h5_tag.set('style', _css_settings.H5_CSS)
#--------------------------------------------------------------------------------------------------
# process pre
def _processPreTags(pre_tags):
for pre_tag in pre_tags:
parent = pre_tag.getparent()
div_tag = etree.Element("div")
for child in pre_tag:
div_tag.append(child)
if pre_tag.text:
div_tag.text = pre_tag.text
div_tag.set('style', _css_settings.CODE_BOX_CSS)
parent.replace(pre_tag, div_tag)
#--------------------------------------------------------------------------------------------------
# process code
def _processCodeTags(code_tags):
for code_tag in code_tags:
lines = code_tag.text.strip().split('\n')
if len(lines) > 1:
parent = code_tag.getparent()
div_tag = etree.Element("div")
for line in lines:
if len(line) > 0:
pre_tag = etree.Element("pre")
pre_tag.set('style', _css_settings.CODE_LINE_CSS)
pre_tag.text = line
div_tag.append(pre_tag)
parent.replace(code_tag, div_tag)
else:
code_tag.set('style', _css_settings.CODE_INLINE_CSS)
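# Net effect: multi-line <code> blocks are rebuilt as a <div> of per-line <pre> elements styled
# as a code box, while single-line <code> keeps its tag and only receives the inline-code CSS.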
#--------------------------------------------------------------------------------------------------
# process images
def _processHtmlImgTags(component_path, img_tags, unit_filename):
for img_tag in img_tags:
# create new image
new_img_tag = etree.Element("img")
for key in img_tag.keys():
if not key in ['src', 'modal']:
new_img_tag.set(key, img_tag.get(key))
# get modal setting
modal = False
if 'modal' in img_tag.keys() and img_tag.get('modal') == 'true':
modal = True
# add css
if modal:
new_img_tag.set('style', _css_settings.IMAGE_MODAL_CSS)
else:
new_img_tag.set('style', _css_settings.IMAGE_CSS)
src = img_tag.get('src')
# get the new src for the image
new_src = ''
if src.startswith('/') or src.startswith('http'):
new_src = src
else:
# check that the file exists
component_dir = os.path.dirname(component_path)
image_filepath = os.path.normpath(component_dir + '/' + src)
if (not os.path.exists(image_filepath) or not os.path.isfile(image_filepath)):
print(WARNING, 'The image file does not exist: "' + image_filepath +'" in', component_path)
# new src
new_src = '/' + _edx_consts.STATIC_FOLDER + '/' + unit_filename + '_' + src
new_img_tag.set('src', new_src)
# create figure
figure_tag = etree.Element("figure")
if _css_settings.FIGURE_CSS:
figure_tag.set('style', _css_settings.FIGURE_CSS)
if modal:
a_tag = etree.Element("a")
a_tag.set('target', 'image')
a_tag.set('href', new_src)
a_tag.append(new_img_tag)
figure_tag.append(a_tag)
else:
figure_tag.append(new_img_tag)
# create caption for the figure
if 'alt' in img_tag.keys():
figcaption_tag = etree.Element("figcaption")
if _css_settings.FIGCAPTION_CSS:
figcaption_tag.set('style', _css_settings.FIGCAPTION_CSS)
figcaption_tag.text = img_tag.get('alt')
figure_tag.append(figcaption_tag)
# replace the existing image with the figure
img_tag.getparent().replace(img_tag, figure_tag)
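# Net effect: each <img> ends up wrapped in a <figure> (linked for full-size viewing when
# modal="true"), relative src paths are rewritten into the edX static folder, and any alt
# text becomes the <figcaption>.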
#--------------------------------------------------------------------------------------------------
# process a hrefs
def _processHtmlATags(component_path, a_tags, unit_filename):
for a_tag in a_tags:
# get the href
href = a_tag.get('href')
if not href:
    print(WARNING, 'An <a/> tag has no "href" attribute:', unit_filename)
    # skip this tag and keep processing the remaining <a/> tags
    continue
# normal a tag
_updateATag(a_tag, href, unit_filename)
#--------------------------------------------------------------------------------------------------
# update the href in an <a href=''></a> tag
def _updateATag(a_tag, href, unit_filename):
# ends with /, so must be a url like http://google.com/
# do nothing
if href.endswith('/'):
return
# break down the url
href_parts = list(urllib.parse.urlparse(href))
href_file = None
href_file_ext = None
href_path = href_parts[2]
if href_path and '.' in href_path:
href_file = href_path.split('/')[-1]
if '.' in href_file:
href_file_ext = href_file.split('.')[-1]
# no extension, so must be a url like http://google.com
# do nothing
if not href_file_ext:
return
# ends with html or htm, so must be a url like http://google.com/hello.html
# do nothing
if href_file_ext in ['html', 'htm', 'asp']:
return
# an asset that goes to the edX static folder
if href_file_ext in __SETTINGS__.EDX_ASSET_EXT:
new_href = '/' + _edx_consts.STATIC_FOLDER + '/' + unit_filename + '_' + href_file
a_tag.set('href', new_href)
# something unknown
else:
print(INFO, 'Found a strange href:', href, unit_filename)
#--------------------------------------------------------------------------------------------------
|
#
# Copyright 2008-2012 NVIDIA Corporation
# Copyright 2009-2010 University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from copperhead import *
import numpy as np
import plac
@cu
def radix_sort(A, bits, lsb):
"""
Sort A using radix sort.
Each element of A is assumed to be an integer. The key used in
sorting will be bits [lsb, lsb+bits). For the general case, use
bits=32 and lsb=0 to sort on all 32 bits.
For sequences of length n with b-bit keys, this performs O(b*n) work.
"""
def delta(flag, ones_before, zeros_after):
if flag==0: return -ones_before
else: return +zeros_after
if lsb >= bits:
return A
else:
flags = map(lambda x: int64((x>>lsb)&1), A)
ones = scan(op_add, flags)
zeros = rscan(op_add, [f^1 for f in flags])
offsets = map(delta, flags, ones, zeros)
bit_sorted = permute(A, map(op_add, indices(A), offsets))
return radix_sort(bit_sorted, bits, lsb+1)
def radix_sort8(A): return radix_sort(A, np.int32(8), np.int32(0))
def radix_sort32(A): return radix_sort(A, np.int32(32), np.int32(0))
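# Sketch of what the recursion does: radix_sort8 performs one stable pass per
# bit of the 8-bit key, lowest bit first. Each pass partitions the sequence so
# that elements whose current key bit is 0 precede those whose bit is 1,
# preserving relative order within each group; after 'bits' passes the
# sequence is fully sorted on bits [0, bits).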
@plac.annotations(n="Length of array to test sort with, defaults to 277")
def main(n=277):
"""Tests Copperhead radix sort in Python interpreter and on GPU."""
def random_numbers(n, bits=8):
import random
return [np.int32(random.getrandbits(bits)) for i in xrange(n)]
def test_sort(S, n=277, trials=50, bits=8):
npass, nfail = 0,0
name = S.__name__
for i in xrange(trials):
data_in = random_numbers(n, bits)
gold = sorted(data_in)
data_out = S(data_in)
if list(gold) == list(data_out):
npass = npass+1
else:
nfail = nfail+1
print ("%-20s passed [%2d]\tfailed [%2d]\r" % (name, npass,nfail)),
print
print
print "---- Checking Python implementations (n=277) ----"
with places.here:
test_sort(radix_sort8, n=277)
print "---- Checking GPU results (n=277) ----"
with places.gpu0:
test_sort(radix_sort8, n=277)
if __name__ == '__main__':
plac.call(main)
|
from hashlib import sha1 as sha
def main():
m = sha()
m.update(b"Nobody inspects")
m.update(b" the spammish repetition")
digest1 = m.digest()
print('digest1: ', digest1)
print('digest1 size (The size of the resulting hash in bytes.): ',
m.digest_size)
print(
"digest1 block size (The internal block size of the hash algorithm in "
"bytes.): ",
m.block_size)
hexdigest1 = m.hexdigest()
print('hexdigest1: ', hexdigest1)
print('length of hexdigest1: ', len(hexdigest1))
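    # Minimal extra check (illustrative addition): hashing the concatenated
    # message in one shot yields the same digest as the incremental updates.
    one_shot = sha(b"Nobody inspects the spammish repetition").hexdigest()
    print('one-shot digest matches hexdigest1: ', one_shot == hexdigest1)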
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
"""Tests for constructing supercell models."""
import itertools
import pytest
import numpy as np
from parameters import T_VALUES
# pylint: disable=invalid-name
@pytest.mark.parametrize("t_values", T_VALUES)
@pytest.mark.parametrize("offset", [(0.2, 1.2, 0.9), (-0.2, 0.912, 0.0)])
@pytest.mark.parametrize("cartesian", [True, False])
def test_shift_twice(get_model, t_values, sparse, offset, cartesian, models_close):
"""
Check that shifting a model twice in opposite direction gives back
the original model.
"""
model = get_model(
*t_values, sparse=sparse, uc=[[0.1, 1.0, 0.0], [2.0, 0.0, 0.0], [0.0, 0.0, 3.0]]
)
model_shifted = model.change_unit_cell(offset=offset, cartesian=cartesian)
model_shifted_twice = model_shifted.change_unit_cell(
offset=[-x for x in offset], cartesian=cartesian
)
assert models_close(model, model_shifted_twice)
@pytest.mark.parametrize(
"uc", [((1.1, 0.3, 0.0), (0.4, 1.5, 0.1), (-0.1, 0.0, 3.0)), None]
)
@pytest.mark.parametrize("offset", [(1, 0, -1), (2, 3, 0)])
def test_lattice_shift_reduced(get_model, sparse, offset, uc, models_equal):
"""
Check that shifting by a lattice vector produces the original
model, with reduced coordinates.
"""
model = get_model(t1=0.1, t2=0.7, sparse=sparse, uc=uc)
model_shifted = model.change_unit_cell(offset=offset, cartesian=False)
assert models_equal(model, model_shifted)
@pytest.mark.parametrize(
"uc, offsets",
[
(
[[1.1, 0.3, 0.0], [0.4, 1.5, 0.1], [-0.1, 0.0, 3.0]],
[(1.1, 0.3, 0), [1.5, 1.8, 0.1], (0.1, 0, -3)],
)
],
)
def test_lattice_shift_cartesian(get_model, sparse, uc, offsets, models_close):
"""
Check that shifting by a lattice vector produces the original
model, with cartesian coordinates.
"""
# We change the position from being exactly at the unit cell boundary
# to avoid issues with positions being off by one unit cell.
model = get_model(
t1=0.1, t2=0.7, sparse=sparse, uc=uc, pos=[(0.01, 0.02, 0.03), (0.5, 0.5, 0.5)]
)
for offset in offsets:
model_shifted = model.change_unit_cell(offset=offset, cartesian=True)
assert models_close(model, model_shifted)
@pytest.mark.parametrize(
"uc",
(
[[1, 2, 0], [1, 1, 0], [0, 0, 1]],
[[2, 0, 0], [0, 1, 0], [0, 0, 1]],
[[1, 0, 0], [0, 1.5, 0], [0, 0, 1]],
),
)
def test_invalid_uc_raises_reduced(get_model, uc, sparse):
"""
Test that specifying an invalid new unit cell in reduced coordinates
raises an error.
"""
model = get_model(t1=0.1, t2=0.7, sparse=sparse, uc=uc)
with pytest.raises(ValueError):
model.change_unit_cell(uc=uc, cartesian=False)
@pytest.mark.parametrize(
"uc", ([[1, 2, 0], [0, 1, 0], [0, 0, 6]], [[1, 2, 0], [0, 1.5, 0], [0, 0, 3]])
)
def test_invalid_uc_raises_cartesian(get_model, uc, sparse):
"""
Test that specifying an invalid new unit cell in cartesian coordinates
raises an error.
"""
model = get_model(
t1=0.1, t2=0.7, sparse=sparse, uc=[[1, 2, 0], [0, 1, 0], [0, 0, 3]]
)
with pytest.raises(ValueError):
model.change_unit_cell(uc=uc, cartesian=True)
def test_change_uc_without_pos_raises(get_model):
"""
Test that the 'change_unit_cell' method raises an error when no
positions are defined.
"""
model = get_model(t1=0.4, t2=0.9)
# Note: this is affected by issue #76
model.pos = None
with pytest.raises(ValueError):
model.change_unit_cell()
def test_change_uc_without_uc_cartesian_raises(get_model):
"""
Test that the 'change_unit_cell' method raises an error in cartesian
mode when the original unit cell is not defined.
"""
model = get_model(t1=0.4, t2=0.9)
model.uc = None
with pytest.raises(ValueError):
model.change_unit_cell(cartesian=True)
@pytest.mark.parametrize(
"uc_original, uc_changed, offset",
[
(
[[1.2, 0.1, 0.0], [0, 2, 0], [1, 0, 3]],
[[1.2, 2.1, 0.0], [-1, 2, -3], [1, 0, 3]],
(0, 0, 0),
),
(
[[1.2, 0.1, 0.0], [0, 2, 0], [1, 0, 3]],
[[1.2, 2.1, 0.0], [-1, 2, -3], [1, 0, 3]],
(0.5, -0.1, 10.2),
),
([[1.2, 0.1], [0, 2]], [[1.2, 2.1], [0, 2]], (0.2, -1.5)),
],
)
def test_revert_cartesian_uc_change(
get_model, models_close, uc_original, uc_changed, offset
):
"""
Test that reverting a cartesian unit cell change produces the original model.
"""
if offset is None:
revert_offset = None
else:
revert_offset = -np.array(offset)
dim = len(offset)
model = get_model(
t1=0.2, t2=0.3, pos=[[0.1] * dim, [0.7] * dim], uc=uc_original, dim=dim
)
model_changed = model.change_unit_cell(uc=uc_changed, cartesian=True, offset=offset)
model_change_reverted = model_changed.change_unit_cell(
uc=uc_original, cartesian=True, offset=revert_offset
)
    assert models_close(model, model_change_reverted)
def test_equivalent_uc_shape(get_model, models_close):
"""
Test that two manually created equivalent models are equal after
matching unit cells.
"""
t1 = 0.232
t2 = -0.941234
uc1 = np.eye(3)
uc2 = [[1, 0, 0], [1, 1, 0], [0, 0, 1]]
model1 = get_model(t1=0, t2=0, pos=[(0.2, 0.1, 0.1), (0.6, 0.5, 0.5)], uc=uc1)
model2 = get_model(t1=0, t2=0, pos=[(0.1, 0.1, 0.1), (0.1, 0.5, 0.5)], uc=uc2)
for phase, R1 in zip([1, -1j, 1j, -1], itertools.product([0, -1], [0, -1], [0])):
model1.add_hop(t1 * phase, 0, 1, R1)
R2 = [R1[0] - R1[1], R1[1], R1[2]]
model2.add_hop(t1 * phase, 0, 1, R2)
for r1_part in itertools.permutations([0, 1]):
R1 = list(r1_part) + [0]
R2 = [R1[0] - R1[1], R1[1], R1[2]]
model1.add_hop(t2, 0, 0, R1)
model1.add_hop(-t2, 1, 1, R1)
model2.add_hop(t2, 0, 0, R2)
model2.add_hop(-t2, 1, 1, R2)
assert models_close(model1, model2.change_unit_cell(uc=uc1, cartesian=True))
assert models_close(model2, model1.change_unit_cell(uc=uc2, cartesian=True))
assert models_close(model2, model1.change_unit_cell(uc=uc2, cartesian=False))
assert models_close(
model1,
model2.change_unit_cell(uc=[[1, 0, 0], [-1, 1, 0], [0, 0, 1]], cartesian=False),
)
|
import os
from scipy.special import comb
def bagging_proba(n, acc=0.8):
    ''' Given n independent estimators, each with accuracy 'acc', estimate the
    accuracy of a bagging ensemble that uses majority voting.
    Note: 6 or more correct out of 10 counts as a majority.
'''
if n == 1:
return acc
error = 0
for i in range(n // 2 + 1):
        # exactly i estimators guess correctly (i.e. no majority)
error += comb(n, i, exact=False) * \
((1 - acc) ** (n - i)) * ((acc) ** i)
return 1 - error
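# In closed form, the loop above computes (sketch of the same calculation):
#   P(majority correct) = 1 - sum_{i=0}^{floor(n/2)} C(n, i) * acc**i * (1 - acc)**(n - i)
# i.e. one minus the probability that at most half of the n estimators are correct.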
for i in range(1, 10):
n = i * 10
print(n, bagging_proba(n))
'''
10 0.9672065024000001
20 0.997405172599326
30 0.9997687743883322
40 0.9999783081068737
50 0.9999979051451444
60 0.9999997938783113
70 0.999999979452253
80 0.999999997931789
90 0.9999999997902754
'''
|
'''
@package: pyAudioLex
@author: Jim Schwoebel
@module: word_endings
Given a word ending (e.g. '-ed'), output the words with that ending and the associated count.
'''
from nltk import word_tokenize
import re
def word_endings(importtext,ending):
text=word_tokenize(importtext)
    # count the words ending with the given suffix
words=[w for w in text if re.search(ending+'$', w)]
return [len(words),words]
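# Illustrative example (not part of the original module):
# word_endings('They walked and talked all day', 'ed')
# is expected to return [2, ['walked', 'talked']].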
#test
#print(word_endings('In a blunt warning to the remaining ISIS fighters, Army Command Sgt. Maj. John Wayne Troxell said the shrinking band of militants could either surrender to the U.S. military or face death. “ISIS needs to understand that the Joint Force is on orders to annihilate them,” he wrote in a forceful message on Facebook. “So they have two options, should they decide to come up against the United States, our allies and partners: surrender or die!”','s'))
|
__author__ = 'varun'
from .views import settings_page, update_user_profile
from django.conf.urls import url
urlpatterns = [
url(r'^$', settings_page),
url(r'^update-profile$', update_user_profile),
]
|
"""
inputs
    name -> str -> n
    purchase_amount -> int -> mc
outputs
    total_discount -> float -> td
"""
n = str(input("nombre del cliente: "))
mc = int(input("ingrese el valor de la compra $"))
if (mc < 50000):
print(n)
print("No hay descuento")
print("Monto a pagar: " + str(mc),"$")
elif (mc >= 50000 and mc <= 100000):
    td = mc*0.05
    mcb = mc-td
    print("Nombre del cliente: "+str(n))
    print("Su monto de compra es: " + str(mc))
    print("Su descuento por la compra es de: " + str(td), "$")
    print("Su valor a pagar es de: " + str(mcb), "$")
elif (mc >= 100000 and mc < 700000):
td = mc*0.11
mcb = mc-td
print("Nombre del cliente: "+str(n))
print("Su momto de compra es: " + str(mc))
print("Su descuento por la compra es de: "+str(td)," $")
print("Su valor a pagar es de: "+str(mcb), " $")
elif (mc >= 700000 and mc < 1500000):
td = mc*0.18
mcb = mc-td
print("Nombre del cliente: "+str(n))
print("Su momto de compra es: " + str(mc))
print("Su descuento por la compra es de: " +str(td), " $")
print("Su valor a pagar es de: "+str(mcb), " $")
elif (mc >= 1500000):
td = mc*0.25
mcb = mc-td
print("Nombre del cliente: "+str(n))
print("Su momto de compra es: " + str(mc))
print("Su descuento por la compra es de: " +str(td), " $")
print("Su valor a pagar es de: "+str(mcb), " $")
|
# Generated by Django 2.2.2 on 2019-06-18 02:26
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('indexNumber', models.CharField(max_length=20)),
('opusNumber', models.PositiveIntegerField()),
('dateWritten', models.DateField(default=django.utils.timezone.now)),
('key', models.CharField(max_length=3)),
('meter', models.CharField(max_length=5)),
('tempo', models.CharField(max_length=20)),
('bpm', models.PositiveIntegerField()),
('instrumentation', models.CharField(max_length=10)),
('typeOfMusic', models.CharField(max_length=20)),
('filmGenre', models.CharField(max_length=20)),
('subGenre', models.CharField(max_length=20)),
('tone', models.CharField(max_length=300)),
('emotion', models.CharField(max_length=300)),
('mood', models.CharField(max_length=300)),
('feeling', models.CharField(max_length=300)),
('songName', models.CharField(max_length=60)),
('composerNotes', models.CharField(max_length=1024)),
('location', models.CharField(max_length=100)),
],
),
]
|
"""
This file handles searching and adding workouts
"""
from flask import (Blueprint, render_template, flash,
session, request, redirect, url_for, json)
from flaskapp import db
bp = Blueprint('workout', __name__, url_prefix='/workout')
@bp.route('/')
def workout():
"""
Page where user adds workouts
:return: html workout template with exercises in search bar
"""
exercises = db.get_exercises()
categories = db.get_categories()
return render_template('workout.html', exercises=exercises,
categories=categories)
@bp.route('/add_workout', methods=['POST'])
def add_workout():
"""
Add workout to database when user clicks button
:return JSON response:
"""
data = request.get_json()
set_id = db.insert_set(data)
db.insert_workout(session['training_session'], set_id, data)
return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}
@bp.route('/training_session', methods=['GET', 'POST'])
def training_session():
"""
Creates training sessions when user clicks start workout
:return JSON response:
"""
if request.method == 'POST':
session['training_session'] = db.get_training_session(session['user'])
resp = f"""Training session created with
ID#{session['training_session']}"""
return json.dumps(resp), 200, {'ContentType': 'application/json'}
@bp.route('/search-by-cat', methods=['GET', 'POST'])
def return_by_cat():
"""
Returns only exercises with a given category
:return JSON response:
"""
if request.method == 'POST':
# print('searching by category')
data = request.get_json()
# print(data)
return json.dumps(db.get_exercises_by_cat(data))
@bp.route('/get-exercises', methods=['GET', 'POST'])
def get_exercises():
"""
    Returns all exercises
:return JSON response:
"""
if request.method == 'POST':
return json.dumps(db.get_exercises())
|
import pickle
import sys
import maya.cmds as cmds
import os
# import DLS
from maya.api.OpenMaya import MVector
import xml.etree.ElementTree
import maya.mel as mel
import random
import DLS
import time
import re
# NOTE (assumption): HZrivet is referenced below in set_bones() via
# HZrivet.UI.HZrivet_finalCC(); it is assumed to be an importable third-party
# rivet tool available on the Maya script path.
import HZrivet
mapping_folder = 'd:/.work/.chrs/.bone_mapping'
xml_folder = 'd:/.work/.chrs/.xml'
upper_lower_match = 'd:/.work/.chrs/.upper_lower_match/upper_lower_match.txt'
lps_path = 'd:/.work/.chrs/.lps/'
'''Gets or set bone mapping for a character, bone per vertex correspondance.
Since bones are constrained to vertices.
And vertices are driven with a blendshapes.
Args:
folder (str): path to bone mapping files that contain dictionary,
dumped with a pickle
set_mapping (bool, optional): used with cubes and mapping params.
Defaults to False
Sets custom mapping for a character
cubes (list, optional): used with set and mapping params.
Cubes that visualize joint placement
mapping (dict, optional): used with set and cubes params.
Bone per vertex correspondance
Returns:
dict: default or custom bone per vertex correspondance for a '*_head' mesh
Examples:
>>> print([i for i in example_generator(4)])
[0, 1, 2, 3]
'''
def get_head(head=''):
'''Searches for a transform node named '*_head' in the scene.
Args:
head (str, optional):
Returns:
str: mesh name with prefix "_head"
'''
if head:
return head
meshes = cmds.ls('*_head', tr=True)
if meshes and len(meshes) == 1:
head = meshes[0]
elif not meshes:
sys.exit('No mesh with the prefix _head in the scene')
else:
sys.exit(
'More than one object with the prefix _head in the scene \n %s' % meshes)
return head
def get_teeth(teeth=''):
'''Searches for a transform node named '*_teeth' in the scene.
Returns:
str: mesh name with prefix "_teeth"
'''
if not teeth:
meshes = cmds.ls('*_teeth', tr=True)
if meshes and len(meshes) == 1:
teeth = meshes[0]
elif not meshes:
sys.exit('No mesh with the prefix _teeth in the scene')
else:
sys.exit(
'More than one object with the prefix _teeth in the scene \n %s' % meshes)
return teeth
def mapping(mesh, folder, set_mapping=False, cubes=[], mapping={}, female=False):
    '''Gets or sets the bone mapping for a character (bone-per-vertex correspondence).
    Bones are constrained to vertices,
    and the vertices are driven by blendshapes.
Args:
mesh (str): transform object name mapping will be applied to.
folder (str): path to bone mapping files that contain dictionary,
dumped with a pickle
set_mapping (bool, optional): used with cubes and mapping params.
Defaults to False
Sets custom mapping for a character
cubes (list, optional): used with set and mapping params.
Cubes that visualize joint placement
mapping (dict, optional): used with set and cubes params.
            Bone-per-vertex correspondence
    Returns:
        dict: default or custom bone-per-vertex correspondence for a '*_head' mesh
if 'head' in mesh:
default_map_name = 'head_bones_mapping.txt'
elif 'teeth' or 'mouth' in mesh:
default_map_name = 'teeth_bones_mapping.txt'
else:
sys.exit('No pattern for %s found' % mesh)
'''
if female:
default_map_name = 'female_bones_mapping.txt'
else:
default_map_name = 'male_bones_mapping.txt'
cur_map_name = '%s_%s' % (mesh.split('_')[0], default_map_name)
default_map_path = os.path.join(folder, default_map_name)
cur_map_path = os.path.join(folder, cur_map_name)
if os.path.exists(cur_map_path):
path = cur_map_path
elif os.path.exists(default_map_path):
path = default_map_path
else:
sys.exit('Neither Default nor Chr mapping file found')
with open(path, 'rb') as bone_map:
mapping = pickle.loads(bone_map.read())
if set_mapping == True and cubes and mapping:
upd_mapping = update_match(mapping, cubes)
with open(cur_map_path, 'wb') as bone_map:
pickle.dump(upd_mapping, bone_map)
print 'Saved updated mapping to %s' % cur_map_path
# return upd_mapping
else:
print 'Loaded mapping from %s' % path
return mapping
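# Usage sketch (hypothetical character name): mapping('yermak_head', mapping_folder)
# first looks for 'yermak_male_bones_mapping.txt' in the folder and falls back to
# 'male_bones_mapping.txt'; with female=True the '*female_bones_mapping.txt'
# files are used instead.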
def get_length(jnt_coord, vtx_coord):
''''''
vtx_vector = MVector(vtx_coord)
jnt_vector = MVector(jnt_coord)
vector_position = vtx_vector - jnt_vector
vector_length = vector_position.length()
return vector_length
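# Example (illustrative): get_length([0, 0, 0], [3, 4, 0]) returns 5.0,
# the Euclidean distance between the joint and vertex positions.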
def set_cubes(mapping):
cubes = []
head = get_head()
for vtx, jnt in mapping.iteritems():
cube = cmds.polyCube(name=jnt + '_cube', d=0.15, w=0.15, h=0.15)[0]
vtx_pos = cmds.pointPosition(head + vtx, world=True)
cmds.xform(cube, t=vtx_pos, ws=True)
cubes.append(cube)
cubes_group = cmds.group(cubes, name='cubes')
return cubes_group
def get_pos(a):
a_pos = cmds.xform(a, translation=True,
query=True,
worldSpace=True)
return a_pos
def get_coords(objects):
pos = {}
for o in objects:
o_pos = cmds.xform(o, translation=True,
query=True,
worldSpace=True)
pos[o] = o_pos
return pos
def compare_pos(a, b):
a_pos = cmds.xform(a, translation=True,
query=True,
worldSpace=True)
b_pos = cmds.xform(b, translation=True,
query=True,
worldSpace=True)
if MVector(a_pos) == MVector(b_pos):
return True
else:
return False
'''
true_false = []
for a_v, b_v in zip(a_pos, b_pos):
if round(a_v, 2) == round(b_v, 2):
true_false.append(True)
elif round(a_v, 0) == round(b_v, 0):
true_false.append(True)
print 'Not certain about match'
else:
true_false.append(False)
'''
def update_match(mapping, cubes):
if not cubes:
sys.exit('No Cubes found in scene.\nSet new bone position first.')
head = get_head()
upd_match = {}
for vtx, jnt in mapping.iteritems():
cube = jnt + '_cube'
if cmds.objExists(cube):
if compare_pos(head + vtx, cube) == True:
upd_match[vtx] = jnt
else:
print 'cube %s moved' % cube
vtx = '.vtx[%s]' % get_closest(cube)
upd_match[vtx] = jnt
else:
sys.exit('Cube %s not found' % cube)
return upd_match
def get_closest(element, obj=''):
if not obj:
obj = get_head()
vtxs = cmds.polyEvaluate(obj, v=True)
check_value = float('inf')
current_vtx = ''
element_pos = get_pos(element)
for vtx in range(0, vtxs + 1):
l = get_length(get_pos(obj + '.vtx[%s]' % vtx), element_pos)
if l < check_value:
check_value = l
match_l = vtx
return match_l
def set_bones(mesh, bone_mapping):
'''Constrains facial bones to a certain vertices.
Position is taken from bone per vertex mapping dictionary
Args:
mesh (str): transform object name.
bone_mapping (dict): bone per vertex mapping dictionary
Returns:
list: temporary objects, locators, groups, to be deleted later
'''
tmp_stuff = []
# Constrain joints to vertices
for vtx, jnt in bone_mapping.iteritems():
old_loc_list = cmds.ls('*LOC*', flatten=True)
cmds.select(mesh + vtx)
HZrivet.UI.HZrivet_finalCC()
current_loc_list = cmds.ls('*LOC*', flatten=True, tr=True)
loc = [loc for loc in current_loc_list if loc not in old_loc_list]
vtx_pos = cmds.pointPosition(mesh + vtx, world=True)
new_loc = cmds.spaceLocator()
new_group = cmds.group(new_loc)
tmp_stuff.append(loc[0])
tmp_stuff.append(new_group)
# Align joint to the world of the parent joint
# Breaks the model in editor
# cmds.joint(jnt, e=True, oj='none', zso=True)
cmds.xform(jnt, t=vtx_pos, ws=True)
cmds.xform(new_group, t=vtx_pos, ws=True)
# Now, not needed, because bones often have connections
# cmds.makeIdentity(jnt)
cmds.pointConstraint(loc[0], new_group, mo=True)
cmds.orientConstraint(loc[0], new_group, mo=True)
cmds.parentConstraint(new_loc, jnt, mo=True)
tmp = cmds.group(tmp_stuff, name='tmp')
return tmp
def get_closest_vtx_bone_match(obj, jnts):
''''''
vtxs_pos = get_vtxs_coords(obj)
jnts_pos = get_coords(jnts)
match = {}
for jnt, jnt_pos in jnts_pos.iteritems():
check_value = float('inf')
current_vtx = ''
for vtx, vtx_pos in vtxs_pos.iteritems():
vtx_length = get_length(jnt_pos, vtx_pos)
if vtx_length < check_value:
check_value = vtx_length
current_vtx = vtx
match['.vtx[%s]' % current_vtx] = jnt
return match
def set_bip_rom(start=0, angle=45, fxgraph=False, skeleton='new'):
    '''Sets rotations for each of the predefined joints around the X, Y and Z axes.
    Part of the blendshapes-to-skin process.
Returns:
None
'''
if skeleton == 'new':
jnts = ['driv_Bip01_Head',
'driv_Bip01_Neck1',
'driv_Bip01_Neck']
head_mapping = ['head_yaw_right',
'head_yaw_left',
'head_roll_left',
'head_roll_right',
'head_pitch_up',
'head_pitch_down',
'neck1_yaw_right',
'neck1_yaw_left',
'neck1_roll_left',
'neck1_roll_right',
'neck1_pitch_up',
'neck1_pitch_down',
'neck_yaw_right',
'neck_yaw_left',
'neck_roll_left',
'neck_roll_right',
'neck_pitch_up',
'neck_pitch_down']
elif skeleton == 'old':
jnts = ['Bip01_Head',
'Bip01_Neck']
head_mapping = ['head_yaw_right',
'head_yaw_left',
'head_roll_left',
'head_roll_right',
'head_pitch_up',
'head_pitch_down',
'neck_yaw_right',
'neck_yaw_left',
'neck_roll_left',
'neck_roll_right',
'neck_pitch_up',
'neck_pitch_down']
# Fedor's code eval(str(rotates).replace('90','60'))
# Get nested list of rotates for attributes
rotates = []
for n in xrange(3):
for mp in ['-', '']:
temp = [0, 0, 0]
temp[n] = int(mp + str(angle))
rotates += [temp]
'''
rotates = [[-45,0,0],
[45,0,0],
[0,-45,0],
[0,45,0],
[0,0,-45],
[0,0,45]]
'''
head_frames = range(start + 1, start + 1 + len(head_mapping))
head_fxgraph = ''
# Info for fxgraph. Placed in batch.txt.
for head_name, head_frame in zip(head_mapping, head_frames):
print head_name, head_frame
head_fxgraph += '%s_%s\n' % (head_name, str(head_frame))
if fxgraph:
return head_fxgraph
cmds.setKeyframe(jnts, time=start)
frame = start + 1
for jnt in jnts:
for rot in rotates:
cmds.xform(jnt, ro=(rot))
cmds.setKeyframe(jnts, time=(frame, frame))
frame += 1
cmds.xform(jnt, ro=(0, 0, 0))
cmds.setKeyframe(jnts, time=(frame, frame))
return max(head_frames)
def set_eyes_rom(start=0, fxgraph=False, skeleton='new'):
start = int(start)
eye_mapping = ['Eyeball_L_Down',
'Eyeball_L_Up',
'Eyeball_L_In',
'Eyeball_L_Out',
'Eyeball_R_Down',
'Eyeball_R_Up',
'Eyeball_R_Out',
'Eyeball_R_In']
if skeleton == 'new':
jnts = ['bn_eye_l', 'bn_eye_r']
elif skeleton == 'old':
jnts = ['BN_Eyeball_L', 'BN_Eyeball_R']
# jnts = ['BN_Eyeball_L', 'BN_Eyeball_R']
rotates = [[-30, 0, 0],
[30, 0, 0],
[0, 0, -40],
[0, 0, 40]]
eye_frames = range(start + 1, start + 1 + len(eye_mapping))
fxgraph_text = ''
for eye_name, eye_frame in zip(eye_mapping, eye_frames):
print eye_name, eye_frame
fxgraph_text += '%s_%s\n' % (eye_name, str(eye_frame))
if fxgraph:
return fxgraph_text
cmds.setKeyframe(jnts, time=start)
frame = start + 1
for jnt in jnts:
for rot in rotates:
cmds.xform(jnt, ro=(rot))
cmds.setKeyframe(jnts, time=(frame, frame))
frame += 1
cmds.xform(jnt, ro=(0, 0, 0))
cmds.setKeyframe(jnts, time=(frame, frame))
return max(eye_frames)
def set_tmp_skin():
'''Set skin cluster for the head mesh only on the predefined joints.
    Omitting those that are not needed for the skin calculation.
    Part of the blendshapes-to-skin process.
Returns:
str: skin cluster name
'''
head = get_head()
tmp_bones = [u'Bip01_Spine3', u'driv_Bip01_Neck', u'driv_Bip01_Neck1', u'driv_Bip01_Head',
u'bn_lid_l_d_02', u'bn_lid_l_u_05', u'bn_lid_l_u_04', u'bn_lid_l_d_03', u'bn_lid_l_u_06',
u'bn_lid_l_u_07', u'bn_lid_l_d_04', u'bn_lid_l_d_05', u'bn_lid_l_u_08', u'bn_lid_l_u_03',
u'bn_br_l_08', u'bn_br_l_09', u'bn_lid_l_u_01', u'bn_lid_l_u_02', u'bn_br_l_04',
u'bn_br_l_05', u'bn_br_l_06', u'bn_br_l_07', u'bn_br_l_01', u'bn_br_l_02', u'bn_br_l_03',
u'bn_nose_l', u'bn_mouth_l_01', u'bn_cheek_l_04', u'bn_cheek_l_02', u'bn_cheek_l_03',
u'bn_cheek_l_05', u'bn_mouth_l_02', u'bn_mouth_l_03', u'bn_cheek_l_06', u'bn_cheek_l_07',
u'bn_mouth_l_04', u'bn_lip_l_u_02', u'bn_lip_l', u'bn_lip_l_u_04', u'bn_lip_l_u_05',
u'bn_lip_c_u_02', u'bn_lip_l_u_01', u'bn_lip_c_u', u'bn_lip_c_u_01', u'bn_lip_l_u_03',
u'bn_cheek_l_01', u'bn_cheek_r_06', u'bn_lip_r', u'bn_lip_r_u_04', u'bn_mouth_r_02',
u'bn_cheek_r_04', u'bn_cheek_r_07', u'bn_lip_r_u_01', u'bn_lip_r_u_05', u'bn_br_r_01',
u'bn_lid_r_d_05', u'bn_lid_r_d_03', u'bn_lid_r_d_02', u'bn_lid_r_u_02', u'bn_lid_r_d_04',
u'bn_lid_r_u_08', u'bn_lid_r_u_07', u'bn_lid_r_u_03', u'bn_br_r_02', u'bn_cheek_r_03',
u'bn_cheek_r_02', u'bn_mouth_r_01', u'bn_lid_r_u_01', u'bn_br_r_05', u'bn_br_r_04',
u'bn_br_r_07', u'bn_cheek_r_05', u'bn_nose_r', u'bn_lid_r_u_06', u'bn_lid_r_u_04',
u'bn_lid_r_u_05', u'bn_br_r_09', u'bn_br_r_08', u'bn_br_r_06', u'bn_br_r_03',
u'bn_lip_r_u_03', u'bn_mouth_r_03', u'bn_mouth_r_04', u'bn_lip_r_u_02', u'bn_cheek_r_01',
u'bn_lid_l_d_01', u'bn_nose_c', u'bn_br_c', u'bn_chin_l', u'bn_chin_r', u'bn_chin_c',
u'bn_lip_c_d_02', u'bn_lip_r_d_05', u'bn_lip_l_d_05', u'bn_lip_c_d_01', u'bn_lip_r_d_03',
u'bn_lip_l_d_03', u'bn_lip_l_d_04', u'bn_lip_r_d_04', u'bn_lip_r_d_01', u'bn_lip_l_d_01',
u'bn_lip_c_d', u'bn_lip_r_d_02', u'bn_lip_l_d_02', u'bn_lid_r_d_01', u'bn_lip_r_u_06',
u'bn_lip_l_u_06']
sk = cmds.skinCluster(tmp_bones, head, tsb=True)
cmds.setAttr("%s.envelope" % sk[0], 0)
return sk
def get_joints(invert=False):
'''Gets all the child joints from the 'Bip01'
Returns:
list: all joints in hierarchy
'''
root = 'Bip01'
if not cmds.objExists(root):
return
cmds.select(root, hierarchy=True)
selection = cmds.ls(sl=True, fl=True)
nubs = cmds.ls('*Nub*', type='joint')
if nubs:
cmds.select(nubs, d=True)
jnts = cmds.ls(type='joint', selection=True)
cmds.select(clear=True)
if invert:
return [o for o in selection if o not in jnts]
else:
return jnts
def get_meshes():
'''Gets all the transform meshes node names from a scene
Returns:
list: all meshes in scene
'''
objects = cmds.ls('*', type='mesh')
meshes = cmds.listRelatives(objects, parent=True)
if not meshes:
return
meshes = list(set(meshes))
return meshes
def unlock_attributes(o):
locked = cmds.listAttr(o, l=True)
if locked:
for atr in locked:
cmds.setAttr('%s.%s' % (o, atr), lock=0)
def reset(objects):
'''Deletes all connections and history from a given objects.
And freezes transform.
Args:
objects (list): string list of objects
Returns:
None:
'''
axis = ['scaleX', 'scaleY', 'scaleZ', 'rotateX', 'rotateY',
'rotateZ', 'translateX', 'translateY', 'translateZ']
cmds.currentTime(0)
for o in objects:
cmds.delete(o, ch=True, cn=True, tac=True, e=True)
unlock_attributes(o)
sk = cmds.listConnections(o, type='skinCluster')
if sk:
cmds.delete(sk)
bp = cmds.listConnections(o, type='dagPose')
if bp:
cmds.delete(bp)
for a in axis:
conn = cmds.listConnections(o + "." + a, s=True, p=True)
if conn:
cmds.disconnectAttr(conn[0], o + "." + a)
cmds.delete(objects, c=True)
cmds.delete(objects, ch=True)
cmds.makeIdentity(objects, apply=True)
def add_zeroes(num, digits=2):
    '''Generates zeroes in front of the given number.
Args:
num (int): input digit that is processed.
digits (int, optional): quantity of digits.
Returns:
str:
Examples:
>>> add_zeroes(2, digits=2)
'02'
>>> add_zeroes(2, digits=3)
'002'
'''
if isinstance(num, int) and isinstance(digits, int):
num = str(num)
zeroes_quantity = digits - len(num)
if zeroes_quantity > 0:
zeroes = (str(0) * zeroes_quantity)
return zeroes + num
elif zeroes_quantity == 0:
return num
else:
print 'digits', digits, 'less than', num, 'returning', num
return num
else:
exit_message = str(
['"update_number()" accepts "int" only, got', type(num), type(digits)])
sys.exit(exit_message)
def create_bip_blendshapes(start=1, end=19, head=''):
if not head:
head = get_head()
for key in range(start, end):
cmds.currentTime(key, edit=True)
cmds.select(head)
new_name = cmds.duplicate()
cmds.rename(new_name[0], head + '_' + add_zeroes(key, digits=2))
cmds.delete(ch=True)
def prepare_buttons(path):
'''
Take .py or .pyc path to script
Return dictionary button label:function
'''
path = path.replace('.pyc', '.py')
with open(path, 'r+') as cmds:
commands = cmds.read().splitlines()
defs = [d for d in commands if 'def ' in d]
to_del = ['def ', '(*args):']
buttons = []
for d in defs:
for i in to_del:
d = d.replace(i, '')
buttons.append(d)
labeled_buttons = {}
for b in buttons:
labeled_buttons[b.replace('_', ' ')] = 'c.' + b
return labeled_buttons
def get_blendshape_node(mesh):
'''
Description:
Example:
input: transform mesh
output: blendshape name if connected
'''
bls_set = cmds.ls('blendShape*', type='objectSet')
for bl_set in bls_set:
conns = cmds.listConnections(bl_set)
if mesh in conns:
bl = cmds.ls(conns, type='blendShape')
return bl[0]
print 'No blendshape connected to', mesh
def duplicate_blendshapes(bl=''):
'''Duplicates all blendshapes of a node by calling them one by one.
Number prefix of duplicated mesh is taken from a blendshape name if present.
Otherwise, Maya takes care of a prefix by itself.
Args:
None:
Returns:
list: duplicated meshes
Examples:
>>> duplicate_blendshapes()
['yermak_head_01', 'yermak_head_02']
'''
mesh = cmds.ls(sl=True, flatten=True)[0]
if not bl:
bl = get_blendshape_node(mesh)
cmds.currentTime(0)
targets = cmds.blendShape(bl, t=True, q=True)
# If blendshape meshes were deleted from scene
if not targets:
targets = cmds.listAttr(bl + '.w', multi=True)
# Generate dict for set each blendshapes in range to 1 infl per call
weights_bls = {}
renamed = []
group_name = '%s_bls_duplicated' % mesh
cmds.group(name=group_name, empty=True)
# Get index mapping for blendshape targets.
# Example: {0:'yermak_19', 1:'yermak_20'}
for t in range(0, len(targets)):
weight = [(i, 0) for i in range(0, len(targets))]
weight[t] = (t, 1)
weights_bls[targets[t]] = weight
for bl_mesh, bl_weight in weights_bls.iteritems():
cmds.blendShape(bl, w=bl_weight, edit=True)
d_name = cmds.duplicate(mesh)
cmds.parent(d_name, group_name)
new_name = cmds.rename(d_name[0], bl_mesh)
renamed.append(new_name)
return renamed
def skin_eyes():
'''
Prepare eye for export to 4A engine
Add the skin and the proper weights
'''
# Add check for skinscuster
eyes = cmds.ls('*l_eye', '*r_eye')
if not eyes:
sys.exit('No match for the pattern *l_eye *r_eye')
elif len(eyes) != 2:
sys.exit('More or less than 2 objects match the pattern *l_eye *r_eye')
reset(eyes)
# Center pivot
cmds.xform(eyes, cp=True, p=True)
l_prefix = '_l'
r_prefix = '_r'
jnts_list = ['bn_eye_r', 'bn_eye_l', 'Bip01_Head', 'Bip01']
for jnt in jnts_list:
if not cmds.objExists(jnt):
sys.exit('joint not found', jnt)
for eye in eyes:
jnts_hi = get_joints()
if l_prefix in eye:
eye_jnt = 'bn_eye_l'
elif r_prefix in eye:
eye_jnt = 'bn_eye_r'
else:
sys.exit('No prefix match')
# Align bone to eyes
        # Should add a check for keys and connections on bones
        # to prevent jumping while scrubbing the timeline
p_constr = cmds.pointConstraint(eye, eye_jnt)
cmds.delete(p_constr)
skin_cluster = cmds.skinCluster(jnts_hi, eye, tsb=True)
# skin_cluster = mel.eval('findRelatedSkinCluster("%s")' % object)
cmds.skinPercent(skin_cluster[0],
eye + '.vtx[*]',
transformValue=[(eye_jnt, 0.99),
(jnts_list[2], 0.01)])
print 'Prepared', eye
def dict_io(path, dict={}, get=False, set=False):
if get:
with open(path, 'rb') as dict_path:
dict = pickle.loads(dict_path.read())
print '# Loaded dictionary from', path
return dict
elif set and dict:
with open(path, 'wb') as dict_path:
pickle.dump(dict, dict_path)
print '# Saved dictionary to', path
else:
sys.exit('Command not specified')
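# Usage sketch (illustrative): dict_io(path, get=True) loads and returns a
# pickled dictionary from 'path'; dict_io(path, dict=some_dict, set=True)
# pickles 'some_dict' to 'path'. Exactly one of the two modes must be requested.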
def add_bones_to_skin_cluster(mesh, skin_cluster):
existing_bones = cmds.skinCluster(skin_cluster, query=True, inf=True)
to_add_bones = [
bone for bone in get_joints() if bone not in existing_bones]
if to_add_bones:
cmds.skinCluster(skin_cluster, edit=True, ai=to_add_bones, wt=0)
def get_info_from_xml(path):
'''
takes path to .xml file
return dict 'skin cluster name':[jnts]
'''
root = xml.etree.ElementTree.parse(path).getroot()
# set the header info
for atype in root.findall('headerInfo'):
fileName = atype.get('fileName')
info = {}
jnts = []
for atype in root.findall('weights'):
jnts.append(atype.get('source'))
#shape = atype.get('shape')
skin_cluster = atype.get('deformer')
info[skin_cluster] = jnts
return info
def get_skin_cluster(mesh):
skin_cluster = mel.eval('findRelatedSkinCluster "%s"' % mesh)
if not skin_cluster:
skin_cluster = cmds.ls(cmds.listHistory(mesh), type='skinCluster')
if skin_cluster:
skin_cluster = skin_cluster[0]
else:
skin_cluster = None
return skin_cluster
def set_skin_cluster(mesh):
global xml_folder
mesh_xml = os.path.join(xml_folder, mesh + '.xml')
if not os.path.exists(mesh_xml):
print 'No saved skin cluster found for %s' % mesh
return None
else:
info = get_info_from_xml(mesh_xml)
# No check for skin cluster name match in scene yet.
jnts = info.values()[0]
skin_cluster = info.keys()[0]
if not exists(jnts):
print 'Not enough joints to apply saved skin to'
return None
if exists(skin_cluster):
print 'Skin cluster already exists with a given name'
return None
cmds.skinCluster(mesh, jnts, name=skin_cluster, mi=4)
return skin_cluster
def exists(objects):
'''
Takes strings and lists
Return True or False
'''
if isinstance(objects, str) or isinstance(objects, unicode):
if cmds.objExists(objects):
return True
elif isinstance(objects, list):
true_false = []
for o in objects:
if cmds.objExists(o):
true_false.append(True)
else:
true_false.append(False)
if False in true_false:
return False
else:
return True
else:
print 'Types "str", "list" are accepted only'
return False
def fix_skin_cluster_name(mesh, skin_cluster):
'''Renames skin cluster with a mesh name + prefix "_sc"
Args:
mesh (str): transform object with a skin cluster
skin_cluster (str): skin cluster node name
Returns:
str: updated mesh skin cluster name
'''
prefix = '_sc'
if skin_cluster != mesh + prefix:
skin_cluster_name = cmds.rename(skin_cluster, mesh + prefix)
return skin_cluster_name
else:
return skin_cluster
def export_weights():
global xml_folder
cmds.currentTime(0)
objects = cmds.ls(selection=True, transforms=True, flatten=True)
if not objects:
print 'Nothing is selected to save weights from'
return
object_and_skin_cluster = {}
for o in objects:
skin_cluster = get_skin_cluster(o)
if skin_cluster:
skin_cluster = fix_skin_cluster_name(o, skin_cluster)
object_and_skin_cluster[o] = skin_cluster
if not object_and_skin_cluster:
print 'No skin cluster was found on selected'
return
for o, skin_cluster in object_and_skin_cluster.iteritems():
add_bones_to_skin_cluster(o, skin_cluster)
cmds.deformerWeights(o + '.xml',
path=xml_folder,
ex=True,
deformer=skin_cluster,
method='index')
def import_weights():
global xml_folder
cmds.currentTime(0)
objects = cmds.ls(selection=True, transforms=True, flatten=True)
if not objects:
print 'Nothing is selected to import weights from'
return
for o in objects:
skin_cluster = get_skin_cluster(o)
if skin_cluster:
cmds.skinCluster(skin_cluster, unbind=True, edit=True)
skin_cluster = set_skin_cluster(o)
if skin_cluster:
cmds.deformerWeights(o + '.xml',
path=xml_folder,
im=True,
deformer=skin_cluster,
method='index')
cmds.skinCluster(
skin_cluster, forceNormalizeWeights=True, edit=True)
add_bones_to_skin_cluster(o, skin_cluster)
print "# Imported deformer weights from '%s'. #" % (os.path.join(xml_folder, o + '.xml'))
def get_num_prefix(s):
bl_prefix = s.split('_')[-1]
return int(''.join(chr for chr in bl_prefix if chr.isdigit()))
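# Example (illustrative): get_num_prefix('yermak_head_07') returns 7 --
# the digits of the last '_'-separated token converted to an int.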
def set_blendshapes(head=''):
if not head:
head = get_head()
bls = check_blendshapes(head)
# Blendshapes may not be properly ordered. 1,2,3,5...
# That is why a dictionary mapping is used
bls_order = {}
for i in range(0, len(bls)):
bls_order[i] = bls[i]
bl = cmds.blendShape(bls + [head])[0]
zero_weights = [(i, 0) for i in range(0, len(bls))]
cmds.blendShape(bl, w=zero_weights, edit=True)
cmds.setKeyframe(bl, time=0)
frames = []
for key in bls_order.keys():
frames.append(get_num_prefix(bls_order[key]))
frame_range = [frame for frame in range(0, max(frames))]
# Key all blendshapes along the timeline range
for frame in frame_range:
cmds.blendShape(bl, w=zero_weights, edit=True)
cmds.setKeyframe(bl, time=frame)
for key in bls_order.keys():
bl_num = get_num_prefix(bls_order[key])
cmds.blendShape(bl, w=zero_weights, edit=True)
cmds.blendShape(bl, w=(key, 1.0), edit=True)
cmds.setKeyframe(bl, time=bl_num)
return bl
def check_blendshapes(head):
# Find all blendshapes in scene
#head = get_head()
head_template = '_'.join(head.split('_')[:-1])
head_template_match = cmds.ls(head_template + '*', tr=True)
head_bl = [i for i in head_template_match if i.split('_')[-1].isdigit()]
if not head_bl:
sys.exit('No blendshapes')
head_bl.sort()
return head_bl
def get_vtxs_coords(mesh, relative=False):
vtxs_coords = {}
vtxs = cmds.ls(mesh + '.vtx[*]', fl=True)
for vtx in vtxs:
if relative:
coord = cmds.xform(vtx, q=True, t=True, r=True)
else:
coord = cmds.xform(vtx, q=True, t=True, ws=True)
vtx_id = ''.join(i for i in vtx.split('.')[1] if i.isdigit())
vtxs_coords[vtx_id] = coord
return vtxs_coords
def calc_coords(a_coords, b_coords, add=False, sub=False):
'''
    Performs add or subtract operations on [x, y, z] coordinates
input: [float, float, float]
output: [float, float, float]
'''
a_vector = MVector(a_coords)
b_vector = MVector(b_coords)
if add:
result = list(a_vector + b_vector)
elif sub:
result = list(b_vector - a_vector)
else:
sys.exit('Only "sub" or "add" flags supported')
# Operations are correct even without float formatting
#diff = [float('%.10f' % c) for c in diff]
return result
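# Examples (illustrative):
#   calc_coords([1, 2, 3], [4, 5, 6], add=True) -> [5.0, 7.0, 9.0]
#   calc_coords([1, 2, 3], [4, 5, 6], sub=True) -> [3.0, 3.0, 3.0]  (b - a)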
def set_coords(mesh, coords):
for vtx_id, coords in coords.iteritems():
cmds.xform(mesh + '.vtx[%s]' % vtx_id, t=coords, ws=True)
def get_vtxs_delta(t_coords, a_coords, b_coords):
delta = {}
vtx_ids = [id for id in a_coords.keys()]
for vtx_id in vtx_ids:
if a_coords[vtx_id] == b_coords[vtx_id]:
continue
diff_coords = calc_coords(a_coords[vtx_id], b_coords[vtx_id], sub=True)
delta_coords = calc_coords(t_coords[vtx_id], diff_coords, add=True)
delta[vtx_id] = delta_coords
return delta
def set_delta_mesh(t_mesh, a_mesh, b_mesh):
a_coords = get_vtxs_coords(a_mesh)
b_coords = get_vtxs_coords(b_mesh)
t_coords = get_vtxs_coords(t_mesh)
if len(a_coords) != len(b_coords) != len(t_coords):
exit_message = 'Different length of dicts: %s, %s, %s' % (
len(a_coords), len(b_coords), len(t_coords))
sys.exit(exit_message)
if a_coords == b_coords == t_coords:
sys.exit('Compared dictionaries are identical')
vtxs_delta = get_vtxs_delta(t_coords, a_coords, b_coords)
delta_mesh = cmds.duplicate(t_mesh, n=b_mesh + '_delta')[0]
set_coords(delta_mesh, vtxs_delta)
print delta_mesh
return delta_mesh
def calc_delta_on_meshes(mapping):
'''
eye_l_up_closed 64
eye_r_up_closed 65
eye_l_down_closed 66
eye_r_down_closed 67
eye_l_left_closed 68
eye_r_left_closed 69
eye_l_right_closed 70
eye_r_right_closed 71
delta_mapping = {57:64, 56:65, 54:66, 55:67, 59:68, 58:69, 60:70, 61:71}
'''
# delta_mapping = {57:64, 56:65, 54:66, 55:67, 59:68, 58:69, 60:70, 61:71}
# calc_delta_on_meshes(delta_mapping)
template_mesh = get_head()
    for a, b in mapping.iteritems():
a_mesh = cmds.listRelatives(
cmds.ls('*%s*' % a, type='mesh'), parent=True)[0]
b_mesh = cmds.listRelatives(
cmds.ls('*%s*' % b, type='mesh'), parent=True)[0]
if not a_mesh and not b_mesh:
exit_message = 'Meshes not found with the %s or %s prefix' % (a, b)
sys.exit(exit_message)
delta_mesh = set_delta_mesh(template_mesh, a_mesh, b_mesh)
cmds.rename(delta_mesh, b_mesh + '_delta')
def get_blend_multiplier(coords, threshold=3):
    '''Gets data for smoothly dividing a mesh into symmetrical parts,
    with a blend between the parts whose half-length is given by threshold.
    Used to divide scanned emotion blendshapes into left and right sides.
Default division axis is X.
Args:
coords (list): vertex coordinates [x,y,z]
threshold (int, optional): blend half length. In units.
Counted from the center of a mesh
Returns:
dict, dict: blend mapping for both sides in a format:
'1': 0.5, where key is vertex id in "str", 05 is blend value
'''
axis = 0 # x in x, y, z
max_coord = max([x[axis] for x in coords.values()])
min_coord = min([x[axis] for x in coords.values()])
center_coord = (max_coord + min_coord) / 2
blend_max = center_coord + threshold
blend_min = center_coord - threshold
blend_length = threshold * 2
positive_blend = {}
negative_blend = {}
for id, coord in coords.iteritems():
axis_coord = coord[axis]
if axis_coord > blend_max:
positive_blend[id] = 1
elif axis_coord < blend_min:
negative_blend[id] = 1
else:
positive_blend_value = (axis_coord - blend_min) / blend_length
positive_blend[id] = positive_blend_value
            # Since a multiplier will never be greater than 1
negative_blend[id] = 1 - positive_blend_value
return positive_blend, negative_blend
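# Worked example (illustrative): with threshold=3 and a mesh centred at x=0,
# a vertex at x=1.5 lies inside the +-3 blend zone, so it receives a
# positive-side weight of (1.5 - (-3)) / 6 = 0.75 and a negative-side weight
# of 0.25; vertices beyond +-3 get a weight of 1 on their own side only.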
def set_blend_coords(blend, diff, mesh):
'''Applies a relative transform on a mesh with a multiplier
Args:
blend (dict): vtx id: multiplier based on distance.
        diff (dict): vtx id: [x,y,z] difference between the template mesh
        and some emotion mesh.
mesh (str): mesh name transform are applied to.
Returns:
None:
'''
    for vtx_id in blend.keys():
        # scale the per-vertex difference by the blend multiplier for this vertex
        pos_blend_coord = [x * blend[vtx_id] for x in diff[vtx_id]]
        cmds.xform(mesh + '.vtx[%s]' % vtx_id, t=pos_blend_coord, r=True)
def divide_mesh(template_mesh, divided_mesh, threshold=3):
    '''Divides a mesh into left and right symmetrical sides
    with a smooth linear blend in between.
    Used to divide scanned emotion blendshapes.
Template mesh is duplicated.
Args:
template_mesh (str):
divided_mesh (str):
Returns:
list: divided meshes names
'''
l_mesh = cmds.duplicate(template_mesh, name=divided_mesh + '_l')[0]
r_mesh = cmds.duplicate(template_mesh, name=divided_mesh + '_r')[0]
template_coords = get_vtxs_coords(template_mesh, relative=True)
div_coords = get_vtxs_coords(divided_mesh, relative=True)
pos_blend_values, neg_blend_values = get_blend_multiplier(
template_coords, threshold=threshold)
for vtx_id in pos_blend_values.keys():
diff_coords = calc_coords(
template_coords[vtx_id], div_coords[vtx_id], sub=True)
pos_blend_coord = [x * pos_blend_values[vtx_id] for x in diff_coords]
cmds.xform(r_mesh + '.vtx[%s]' % vtx_id, t=pos_blend_coord, r=True)
for vtx_id in neg_blend_values.keys():
diff_coords = calc_coords(
template_coords[vtx_id], div_coords[vtx_id], sub=True)
neg_blend_coord = [x * neg_blend_values[vtx_id] for x in diff_coords]
cmds.xform(l_mesh + '.vtx[%s]' % vtx_id, t=neg_blend_coord, r=True)
print 'Divided', divided_mesh
return [l_mesh, r_mesh]
def get_neibour_faces(face):
edges = cmds.polyListComponentConversion(face, ff=True, te=True)
# get neighbour faces
faces = cmds.polyListComponentConversion(edges, fe=True, tf=True, bo=True)
return faces
def check_bones_threshold(faces, skin_cluster):
threshold = 80
bones_on_faces = cmds.skinPercent(skin_cluster, faces,
transform=None, q=1, ib=0.001)
bones_quantity = int(len(set(bones_on_faces)))
if bones_quantity <= threshold:
return True
else:
return False
def get_faces():
mesh = cmds.ls(sl=True)[0]
# a_mat, b_mat = get_faces()
max_iter = 100
skin_cluster = get_skin_cluster(mesh)
max_random = cmds.polyEvaluate(mesh, f=True)
for i in range(0, max_iter):
random_face = '%s.f[%s]' % (mesh, random.randrange(0, max_random))
faces = []
faces.append(random_face)
# 100 iterations of grow selection is enough to cover even the most hi
# poly mesh
for i in range(0, 100):
if check_bones_threshold(faces, skin_cluster):
# new_faces = faces+get_neibour_faces(faces)
# Slow but a selection is more elegant
# Fits my needs because right now using only 81 bones
cmds.select(faces)
mel.eval('GrowPolygonSelectionRegion;')
new_faces = cmds.ls(sl=True, fl=True)
if check_bones_threshold(new_faces, skin_cluster):
faces = new_faces
else:
break
else:
break
# Get the other faces
cmds.select(faces)
mel.eval('InvertSelection')
inverted_faces = cmds.ls(sl=True, flatten=True)
cmds.select(clear=True)
if check_bones_threshold(inverted_faces, skin_cluster) and check_bones_threshold(faces, skin_cluster):
print random_face, 'worked'
return faces, inverted_faces
    sys.exit('100 iterations were not enough to divide the mesh into two materials')
def set_facefx_scale(root_only=False, biped_only=False, defined=[]):
import skin_import_export
start, end = get_timeline()
jnts = get_joints()
root_jnt = 'Bip01'
meshes = get_meshes()
reset(meshes)
    # Brute-force approach to be confident that all meshes will be scaled
# Apply one bone skin
for mesh in meshes:
cmds.skinCluster('Bip01_Head', mesh, tsb=True)
# Make joints scalable
for jnt in jnts:
for a in ['scaleX', 'scaleY', 'scaleZ']:
conn = cmds.listConnections(jnt + "." + a, s=True, p=True)
if conn:
cmds.disconnectAttr(conn[0], jnt + "." + a)
# Works with old skeleton
if root_only:
cmds.xform(root_jnt, scale=(0.01, 0.01, 0.01))
    # For unknown reasons, scaling the whole skeleton sometimes spoils the face joints
elif biped_only and not defined:
if cmds.objExists('driv_Bip01_Head'):
face_jnts = cmds.listRelatives('driv_Bip01_Head', type='joint', ad=True)
elif cmds.objExists('Bip01_Head'):
face_jnts = cmds.listRelatives('Bip01_Head', type='joint', ad=True)
else:
sys.exit('No head joint presents in scene. Biped scale only not possible.')
filtered_jnts = [j for j in jnts if j not in face_jnts]
cmds.xform(filtered_jnts, scale=(0.01, 0.01, 0.01))
elif defined:
cmds.xform(defined, scale=(0.01, 0.01, 0.01))
# Default scale
else:
cmds.xform(jnts, scale=(0.01, 0.01, 0.01))
# Transfer and bake animation to locators
locs = []
constrs = []
for jnt in jnts:
cmds.select(clear=True)
loc_name = cmds.spaceLocator(name=jnt + '_LOC')
locs.append(loc_name[0])
pc = cmds.pointConstraint(jnt, loc_name)
constrs.append(pc[0])
oc = cmds.orientConstraint(jnt, loc_name)
constrs.append(oc[0])
cmds.bakeResults(locs, time=(start, end + 1), sm=True)
cmds.delete(constrs)
# Freeze transformation
reset(meshes)
reset(jnts)
    # Bring the animation back to the joints from the locators
for jnt in jnts:
cmds.select(clear=True)
loc_name = jnt + '_LOC'
cmds.pointConstraint(loc_name, jnt)
cmds.orientConstraint(loc_name, jnt)
cmds.select(meshes)
skin_import_export.import_weights_sp()
cmds.bakeResults(jnts, time=(start, end + 1), sm=True)
cmds.delete(locs)
def get_texture(mesh):
shapesInSel = cmds.ls(mesh, dag=1, o=1, s=1)
shadingGrps = cmds.listConnections(shapesInSel, type='shadingEngine')
shaders = cmds.ls(cmds.listConnections(shadingGrps), materials=1)
fileNode = cmds.listConnections('%s.color' % (shaders[0]), type='file')
if fileNode:
currentFile = cmds.getAttr("%s.fileTextureName" % fileNode[0])
return currentFile
else:
return None
def create_shader(name, texture):
shader = cmds.shadingNode("blinn", asShader=True, name=name)
# a file texture node
file_node = cmds.shadingNode("file", asTexture=True, name=name)
# a shading group
cmds.setAttr(file_node + '.fileTextureName', texture, type="string")
shading_group = cmds.sets(
renderable=True, noSurfaceShader=True, empty=True)
# connect shader to sg surface shader
cmds.connectAttr('%s.outColor' %
shader, '%s.surfaceShader' % shading_group)
# connect file texture node to shader's color
cmds.connectAttr('%s.outColor' % file_node, '%s.color' % shader)
return shading_group
def assign_shader(object, s_group):
try:
cmds.sets(object, e=True, forceElement=s_group)
except:
print 'Cannot assign material to', object
def find_texture(texture):
path = 't:/'
name = os.path.basename(texture)
for path, dirs, file_names in os.walk(path):
for file_name in file_names:
if file_name == name:
new_texture = os.path.join(path, file_name)
return new_texture
print 'Texture %s not found on T drive. Leaving as is.' % texture
return texture
def organize_scene_shaders():
# Obtain texture files from shaders
mesh_texture = {}
for mesh in get_meshes():
texture = get_texture(mesh)
mesh_texture[mesh] = texture
# Get all shaders in scene and delete them
shaders = cmds.ls('*', mat=True)
cmds.delete(shaders)
for mesh, texture in mesh_texture.iteritems():
new_texture = find_texture(texture)
s_name = '%s_m' % mesh
sg = create_shader(s_name, new_texture)
assign_shader(mesh, sg)
def get_locators_in_scene():
existing_locs_shapes = cmds.ls('*', type='locator', flatten=True)
return [cmds.listRelatives(l, p=True)[0] for l in existing_locs_shapes]
def set_color_lps_rig(dict):
object = cmds.ls(sl=True)[0]
for loc, vtxs in dict.iteritems():
cmds.select([object + '.' + v for v in vtxs])
mel.eval("polyColorPerVertex -cdo -rgb 0.0 1.0 1.0;")
def create_lps_rig(dict, ctrls=[]):
object = cmds.ls(sl=True)[0]
cmds.select(object)
# Fetching commands from mel LPC script.
# Since now don't want to rewrite on python
mel.eval("setDeformGeom();")
loc_renaming_table = {}
if not ctrls:
ctrls = dict.keys()
for ctrl in ctrls:
existing_locs = get_locators_in_scene()
vtxs = dict[ctrl]
# time.sleep(1)
tmp_ls = cmds.select([object + '.' + v for v in vtxs])
# cmds.refresh()
# time.sleep(1)
if ctrl != 'excluded':
mel.eval("addHandle();")
else:
mel.eval("addAnchor();")
created_locs = get_locators_in_scene()
created_loc = [l for l in created_locs if l not in existing_locs][0]
loc_renaming_table[created_loc] = ctrl
for loc in loc_renaming_table.keys():
cmds.rename(loc, loc_renaming_table[loc])
def get_shader_info(mesh):
shapes = cmds.ls(mesh, dag=1, o=1, s=1)
shading_groups = cmds.listConnections(shapes, type='shadingEngine')
shading_groups = list(set(shading_groups))
shader_info = {}
for shading_group in shading_groups:
shader = cmds.ls(cmds.listConnections(shading_group), materials=1)
print shader
cmds.select(shading_group)
shader_mesh = cmds.ls(selection=True, flatten=True)
cmds.select(clear=True)
# print shading_groups
return len(shading_groups)
def print_shader_info():
for m in get_meshes():
print m, get_shader_info(m)
def get_timeline():
start = cmds.playbackOptions(min=True, query=True)
end = cmds.playbackOptions(max=True, query=True)
return int(start), int(end)
def bake_animation_joints_to_locators(jnts=[]):
    '''First part of a two-step operation.
    The second step is the function "bake_animation_locators_to_joints".
    Transfers animation from the given joints to locators with the help of
    constraints, preserving the animation when joints are reparented or scaled.
Args:
jnts (list, optional): joints. If not specified, takes all joints
starting from "Bip01"
Returns:
dict: joint:locator mapping. Used later when returning an animation back
'''
start, end = get_timeline()
if not jnts:
jnts = get_joints()
mapping = {}
constrs = []
# group_name = 'tmp_locs'
for jnt in jnts:
cmds.select(clear=True)
loc_name = cmds.spaceLocator(name=jnt + '_LOC')[0]
pc = cmds.pointConstraint(jnt, loc_name)
oc = cmds.orientConstraint(jnt, loc_name)
# sc = cmds.scaleConstraint(jnt, loc_name)
constrs.append(pc[0])
constrs.append(oc[0])
# constrs.append(sc[0])
mapping[jnt] = loc_name
cmds.bakeResults([l for j, l in mapping.iteritems()],
time=(start, end + 1), sm=True)
cmds.delete(constrs)
cmds.group([l for j, l in mapping.iteritems()], n='tmp_locs')
# cmds.group([l for l in mapping.values()], name=group_name)
return mapping
def bake_animation_locators_to_joints(data):
    '''Second part of the two-step operation.
    The first step is the function "bake_animation_joints_to_locators".
Args:
data (dict, list): joint:locator data.
Returns:
none:
'''
# Convert list of objects to mapping.
# When locators are imported in the other file
if isinstance(data, list):
data = {l[:-4]: l for l in data}
start, end = get_timeline()
for jnt, loc in data.iteritems():
cmds.pointConstraint(loc, jnt)
cmds.orientConstraint(loc, jnt)
# cmds.scaleConstraint(loc, jnt)
cmds.bakeResults([j for j, l in data.iteritems()],
time=(start, end + 1), sm=True)
cmds.delete([loc for jnt, loc in data.iteritems()])
def get_sc_multi(a):
    '''Gets multipliers from a skinned object based on the per-vertex bone influences.
    The skin serves as masked regions for further splitting of blendshapes.
    Bone names are used as prefixes for the masked areas.
    Used later when the template mesh is duplicated.
Args:
a (str): object name
Returns:
dict: with a following structure {bone:{vertex_id:bone_influence}}
'''
sc = get_skin_cluster(a)
if not sc:
exit_message = 'No skin cluster on the %s' % a
sys.exit(exit_message)
bones = cmds.skinCluster(sc, wi=True, q=True)
vtxs_range = cmds.polyEvaluate(a, v=True)
sc_multi = {}
for bone in bones:
per_bone_multi = {}
for id in range(0, vtxs_range + 1):
infl = cmds.skinPercent(sc,
'%s.vtx[%s]' % (a, id),
transform=bone,
ib=0.0001,
query=True)
if not infl or infl == 0:
continue
per_bone_multi[str(id)] = round(infl, 5)
if not per_bone_multi:
continue
sc_multi[bone] = per_bone_multi
if not sc_multi:
sys.exit('Bones under the skin cluster do not have influences on any vertex')
else:
return sc_multi
def get_diff_coords(a, b):
diff = {}
ids = [id for id in a.keys()]
for id in ids:
if a[id] == b[id]:
continue
diff_coords = calc_coords(a[id], b[id], sub=True)
diff[id] = diff_coords
return diff
def set_local_coords(mesh, coords):
for vtx_id, coords in coords.iteritems():
cmds.xform(mesh + '.vtx[%s]' % vtx_id, t=coords, r=True)
def split_blendshape(a, b):
    '''The skin of object "a" is used to divide blendshape "b" into zones.
    Each joint influence becomes a separate blendshape element.
Args:
a (str): object with a skin cluster
b (str): blendshape name that will be divided
Returns:
b_group (str): created group
'''
a_coords = get_vtxs_coords(a, relative=True)
b_coords = get_vtxs_coords(b, relative=True)
diff_coords = get_diff_coords(a_coords, b_coords)
sc_multi = get_sc_multi(a)
b_group = b + '_divided'
b_group = cmds.group(n=b_group, empty=True)
for bone in sc_multi.keys():
per_bone_multi = sc_multi[bone]
diff_coords_multi = {}
for id, multi in per_bone_multi.iteritems():
# If position of a identical to b
if not id in diff_coords.keys():
continue
elif multi == 1:
diff_coords_multi[id] = diff_coords[id]
else:
diff_coords_multi[id] = [
coord * multi for coord in diff_coords[id]]
duplicated = cmds.duplicate(a, name=b + '_' + bone)[0]
set_local_coords(duplicated, diff_coords_multi)
cmds.parent(duplicated, b_group)
return b_group
def batch_split_blendshapes():
    '''Splits blendshapes into zones provided by the skin influences.
    Select the blendshapes first,
    and the template with a skin cluster last.
Args:
None:
Returns:
None:
'''
objects = cmds.ls(sl=True, fl=True, tr=True)
a = objects[-1]
objects.remove(a)
for o in objects:
split_blendshape(a, o)
print o, 'done'
def split_blendshapes_xy_axis(a, b, zx=0, zy=0, cut=0):
    '''Splits a blendshape in two, dividing its movement by axis.
    The first mesh gets the X movement (plus Z scaled by zx) from the original mesh,
    the second gets the Y movement (plus Z scaled by zy).
    Positive or negative Y translation is cut if cut is set to 1 or -1.
Args:
a (str): neutral mesh name
b (str): blendshape name that will be divided
zx (float, optional): Z axis translation multiplier added to "X" mesh
zy (float, optional): Z axis translation multiplier added to 'Y' mesh
cut (int, optional): positive or negative value,
that will be cut from Y axis if met conditions.
No cut if 0.
Returns:
None:
'''
a_coords = get_vtxs_coords(a, relative=True)
b_coords = get_vtxs_coords(b, relative=True)
diff_coords = get_diff_coords(a_coords, b_coords)
x_mesh = cmds.duplicate(a, name=b + '_x')[0]
y_mesh = cmds.duplicate(a, name=b + '_y')[0]
for vtx_id, coords in diff_coords.iteritems():
x_m_coords = [coords[0], 0, coords[-1] * zx]
# Cuts positive or negative value from transform
if cut > 0 < coords[1] or cut < 0 > coords[1]:
y_cut = 0
else:
y_cut = coords[1]
y_m_coords = [0, y_cut, coords[-1] * zy]
cmds.xform(x_mesh + '.vtx[%s]' % vtx_id, t=x_m_coords, r=True)
cmds.xform(y_mesh + '.vtx[%s]' % vtx_id, t=y_m_coords, r=True)
def select_bad_joints(mesh, limit=3):
'''
    Separates out the auto-generated joints that have no useful influence on the skin
'''
sc = get_skin_cluster(mesh)
jnts = cmds.skinCluster(sc, query=True, inf=True)
bad_attr_jnts = list(jnts)
start, end = get_timeline()
attrs = ['translateX', 'translateY',
'translateZ', 'rotateX', 'rotateY', 'rotateZ']
for jnt in jnts:
for attr in attrs:
if jnt in bad_attr_jnts:
for frame in range(start, end + 1):
if frame == start:
init_value = cmds.getAttr(
'%s.%s' % (jnt, attr), time=frame)
else:
value = cmds.getAttr('%s.%s' % (jnt, attr), time=frame)
if not (init_value - limit) < value < (init_value + limit):
bad_attr_jnts.remove(jnt)
break
else:
break
# Another method to detect bad bones.
# This will find bones with zero weight to vertices.
bad_sc_jnts = []
for jnt in jnts:
infl = cmds.skinPercent(sc,
'%s.vtx[*]' % mesh,
transform=jnt,
query=True)
if infl < 0.001:
bad_sc_jnts.append(jnt)
# Gets joints that influence the lips zones.
# These joints will be substracted from bad joints.
# Due to small weights on lips (e.g. Sticky), these would otherwise be selected as "bad".
# Vertices ids are saved for male topology.
vtxs_nums = dict_io(r'u:\face\scripts\config_data\vtx_num_mask.txt', get=True)
affected_vtxs = ['%s.vtx[%s]' % (mesh, vtx) for vtx in vtxs_nums]
lips_jnts = cmds.skinPercent(sc,
affected_vtxs,
query=True,
transform=None,
ib=0.1)
bad_jnts = list(set(bad_attr_jnts + bad_sc_jnts))
# Removes joints that influence the "lip" zone.
bad_jnts = [j for j in bad_jnts if j not in lips_jnts]
if bad_jnts:
cmds.select(bad_jnts)
else:
print 'No bad joints found.'
def get_fxgraph(obj='', print_fxgraph=False):
if not obj:
obj = cmds.ls(sl=True)[0]
bls = get_blendshape_nodes(obj)
if not bls:
sys.exit()
start, end = get_timeline()
fxgraph = ''
for frame in xrange(start, (end + 1)):
for bl in bls:
bl_names = cmds.listAttr(bl + '.w', multi=True)
for bl_name in bl_names:
if cmds.getAttr('%s.%s' % (bl, bl_name), time=frame) == 1:
if print_fxgraph:
print bl_name, frame
fxgraph += '%s_%s\n' % (bl_name, str(frame))
return fxgraph
def get_blendshape_nodes(mesh):
bls = []
bls_set = cmds.ls('blendShape*', type='objectSet')
for bl_set in bls_set:
conns = cmds.listConnections(bl_set)
if mesh in conns:
bls.append(cmds.ls(conns, type='blendShape')[0])
if bls:
return bls
else:
print 'No blendshape connected to', mesh
def orient_driv():
a_jnts = ['driv_Bip01_Neck', 'driv_Bip01_Head']
b_jnts = ['Bip01_Neck', 'Bip01_Head']
i_jnt = 'driv_Bip01_Neck1'
cs = []
if exists(a_jnts + b_jnts + [i_jnt]):
for a, b in zip(a_jnts, b_jnts):
cs.append(cmds.orientConstraint(b, a, mo=True))
cs.append(cmds.orientConstraint(a_jnts, i_jnt, mo=True, w=0.5))
return cs
else:
print 'Not done. Some of the joints are not in scene.'
# Bones that cannot be deleted or replaced by autobones in
# set_auto_to_biped(mesh) function
low_case_skeleton = ['bn_eye_l', 'bn_eye_r',
'bn_tng_01', 'bn_tng_02', 'bn_tng_03', 'bn_jaw_c']
upper_case_skeleton = ['BN_Eyeball_L', 'BN_Eyeball_R',
'BN_Tongue_Back', 'BN_Tongue', 'BN_Tongue_Front', 'BN_Jaw_Pivot']
def parent_to_hi(jnts, mapping):
if len(jnts) == 0:
return
for j, parent in mapping.iteritems():
if j in jnts:
if cmds.objExists(parent):
cmds.parent(j, parent)
jnts.remove(j)
else:
continue
return parent_to_hi(jnts, mapping)
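# Illustrative sketch: `mapping` assigns a parent to each joint; parent_to_hi()
# keeps recursing until every joint whose parent exists in the scene has been
# parented. The joint names reuse the low_case_skeleton naming above; the
# parent hierarchy is assumed to exist in the scene.
def _example_parent_to_hi():
    mapping = {'bn_jaw_c': 'driv_Bip01_Head', 'bn_tng_01': 'bn_jaw_c'}
    parent_to_hi(['bn_jaw_c', 'bn_tng_01'], mapping)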
def set_auto_to_biped(mesh, skeleton='new'):
'''Set autobones generated by DLS plugin into biped hierarchy.
Autobones are renamed and reparented to biped.
The same quantity of biped bones are deleted.
There is no closest-match mapping between auto and biped bones; they are paired in order.
Args:
mesh (str): object skinned with autobones
Returns:
None:
'''
# Gets biped bones hi from head
if skeleton == 'new':
biped_bones = cmds.listRelatives('driv_Bip01_Head', type='joint', ad=True)
excl_bones = low_case_skeleton
elif skeleton == 'old':
biped_bones = cmds.listRelatives('Bip01_Head', type='joint', ad=True)
excl_bones = upper_case_skeleton
else:
sys.exit('skeleton parameter should be "new" or "old"')
[biped_bones.remove(j) for j in excl_bones if j in biped_bones]
# Gets autobones
sc = get_skin_cluster(mesh)
if not sc:
exit_message = 'No skin cluster found on %s' % mesh
sys.exit(exit_message)
auto_bones = cmds.skinCluster(sc, query=True, inf=True)
# Checks if autobones fits biped bones quantity
if len(biped_bones) < len(auto_bones):
exit_message = 'Autobones quantity is greater than biped bones. %s vs %s' % (
len(auto_bones), len(biped_bones))
sys.exit(exit_message)
# Sets auto/biped bones mapping
auto_biped_match = {}
for a, b in zip(auto_bones, biped_bones):
auto_biped_match[a] = b
bake_to_biped(auto_biped_match)
return [j for j in auto_biped_match.values()]
def bake_to_locs(objs):
prefix = '_LOC'
start, end = get_timeline()
locs = []
constrs = []
for obj in objs:
cmds.select(clear=True)
loc_name = cmds.spaceLocator(name=obj + prefix)
locs.append(loc_name[0])
pc = cmds.pointConstraint(obj, loc_name)
constrs.append(pc[0])
oc = cmds.orientConstraint(obj, loc_name)
constrs.append(oc[0])
cmds.bakeResults(locs, time=(start, end + 1), sm=True)
cmds.delete(constrs)
return locs
def bake_from_locs(data):
prefix = '_LOC'
start, end = get_timeline()
# If there is mapping for loc:bone
if isinstance(data, dict):
jnts = [j for l, j in data.iteritems()]
constrs = []
for a, b in data.iteritems():
loc = a + prefix
if not cmds.objExists(loc):
exit = '%s locator not found in scene.' % loc
sys.exit(exit)
constrs.append(cmds.pointConstraint(loc, b)[0])
constrs.append(cmds.orientConstraint(loc, b)[0])
cmds.bakeResults(jnts, time=(start, end + 1), sm=True)
cmds.delete(constrs)
elif isinstance(data, list):
exit = 'Got list. Now not working with lists.'
sys.exit(exit)
else:
exit = 'Got neither list nor dict.'
sys.exit(exit)
def bake_to_biped(data):
start, end = get_timeline()
# If there is mapping for auto:biped
if isinstance(data, dict):
jnts = [biped for auto, biped in data.iteritems()]
auto = [a for a in data.keys()]
keyed_frames = cmds.keyframe(auto, query=True, timeChange=True)
start, end = min(keyed_frames), max(keyed_frames)
cmds.playbackOptions(min=min(keyed_frames), max=max(keyed_frames))
constrs = []
for a, b in data.iteritems():
constrs.append(cmds.pointConstraint(a, b)[0])
constrs.append(cmds.orientConstraint(a, b)[0])
cmds.bakeResults(jnts, time=(start, end + 1), sm=True)
cmds.delete(constrs)
elif isinstance(data, list):
exit = 'Got list. Now not working with lists.'
sys.exit(exit)
else:
exit = 'Got neither list nor dict.'
sys.exit(exit)
def walked(p, ends=''):
files = []
for path, dirs, file_names in os.walk(p):
for file_name in file_names:
if file_name.endswith(ends):
files.append(os.path.join(path, file_name))
return files
def add_prefix_suffix(name, suf=False, pref=True):
for o in cmds.ls(sl=True, flatten=True):
if suf:
cmds.rename(o, o + '_' + name)
elif pref:
cmds.rename(o, name + '_' + o)
else:
print 'Set suffix or prefix to rename'
def dir_import():
folder_path_list = cmds.fileDialog2(fm=3)
folder_path = folder_path_list[0] + '/'
files = cmds.getFileList(folder=folder_path)
if len(files) == 0:
cmds.warning("No files found")
else:
for f in files:
cmds.file(folder_path + f, i=True)
def key_blendshapes(mesh, start=0):
bl = get_blendshape_node(mesh)
if not bl:
sys.exit('No blendshape node found on object.')
targets = cmds.blendShape(bl, t=True, q=True)
# If blendshape meshes were deleted from scene
if not targets:
targets = cmds.listAttr(bl + '.w', multi=True)
# Generate a dict that sets each blendshape target to 1.0 on its own frame
weights_bls = {}
for t in range(len(targets)):
weight = [(i, 0) for i in range(len(targets))]
weight[t] = (t, 1)
weights_bls[start + t + 1] = weight
# Keys start and end of teeth graph
end = start + len(targets)
cmds.setKeyframe(bl, time=start)
cmds.setKeyframe(bl, time=end + 1)
for frame, bl_weight in weights_bls.iteritems():
cmds.blendShape(bl, w=bl_weight, edit=True)
cmds.setKeyframe(bl, time=frame)
cmds.playbackOptions(min=start, max=end+1)
return end
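# Illustrative sketch (hypothetical mesh name): keys every blendshape target of
# 'teeth_mesh' to 1.0 on its own frame starting at frame 0 and returns the last frame.
def _example_key_blendshapes():
    last_frame = key_blendshapes('teeth_mesh', start=0)
    print last_frame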
def dls(mesh, target_mesh=None, num_jnts=220, infls=4, iters=4):
import DLS
DLS.launch()
cmds.select(mesh)
pruneBelow = 0.001
isKeepOriginal = True
isDeleteDeltaMush = False
numBones = num_jnts
maxInfs = infls
start, end = get_timeline()
maxIters = iters
epsilon = 1.0
targetMesh = target_mesh
isAlternativeUpdate = False
DLS.core.learningFunc.solve(numBones, maxInfs, targetMesh, isAlternativeUpdate,
start, end, maxIters, isKeepOriginal, pruneBelow,
epsilon, isDeleteDeltaMush)
def get_m_time(f_path):
import time
import os
e_time = os.path.getmtime(f_path)
return time.strftime('%Y.%m.%d %H:%M:%S', time.localtime(e_time))
def find_identical_meshes(regex, vtxs_check=True, vtxs=[]):
'''Searches for all polygon meshes in scene that matches regex expression
and optional - vertex count.
Preset vertex count fits two main types of head meshes, male, female,
and male neck cut.
Args:
regex (str): regular expression
vtxs_check (boolean, optional): condition, if to check the vertex count on
top of the regular expression
vtxs (list, optional): vertices count (int)
Returns:
list: polygon meshes that match search parameters
Examples:
>>> find_identical_meshes('(_head|head_)')
'''
# 2770 is for the Krest head with a cut neck
if not vtxs:
vtxs = [2782, 3335, 2770]
meshes = get_meshes()
found = []
[found.append(m) for m in meshes if re.search(regex, m)]
if not found:
return
# Meshes I'm searching for can be messily named,
# so the only way to find them is to compare by vertices quantity.
if not vtxs_check:
return found
meshes_filtered = [m for m in found for vtx in vtxs if cmds.polyEvaluate(m, v=True) == vtx]
if meshes_filtered:
return meshes_filtered
def find_static_blendshapes(mesh='', rounded=1):
def get_vtxs_coords_rounded(mesh, relative=False):
vtxs_coords = {}
vtxs = cmds.ls(mesh + '.vtx[*]', fl=True)
for vtx in vtxs:
if relative:
coord = cmds.xform(vtx, q=True, t=True, r=True)
else:
coord = cmds.xform(vtx, q=True, t=True, ws=True)
vtx_id = ''.join(i for i in vtx.split('.')[1] if i.isdigit())
vtxs_coords[vtx_id] = [round(c, rounded) for c in coord]
return vtxs_coords
if not mesh:
mesh = cmds.ls(sl=True)
if mesh:
mesh = mesh[0]
else:
print '# Select something.'
return
init_data = get_vtxs_coords_rounded(mesh, relative=True)
not_changed_meshes = []
for m in get_meshes():
m_data = get_vtxs_coords_rounded(m, relative=True)
if init_data == m_data:
not_changed_meshes.append(m)
if not_changed_meshes:
cmds.select(not_changed_meshes)
else:
print '# No static blendshapes found for %s.' % mesh
def batch_dir_import(ext='mb'):
folder_path_list = cmds.fileDialog2(fm=3)
folder_path = folder_path_list[0] + '/'
files = cmds.getFileList(folder=folder_path, filespec='*.%s' % ext)
if len(files) == 0:
cmds.warning("No files found")
else:
for f in files:
cmds.file(folder_path + f, i=True)
def rename_ps(add='', data=[], prefix=True, suffix=False):
if not data:
data = cmds.ls(sl=True, fl=True)
for d in data:
name = d.split('|')[-1]
name = name.split(':')[-1]
if prefix:
d_renamed = add + '_' + name
elif suffix:
d_renamed = name + '_' + add
else:
continue
try:
cmds.rename(d, d_renamed)
except Exception:
print '# Cannot rename', d
def get_blendshape_targets(bl):
return cmds.listAttr(bl + '.w', multi=True)
|
'''The dsp module contains functions pertaining to the actual generation,
manipulation, and analysis of sound. This ranges from generating sounds to
calculating signal-to-noise ratio.
'''
###############################################################################
import sys, os
import inspect
import numpy as np
from numpy.fft import fft, rfft, ifft, irfft
from scipy.signal import hamming, hann, resample, iirfilter, lfilter
import librosa
import math
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
packagedir = os.path.dirname(currentdir)
sys.path.insert(0, packagedir)
import soundpy as sp
def generate_sound(freq=200, amplitude=0.4, sr=8000, dur_sec=0.25):
'''Generates a sound signal with the provided parameters. Signal begins at 0.
Parameters
----------
freq : int, float
The frequency in Hz the signal should have (default 200 Hz). This pertains
to the number of oscillations per second.
amplitude : int, float
The parameter controlling how much energy the signal should have.
(default 0.4)
sr : int
The sampling rate of the signal, or how many samples make up the signal
per second. (default 8000)
dur_sec : int, float
The duration of the generated signal in seconds. (default 0.25)
Returns
-------
sound_samples : np.ndarray [size = (int(sr * dur_sec),)]
The samples of the generated sound
sr : int
The sample rate of the generated signal
Examples
--------
>>> sound, sr = generate_sound(freq=5, amplitude=0.5, sr=5, dur_sec=1)
>>> sound
array([ 0.000000e+00, 5.000000e-01, 3.061617e-16, -5.000000e-01, -6.123234e-16])
>>> sr
5
'''
# The variable `time` holds the evenly spaced time points at which samples are taken:
time = get_time_points(dur_sec, sr=sr)
# unit circle: 2pi equals a full circle
# https://www.marksmath.org/visualization/unit_circle/
full_circle = 2 * np.pi
sound_samples = amplitude * np.sin((freq*full_circle)*time)
return sound_samples, sr
def get_time_points(dur_sec,sr):
'''Get evenly spaced time points from zero to length of `dur_sec`.
The time points align with the provided sample rate, making it easy
to plot a signal with a time line in seconds.
Parameters
----------
dur_sec : int, float
The amount of time in seconds
sr : int
The sample rate relevant for the signal
Returns
-------
time : np.ndarray [size = (num_time_points,)]
Examples
--------
>>> # 50 milliseconds at sample rate of 100 (100 samples per second)
>>> x = get_time_points(0.05,100)
>>> x.shape
(5,)
>>> x
array([0. , 0.0125, 0.025 , 0.0375, 0.05 ])
'''
time = np.linspace(0, dur_sec, int(np.floor(dur_sec*sr)))
return time
def generate_noise(num_samples, amplitude=0.025, random_seed=None):
'''Generates noise to be of a certain amplitude and number of samples.
Useful for adding noise to another signal of length `num_samples`.
Parameters
----------
num_samples : int
The number of total samples making up the noise signal.
amplitude : float
Allows the noise signal to be louder or quieter. (default 0.025)
random_seed : int, optional
Useful for repeating 'random' noise samples.
Examples
--------
>>> noise = generate_noise(5, random_seed = 0)
>>> noise
array([0.04410131, 0.01000393, 0.02446845, 0.05602233, 0.04668895])
'''
if random_seed is not None:
np.random.seed(random_seed)
noise = amplitude * np.random.randn(num_samples)
return noise
def set_signal_length(samples, numsamps):
'''Sets audio signal to be a certain length. Zeropads if too short.
Useful for setting signals to be a certain length, regardless of how
long the audio signal is.
Parameters
----------
samples : np.ndarray [size = (num_samples, num_channels), or (num_samples,)]
The array of sample data to be zero padded.
numsamps : int
The desired number of samples.
Returns
-------
data : np.ndarray [size = (numsamps, num_channels), or (numsamps,)]
Copy of samples zeropadded or limited to `numsamps`.
Examples
--------
>>> import numpy as np
>>> input_samples = np.array([1,2,3,4,5])
>>> output_samples = set_signal_length(input_samples, numsamps = 8)
>>> output_samples
array([1, 2, 3, 4, 5, 0, 0, 0])
>>> output_samples = set_signal_length(input_samples, numsamps = 4)
>>> output_samples
array([1, 2, 3, 4])
'''
data = samples.copy()
if data.shape[0] < numsamps:
diff = numsamps - data.shape[0]
if len(data.shape) > 1:
signal_zeropadded = np.zeros(
(data.shape[0] + int(diff),data.shape[1]))
else:
signal_zeropadded = np.zeros(
(data.shape[0] + int(diff),))
for i, row in enumerate(data):
signal_zeropadded[i] = row
data = signal_zeropadded
else:
if len(data.shape) > 1:
data = data[:numsamps,:]
else:
data = data[:numsamps]
# ensure returned data same dtype as input
data = sp.utils.match_dtype(data, samples)
return data
# works for stereo sound (raw signal data)
def scalesound(data, max_val = 1, min_val=None):
'''Scales the input array to range between `min_val` and `max_val`.
Parameters
----------
data : np.ndarray [size = (num_samples,) or (num_samples, num_channels)]
Original samples
max_val : int, float
The maximum value the dataset is to range from (default 1)
min_val : int, float, optional
The minimum value the dataset is to range from. If set to None,
will be set to the opposite of `max_val`. E.g. if `max_val` is set to
0.8, `min_val` will be set to -0.8. (default None)
Returns
-------
samples : np.ndarray [size = (num_samples,) or (num_samples, num_channels)]
Copy of original data, scaled to the min and max values.
Examples
--------
>>> import numpy as np
>>> np.random.seed(0)
>>> input_samples = np.random.random_sample((5,))
>>> input_samples
array([0.5488135 , 0.71518937, 0.60276338, 0.54488318, 0.4236548 ])
>>> input_samples.max()
0.7151893663724195
>>> input_samples.min()
0.4236547993389047
>>> # default setting: between -1 and 1
>>> output_samples = scalesound(input_samples)
>>> output_samples
array([-0.14138 ,1., 0.22872961, -0.16834299, -1.])
>>> output_samples.max()
1.0
>>> output_samples.min()
-1.0
>>> # range between -100 and 100
>>> output_samples = scalesound(input_samples, max_val = 100, min_val = -100)
>>> output_samples
array([ -14.13800026,100., 22.87296052,-16.83429866,-100.])
>>> output_samples.max()
100.0
>>> output_samples.min()
-100.0
'''
if min_val is None:
min_val = -max_val
samples = data.copy()
if isinstance(samples, np.ndarray):
samples = np.interp(samples,
(samples.min(), samples.max()),
(min_val, max_val))
else:
samples = np.interp(samples,
(min(samples), max(samples)),
(min_val, max_val))
return samples
def shape_samps_channels(data):
'''Returns data in shape (num_samps, num_channels)
Parameters
----------
data : np.ndarray [size= (num_samples,) or (num_samples, num_channels), or (num_channels, num_samples)]
The data that needs to be checked for correct format
Returns
-------
data : np.ndarray [size = (num_samples,) or (num_samples, num_channels)]
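Examples
--------
Illustrative only; channels-first data is transposed to (num_samples, num_channels).
>>> import numpy as np
>>> data = np.zeros((2, 100))
>>> shape_samps_channels(data).shape
(100, 2)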
'''
if len(data.shape) == 1:
return data
if len(data.shape) > 2:
raise ValueError('Expected 2 dimensional data: (num_samples, num_channels,) not '+\
'shape {}'.format(data.shape))
if data.shape[0] < data.shape[1]:
# assumes number of samples will be greater than number of channels
data = data.T
assert data.shape[0] > data.shape[1]
return data
def resample_audio(samples, sr_original, sr_desired):
'''Allows audio samples to be resampled to desired sample rate.
Parameters
----------
samples : np.ndarray [size = (num_samples,)]
The samples to be resampled.
sr_original : int
The original sample rate of the samples.
sr_desired : int
The desired sample rate of the samples.
Returns
-------
resampled : np.ndarray [size = (num_samples_resampled,)]
The resampled samples.
sr_desired : int
The newly applied sample rate
Examples
--------
>>> import numpy as np
>>> # example samples from 5 millisecond signal with sr 100 and frequency 10
>>> input_samples = np.array([0.00e+00, 2.82842712e-01, 4.000e-01, 2.82842712e-01, 4.89858720e-17])
>>> # we want to resample to 80 instead of 100 (for this example's sake)
>>> output_samples, sr = resample_audio(input_samples, sr_original = 100, sr_desired = 80)
>>> output_samples
array([-2.22044605e-17, 3.35408001e-01, 3.72022523e-01, 6.51178161e-02])
'''
time_sec = len(samples)/sr_original
num_samples = int(time_sec * sr_desired)
resampled = resample(samples, num_samples)
return resampled, sr_desired
def stereo2mono(data):
'''If sound data has multiple channels, reduces to first channel
Parameters
----------
data : numpy.ndarray
The series of sound samples, with 1+ columns/channels
Returns
-------
data_mono : numpy.ndarray
The series of sound samples, with first column
Examples
--------
>>> import numpy as np
>>> data = np.linspace(0,20)
>>> data_2channel = data.reshape(25,2)
>>> data_2channel[:5]
array([[0. , 0.40816327],
[0.81632653, 1.2244898 ],
[1.63265306, 2.04081633],
[2.44897959, 2.85714286],
[3.26530612, 3.67346939]])
>>> data_mono = stereo2mono(data_2channel)
>>> data_mono[:5]
array([0. , 0.81632653, 1.63265306, 2.44897959, 3.26530612])
'''
data_mono = data.copy()
if len(data.shape) > 1 and data.shape[1] > 1:
# ensure data is samples first, channels second
data_mono = sp.dsp.shape_samps_channels(data_mono)
data_mono = np.take(data_mono,0,axis=-1)
return data_mono
def add_backgroundsound(audio_main, audio_background, sr, snr = None,
pad_mainsound_sec = None, total_len_sec=None,
wrap = False, stationary_noise = True,
random_seed = None, extend_window_ms = 0,
remove_dc = False, mirror_sound = False, clip_at_zero = True,
**kwargs):
'''Adds a sound (i.e. background noise) to a target signal. Stereo sound should work.
If the sample rates of the two audio samples do not match, the sample
rate of `audio_main` will be applied. (i.e. the `audio_background` will
be resampled). If you have issues with clicks at the beginning or end of
signals, see `soundpy.dsp.clip_at_zero`.
Parameters
----------
audio_main : str, pathlib.PosixPath, or np.ndarray [size=(num_samples,) or (num_samples, num_channels)]
Sound file of the main sound (will not be modified; only delayed if
specified). If not a path or string, it should be sample data corresponding to the provided sample rate.
audio_background : str, pathlib.PosixPath, or np.ndarray [size=(num_samples,)]
Sound file of the background sound (will be modified /repeated to match
or extend the length indicated). If not of type pathlib.PosixPath or
string, it should be sample data corresponding to the provided sample rate.
sr : int
The sample rate of sounds to be added together. Note: sr of 44100 or higher
is suggested.
snr : int, float, list, tuple
The sound-to-noise-ratio of the target and background signals. Note: this
is an approximation and needs further testing and development to be
used as an official measurement of snr. If no SNR provided, signals
will be added together as-is. (default None)
pad_mainsound_sec : int or float, optional
Length of time in seconds the background sound will pad the main sound.
For example, if `pad_mainsound_sec` is set to 1, one second of the
`audio_background` will be played before `audio_main` starts as well as
after the `main audio` stops.
(default None)
total_len_sec : int or float, optional
Total length of combined sound in seconds. If none, the sound will end
after the (padded) target sound ends (default None).
wrap : bool
If False, the random selection of sound will be limited to end by
the end of the audio file. If True, the random selection will wrap to
beginning of the audio file if extends beyond the end of the audio file.
(default False)
stationary_noise : bool
If False, `soundpy.feats.get_vad_stft` will be applied to noise to get
energy of the active noise in the signal. Otherwise energy will be
collected via `soundpy.dsp.get_stft`. (default True)
random_seed : int
If provided, the 'random' section of noise will be chosen using this seed.
(default None)
extend_window_ms : int or float
The number of milliseconds the voice activity detected should be padded with.
This might be useful to ensure sufficient amount of activity is calculated.
(default 0)
remove_dc : bool
If the dc bias should be removed. This aids in the removal of clicks.
See `soundpy.dsp.remove_dc_bias`.
(default False)
**kwargs : additional keyword arguments
The keyword arguments for soundpy.files.loadsound
Returns
-------
combined : numpy.ndarray [shape=(num_samples) or (num_samples, num_channels)]
The samples of the sounds added together
snr : int, float
The updated signal-to-noise ratio. Due to the non-stationary state of speech and sound in general,
this value is only an approximation.
References
----------
Yi Hu and Philipos C. Loizou : original authors
Copyright (c) 2006 by Philipos C. Loizou
SIP-Lab/CNN-VAD/ : GitHub Repo
Copyright (c) 2019 Signal and Image Processing Lab
MIT License
See Also
--------
soundpy.files.loadsound
Loads audiofiles.
soundpy.dsp.snr_adjustnoiselevel
Calculates how much to adjust noise signal to achieve SNR.
soundpy.feats.get_vad_stft
Returns stft matrix of only voice active regions
soundpy.feats.get_stft
Returns stft matrix of entire signal
'''
if sr < 44100:
import warnings
msg = 'Performance of signal to noise analysis is improved with '+\
'sample rates at or higher than 44100 Hz. Current sample rate '+\
'set at {}.'.format(sr)
warnings.warn(msg)
input_type_main = sp.utils.path_or_samples(audio_main)
input_type_background = sp.utils.path_or_samples(audio_background)
if 'path' in input_type_main:
target, sr = sp.loadsound(audio_main, sr = sr,
remove_dc = remove_dc,**kwargs)
elif 'samples' in input_type_main:
target, sr = audio_main, sr
if 'path' in input_type_background:
sound2add, sr2 = sp.loadsound(audio_background, sr = sr,
remove_dc = remove_dc,**kwargs)
elif 'samples' in input_type_background:
sound2add, sr2 = audio_background, sr
if sr != sr2:
sound2add, sr2 = sp.dsp.resample_audio(sound2add, sr2, sr)
assert sr2 == sr
# make background same shape as signal
if len(target.shape) != len(sound2add.shape):
# ensure in shape (num_samples,) or (num_samples, num_channels)
target = sp.dsp.shape_samps_channels(target)
if len(target.shape) > 1:
num_channels = target.shape[1]
else:
num_channels = 1
sound2add = sp.dsp.apply_num_channels(sound2add, num_channels)
if remove_dc:
target = sp.dsp.remove_dc_bias(target)
sound2add = sp.dsp.remove_dc_bias(sound2add)
target_stft, __ = sp.feats.get_vad_stft(target, sr,
extend_window_ms = extend_window_ms)
if not target_stft.any():
import warnings
msg = '\nNo voice activity detected in target signal.'
warnings.warn(msg)
target_stft = sp.feats.get_stft(target,sr)
if stationary_noise:
noise_stft = sp.feats.get_stft(sound2add, sr)
else:
# get energy of noise when active (e.g. car honking)
noise_stft, __ = sp.feats.get_vad_stft(sound2add, sr,
extend_window_ms = extend_window_ms)
if not noise_stft.any():
noise_stft = sp.feats.get_stft(sound2add, sr)
target_power = np.abs(target_stft)**2
noise_power = np.abs(noise_stft)**2
target_energy = np.mean(target_power)
noise_energy = np.mean(noise_power)
if snr is not None:
if isinstance(snr, list) or isinstance(snr, tuple):
snr = np.random.choice(snr)
elif isinstance(snr, int) or isinstance(snr, float) or isinstance(snr, np.int_) \
or isinstance(snr, np.float_):
pass
else:
raise TypeError('Function `add_backgroundsound` expects parameter '+\
'`snr` to be an int or float or a list / tuple of ints or floats, '+\
'not of type {}.'.format(type(snr)))
# see soundpy.dsp.snr_adjustnoiselevel
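# Derivation sketch: with SNR_dB = 10*log10(P_target / P_noise), hitting the
# requested `snr` needs a noise power of P_target / 10**(snr/10); since power
# scales with amplitude squared, the noise samples are scaled by the square
# root of (current power ratio / desired power ratio). The 1e-6 term only
# guards against division by zero for silent noise.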
adjust_sound = (np.sqrt(target_energy/(noise_energy+1e-6) / (10**(snr/10))))
sound2add *= adjust_sound
# get SNR where voice activity is detected.
new_snr = sp.dsp.get_vad_snr(target, sound2add, sr=sr)
if pad_mainsound_sec is None:
pad_mainsound_sec = 0
num_padding_samples = int(sr*pad_mainsound_sec)*2 #pad on both sides of sound
if total_len_sec is not None:
total_samps = int(sr*total_len_sec)
else:
total_samps = len(target) + num_padding_samples
if total_samps < len(target) + num_padding_samples:
diff = len(target) + num_padding_samples - total_samps
import warnings
warnings.warn('The length of `audio_main` and `pad_mainsound_sec `'+\
'exceeds `total_len_sec`. {} samples from '.format(diff)+\
'`audio_main` will be cut off in '+\
'the `combined` audio signal.')
# make the background sound match the length of total samples
if len(sound2add) < total_samps:
# if shorter than total_samps, extend the noise
sound2add = sp.dsp.apply_sample_length(sound2add, total_samps,
mirror_sound = mirror_sound,
clip_at_zero = clip_at_zero)
else:
# otherwise, choose random selection of noise
sound2add = sp.dsp.clip_at_zero(sound2add)[:-1]
sound2add = sp.dsp.random_selection_samples(sound2add,
total_samps,
wrap = wrap,
random_seed = random_seed)
# separate samples to add to the target signal
target_sound = sound2add[num_padding_samples//2:len(target) \
+ num_padding_samples//2]
# If target is longer than indicated length, shorten it
if len(target_sound) < len(target):
target = target[:len(target_sound)]
combined = target_sound + target
if remove_dc:
combined = sp.dsp.remove_dc_bias(combined)
if pad_mainsound_sec:
# set aside samples for beginning delay (if there is one)
beginning_pad = sound2add[:num_padding_samples//2]
ending_pad = sound2add[num_padding_samples//2+len(target):]
combined = np.concatenate((beginning_pad, combined, ending_pad))
if len(combined) > total_samps:
combined = combined[:total_samps]
elif len(combined) < total_samps:
# set aside ending samples for ending (if sound is extended)
ending_sound = sound2add[len(target)+num_padding_samples:total_samps]
combined = np.concatenate((combined, ending_sound))
return combined, new_snr
def hz_to_mel(freq):
'''Converts frequency to Mel scale
Parameters
----------
freq : int or float or array like of ints / floats
The frequency/ies to convert to Mel scale.
Returns
-------
mel : int or float or array of ints / floats
The frequency/ies in Mel scale.
References
----------
https://en.wikipedia.org/wiki/Mel_scale#Formula
Fayek, H. M. (2016). Speech Processing for Machine Learning: Filter banks, Mel-Frequency Cepstral Coefficients (MFCCs) and What’s In-Between. Retrieved from https://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html
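Examples
--------
Illustrative values; by construction 1000 Hz maps to roughly 1000 mel.
>>> round(hz_to_mel(1000), 1)
1000.0
>>> round(hz_to_mel(0), 1)
0.0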
'''
mel = (2595 * np.log10(1 + freq / 700))
return mel
def mel_to_hz(mel):
'''Converts Mel item or list to frequency/ies.
Parameters
----------
mel : int, float, or list of ints / floats
Mel item(s) to be converted to Hz.
Returns
-------
freq : int, float, or list of ints / floats
The converted frequency/ies
References
----------
https://en.wikipedia.org/wiki/Mel_scale#Formula
Fayek, H. M. (2016). Speech Processing for Machine Learning: Filter banks, Mel-Frequency Cepstral Coefficients (MFCCs) and What’s In-Between. Retrieved from https://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html
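Examples
--------
Illustrative round trip with `hz_to_mel`:
>>> round(mel_to_hz(hz_to_mel(440)), 1)
440.0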
'''
freq = (700 * (10**(mel / 2595) -1))
return freq
def fbank_filters(fmin, fmax, num_filters):
'''Calculates the mel filterbanks given a min and max frequency and `num_filters`.
Parameters
----------
fmin : int, float
Minimum frequency relevant in signal.
fmax : int, float
Maximum frequency relevant in signal.
num_filters : int
The number of evenly spaced filters (according to mel scale) between the `fmin`
and `fmax` frequencies.
Returns
-------
mel_points : np.ndarray [size=(num_filters,)]
An array of floats containing evenly spaced filters (according to mel scale).
References
----------
Fayek, H. M. (2016). Speech Processing for Machine Learning: Filter banks, Mel-Frequency Cepstral Coefficients (MFCCs) and What’s In-Between. Retrieved from https://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html
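Examples
--------
Illustrative only; `num_filters` + 2 evenly spaced mel points are returned.
>>> mel_points = fbank_filters(20, 8000, num_filters=4)
>>> mel_points.shape
(6,)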
'''
if fmin > 0:
low_freq_mel = sp.dsp.hz_to_mel(fmin)
else:
low_freq_mel = 0
high_freq_mel = sp.dsp.hz_to_mel(fmax)
mel_points = np.linspace(low_freq_mel, high_freq_mel, num_filters +2)
return mel_points
def sinosoidal_liftering(mfccs, cep_lifter = 22):
'''Reduces influence of higher coefficients; found useful in automatic speech recognition.
Parameters
----------
mfccs : np.ndarray [shape=(num_samples, num_mfcc)]
The matrix containing mel-frequency cepstral coefficients.
cep_lifter : int
The amount to apply `sinosoidal_liftering`. (default 22)
References
----------
Fayek, H. M. (2016). Speech Processing for Machine Learning: Filter banks, Mel-Frequency Cepstral Coefficients (MFCCs) and What’s In-Between. Retrieved from https://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html
'''
(nframes, ncoeff) = mfccs.shape
n = np.arange(ncoeff)
lift = 1 + (cep_lifter / 2) * np.sin(np.pi * n / cep_lifter)
mfccs *= lift
return mfccs
def index_at_zero(samples, num_dec_places=2):
'''Finds indices of start and end of utterance, given amplitude strength.
Parameters
----------
samples : numpy.ndarray [size= (num_samples,) or (num_samples, num_channels)]
The samples to index where the zeros surrounding speech are located.
num_dec_places : int
The number of decimal places to which the lowest value in `samples` should
be rounded. (default 2)
Returns
-------
f_0 : int
The index of the last occurring zero, right before speech or sound begins.
l_0 : int
The index of the first occurring zero, after speech ends.
Examples
--------
>>> signal = np.array([-1, 0, 1, 2, 3, 2, 1, 0, -1, -2, -3, -2, -1, 0, 1])
>>> zero_1, zero_2 = index_at_zero(signal)
>>> # +1 to include zero_2 in signal
>>> signal[zero_1:zero_2+1]
[ 0 1 2 3 2 1 0 -1 -2 -3 -2 -1 0]
>>> # does not assume a zero precedes any sample
>>> signal = np.array([1, 2, 1, 0, -1, -2, -1, 0, 1, 2, 1])
>>> zero_1, zero_2 = index_at_zero(signal)
>>> signal[zero_1:zero_2+1]
[ 0 -1 -2 -1 0]
'''
almost_zero = 1e-1
original_shape = samples.shape
samps = samples.copy()
if len(original_shape) > 1:
# if multiple channels find where it is 0 across all channels
samps = sp.dsp.average_channels(samps)
min_samp = np.argmin(np.abs(samps))
# in some instances, stored as numpy array
if isinstance(samps[min_samp], np.ndarray):
assert len(samps[min_samp]) == 1
min_samp = samps[min_samp][0]
else:
min_samp = samps[min_samp]
if round(min_samp,num_dec_places) <= almost_zero:
almost_zero += min_samp
# find first instance of zero:
f_0_etc = np.where(np.abs(samps) <= almost_zero)
if len(f_0_etc[0]) > 0:
# get index of first zero
for i, index in enumerate(f_0_etc[0]):
# if more silence follows, adjust f_0
if i == len(f_0_etc[0])-1:
import warnings
warnings.warn('\n\nWarning: Only zeros found in signal.\n\n')
f_0 = 0
else:
if index+1 != f_0_etc[0][i+1]:
f_0 = index
break
else:
# no zero found
f_0 = 0
# find end of utterance last zero
l_0_etc = np.where(np.abs(np.flip(samps)) <= almost_zero)
if len(l_0_etc[0]) > 1:
# get index of first zero
for i, index in enumerate(l_0_etc[0]):
# if more silence follows, adjust l_0
if i == len(l_0_etc[0])-1:
# warning should get called for f_0
#import warnings
#warnings.warn('\n\nWarning: Only zeros found in signal.\n\n')
l_0 = 0
else:
if index+1 != l_0_etc[0][i+1]:
l_0 = index
break
else:
# no zeros found
l_0 = 0
if l_0 != 0:
l_0 = len(samps) - l_0 - 1
else:
l_0 = len(samps) - 1
try:
assert f_0 != l_0
except AssertionError:
import warnings
warnings.warn('\n\nWarning: only one zero was found. Returning '+\
'sample indices that encompass more energy.')
if sum(np.abs(samps[f_0:])) > sum(np.abs(samps[:f_0])):
f_0, l_0 = 0, l_0
else:
f_0, l_0 = f_0, len(samps)-1
return f_0, l_0
def clip_at_zero(samples, samp_win = None, neg2pos = True, **kwargs):
'''Clips the signal at samples close to zero.
Clipping occurs where the signal crosses the zero line from negative to positive. This
clipping process allows for a smoother transition of audio, especially when concatenating audio.
Parameters
----------
samples : np.ndarray [shape = (num_samples, ) or (num_samples, num_channels)]
The array containing sample data. Should work on stereo sound.
start_with_zero : bool
If True, the returned array will begin with 0 (or close to 0). Otherwise
the array will end with 0.
neg2pos : bool
If True, the returned array will begin with positive values and end with
negative values. Otherwise, the array will be returned with the first
zeros detected, regardless of surrounding positive or negative values.
samp_win : int, optional
The window of samples to apply when clipping at zero crossings. The zero
crossings adjacent to the main signal will be used. This is useful to remove
already existing clicks within the signal, often found at the beginning and / or
end of signals.
kwargs : additional keyword arguments
Keyword arguments for `soundpy.dsp.index_at_zero`.
Warning
-------
If only one zero found.
Examples
--------
>>> sig = np.array([-2,-1,0,1, 2, 1, 0, -1, -2, -1, 0, 1, 2, 1,0])
>>> clip_at_zero(sig) # defaults
[ 0 1 2 1 0 -1 -2 -1 0]
>>> # finds first and last instance of zeros, regardless of surrounding
>>> # negative or positive values in signal
>>> clip_at_zero(sig, neg2pos = False)
[ 0 1 2 1 0 -1 -2 -1 0 1 2 1 0]
>>> # avoid clicks at start of signal
>>> sig = np.array([0,-10,-20,-1,0,1, 2, 1, 0, -1, -2, -1, 0, 1, 2, 1,0])
>>> clip_at_zero(sig, samp_win = 5)
[ 0 1 2 1 0 -1 -2 -1 0]
'''
almost_zero = 1e-1
original_shape = samples.shape
samps = samples.copy()
if samp_win is not None:
samps_beg = samps[:samp_win]
samps_end = samps[-samp_win:]
# find last instance of zero within window at beginning of signal
__, f_0 = index_at_zero(samps_beg)
# find first instance of zero within window at end of signal
l_0, __ = index_at_zero(samps_end)
# match l_0 to original samples
l_0 += len(samps)-samp_win
else:
f_0, l_0 = index_at_zero(samps)
# ensure same shape as original_shape
samps = samples[f_0:l_0+1]
# ensure beginning of signal starts positive and ends negative
if not neg2pos:
return samps
try:
if len(samps.shape) > 1:
beg_pos_neg = sum(samps[:3,0])
end_pos_neg = sum(samps[-4:,0])
else:
beg_pos_neg = sum(samps[:3])
end_pos_neg = sum(samps[-4:])
except IndexError:
raise ValueError('Function clip_at_zero can only be applied to arrays '+\
'longer than 5 samples.\n\n')
if beg_pos_neg > 0 and end_pos_neg < 0:
return samps
# try to cut at different zero but only
# if more than 1 zero left in signal:
if len(np.where(samps <= almost_zero)[0]) > 1:
if beg_pos_neg > 0 and end_pos_neg > 0:
# won't include the last zero
samps_no_last_zero = samps[:-1]
f_0, l_0 = index_at_zero(samps_no_last_zero)
samps = samps_no_last_zero[f_0:l_0+1]
elif beg_pos_neg < 0:
if end_pos_neg < 0:
# won't include the first zero
samps_no_first_zero = samps[f_0+1::]
f_0, l_0 = index_at_zero(samps_no_first_zero)
samps = samps_no_first_zero[f_0:l_0+1]
else:
samps_no_first_last_zero = samps[f_0+1:-1]
f_0, l_0 = index_at_zero(samps_no_first_last_zero)
samps = samps_no_first_last_zero[f_0:l_0+1]
try:
if len(samps.shape) > 1:
assert sum(samps[:2,0]) > 0 and \
sum(samps[-2:,0]) < 0
else:
assert sum(samps[:2]) > 0 and sum(samps[-2:]) < 0
except AssertionError:
import warnings
warnings.warn('\n\nWarning: was not able to clip at zero where '+\
'`samples` begin positive and end negative.\n\n')
return samps
def remove_dc_bias(samples, samp_win = None):
'''Removes DC bias by subtracting mean from sample data.
Seems to work best without samp_win.
# TODO add moving average?
Parameters
----------
samples : np.ndarray [shape=(samples, num_channels) or (samples)]
The sample data to center around zero. This works on both mono and stereo data.
samp_win: int, optional
Apply subtraction of mean at windows - experimental. (default None)
Returns
-------
samps : np.ndarray [shape=(samples, num_channels) or (samples)]
The `samples` with zero mean.
References
----------
Lyons, Richard. (2011). Understanding Digital Signal Processing (3rd Edition).
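Examples
--------
Illustrative only: a constant offset of 1.0 is removed from the samples.
>>> import numpy as np
>>> remove_dc_bias(np.array([0.5, 1.5, 0.5, 1.5]))
array([-0.5,  0.5, -0.5,  0.5])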
'''
samps = samples.copy()
if samp_win is not None:
subframes = math.ceil(len(samples) / samp_win)
for frame in range(subframes):
section = samps[frame * samp_win : frame * samp_win + samp_win]
ave = np.mean(section)
samps[frame * samp_win : frame * samp_win + samp_win] -= ave
else:
ave = np.mean(samps)
samps -= ave
return samps
def apply_num_channels(sound_data, num_channels):
'''Ensures `data` has indicated `num_channels`.
To increase number of channels, the first column will be duplicated. To limit
channels, channels will simply be removed.
Parameters
----------
sound_data : np.ndarray [size= (num_samples,) or (num_samples, num_channels)]
The data to adjust the number of channels
num_channels : int
The number of channels desired
Returns
-------
data : np.ndarray [size = (num_samples, num_channels)]
Examples
--------
>>> import numpy as np
>>> data = np.array([1, 1, 1, 1])
>>> data_3d = apply_num_channels(data, 3)
>>> data_3d
array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
>>> data_2d = apply_num_channels(data_3d, 2)
>>> data_2d
array([[1, 1],
[1, 1],
[1, 1],
[1, 1]])
'''
if len(sound_data.shape)== 1:
data = np.expand_dims(sound_data, axis=1)
else:
data = sound_data
diff = num_channels - data.shape[1]
if diff < 0:
# limit number of channels
data = data[:,:num_channels]
return data
elif diff == 0:
# no change necessary
return sound_data
# add channels
duplicated_data = np.expand_dims(data[:,0], axis=1)
for i in range(diff):
data = np.append(data, duplicated_data, axis=1)
return data
def apply_sample_length(data, target_len, mirror_sound = False, clip_at_zero = True):
'''Extends a sound by repeating it until its `target_len`.
If the `target_len` is shorter than the length of `data`, `data`
will be shortened to the specified `target_len`.
This is perhaps useful when working with repetitive or
stationary sounds.
Parameters
----------
data : np.ndarray [size = (num_samples,) or (num_samples, num_channels)]
The data to be checked or extended in length. If shape (num_channels, num_samples),
the data will be reshaped to (num_samples, num_channels).
target_len : int
The length of samples the input `data` should be.
Returns
-------
new_data : np.ndarray [size=(target_len, ) or (target_len, num_channels)]
Examples
--------
>>> import numpy as np
>>> data = np.array([1,2,3,4])
>>> sp.dsp.apply_sample_length(data, 12)
array([1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4])
>>> # two channels
>>> data = np.zeros((3,2))
>>> data[:,0] = np.array([0,1,2])
>>> data[:,1] = np.array([1,2,3])
>>> data
array([[0., 1.],
[1., 2.],
[2., 3.]])
>>> sp.dsp.apply_sample_length(data,5)
array([[0., 1.],
[1., 2.],
[2., 3.],
[0., 1.],
[1., 2.]])
'''
if len(data.shape) > 2:
raise ValueError('Too many dimensions.')
if len(data) > target_len:
new_data = data[:target_len]
return new_data
elif len(data) == target_len:
new_data = data
return new_data
else:
while len(data) < target_len:
if clip_at_zero:
data_clipped = sp.dsp.clip_at_zero(data)
# get rid of last zero
if len(data_clipped) < len(data):
data = data_clipped[:-1]
if mirror_sound:
data = np.concatenate((data, np.flip(data[:-1])))
else:
data = np.concatenate((data, data))
if len(data) >= target_len:
break
if len(data.shape) > 1:
# ensure stereo in correct format (num_samples, num_channels)
data = sp.dsp.shape_samps_channels(data)
num_channels = data.shape[1]
else:
num_channels = 0
if num_channels:
new_data = np.zeros((target_len, num_channels))
else:
new_data = np.zeros((target_len,))
row_id = 0
while row_id < len(new_data):
if row_id + len(data) > len(new_data):
diff = row_id + len(data) - len(new_data)
new_data[row_id:] += data[:-diff]
else:
new_data[row_id:row_id+len(data)] = data
row_id += len(data)
new_data = sp.utils.match_dtype(new_data, data)
return new_data
# TODO: raise error or only warning if original data cut off?
def zeropad_sound(data, target_len, sr, delay_sec=None):
'''If the sound data needs to be a certain length, zero pad it.
Parameters
----------
data : numpy.ndarray [size = (num_samples,) or (num_samples, num_channels)]
The sound data that needs zero padding. Shape (len(data),).
target_len : int
The number of samples the `data` should have
sr : int
The samplerate of the `data`
delay_sec : int, float, optional
If the data should be zero padded also at the beginning.
(default None)
Returns
-------
signal_zeropadded : numpy.ndarray [size = (target_len,) or (target_len, num_channels)]
The data zero padded.
Examples
--------
>>> import numpy as np
>>> x = np.array([1,2,3,4])
>>> # with 1 second delay (with sr of 4, that makes 4 sample delay)
>>> x_zeropadded = zeropad_sound(x, target_len=10, sr=4, delay_sec=1)
>>> x_zeropadded
array([0., 0., 0., 0., 1., 2., 3., 4., 0., 0.])
>>> # without delay
>>> x_zeropadded = zeropad_sound(x, target_len=10, sr=4)
>>> x_zeropadded
array([1., 2., 3., 4., 0., 0., 0., 0., 0., 0.])
>>> # if signal is longer than desired length:
>>> x_zeropadded = zeropad_sound(x, target_len=3, sr=4)
UserWarning: The signal cannot be zeropadded and will instead be truncated as length of `data` is 4 and `target_len` is 3.
len(data), target_len))
>>> x_zeropadded
array([1, 2, 3])
'''
# ensure data follows shape of (num_samples,) or (num_samples, num_channels)
data = sp.dsp.shape_samps_channels(data)
num_channels = get_num_channels(data)
if delay_sec is None:
delay_sec = 0
delay_samps = sr * delay_sec
if target_len < len(data) + delay_samps:
import warnings
# data must be truncated:
remove_samples = len(data) - len(data[:target_len-delay_samps])
if remove_samples >= len(data):
warnings.warn('All data will be lost and replaced with zeros with the '+\
'provided `target_len` and `delay_sec` settings. Data length is '+\
'{}, target_len is {}, and delay samples is {}.'.format(
len(data), target_len, delay_samps))
data = data[:target_len-delay_samps]
warnings.warn('The `target_len` is shorter than the `data` and `delay_sec`. '+\
'Therefore the data will be cut off by {} sample(s).'.format(remove_samples))
if len(data) < target_len:
diff = target_len - len(data)
signal_zeropadded = np.zeros((data.shape[0] + int(diff)))
if num_channels > 1:
signal_zeropadded = apply_num_channels(signal_zeropadded, num_channels)
assert signal_zeropadded.shape[1] == data.shape[1]
for i, row in enumerate(data):
signal_zeropadded[i+delay_samps] += row
else:
import warnings
warnings.warn('The signal cannot be zeropadded and will instead be truncated '+\
'as length of `data` is {} and `target_len` is {}.'.format(
len(data), target_len))
signal_zeropadded = data[:target_len]
return signal_zeropadded
def get_num_channels(data):
if len(data.shape) > 1 and data.shape[1] > 1:
num_channels = data.shape[1]
else:
num_channels = 1
return num_channels
# TODO clarify how length of output array is established
def combine_sounds(file1, file2, match2shortest=True, time_delay_sec=None,total_dur_sec=None):
'''Combines sounds
Parameters
----------
file1 : str
One of two files to be added together
file2 : str
Second of two files to be added together
match2shortest : bool
If the lengths of the addition should be limited by the shorter sound.
(default True)
time_delay_sec : int, float, optional
The amount of time in seconds before the sounds are added together.
The longer sound will play for this period of time before the shorter
sound is added to it. (default None)
total_dur_sec : int, float, optional
The total duration in seconds of the combined sounds. (default None)
Returns
-------
added_sound : numpy.ndarray
The sound samples of the two soundfiles added together
sr1 : int
The sample rate of the original signals and added sound
'''
data1, sr1 = sp.loadsound(file1)
data2, sr2 = sp.loadsound(file2)
if sr1 != sr2:
data2, sr2 = resample_audio(data2, sr2, sr1)
if time_delay_sec:
num_extra_samples = int(sr1*time_delay_sec)
else:
num_extra_samples = 0
if len(data1) > len(data2):
data_long = data1
data_short = data2
else:
data_long = data2
data_short = data1
dl_copy = data_long.copy()
ds_copy = data_short.copy()
if match2shortest:
data_short = zeropad_sound(data_short, len(ds_copy) + num_extra_samples, sr1, delay_sec= time_delay_sec)
data_long = data_long[:len(ds_copy)+num_extra_samples]
else:
data_short = zeropad_sound(data_short,len(dl_copy), sr1, delay_sec= time_delay_sec)
added_sound = data_long + data_short
if total_dur_sec:
added_sound = added_sound[:sr1*total_dur_sec]
return added_sound, sr1
def calc_frame_length(dur_frame_millisec, sr):
"""Calculates the number of samples necessary for each frame
Parameters
----------
dur_frame_millisec : int or float
time in milliseconds each frame should be
sr : int
sampling rate of the samples to be framed
Returns
-------
frame_length : int
the number of samples necessary to fill a frame
Examples
--------
>>> calc_frame_length(dur_frame_millisec=20, sr=1000)
20
>>> calc_frame_length(dur_frame_millisec=20, sr=48000)
960
>>> calc_frame_length(dur_frame_millisec=25.5, sr=22500)
573
"""
frame_length = int(dur_frame_millisec * sr // 1000)
return frame_length
def calc_num_overlap_samples(samples_per_frame, percent_overlap):
"""Calculate the number of samples that constitute the overlap of frames
Parameters
----------
samples_per_frame : int
the number of samples in each window / frame
percent_overlap : int, float
either an integer between 0 and 100 or a decimal between 0.0 and 1.0
indicating the amount of overlap of windows / frames
Returns
-------
num_overlap_samples : int
the number of samples in the overlap
Examples
--------
>>> calc_num_overlap_samples(samples_per_frame=100,percent_overlap=0.10)
10
>>> calc_num_overlap_samples(samples_per_frame=100,percent_overlap=10)
10
>>> calc_num_overlap_samples(samples_per_frame=960,percent_overlap=0.5)
480
>>> calc_num_overlap_samples(samples_per_frame=960,percent_overlap=75)
720
"""
if percent_overlap > 1:
percent_overlap *= 0.01
num_overlap_samples = int(samples_per_frame * percent_overlap)
return num_overlap_samples
def calc_num_subframes(tot_samples, frame_length, overlap_samples, zeropad=False):
"""Assigns total frames needed to process entire noise or target series
This function calculates the number of full frames that can be
created given the total number of samples, the number of samples in
each frame, and the number of overlapping samples.
Parameters
----------
tot_samples : int
total number of samples in the entire series
frame_length : int
total number of samples in each frame / processing window
overlap_samples : int
number of samples in overlap between frames
zeropad : bool, optional
If False, number of subframes limited to full frames. If True,
number of subframes extended to zeropad the last partial frame.
(default False)
Returns
-------
subframes : int
The number of subframes necessary to fully process the audio samples
at given `frame_length`, `overlap_samples`, and `zeropad`.
Examples
--------
>>> calc_num_subframes(30,10,5)
5
>>> calc_num_subframes(30,20,5)
3
"""
import math
if overlap_samples == 0:
if zeropad:
subframes = int(math.ceil(tot_samples/frame_length))
else:
subframes = int(tot_samples/frame_length)
return subframes
trim = frame_length - overlap_samples
totsamps_adjusted = tot_samples-trim
if zeropad:
subframes = int(math.ceil(totsamps_adjusted / overlap_samples))
else:
subframes = int(totsamps_adjusted / overlap_samples)
return subframes
def create_window(window_type, frame_length):
"""Creates window according to set window type and frame length
the Hamming window tapers edges to around 0.08 while the Hann window
tapers edges to 0.0. Both are commonly used in noise filtering.
Parameters
----------
window_type : str
type of window to be applied (default 'hamming')
frame_length : int
the number of samples the window should contain
Returns
-------
window : ndarray
a window fitted to the class attribute 'frame_length'
Examples
--------
>>> #create Hamming window
>>> hamm_win = create_window('hamming', frame_length=5)
>>> hamm_win
array([0.08, 0.54, 1. , 0.54, 0.08])
>>> #create Hann window
>>> hann_win = create_window('hann',frame_length=5)
>>> hann_win
array([0. , 0.5, 1. , 0.5, 0. ])
"""
if window_type.lower() == 'hamming':
window = hamming(frame_length)
elif 'hann' in window_type.lower():
window = hann(frame_length)
return window
def apply_window(samples, window, zeropad=False):
"""Applies predefined window to a section of samples. Mono or stereo sound checked.
The length of the samples must be the same length as the window.
Parameters
----------
samples : ndarray [shape=(num_samples,) or (num_samples, num_channels)]
series of samples with the length of input window
window : ndarray [shape=(num_samples,) or (num_samples, num_channels)]
window to be applied to the signal. If window does not match number of
channels of sample data, the missing channels will be added to the window,
repeating the first channel.
Returns
-------
samples_win : ndarray
series with tapered sides according to the window provided
Examples
--------
>>> import numpy as np
>>> input_signal = np.array([ 0. , 0.36371897, -0.302721,
... -0.1117662 , 0.3957433 ])
>>> window_hamming = np.array([0.08, 0.54, 1. , 0.54, 0.08])
>>> apply_window(input_signal, window_hamming)
array([ 0. , 0.19640824, -0.302721 , -0.06035375, 0.03165946])
>>> window_hann = np.array([0. , 0.5, 1. , 0.5, 0. ])
>>> apply_window(input_signal, window_hann)
array([ 0. , 0.18185948, -0.302721 , -0.0558831 , 0. ])
"""
if len(samples.shape) == 1:
if len(window.shape) > 1:
window = window[:,0]
elif len(samples.shape) != len(window.shape) or samples.shape[1] != window.shape[1]:
window = sp.dsp.add_channels(window, samples.shape[1])
# in the off chance the window has more channels than samples:
if window.shape[1] > samples.shape[1]:
window = window[:,:samples.shape[1]]
if zeropad:
if samples.shape != window.shape:
temp_matrix = sp.dsp.create_empty_matrix(
window.shape)
temp_matrix[:len(samples)] = samples
samples = temp_matrix
samples_win = samples * window
return samples_win
def add_channels(samples, channels_total):
'''Copies columns of `samples` to create additional channels.
Parameters
----------
samples : np.ndarray [shape=(num_samples) or (num_samples,num_channels)]
The samples to add channels to.
channels_total : int
The total number of channels desired. For example, if `samples` already has
2 channels and you want it to have 3, set `channels_total` to 3.
Returns
-------
x : np.ndarray [shape = (num_samples, channels_total)]
A copy of `samples` with desired number of channels.
Examples
--------
>>> import numpy as np
>>> samps_mono = np.array([1,2,3,4,5])
>>> samps_stereo2 = add_channels(samps_mono, 2)
>>> samps_stereo2
array([[1, 1],
... [2, 2],
... [3, 3],
... [4, 4],
... [5, 5]])
>>> samps_stereo5 = add_channels(samps_stereo2, 5)
>>> samps_stereo5
array([[1, 1, 1, 1, 1],
... [2, 2, 2, 2, 2],
... [3, 3, 3, 3, 3],
... [4, 4, 4, 4, 4],
... [5, 5, 5, 5, 5]])
Warning
-------
If `channels_total` is less than or equal to the number of channels already present
in `samples`, no channels are added.
'''
y = samples.copy()
if len(y.shape) == 1:
for i in range(channels_total):
if i == 0:
x = np.vstack((y, y))
elif i < channels_total-1:
x = np.vstack((x, y))
else:
if y.shape[1] == channels_total:
import warnings
msg = '\n\nWarning: provided `samples` already has {} channels.'.format(channels_total)+\
'\nTo add 1 channel to samples shaped (5, 1), set `channels_total` to 2.' +\
'\nNo channels added.\n'
warnings.warn(msg)
return y
elif y.shape[1] > channels_total:
import warnings
msg = '\n\nWarning: function soundpy.dsp.add_channels adds channels. '+\
'`samples` currently has {} channels and provided`'.format(samples.shape[1]) +\
' `channels_total` is less than that: {}'.format(channels_total) +\
'\nTo add 1 channel to samples shaped (5, 2), set `channels_total` to 3.'+\
'\nNo channels added.\n'
warnings.warn(msg)
x = y.T
extra_channels = channels_total//samples.shape[1]
for i in range(extra_channels):
x = np.vstack((x, y.T))
x = x.T
if x.shape[1] > channels_total:
x = x[:,:channels_total]
return x
def average_channels(data):
'''Averages all channels in a stereo signal into one channel.
Parameters
----------
data : np.ndarray [size=(num_samples, num_channels)]
The stereo data to average out. If mono data supplied, mono data is returned unchanged.
Returns
-------
data averaged : np.ndarray [size=(num_samples)]
Copy of `data` averaged into one channel.
Examples
--------
>>> import numpy as np
>>> input_samples1 = np.array([1,2,3,4,5])
>>> input_samples2 = np.array([1,1,3,3,5])
>>> input_2channels = np.vstack((input_samples1, input_samples2)).T
>>> input_averaged = average_channels(input_2channels)
>>> input_averaged
array([1. , 1.5, 3. , 3.5, 5. ])
'''
# average out channels
if len(data.shape) > 1 and data.shape[1] > 1:
data_summed = data[:,0].copy()
for channel in range(data.shape[1]):
if channel == 0:
pass
else:
data_summed += data[:,channel]
return data_summed / data.shape[1]
else:
return data
def calc_fft(signal_section, real_signal=None, fft_bins = None, **kwargs):
"""Calculates the fast Fourier transform of a time series. Should work with stereo signals.
The length of the signal_section determines the number of frequency
bins analyzed if `fft_bins` not set. Therefore, if there are higher frequencies in the
signal, the length of the `signal_section` should be long enough to
accommodate those frequencies.
The frequency bins with energy levels at around zero denote frequencies
not prevalent in the signal; the frequency bins with prevalent energy
levels relate to the frequencies present in the signal as well as their
amplitudes.
Parameters
----------
signal_section : ndarray [shape = (num_samples) or (num_samples, num_channels)]
the series that the fft will be applied to. If stereo sound, will return a FFT
for each channel.
real_signal : bool
If True, only half of the fft will be returned (the fft is mirrored). Otherwise the
full fft will be returned.
fft_bins : int, optional
The number of fft bins to calculate; if None, the length of `signal_section`
determines the number of fft bins.
kwargs : additional keyword arguments
keyword arguments for numpy.fft.fft or nump.fft.rfft
Returns
-------
fft_vals : ndarray [shape=(num_fft_bins), or (num_fft_bins, num_channels), dtype=np.complex_]
the series transformed into the frequency domain with the same
shape as the input series
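Examples
--------
Illustrative only: a real signal of length 4 yields 4 fft bins, or 3 when
`real_signal` is True (the mirrored half is dropped).
>>> import numpy as np
>>> sig = np.array([1., 0., -1., 0.])
>>> calc_fft(sig).shape
(4,)
>>> calc_fft(sig, real_signal=True).shape
(3,)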
"""
if sp.dsp.ismono(signal_section):
if real_signal:
fft_vals = rfft(signal_section, n = fft_bins, **kwargs)
else:
fft_vals = fft(signal_section, n = fft_bins, **kwargs)
else:
for channel in range(signal_section.shape[1]):
if channel == 0:
if real_signal:
fft_vals = rfft(signal_section[:,channel], n = fft_bins, **kwargs)
else:
fft_vals = fft(signal_section[:,channel], n = fft_bins, **kwargs)
x = fft_vals
else:
if real_signal:
fft_vals = rfft(signal_section[:,channel], n = fft_bins, **kwargs)
else:
fft_vals = fft(signal_section[:,channel], n = fft_bins, **kwargs)
x = np.stack((x, fft_vals), axis=-1)
fft_vals = x
return fft_vals
def ismono(data):
# ensure channels last
data = sp.dsp.shape_samps_channels(data)
if len(data.shape) > 1 and data.shape[1] > 1:
return False
else:
return True
# TODO: https://github.com/biopython/biopython/issues/1496
# Fix numpy array repr for Doctest.
def calc_power(fft_vals):
'''Calculates the power of fft values
Parameters
----------
fft_vals : ndarray (complex or floats)
the fft values of a windowed section of a series
Returns
-------
power_spec : ndarray
the squared absolute value of the input fft values
Example
-------
>>> import numpy as np
>>> matrix = np.array([[1,1,1],[2j,2j,2j],[-3,-3,-3]],
... dtype=np.complex_)
>>> calc_power(matrix)
array([[0.33333333, 0.33333333, 0.33333333],
[1.33333333, 1.33333333, 1.33333333],
[3. , 3. , 3. ]])
'''
power_spec = np.abs(fft_vals)**2 / len(fft_vals)
return power_spec
def calc_average_power(matrix, num_iters):
'''Divides matrix values by the number of times power values were added.
This function assumes the power values of n-number of series were
calculated and added. It divides the values in the input matrix by n,
i.e. 'num_iters'.
Parameters
----------
matrix : ndarray
a collection of floats or ints representing the sum of power
values across several series sets
num_iters : int
an integer denoting the number of times power values were added to
the input matrix
Returns
-------
matrix : ndarray
the averaged input matrix
Examples
--------
>>> matrix = np.array([[6,6,6],[3,3,3],[1,1,1]])
>>> ave_matrix = calc_average_power(matrix, 3)
>>> ave_matrix
array([[2. , 2. , 2. ],
[1. , 1. , 1. ],
[0.33333333, 0.33333333, 0.33333333]])
'''
if matrix.dtype == 'int64':
matrix = matrix.astype('float')
for i in range(len(matrix)):
matrix[i] /= num_iters
return matrix
def calc_phase(fft_matrix, radians=False):
'''Calculates phase from complex fft values.
Parameters
----------
fft_matrix : np.ndarray [shape=(num_frames, num_features), dtype=complex]
matrix with fft values
radians : boolean
False and complex values are returned. True and radians are returned.
(Default False)
Returns
-------
phase : np.ndarray [shape=(num_frames, num_features)]
Phase values for `fft_matrix`. If radians is set to False, dtype = complex.
If radians is set to True, dtype = float.
Examples
--------
>>> import numpy as np
>>> frame_length = 10
>>> time = np.arange(0, 10, 0.1)
>>> signal = np.sin(time)[:frame_length]
>>> fft_vals = np.fft.fft(signal)
>>> phase = calc_phase(fft_vals, radians=False)
>>> phase[:2]
array([ 1. +0.j , -0.37872566+0.92550898j])
>>> phase = calc_phase(fft_vals, radians=True)
>>> phase[:2]
array([0. , 1.95921533])
'''
if not radians:
if len(fft_matrix.shape) > 1 and fft_matrix.shape[1] > 1:
# soundpy works with (num_frames, num_features)
# librosa works with (num_features, num_frames)
fft_matrix = fft_matrix.T
__, phase = librosa.magphase(fft_matrix)
if len(phase.shape) > 1 and phase.shape[1] > 1:
# transpose back to (num_frames, num_features)
phase = phase.T
else:
# in radians
#if normalization:
#phase = np.angle(fft_matrix) / (frame_length * norm_win)
#else:
phase = np.angle(fft_matrix)
return phase
def reconstruct_whole_spectrum(band_reduced_noise_matrix, n_fft=None):
'''Reconstruct whole spectrum by mirroring complex conjugate of data.
Parameters
----------
band_reduced_noise_matrix : np.ndarray [size=(n_fft,), dtype=np.float or np.complex_]
Matrix with either power or fft values of the left part of the fft. The whole
fft can be provided; however the right values will be overwritten by a mirrored
left side.
n_fft : int, optional
If None, `n_fft` set to length of `band_reduced_noise_matrix`. `n_fft` defines
the size of the mirrored vector.
Returns
-------
output_matrix : np.ndarray [size = (n_fft,), dtype=np.float or np.complex_]
Mirrored vector of input data.
Examples
--------
>>> x = np.array([3.,2.,1.,0.])
>>> # double the size of x
>>> x_rec = sp.dsp.reconstruct_whole_spectrum(x, n_fft=int(len(x)*2))
>>> x_rec
array([3., 2., 1., 0., 0., 1., 2., 3.])
>>> # overwrite right side of data
>>> x = np.array([3.,2.,1.,0.,0.,2.,3.,5.])
>>> x_rec = sp.dsp.reconstruct_whole_spectrum(x, n_fft=len(x))
>>> x_rec
array([3., 2., 1., 0., 0., 1., 2., 3.])
'''
# expects 1d data
if len(band_reduced_noise_matrix.shape) > 1:
band_reduced_noise_matrix = band_reduced_noise_matrix.reshape((
band_reduced_noise_matrix.shape[0]))
if n_fft is None:
n_fft = len(band_reduced_noise_matrix)
if np.issubdtype(band_reduced_noise_matrix.dtype, np.complexfloating):
complex_vals = True
else:
complex_vals = False
total_rows = n_fft
output_matrix = create_empty_matrix((total_rows,), complex_vals=complex_vals)
if band_reduced_noise_matrix.shape[0] < n_fft:
temp_matrix = create_empty_matrix((total_rows,), complex_vals=complex_vals)
temp_matrix[:len(band_reduced_noise_matrix)] += band_reduced_noise_matrix
band_reduced_noise_matrix = temp_matrix
# flip up-down
flipped_matrix = np.flip(band_reduced_noise_matrix, axis=0)
output_matrix[0:n_fft//2+1,] += band_reduced_noise_matrix[0:n_fft//2+1]
output_matrix[n_fft//2+1:,] += flipped_matrix[n_fft//2+1:]
assert output_matrix.shape == (n_fft,)
return output_matrix
# TODO: test with 2d+ dimensions
def apply_original_phase(spectrum, phase):
'''Applies phase values to a magnitude or power spectrum.
Parameters
----------
spectrum : np.ndarray [shape=(n,), dtype=np.float or np.complex]
Magnitude or power spectrum
phase : np.ndarray [shape=(n,), dtype=np.float or np.complex]
Phase to be applied to spectrum
Returns
-------
spectrum_complex : np.ndarray [shape=(n,), dtype = np.complex]
'''
# ensure 1d dimensions
if len(spectrum.shape) > 1:
spectrum = spectrum.reshape((
spectrum.shape[0],))
if len(phase.shape) > 1:
phase = phase.reshape((
phase.shape[0],))
assert spectrum.shape == phase.shape
# Determine whether phase is represented in radians or as a complex spectrum.
if np.iscomplexobj(phase):
radians = False
else:
radians = True
if not radians:
spectrum_complex = spectrum * phase
else:
import cmath
phase_prepped = (1/2) * np.cos(phase) + cmath.sqrt(-1) * np.sin(phase)
spectrum_complex = spectrum**(1/2) * phase_prepped
return spectrum_complex
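# A minimal sketch combining `calc_fft`, `calc_phase` and `apply_original_phase`
# to rebuild a complex spectrum from its magnitude (illustrative):
# >>> import numpy as np
# >>> frame = np.sin(np.arange(0, 1, 0.01))
# >>> fft_vals = calc_fft(frame)
# >>> magnitude = np.abs(fft_vals)
# >>> phase = calc_phase(fft_vals, radians=False)
# >>> reconstructed = apply_original_phase(magnitude, phase)
# >>> np.allclose(reconstructed, fft_vals)
# True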
def calc_posteri_snr(target_power_spec, noise_power_spec):
"""Calculates and signal to noise ratio of current frame
Parameters
----------
target_power_spec : ndarray
matrix with power values of the target signal
noise_power_spec : ndarray
matrix with power values of the noise signal
Returns
-------
posteri_snr : ndarray
matrix containing the signal to noise ratio
Examples
--------
>>> sig_power = np.array([6,6,6,6])
>>> noise_power = np.array([2,2,2,2])
>>> calc_posteri_snr(sig_power, noise_power)
array([3., 3., 3., 3.])
"""
posteri_snr = np.zeros(target_power_spec.shape)
for i in range(len(target_power_spec)):
posteri_snr[i] += target_power_spec[i] / noise_power_spec[i]
return posteri_snr
def get_max_index(matrix):
'''Returns the index of the item in `matrix` with the greatest energy.
If the items are arrays (e.g. power spectrum rows), their sums are compared;
otherwise absolute values are compared (i.e. real sample data is expected).
'''
max_val = 0
max_index = 0
for i, value in enumerate(matrix):
if isinstance(value, np.ndarray):
if sum(value) > max_val:
max_val = sum(value)
max_index = i
else:
if np.abs(value) > max_val:
max_val = np.abs(value)
max_index = i
return max_index
def get_local_target_high_power(target_samples, sr, local_size_ms=25, min_power_percent=0.25):
# get the size of power spectrum of length local_size_ms
if local_size_ms is None:
local_size_ms = 25
if min_power_percent is None:
min_power_percent = 0.25
target_power_size = sp.feats.get_feats(target_samples, sr=sr,
feature_type='powspec',
dur_sec = local_size_ms/1000)
target_power = sp.feats.get_feats(target_samples, sr=sr,
feature_type='powspec')
target_high_power = sp.dsp.create_empty_matrix(target_power_size.shape,
complex_vals=False)
max_index = sp.dsp.get_max_index(target_power)
max_power = sum(target_power[max_index])
min_power = max_power * min_power_percent
index=0
for row in target_power:
# only get power values for `local_size_ms`
if index == len(target_high_power):
break
if sum(row) >= min_power:
target_high_power[index] = row
index += 1
return target_high_power
def get_vad_snr(target_samples, noise_samples, sr, extend_window_ms = 0):
'''Approximates the signal-to-noise ratio between target and noise samples.
Note: this is a simple implementation and should not be used for
official / exact measurement of SNR.
Parameters
----------
target_samples : np.ndarray [size = (num_samples, )]
The samples of the main / speech signal. Only frames with
higher levels of energy will be used to calculate SNR.
noise_samples : np.ndarray [size = (num_samples, )]
The samples of background noise. Expects only noise, no speech.
Must be the same sample rate as the target_samples
sr : int
The sample rate for the audio samples.
extend_window_ms : int or float
The number of milliseconds by which to extend the detected voice
activity windows; passed on to `soundpy.feats.get_vad_stft`.
(default 0)
Returns
-------
snr : float
The estimated signal-to-noise ratio in decibels.
References
----------
http://www1.icsi.berkeley.edu/Speech/faq/speechSNR.html
Gomolka, Ryszard. (2017). Re: How to measure signal-to-noise ratio (SNR) in real time?. Retrieved from: https://www.researchgate.net/post/How_to_measure_signal-to-noise_ratio_SNR_in_real_time/586a880f217e2060b65a8853/citation/download.
https://www.who.int/occupational_health/publications/noise1.pdf
'''
# get target power with only high energy values (vad)
vad_stft, __ = sp.feats.get_vad_stft(target_samples,
sr=sr, extend_window_ms = extend_window_ms)
if not vad_stft.any():
import warnings
msg = '\nNo voice activity found in target signal. Using the full signal instead.'
warnings.warn(msg)
vad_stft = sp.feats.get_stft(target_samples,
sr=sr)
target_power = np.abs(vad_stft)**2
noise_stft = sp.feats.get_stft(noise_samples, sr=sr)
noise_power = np.abs(noise_stft)**2
snr = 10 * np.log10(np.mean(target_power)/ (np.mean(noise_power) + 1e-6))
snr = np.mean(snr)
return snr
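# A minimal usage sketch (illustrative; assumes `speech` and `noise` are 1D
# sample arrays recorded at the same sample rate):
# >>> snr_estimate = get_vad_snr(speech, noise, sr=48000)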
# Not having success with this
def snr_adjustnoiselevel(target_samples, noise_samples, sr, snr):
'''Computes scale factor to adjust noise samples to achieve snr.
From script addnoise_asl_nseg.m:
This function adds noise to a file at a specified SNR level. It uses
the active speech level to compute the speech energy. The
active speech level is computed as per ITU-T P.56 standard.
soundpy Note: this functionality was pulled from the MATLAB script: addnoise_asl_nseg.m at this GitHub repo:
https://github.com/SIP-Lab/CNN-VAD/blob/master/Training%20Code/Functions/addnoise_asl_nseg.m
I do not understand all that went on to calculate the scale
factor and therefore do not explain anything further than
the original script.
Parameters
----------
target_samples : np.ndarray [size = (num_samples,)]
The audio samples of the target / clean signal.
noise_samples : np.ndarray [size = (num_samples,)]
The audio samples of the noise signal.
sr : int
The sample rate of both `target_samples` and `noise_samples`
snr : int
The desired signal-to-noise ratio of the target and noise
audio signals.
Returns
-------
scale_factor : int, float
The factor to which noise samples should be multiplied
before being added to target samples to achieve SNR.
References
----------
Yi Hu and Philipos C. Loizou : original authors
Copyright (c) 2006 by Philipos C. Loizou
SIP-Lab/CNN-VAD/ : GitHub Repo
Copyright (c) 2019 Signal and Image Processing Lab
MIT License
ITU-T (1993). Objective measurement of active speech level. ITU-T
Recommendation P. 56
See Also
--------
soundpy.dsp.asl_P56
'''
#% Px is the active speech level ms energy, asl is the active factor, and c0
#% is the active speech level threshold.
target_px, asl, c0 = sp.dsp.asl_P56(target_samples, sr)
x = target_samples
x_len = len(x)
# apply IRS? to noise segment?
# TODO: randomize section of noise
#noise_samples = noise_samples[:len(speech_samps)]
noise_px = noise_samples.T @ noise_samples / len(target_samples)
print(noise_px)
#% we need to scale the noise segment samples to obtain the desired snr= 10*
#% log10( Px/ (sf^2 * Pn))
# Just used this within soundpy.dsp.add_backgroundsound
scale_factor = (np.sqrt(target_px/noise_px / (10**(snr/10))))
return scale_factor
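# A minimal sketch of applying the returned scale factor (illustrative; assumes
# `clean` and `noise` are 1D sample arrays at sample rate `sr`):
# >>> scale = snr_adjustnoiselevel(clean, noise, sr=16000, snr=5)
# >>> noisy = clean + scale * noise[:len(clean)]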
def asl_P56(samples, sr, bitdepth=16, smooth_factor=0.03, hangover=0.2, margin_db=15.9):
'''Computes the active speech level according to ITU-T P.56 standard.
Note: I don't personally understand the functionality behind
this function and therefore do not offer the best documentation as
of yet.
Parameters
----------
samples : np.ndarray [size = (num_samples, )]
The audio samples, for example speech samples.
sr : int
The sample rate of `samples`.
bitdepth : int
The bitdepth of audio. Expects 16. (default 16)
smooth_factor : float
Time smoothing factor. (default 0.03)
hangover : float
Hangover. Thank goodness not the kind I'm familiar with.
(default 0.2)
margin_db : int, float
Margin decibels... (default 15.9)
Returns
-------
asl_ms : float
The active speech level ms energy
asl : float
The active factor
c0 : float
Active speech level threshold
References
----------
ITU-T (1993). Objective measurement of active speech level. ITU-T
Recommendation P. 56
TODO handle bitdepth variation - what if not 16?
TODO improve documentation
'''
thresh_nu = bitdepth -1 #number of thresholds
I = math.ceil(sr*hangover) # hangover in samples.. is this percent_overlap?
# 8820
g = np.exp( -1 /( sr * smooth_factor)) # smoothing factor in envelop detection
# 0.99924
###c( 1: thres_no)= 2.^ (-15: thres_no- 16);
###% vector with thresholds from one quantizing level up to half the maximum
###% code, at a step of 2, in the case of 16bit samples, from 2^-15 to 0.5;
###a( 1: thres_no)= 0; % activity counter for each level threshold
###hang( 1: thres_no)= I; % hangover counter for each level threshold
#% vector with thresholds from one quantizing level up to half the maximum
thresholds = np.zeros((len(np.arange(-15,thresh_nu-16),)))
for i, item in enumerate(np.arange(-15,thresh_nu-16)):
thresholds[i] = 2.**item
thresh_nu = len(thresholds)
#% activity counter for each level threshold
activity_counter = np.zeros((len(thresholds),))
# % hangover counter for each level threshold
hangover_matrix = np.zeros((len(thresholds,)))
hangover_matrix[:] = I
#% long-term level square energy of x
square_energy = samples.T @ samples
# 154.55
x_len = len(samples)
# use a 2nd order IIR filter to detect the envelope q
x_abs = np.abs(samples)
p, q = iirfilter(2,Wn=[1-g, g])
iir_2ndorder = lfilter(p, q, x_abs)
for index in range(x_len):
for j in range(thresh_nu):
if iir_2ndorder[index] < thresholds[j]:
activity_counter[j] += 1
hangover_matrix[j] += 1
else:
break
asl = 0
asl_rsm = 0
eps = 2**-52
# https://www.researchgate.net/post/What_does_eps_in_MATLAB_mean_What_is_the_value_of_it
if activity_counter[0]==0:
return None #??
#pass #
#AdB1 = None #?
#else:
AdB1 = 10 * np.log10(square_energy/activity_counter[0] + eps)
# if activity_counter[0] = 1 --> 21.89073220006766
CdB1 = 20 * np.log10( thresholds[0] + eps)
if AdB1 - CdB1 < margin_db:
return None#????
AdB = np.zeros((thresh_nu,))
CdB = np.zeros((thresh_nu,))
Delta = np.zeros((thresh_nu,))
AdB[0] = AdB1
CdB[0] = CdB1
Delta[0] = AdB1 - CdB1
for j in np.arange(0, thresh_nu, 2):
AdB[j] = 10 * np.log10( square_energy / (activity_counter[j] + eps) + eps)
CdB[j] = 20 * np.log10( thresholds[j] + eps)
for j in np.arange(0, thresh_nu, 2):
if activity_counter[j] != 0:
Delta[j] = AdB[j] - CdB[j]
if Delta[j] <= margin_db:
# % interpolate to find the asl
asl_ms_log, cl0 = bin_interp(
AdB[j], AdB[j-1], CdB[j], CdB[j-1], margin_db, 0.5)
asl_ms = 10**(asl_ms_log/10)
asl = (square_energy/x_len)/asl_ms
c0 = 10**( cl0/20)
return asl_ms, asl, c0
def bin_interp(upcount, lwcount, upthr, lwthr, Margin, tol):
if tol < 0:
tol = -tol
# % Check if extreme counts are not already the true active value
iteration = 0
if np.abs(upcount - upthr - Margin) < tol:
asl_ms_log = upcount
cc = upthr
return asl_ms_log, cc
if np.abs(lwcount - lwthr - Margin) < tol:
asl_ms_log = lwcount
cc = lwthr
return asl_ms_log, cc
# % Initialize first middle for given (initial) bounds
midcount = (upcount + lwcount) / 2.0
midthr = (upthr + lwthr) / 2.0
# % Repeats loop until `diff' falls inside the tolerance (-tol<=diff<=tol)
while True:
diff = midcount - midthr - Margin
if np.abs(diff) <= tol:
break
#% if tolerance is not met up to 20 iterations, then relax the
#% tolerance by 10%
iteration += 1
if iteration > 20:
tol = tol * 1.1
if diff > tol: # then the new bounds are:
# upper and middle activities
midcount = (upcount + midcount) / 2.0
# and thresholds
midthr = (upthr + midthr) / 2.0
elif (diff < -tol): # then the new bounds are:
# middle and lower activities
midcount = (midcount + lwcount) / 2.0
# and thresholds
midthr = (midthr + lwthr) / 2.0
#% Since the tolerance has been satisfied, midcount is selected
#% as the interpolated value with a tol [dB] tolerance.
asl_ms_log = midcount
cc = midthr
return asl_ms_log, cc
def calc_posteri_prime(posteri_snr):
"""Calculates the posteri prime
Parameters
----------
posteri_snr : ndarray
The signal-to-noise ratio of the noisy signal, frame by frame.
Returns
-------
posteri_prime : ndarray
The primed posteri_snr, calculated according to the reference paper.
References
----------
Scalart, P. and Filho, J. (1996). Speech enhancement based on a priori
signal to noise estimation. Proc. IEEE Int. Conf. Acoust., Speech, Signal
Processing, 629-632.
"""
posteri_prime = posteri_snr - 1
posteri_prime[posteri_prime < 0] = 0
return posteri_prime
def calc_prior_snr(snr, snr_prime, smooth_factor=0.98, first_iter=None, gain=None):
"""Estimates the signal-to-noise ratio of the previous frame
Depending on the `first_iter` argument, the prior snr is calculated
according to different algorithms. If `first_iter` is None, prior snr is
calculated according to Scalart and Filho (1996); if `first_iter`
is True or False, snr prior is calculated according to Loizou (2013).
Parameters
----------
snr : ndarray
The sound-to-noise ratio of target vs noise power/energy levels.
snr_prime : ndarray
The prime of the snr (see Scalart & Filho (1996))
smooth_factor : float
The value applied to smooth the signal. (default 0.98)
first_iter : None, True, False
If None, snr prior values are estimated the same, no matter if it is
the first iteration or not (Scalart & Filho (1996))
If True, snr prior values are estimated without gain (Loizou 2013)
If False, snr prior values are estimated with gain (Loizou 2013)
(default None)
gain : None, ndarray
If None, gain will not be used. If gain, it is a previously calculated
value from the previous frame. (default None)
Returns
-------
prior_snr : ndarray
Estimation of signal-to-noise ratio of the previous frame of target signal.
References
----------
C Loizou, P. (2013). Speech Enhancement: Theory and Practice.
Scalart, P. and Filho, J. (1996). Speech enhancement based on a
priori signal to noise estimation. Proc. IEEE Int. Conf. Acoust.,
Speech, Signal Processing, 629-632.
"""
if first_iter is None:
# calculate according to apriori SNR equation (6) in paper
# Scalart, P. and Filho, J. (1996)
first_arg = (1 - smooth_factor) * snr_prime
second_arg = smooth_factor * snr
prior_snr = first_arg + second_arg
elif first_iter is True:
# calculate according to Loizou (2013)
# don't yet have previous gain or snr values to apply
first_arg = smooth_factor
second_arg = (1-smooth_factor) * snr_prime
prior_snr = first_arg + second_arg
elif first_iter is False:
# now have previous gain and snr values
first_arg = smooth_factor * (gain**2) * snr
second_arg = (1 - smooth_factor) * snr_prime
prior_snr = first_arg + second_arg
return prior_snr
def calc_gain(prior_snr):
'''Calculates the gain (i.e. attenuation) values to reduce noise.
Parameters
----------
prior_snr : ndarray
The prior signal-to-noise ratio estimation
Returns
-------
gain : ndarray
An array of attenuation values to be applied to the signal (stft) array
at the current frame.
References
----------
C Loizou, P. (2013). Speech Enhancement: Theory and Practice.
Scalart, P. and Filho, J. (1996). Speech enhancement based on a
priori signal to noise estimation. Proc. IEEE Int. Conf. Acoust.,
Speech, Signal Processing, 629-632.
'''
gain = np.sqrt(prior_snr/(1+prior_snr))
return gain
def apply_gain_fft(fft_vals, gain):
'''Reduces noise by applying gain values to the stft / fft array of the
target signal
Parameters
----------
fft_vals : ndarray(complex)
Matrix containing complex values (i.e. stft values) of target signal
gain : ndarray(real)
Matrix containing calculated attenuation values to apply to 'fft_vals'
Returns
-------
enhanced_fft : ndarray(complex)
Matrix with attenuated noise in target (stft) values
'''
enhanced_fft = fft_vals * gain
assert enhanced_fft.shape == fft_vals.shape
return enhanced_fft
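# A minimal per-frame sketch of the noise-reduction chain described in
# Scalart & Filho (1996), using the helpers above (illustrative values):
# >>> import numpy as np
# >>> target_power = np.array([6., 6., 6., 6.])
# >>> noise_power = np.array([2., 2., 2., 2.])
# >>> posteri = calc_posteri_snr(target_power, noise_power)   # array([3., 3., 3., 3.])
# >>> posteri_prime = calc_posteri_prime(posteri)              # array([2., 2., 2., 2.])
# >>> prior = calc_prior_snr(posteri, posteri_prime, smooth_factor=0.98)
# >>> gain = calc_gain(prior)
# >>> enhanced_fft = apply_gain_fft(np.ones(4, dtype=complex), gain)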
def postfilter(original_powerspec, noisereduced_powerspec, gain,
threshold=0.4, scale=10):
'''Apply filter that reduces musical noise resulting from other filter.
If it is estimated that speech (or target signal) is present, reduced
filtering is applied.
References
----------
T. Esch and P. Vary, "Efficient musical noise suppression for speech enhancement
system," Proceedings of IEEE International Conference on Acoustics, Speech and
Signal Processing, Taipei, 2009.
'''
power_ratio_current_frame = sp.dsp.calc_power_ratio(
original_powerspec,
noisereduced_powerspec)
# is there speech? If so, SNR decision = 1
if power_ratio_current_frame < threshold:
SNR_decision = power_ratio_current_frame
else:
SNR_decision = 1
noise_frame_len = sp.dsp.calc_noise_frame_len(SNR_decision, threshold, scale)
# apply window
postfilter_coeffs = sp.dsp.calc_linear_impulse(
noise_frame_len,
num_freq_bins = original_powerspec.shape[0])
gain_postfilter = np.convolve(gain, postfilter_coeffs, mode='valid')
return gain_postfilter
def calc_ifft(signal_section, real_signal=None, norm=False):
"""Calculates the inverse fft of a series of fft values
The real values of the ifft can be used to be saved as an audiofile
Parameters
----------
signal_section : ndarray [shape=(num_freq_bins,)]
The frame of fft values to apply the inverse fft to
real_signal : bool, optional
If True, the inverse real fft (irfft) is applied, matching a forward fft
calculated with rfft; otherwise the full inverse fft is applied.
(default None)
norm : bool
Whether or not the ifft should apply 'ortho' normalization
(default False)
Returns
-------
ifft_vals : ndarray(complex)
The inverse Fourier transform of filtered audio data
"""
if norm:
norm = 'ortho'
else:
norm = None
if real_signal:
ifft_vals = irfft(signal_section, norm=norm)
else:
ifft_vals = ifft(signal_section, norm=norm)
return ifft_vals
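# A minimal round-trip sketch with `calc_fft` (illustrative):
# >>> import numpy as np
# >>> frame = np.array([0.1, 0.2, 0.3, 0.4])
# >>> fft_vals = calc_fft(frame)
# >>> np.allclose(calc_ifft(fft_vals).real, frame)
# True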
def control_volume(samples, max_limit):
"""Keeps max volume of samples to within a specified range.
Parameters
----------
samples : ndarray
series of audio samples
max_limit: float
maximum boundary of the maximum value of the audio samples
Returns
-------
samples : np.ndarray
samples with volume adjusted (if need be).
Examples
--------
>>> import numpy as np
>>> #low volume example: increase volume to desired window
>>> x = np.array([-0.03, 0.04, -0.05, 0.02])
>>> x = control_volume(x, max_limit=0.25)
>>> x
array([-0.13888889, 0.25 , -0.25 , 0.13888889])
>>> #high volume example: decrease volume to desired window
>>> y = np.array([-0.3, 0.4, -0.5, 0.2])
>>> y = control_volume(y, max_limit=0.15)
>>> y
array([-0.08333333, 0.15 , -0.15 , 0.08333333])
"""
if max(samples) != max_limit:
samples = np.interp(samples,
(samples.min(), samples.max()),
(-max_limit, max_limit))
return samples
######### Functions related to postfilter###############
def calc_power_ratio(original_powerspec, noisereduced_powerspec):
'''Calc. the ratio of original vs noise reduced power spectrum.
'''
power_ratio = sum(noisereduced_powerspec) / \
sum(original_powerspec)/len(noisereduced_powerspec)
return power_ratio
def calc_noise_frame_len(SNR_decision, threshold, scale):
'''Calc. window length for calculating moving average.
Note: lower SNRs require larger window.
'''
if SNR_decision < 1:
soft_decision = 1 - (SNR_decision/threshold)
soft_decision_scaled = round((soft_decision) * scale)
noise_frame_len = 2 * soft_decision_scaled + 1
else:
noise_frame_len = SNR_decision
return noise_frame_len
def calc_linear_impulse(noise_frame_len, num_freq_bins):
'''Calc. the post filter coefficients to be applied to gain values.
'''
linear_filter_impulse = np.zeros((num_freq_bins,))
for i in range(num_freq_bins):
if i < noise_frame_len:
linear_filter_impulse[i] = 1 / noise_frame_len
else:
linear_filter_impulse[i] = 0
return linear_filter_impulse
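# A small sketch of the post-filter coefficients (illustrative): a noise frame
# length of 3 over 6 frequency bins yields a short moving-average window.
# >>> calc_linear_impulse(noise_frame_len=3, num_freq_bins=6)
# # -> [1/3, 1/3, 1/3, 0., 0., 0.]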
def adjust_volume(samples, vol_range):
samps = samples.copy()
adjusted_volume = np.interp(samps,
(samps.min(), samps.max()),
(-vol_range, vol_range))
return adjusted_volume
def spread_volumes(samples, vol_list = [0.1,0.3,0.5]):
'''Returns samples with a range of volumes.
This may be useful in applying to training data (transforming data).
Parameters
----------
samples : ndarray
Series belonging to acoustic signal.
vol_list : list
List of floats or ints representing the volumes the samples
are to be oriented towards. (default [0.1,0.3,0.5])
Returns
-------
volrange_dict : tuple
Tuple of `samples` adjusted to each volume in `vol_list`.
'''
if samples is None or len(samples) == 0:
raise ValueError('No Audio sample data recognized.')
max_vol = max(samples)
if round(max_vol) > 1:
raise ValueError('Audio data not normalized.')
volrange_dict = {}
for i, vol in enumerate(vol_list):
volrange_dict[i] = adjust_volume(samples, vol)
return tuple(volrange_dict.values())
def create_empty_matrix(shape, complex_vals=False):
'''Allows creation of a matrix filled with real or complex zeros.
In digital signal processing, complex numbers are common; it is
important to note that if complex_vals=False and complex values are
inserted into the matrix, the imaginary part will be removed.
Parameters
----------
shape : tuple or int
tuple or int indicating the shape or length of desired matrix or
vector, respectively
complex_vals : bool
indicator of whether or not the matrix will receive real or complex
values (default False)
Returns
----------
matrix : ndarray
a matrix filled with real or complex zeros
Examples
----------
>>> matrix = create_empty_matrix((3,4))
>>> matrix
array([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> matrix_complex = create_empty_matrix((3,4),complex_vals=True)
>>> matrix_complex
array([[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
>>> vector = create_empty_matrix(5,)
>>> vector
array([0., 0., 0., 0., 0.])
'''
if complex_vals:
matrix = np.zeros(shape, dtype=np.complex_)
else:
matrix = np.zeros(shape, dtype=float)
return matrix
# TODO test this in applications (currently not implemented)
def overlap_add(enhanced_matrix, frame_length, overlap, complex_vals=False):
'''Overlaps and adds windowed sections together to form 1D signal.
Parameters
----------
enhanced_matrix : np.ndarray [shape=(frame_length, num_frames), dtype=float]
Matrix with enhanced values
frame_length : int
Number of samples per frame
overlap : int
Number of samples that overlap
Returns
-------
new_signal : np.ndarray [shape=(frame_length,), dtype=float]
Length equals (frame_length - overlap) * enhanced_matrix.shape[1] + overlap
Examples
--------
>>> import numpy as np
>>> enhanced_matrix = np.ones((4, 4))
>>> frame_length = 4
>>> overlap = 1
>>> sig = overlap_add(enhanced_matrix, frame_length, overlap)
>>> sig
array([1., 1., 1., 2., 1., 1., 2., 1., 1., 2., 1., 1., 1.])
'''
try:
assert enhanced_matrix.shape[0] == frame_length
except AssertionError as e:
raise TypeError('The first dimension of the enhanced matrix should '+ \
'match the frame length. {} does not match frame length {}'.format(
enhanced_matrix.shape[0], frame_length))
if np.issubdtype(enhanced_matrix.dtype, np.complexfloating):
complex_vals = True
else:
complex_vals = False
increments = frame_length - overlap
start= increments
mid= start + overlap
stop= start + frame_length
expected_len = increments * enhanced_matrix.shape[1] + overlap
new_signal = create_empty_matrix(
(expected_len,),
complex_vals=complex_vals)
for i in range(enhanced_matrix.shape[1]):
if i == 0:
new_signal[:frame_length] += enhanced_matrix[:frame_length,i]
else:
new_signal[start:mid] += enhanced_matrix[:overlap,i]
new_signal[mid:stop] += enhanced_matrix[overlap:frame_length,i]
start += increments
mid = start+overlap
stop = start+frame_length
return new_signal
def random_selection_samples(samples, len_section_samps, wrap=False, random_seed=None, axis=0):
'''Selects a section of samples, starting at random.
Parameters
----------
samples : np.ndarray [shape = (num_samples, )]
The array of sample data
len_section_samps : int
How many samples should be randomly selected
wrap : bool
If False, the selected section will not be wrapped from end to beginning;
if True, the randomly selected section may wrap from the end of `samples`
back to the beginning. See examples below. (default False)
random_seed : int, optional
If replicated randomization desired. (default None)
Examples
--------
>>> import numpy as np
>>> # no wrap:
>>> x = np.array([1,2,3,4,5,6,7,8,9,10])
>>> n = sp.dsp.random_selection_samples(x, len_section_samps = 7,
... wrap = False, random_seed = 40)
>>> n
array([3, 4, 5, 6, 7, 8, 9])
>>> # with wrap:
>>> n = sp.dsp.random_selection_samples(x, len_section_samps = 7,
... wrap = True, random_seed = 40)
>>> n
array([ 7, 8, 9, 10, 1, 2, 3])
'''
if not isinstance(samples, np.ndarray):
samples = np.array(samples)
if random_seed is not None and random_seed is not False:
np.random.seed(random_seed)
if wrap:
start_index = np.random.choice(range(len(samples)))
if start_index + len_section_samps > len(samples):
left_over = start_index + len_section_samps - len(samples)
start = samples[start_index:]
end = samples[:left_over]
total_section = np.concatenate((start,end),axis=axis)
return total_section
else:
total_section = samples[start_index:start_index+len_section_samps]
return total_section
else:
max_index_start = len(samples) - len_section_samps + 1
if max_index_start > 0:
start_index = np.random.choice(range(max_index_start))
else:
start_index = 0
total_section = samples[start_index:start_index+len_section_samps]
return total_section
# TODO: remove? function get_mean_freq is much better
def get_pitch(sound, sr=16000, win_size_ms = 50, percent_overlap = 0.5,
real_signal = False, fft_bins = 1024,
window = 'hann', **kwargs):
'''Approximates pitch by collecting dominant frequencies of signal.
'''
import warnings
warnings.warn('\n\nWarning: `soundpy.dsp.get_pitch` is experimental at best.'+\
' \nPerhaps try `soundpy.dsp.get_mean_freq`, which is still experimental '+\
'but a bit more reliable.\n\n')
if isinstance(sound, np.ndarray):
data = sound
else:
data, sr2 = sp.loadsound(sound, sr=sr)
assert sr2 == sr
frame_length = sp.dsp.calc_frame_length(win_size_ms, sr)
num_overlap_samples = int(frame_length * percent_overlap)
num_subframes = sp.dsp.calc_num_subframes(len(data),
frame_length = frame_length,
overlap_samples = num_overlap_samples,
zeropad = True)
max_freq = sr//2
# ensure even
if not max_freq % 2 == 0:
max_freq += 1
total_rows = fft_bins
# initialize empty matrix for dominant frequency values of speech frames
freq_matrix = sp.dsp.create_empty_matrix((num_subframes,),
complex_vals = False)
section_start = 0
window_frame = sp.dsp.create_window(window, frame_length)
row = 0
for frame in range(num_subframes):
section = data[section_start:section_start+frame_length]
# otherwise calculate frequency info
section = sp.dsp.apply_window(section,
window_frame,
zeropad = True)
section_fft = sp.dsp.calc_fft(section,
real_signal = real_signal,
fft_bins = total_rows,
)
# limit exploration of dominant frequency to max frequency (Nyquist Theorem)
section_fft = section_fft[:max_freq]
section_power = sp.dsp.calc_power(section_fft)
dom_f = sp.dsp.get_dom_freq(section_power)
freq_matrix[row] = dom_f
row += 1
section_start += (frame_length - num_overlap_samples)
return freq_matrix
# TODO consolidate into VAD? get_vad_stft, or get_vad_samples? Avoid extra processing?
# Perhaps as class attribute..
def get_mean_freq(sound, sr=16000, win_size_ms = 50, percent_overlap = 0.5,
real_signal = False, fft_bins = 1024,
window = 'hann', percent_vad=0.75):
'''Takes the mean of dominant frequencies of voice activated regions in a signal.
Note: Silences discarded.
The average fundamental frequency for a male voice is 125Hz; for a female voice it’s 200Hz; and for a child’s voice, 300Hz. (Russell, J., 2020)
References
----------
Russell, James (2020) The Human Voice and the Frequency Range.
Retrieved from:
https://blog.accusonus.com/pro-audio-production/human-voice-frequency-range/
'''
if isinstance(sound, np.ndarray):
data = sound
else:
data, sr2 = sp.loadsound(sound, sr=sr)
assert sr2 == sr
frame_length = sp.dsp.calc_frame_length(win_size_ms, sr)
num_overlap_samples = int(frame_length * percent_overlap)
num_subframes = sp.dsp.calc_num_subframes(len(data),
frame_length = frame_length,
overlap_samples = num_overlap_samples,
zeropad = True)
max_freq = sr//2
# ensure even
if not max_freq % 2 == 0:
max_freq += 1
total_rows = fft_bins
# initialize empty matrix for dominant frequency values of speech frames
freq_matrix = sp.dsp.create_empty_matrix((num_subframes,),
complex_vals = False)
section_start = 0
extra_rows = 0
e_min = None
f_min = None
sfm_min = None
window_frame = sp.dsp.create_window(window, frame_length)
row = 0
for frame in range(num_subframes):
section = data[section_start:section_start+frame_length]
section_vad, values_vad = sp.dsp.vad(section,
sr=sr,
win_size_ms = 10,
percent_overlap = 0.5,
min_energy = e_min,
min_freq = f_min,
min_sfm = sfm_min)
# handle the number of values returned by vad() - still not 100% sure
# if it should return three or four (probably three)
if len(values_vad) == 3:
e, f, sfm = values_vad
elif len(values_vad) == 4:
sr, e, f, sfm = values_vad
if e_min is None or e < e_min:
e_min = e
if f_min is None or f < f_min:
f_min = f
if sfm_min is None or sfm < sfm_min:
sfm_min = sfm
if sum(section_vad)/len(section_vad) < percent_vad:
# start over with the loop
extra_rows += 1
section_start += (frame_length - num_overlap_samples)
continue
# otherwise calculate frequency info
section = sp.dsp.apply_window(section,
window_frame,
zeropad = True)
section_fft = sp.dsp.calc_fft(section,
real_signal = real_signal,
fft_bins = total_rows,
)
# limit exploration of dominant frequency to max frequency (Nyquist Theorem)
section_fft = section_fft[:max_freq]
section_power = sp.dsp.calc_power(section_fft)
dom_f = sp.dsp.get_dom_freq(section_power)
if dom_f > 0:
freq_matrix[row] = dom_f
row += 1
else:
extra_rows += 1
section_start += (frame_length - num_overlap_samples)
if extra_rows:
freq_matrix = freq_matrix[:-extra_rows]
fund_freq = np.mean(freq_matrix)
return round(fund_freq,2)
# TODO: finicky
# Seems that removing the dc offset helps, as does having a higher sample rate
# still exploring influence of window size and percent overlap
# NOTE: good settings for VAD in SNR calc:
# set percent_overlap at 0.5, win_size_ms at 300
# (and padding 100 ms to the identified VAD start and end)
# TODO: fix ERROR issue: frame == 0, measure_noise_frames == 0
#vad_matrix, vad_settings = sp.dsp.vad(audio, sr,
#File "/home/airos/Projects/gitlab/family-language-tracker/soundpy/dsp.py", line 2877, in vad
#if ste - min_energy > thresh_e:
#UnboundLocalError: local variable 'thresh_e' referenced before assignment
def vad(sound, sr, win_size_ms = 50, percent_overlap = 0,
real_signal = False, fft_bins = None, window = 'hann',
energy_thresh = 40, freq_thresh = 185, sfm_thresh = 5,
min_energy = None, min_freq = None, min_sfm = None, use_beg_ms = 120):
'''
Warning: this VAD works best with sample rates above 44100 Hz.
Parameters
----------
energy_thresh : int, float
The minimum amount of energy for speech detection.
freq_thresh : int, float
The maximum frequency threshold.
sfm_thresh : int, float
The spectral flatness measure threshold.
References
----------
M. H. Moattar and M. M. Homayounpour, "A simple but efficient real-time Voice Activity Detection algorithm," 2009 17th European Signal Processing Conference, Glasgow, 2009, pp. 2549-2553.
'''
# if real signal, divide freq_thresh in half, as only half freq_bins are used
if real_signal:
freq_thresh /= 2
if fft_bins is None:
fft_bins = sr // 2
if fft_bins % 2 != 0:
fft_bins += 1
max_freq = sr//2
# ensure even
if not max_freq % 2 == 0:
max_freq += 1
if isinstance(sound, np.ndarray):
data = sound
# resample audio if sr is lower than 44100
if sr < 44100:
import warnings
msg = '\nWarning: VAD works best with sample rates above 44100 Hz.'
warnings.warn(msg)
else:
if sr < 44100:
import warnings
msg = '\nWarning: VAD works best with sample rates above 44100 Hz.'
warnings.warn(msg)
data, sr2 = sp.loadsound(sound, sr=sr)
assert sr2 == sr
# first scale samples to be between -1 and 1
data = sp.dsp.scalesound(data, max_val = 1)
frame_length = sp.dsp.calc_frame_length(win_size_ms, sr)
num_overlap_samples = int(frame_length * percent_overlap)
num_subframes = sp.dsp.calc_num_subframes(len(data),
frame_length = frame_length,
overlap_samples = num_overlap_samples,
zeropad = True)
# limit number of beginning frames to measure background noise:
measure_noise_samples = sp.dsp.calc_frame_length(use_beg_ms, sr)
measure_noise_frames = sp.dsp.calc_num_subframes(measure_noise_samples,
frame_length = frame_length,
overlap_samples = num_overlap_samples,
zeropad = True)
# initialize empty matrix to store vad values
vad_matrix = sp.dsp.create_empty_matrix((num_subframes,),
complex_vals = False)
section_start = 0
speech = 0
silence = 0
min_energy_array = np.zeros(measure_noise_frames)
min_freq_array = np.zeros(measure_noise_frames)
min_sfm_array = np.zeros(measure_noise_frames)
window_frame = sp.dsp.create_window(window, frame_length)
for frame in range(num_subframes):
samples = data[section_start:section_start+frame_length]
samples_windowed = sp.dsp.apply_window(samples, window_frame, zeropad = True)
# apply dft to large window - increase frequency resolution during warping
section_fft = sp.dsp.calc_fft(samples_windowed,
real_signal = real_signal,
fft_bins = fft_bins,
)
# limit exploration of dominant frequency to max frequency (Nyquist Theorem)
section_fft = section_fft[:max_freq]
section_power = sp.dsp.calc_power(section_fft)
# set minimum values if not yet set
if min_energy is None and frame == 0:
min_energy = sp.dsp.short_term_energy(samples)
#elif frame < measure_noise_frames:
#new_min_energy = sp.dsp.short_term_energy(samples)
#min_energy_array[frame] = new_min_energy
#min_energy = np.mean(min_energy_array[:frame+1])
ste = sp.dsp.short_term_energy(samples)
if ste < min_energy and frame < measure_noise_frames:
min_energy = ste
if min_freq is None and frame == 0:
min_freq = sp.dsp.get_dom_freq(section_power)
#elif frame < measure_noise_frames:
#new_min_freq = sp.dsp.get_dom_freq(section_power)
#min_freq_array[frame] = new_min_freq
#min_freq = np.mean(min_freq_array[:frame+1])
f = sp.dsp.get_dom_freq(section_power)
if f < min_freq and frame < measure_noise_frames:
min_freq = f
# TODO fix when NAN value
if min_sfm is None and frame == 0:
try:
min_sfm = sp.dsp.spectral_flatness_measure(section_fft)
except TypeError:
min_sfm = 0
#elif frame < measure_noise_frames:
#new_min_sfm = sp.dsp.spectral_flatness_measure(section_fft)
#min_sfm_array[frame] = new_min_sfm
#min_sfm= np.mean(min_sfm_array[:frame+1])
# TODO fix when NAN value
try:
sfm = sp.dsp.spectral_flatness_measure(section_fft)
except TypeError:
if frame == 0:
sfm = 0
else:
pass # it should already be defined
if sfm < min_sfm and frame < measure_noise_frames:
min_sfm = sfm
# decide if speech or silence
# not finding this helpful
if frame < measure_noise_frames:
thresh_e = energy_thresh * np.log(min_energy)
else:
# what if frame == 0 and measure_noise_frames == 0?
# still samples to process
thresh_e = energy_thresh # TODO: test this fix
counter = 0
if ste - min_energy > thresh_e:
counter += 1
if f - min_freq > freq_thresh:
counter += 1
if sfm - min_sfm > sfm_thresh:
counter += 1
if counter >= 1:
vad_matrix[frame] += 1
speech += 1
# set silence back to 0 if at least 5 speech frames
if speech > 5:
silence = 0
else:
vad_matrix[frame] += 0
# update min energy and silence count
silence += 1
# not finding this helpful
if frame < measure_noise_frames:
min_energy = ((silence * min_energy) + ste) / \
(silence + 1)
# if silence has been longer than 10 frames, set speech to 0
if silence > 10:
speech = 0
# not finding this helpful
if frame < measure_noise_frames:
thresh_e = energy_thresh * np.log(min_energy)
section_start += (frame_length - num_overlap_samples)
return vad_matrix, (sr, min_energy, min_freq, min_sfm)
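# A minimal usage sketch (illustrative; assumes `samples` is a 1D array recorded
# at 48000 Hz, since the function works best above 44100 Hz):
# >>> vad_matrix, (sr, e_min, f_min, sfm_min) = vad(samples, sr=48000)
# >>> num_speech_frames = int(vad_matrix.sum())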
def suspended_energy(speech_energy,speech_energy_mean,row,start):
try:
if start == True:
if row <= len(speech_energy)-4:
if (speech_energy[row+1] > speech_energy_mean and
speech_energy[row+2] > speech_energy_mean and
speech_energy[row+3] > speech_energy_mean):
return True
else:
if row >= 3:
if (speech_energy[row-1] > speech_energy_mean and
speech_energy[row-2] > speech_energy_mean and
speech_energy[row-3] > speech_energy_mean):
return True
except IndexError as ie:
return False
def sound_index(speech_energy,speech_energy_mean,start = True):
'''Identifies the index of where speech or energy starts or ends.
'''
if start == True:
side = 1
beg = 0
end = len(speech_energy)
else:
side = -1
beg = len(speech_energy)-1
end = -1
for row in range(beg,end,side):
if speech_energy[row] > speech_energy_mean:
if suspended_energy(speech_energy, speech_energy_mean, row,start=start):
if start==True:
#to catch plosive sounds
while row >= 0:
row -= 1
row -= 1
if row < 0:
row = 0
break
return row, True
else:
#to catch quiet consonant endings
while row <= len(speech_energy):
row += 1
row += 1
if row > len(speech_energy):
row = len(speech_energy)
break
return row, True
else:
#print("No Speech Detected")
pass
return beg, False
def get_energy(stft):
'''Returns the root-mean-square energy of each row (frame) of an STFT matrix.'''
rms_list = [np.sqrt(sum(np.abs(stft[row])**2)/stft.shape[1]) for row in range(len(stft))]
return rms_list
def get_energy_mean(rms_energy):
energy_mean = sum(rms_energy)/len(rms_energy)
return energy_mean
# TODO test
def spectral_flatness_measure(spectrum):
import scipy
spectrum = np.abs(spectrum)
g = scipy.stats.gmean(spectrum)
a = np.mean(spectrum)
if not np.isfinite(a).all():
raise TypeError(a)
elif not np.isfinite(a).any():
raise ValueError(a)
sfm = 10 * np.log10(g/(a+1e-6))
return sfm
def get_dom_freq(power_values):
'''Returns the frequency bin with the greatest power. If real_signal was
used (i.e. only half the fft bins), the bin index might not map directly to Hz.
'''
freq_bin = np.argmax(power_values)
return freq_bin
def short_term_energy(signal_windowed):
'''
Expects `signal_windowed` to be scaled (-1, 1) as well as windowed.
References
----------
http://vlab.amrita.edu/?sub=3&brch=164&sim=857&cnt=1
'''
ste = sum(np.abs(signal_windowed)**2)
return ste
def bilinear_warp(fft_value, alpha):
'''Subfunction for vocal tract length perturbation.
See Also
--------
soundpy.augment.vtlp
References
----------
Kim, C., Shin, M., Garg, A., & Gowda, D. (2019). Improved vocal tract length perturbation
for a state-of-the-art end-to-end speech recognition system. Interspeech. September 15-19,
Graz, Austria.
'''
nominator = (1-alpha) * np.sin(fft_value)
denominator = 1 - (1-alpha) * np.cos(fft_value)
fft_warped = fft_value + 2 * np.arctan(nominator/(denominator + 1e-6) + 1e-6)
return fft_warped
def piecewise_linear_warp(fft_value, alpha, max_freq):
'''Subfunction for vocal tract length perturbation.
See Also
--------
soundpy.augment.vtlp
References
----------
Kim, C., Shin, M., Garg, A., & Gowda, D. (2019). Improved vocal tract length perturbation
for a state-of-the-art end-to-end speech recognition system. Interspeech. September 15-19,
Graz, Austria.
'''
if fft_value.all() <= max_freq * (min(alpha, 1)/ (alpha + 1e-6)):
fft_warped = fft_value * alpha
else:
nominator = np.pi - max_freq * (min(alpha, 1))
denominator = np.pi - max_freq * (min(alpha, 1) / (alpha + 1e-6))
fft_warped = np.pi - (nominator / denominator + 1e-6) * (np.pi - fft_value)
return fft_warped
# TODO: remove? function get_mean_freq is much better
def f0_approximation(sound, sr, low_freq = 50, high_freq = 300, **kwargs):
'''Approximates fundamental frequency.
Limits the stft of voice active sections to frequencies to between
`low_freq` and `high_freq` and takes mean of the dominant frequencies
within that range. Defaults are set at 50 and 300 as most human speech
frequencies occur between 85 and 255 Hz.
References
----------
https://en.wikipedia.org/wiki/Voice_frequency
'''
import warnings
warnings.warn('\n\nWarning: `soundpy.dsp.f0_approximation` is experimental at'+\
' best. \nPerhaps try `soundpy.dsp.get_mean_freq`, which is still '+\
'experimental but a bit more reliable.\n\n')
import scipy
if isinstance(sound, np.ndarray):
data = sound
else:
data, sr2 = sp.loadsound(sound, sr=sr)
assert sr2 == sr
stft = sp.feats.get_vad_stft(data, sr=sr, **kwargs)
# get_vad_stft now returns stft, vad_matrix but used to return just stft
if isinstance(stft, tuple):
stft = stft[0]
stft = stft[:,low_freq:high_freq]
power = np.abs(stft)**2
dom_f = []
for row in power:
dom_f.append(sp.dsp.get_dom_freq(row))
return np.mean(dom_f)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
import os
from nose.tools import assert_greater
from scrapy.selector import Selector
from .. import parse_user_arts
CURRENT_PATH = os.path.dirname(__file__)
def test_parse_user_arts():
for name in [
'tid.html',
]:
yield check_parse_user_arts, name
def check_parse_user_arts(name):
with open(os.path.join(CURRENT_PATH, name), 'rb') as f:
sel = Selector(text=f.read(), type='html')
assert_greater(len(list(parse_user_arts(sel))), 0)
|
import math
import logging
import gensim
from collections import defaultdict
from .matcher import Matcher
class WordWeightMatcher(Matcher):
"""
Uses word weights to compare the similarity of short phrases.
"""
def __init__(self, segLib="Taiba"):
super().__init__(segLib)
self.wordDictionary = defaultdict(int) # stores the occurrence count of each word
self.totalWords = 0 # total number of words
self.wordWeights = defaultdict(int) # stores the weight of each word
def initialize(self):
logging.info("初始化模塊中...")
self.TitlesSegmentation()
self.buildWordDictionary()
self.loadStopWords("data/stopwords/chinese_sw.txt")
self.loadStopWords("data/stopwords/specialMarks.txt")
self.calculateWeight()
logging.info("初始化完成 :>")
def buildWordDictionary(self):
for title in self.segTitles:
for word in title:
self.wordDictionary[word] += 1
self.totalWords += 1
logging.info("詞記數完成")
def buildWordBag(self):
dictionary = gensim.corpora.Dictionary(self.titles)
def calculateWeight(self):
# For the mathematical derivation of the algorithm, see:
# "Non-mainstream NLP - Forgetting Algorithm Series (4): An Improved TF-IDF Weighting Formula"
# http://www.52nlp.cn/forgetnlp4
# The weight stored here is the latter term, i.e. -1 * log(N/T)
for word,count in self.wordDictionary.items():
self.wordWeights[word] = -1 * math.log10(count/self.totalWords)
logging.info("詞統計完成")
def getCooccurrence(self, q1, q2):
#TODO NEED OPTIMIZE!!!!
res = []
for word in q1:
if word in q2:
res.append(word)
return res
def getWordWeight(self, word, n=1):
#TODO FIX N
return(n * self.wordWeights[word])
def match(self, query, sort=False):
"""
Reads in the user query; if an identical sentence exists in the corpus, returns that sentence and its index.
"""
max_similarity = -1
target = ""
index = -1
segQuery = [word for word in self.wordSegmentation(query)
if word not in self.stopwords]
for index,title in enumerate(self.segTitles):
if len(title) == 0:
continue
allWordsWeight = 0.
coWordsWeight = 0.
coWords = self.getCooccurrence(title, segQuery)
for word in coWords:
coWordsWeight += self.getWordWeight(word)
for word in title:
if word not in coWords:
allWordsWeight += self.getWordWeight(word)
for word in segQuery:
if word not in coWords:
allWordsWeight += self.getWordWeight(word)
similarity = coWordsWeight/allWordsWeight
if similarity > max_similarity:
max_similarity = similarity
target = title
target_idx = index
self.similarity = max_similarity * 100 # normalize to a percentage scale
return target,target_idx
|
from PhotochemPy import PhotochemPy
template = 'Archean2Proterozoic'
sun = 'Sun_2.7Ga.txt'
pc = PhotochemPy('../input/templates/'+template+'/species.dat', \
'../input/templates/'+template+'/reactions.rx', \
'../input/templates/'+template+'/settings.yaml', \
'../input/templates/'+template+'/atmosphere.txt', \
'../input/templates/'+template+'/'+sun)
converged = pc.integrate(method='Backward_Euler')
converged = pc.integrate(method='CVODE_BDF')
|
import inflection
SCHEMA = {
'Assignment': {
'create_endpoint_name': 'assignToEnvironment'
},
'EntityAssignment': {
'create_endpoint_name': 'assignToEntity'
},
'MaterialAssignment': {
'create_endpoint_name': 'assignToMaterial'
},
'Datapoint': {
'id_field_name': 'data_id'
},
'CoordinateSpace': {
'id_field_name': 'space_id'
},
'Pose2D': {
'search_endpoint_name': 'searchPoses2D',
'id_field_name': 'pose_id'
},
'Pose3D': {
'search_endpoint_name': 'searchPoses3D',
'id_field_name': 'pose_id'
},
'PoseTrack2D': {
'search_endpoint_name': 'searchPoseTracks2D',
'id_field_name': 'pose_track_id'
},
'PoseTrack3D': {
'search_endpoint_name': 'searchPoseTracks3D',
'id_field_name': 'pose_track_id'
},
'AccelerometerData': {
'search_endpoint_name': 'searchAccelerometerData'
},
'GyroscopeData': {
'search_endpoint_name': 'searchGyroscopeData'
},
'MagnetometerData': {
'search_endpoint_name': 'searchMagnetometerData'
}
}
def create_endpoint_name(object_name):
name = SCHEMA.get(object_name, {}).get('create_endpoint_name')
if name is None:
name = 'create' + object_name
return name
def create_endpoint_argument_name(object_name):
name = SCHEMA.get(object_name, {}).get('create_endpoint_argument_name')
if name is None:
name = inflection.camelize(object_name, uppercase_first_letter=False)
return name
def create_endpoint_argument_type(object_name):
name = SCHEMA.get(object_name, {}).get('create_endpoint_argument_type')
if name is None:
name = object_name + 'Input'
return name
def update_endpoint_name(object_name):
name = SCHEMA.get(object_name, {}).get('update_endpoint_name')
if name is None:
name = 'update' + object_name
return name
def update_endpoint_argument_name(object_name):
name = SCHEMA.get(object_name, {}).get('update_endpoint_argument_name')
if name is None:
name = inflection.camelize(object_name, uppercase_first_letter=False)
return name
def update_endpoint_argument_type(object_name):
name = SCHEMA.get(object_name, {}).get('update_endpoint_argument_type')
if name is None:
name = object_name + 'UpdateInput'
return name
def search_endpoint_name(object_name):
name = SCHEMA.get(object_name, {}).get('search_endpoint_name')
if name is None:
name = 'search' + object_name + 's'
return name
def delete_endpoint_name(object_name):
name = SCHEMA.get(object_name, {}).get('delete_endpoint_name')
if name is None:
name = 'delete' + object_name
return name
def id_field_name(object_name):
name = SCHEMA.get(object_name, {}).get('id_field_name')
if name is None:
name = inflection.underscore(object_name) + '_id'
return name
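# Examples of the naming fallbacks (illustrative; 'Device' is a hypothetical
# object name not present in SCHEMA):
# >>> create_endpoint_name('Assignment')    # overridden in SCHEMA
# 'assignToEnvironment'
# >>> create_endpoint_name('Device')        # default: 'create' + object name
# 'createDevice'
# >>> id_field_name('Datapoint')            # overridden in SCHEMA
# 'data_id'
# >>> id_field_name('Device')               # default: snake_case + '_id'
# 'device_id'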
|
#!/usr/bin/env python
'''
Use Netmiko to connect to each of the devices in the database. Execute 'show version' on each device.
Record the amount of time required to do this.
DISCLAIMER NOTE: Solution is limited to the exercise's scope
'''
from net_system.models import NetworkDevice
import django
from termcolor import colored
from datetime import datetime
from netmiko import ConnectHandler
def sh_ver(a_device):
# Execute cmd with NETMIKO
creds = a_device.credentials
rem_conn_ssh = ConnectHandler(device_type=a_device.device_type, ip=a_device.ip_address, username=creds.username,
password=creds.password, port=a_device.port, secret='')
# Output cmd
output = rem_conn_ssh.send_command_expect("show version")
print "\n <<--------------------------->> \n "+ colored(output, 'green') + "\n"
def main():
# Main function to connect to the devices using NETMIKO and execute a cmd.
django.setup()
# Record start time
start_time = datetime.now()
pylab_devices = NetworkDevice.objects.all()
for a_device in pylab_devices:
sh_ver(a_device)
# Function sh_ver runtime calculation
runtime = datetime.now() - start_time
print "This operation required " + colored(runtime, 'red')
if __name__ == "__main__":
main()
|
# coding: utf-8
# Author: Marc Weber
"""
=========================================================================================
Multiple sequence alignment
=========================================================================================
We follow the definition of conservation score in @Capra2007 by computing the Jensen-Shannon divergence (JSD) of MSA columns.
I found the source code of the protein residue conservation prediction score by the Capra lab,
[here](http://compbio.cs.princeton.edu/conservation/). Adapted the source code and verified that we get the same results
as in the web tool.
Capra2007: Capra, J. A., & Singh, M. (2007). Predicting functionally important residues from sequence conservation.
Bioinformatics, 23(15), 1875–1882. http://doi.org/10.1093/bioinformatics/btm270 """
import subprocess
from subprocess import CalledProcessError
import shlex
from pathlib import Path
import re
import numpy as np
# This is the BLOSUM62 distribution. It is the default background distribution.
blosum62_bg_dist = dict(zip(
['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V'],
[float(x) for x in "0.078 0.051 0.041 0.052 0.024 0.034 0.059 0.083 0.025 0.062 0.092 0.056 0.024 0.044 0.043 0.059 0.055 0.014 0.034 0.072".split()]
))
amino_acids = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V', '-']
PSEUDOCOUNT = .0000001
def weighted_freq_count_pseudocount(col, seq_weights, pc_amount):
""" Return the weighted frequency count for a column--with pseudocount."""
# if the weights do not match, use equal weight
if len(seq_weights) != len(col):
seq_weights = [1.] * len(col)
aa_num = 0
freq_counts = dict(zip(amino_acids, len(amino_acids)*[pc_amount]))
for aa in amino_acids:
for j in range(len(col)):
if col[j] == aa:
freq_counts[aa] = freq_counts[aa] + 1 * seq_weights[j]
for aa in amino_acids:
freq_counts[aa] = freq_counts[aa] / (sum(seq_weights) + len(amino_acids) * pc_amount)
return freq_counts
def weighted_gap_penalty(col, seq_weights):
""" Calculate the simple gap penalty multiplier for the column. If the
sequences are weighted, the gaps, when penalized, are weighted
accordingly. """
# if the weights do not match, use equal weight
if len(seq_weights) != len(col):
seq_weights = [1.] * len(col)
gap_sum = 0.
for i in range(len(col)):
if col[i] == '-':
gap_sum += seq_weights[i]
return 1 - (gap_sum / sum(seq_weights))
def msa_Jensen_Shannon_divergence(col, bg_distr, seq_weights, gap_penalty=True):
""" Return the Jensen-Shannon Divergence for the column with the background
distribution bg_distr. JSD is the default conservation scoring method."""
fc = weighted_freq_count_pseudocount(col, seq_weights, PSEUDOCOUNT)
# if background distribution lacks a gap count, remove fc gap count
aaList = amino_acids
if '-' not in bg_distr.keys():
aaList = [aa for aa in aaList if aa != '-']
fc = {aa:fc[aa] for aa in aaList}
if fc.keys() != bg_distr.keys():
raise ValueError("fc and bg_distr amino acids are not the same.")
p = np.array([fc[aa] for aa in aaList])
q = np.array([bg_distr[aa] for aa in aaList])
p = p/sum(p)
q = q/sum(q)
r = 0.5 * p + 0.5 * q
d = 0.
for i in range(len(p)):
if r[i] != 0.0:
if p[i] == 0.0:
d += q[i] * np.log2(q[i]/r[i])
elif q[i] == 0.0:
d += p[i] * np.log2(p[i]/r[i])
else:
d += p[i] * np.log2(p[i]/r[i]) + q[i] * np.log2(q[i]/r[i])
d /= 2.
if gap_penalty:
return d * weighted_gap_penalty(col, seq_weights)
else:
return d
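# A minimal sketch of scoring one alignment column against the BLOSUM62
# background distribution (illustrative; equal sequence weights):
# >>> col = ['A', 'A', 'A', 'R', '-']
# >>> seq_weights = [1.] * len(col)
# >>> score = msa_Jensen_Shannon_divergence(col, blosum62_bg_dist, seq_weights)
# >>> 0.0 <= score <= 1.0
# True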
def multiple_sequence_alignment(fastafile, outputDir=None, nThreads=1, tool='t-coffee', toolOptions=None, verbose=True):
"""
"""
if outputDir is None:
outputPath = Path(fastafile).resolve().parent
else:
outputPath = Path(outputDir)
print("outputDir", outputDir)
fastafileOut = str(outputPath / re.sub(r'(.+)\.f[na]a', r'\1.aln', Path(fastafile).name))
if tool == 't-coffee':
cmd = 't_coffee -n_core={:d} -seq "{}"'.format(nThreads, fastafile)
elif tool == 'mafft':
if not toolOptions:
toolOptions = '--globalpair'
cmd = 'mafft {} --clustalout --maxiterate 1000 --thread {:d} "{}" > "{}"'\
.format(toolOptions, nThreads, fastafile, fastafileOut)
else:
raise ValueError("Unknown MSA tool: {}".format(tool))
if verbose:
print("MSA tool:", tool)
print("fastafileOut:", fastafileOut)
print("cmd:", cmd)
# cmd = shlex.split(cmd)
stderr = subprocess.STDOUT if verbose else subprocess.DEVNULL
try:
cmd_output = subprocess.check_output(cmd, stderr=stderr, cwd=str(outputPath), shell=True)
cmd_output = re.sub(r'\\n','\n',str(cmd_output))
if verbose: print(cmd_output)
except CalledProcessError as err:
print(err.output)
print(err.stderr)
if tool == 'mafft':
print("Alignment had an error, we try the same with the --anysymbol option")
cmd = 'mafft --anysymbol {} --clustalout --maxiterate 1000 --thread {:d} "{}" > "{}"'\
.format(toolOptions, nThreads, fastafile, fastafileOut)
cmd_output = subprocess.check_output(cmd, stderr=stderr, cwd=str(outputPath), shell=True)
cmd_output = re.sub(r'\\n','\n',str(cmd_output))
if verbose: print(cmd_output)
else:
raise
with open(fastafileOut, 'r') as f:
MSA_result = f.read()
return MSA_result, fastafileOut
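# A minimal usage sketch (illustrative; assumes `mafft` is installed and that
# 'proteins.faa' exists in the working directory):
# >>> msa_text, aln_path = multiple_sequence_alignment(
# ...     'proteins.faa', tool='mafft', nThreads=4)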
|
# pylint: disable=import-outside-toplevel
# pylint: disable=R0904
import json
import logging
import re
import time
from time import sleep
from uuid import uuid4
import docker
from botocore import UNSIGNED
from botocore.config import Config
from rpdk.core.contract.interface import Action, HandlerErrorCode, OperationStatus
from rpdk.core.exceptions import InvalidProjectError
from ..boto_helpers import (
LOWER_CAMEL_CRED_KEYS,
create_sdk_session,
get_account,
get_temporary_credentials,
)
from ..jsonutils.pointer import fragment_decode, fragment_list
from ..jsonutils.utils import traverse
LOG = logging.getLogger(__name__)
def prune_properties(document, paths):
"""Prune given properties from a document.
This assumes properties will always have an object (dict) as a parent.
The function modifies the document in-place, but also returns the document
for convenience. (The return value may be ignored.)
"""
for path in paths:
try:
_prop, resolved_path, parent = traverse(document, path)
except LookupError:
pass # not found means nothing to delete
else:
key = resolved_path[-1]
del parent[key]
return document
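# Illustrative sketch (hypothetical document, using the same tuple paths that
# fragment_decode produces): pruning ("properties", "Arn") removes that key
# from the nested dict in place.
#
#   document = {"properties": {"Arn": "abc", "Name": "x"}}
#   prune_properties(document, {("properties", "Arn")})
#   # document == {"properties": {"Name": "x"}}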
def prune_properties_if_not_exist_in_path(output_model, input_model, paths):
    """Prune given properties from a model.
    This assumes properties will always have an object (dict) as a parent.
    The function returns the model after removing every path that appears in
    the paths tuple but is not present in input_model.
    """
output_document = {"properties": output_model.copy()}
input_document = {"properties": input_model.copy()}
for path in paths:
try:
if not path_exists(input_document, path):
_prop, resolved_path, parent = traverse(output_document, path)
key = resolved_path[-1]
del parent[key]
except LookupError:
pass
return output_document["properties"]
def prune_properties_which_dont_exist_in_path(model, paths):
    """Prune the model down to the properties present in paths. Any property
    of the model whose path does not appear in the paths tuple is removed.
    This assumes properties will always have an object (dict) as a parent.
    The function returns the pruned model.
    """
document = {"properties": model.copy()}
for model_path in model.keys():
path_tuple = ("properties", model_path)
if path_tuple not in paths:
_prop, resolved_path, parent = traverse(document, path_tuple)
key = resolved_path[-1]
del parent[key]
return document["properties"]
def path_exists(document, path):
try:
_prop, _resolved_path, _parent = traverse(document, path)
except LookupError:
return False
else:
return True
def prune_properties_from_model(model, paths):
"""Prune given properties from a resource model.
This assumes the dict passed in is a resources model i.e. solely the properties.
This also assumes the paths to remove are prefixed with "/properties",
as many of the paths are defined in the schema
The function modifies the document in-place, but also returns the document
for convenience. (The return value may be ignored.)
"""
return prune_properties({"properties": model}, paths)["properties"]
def override_properties(document, overrides):
"""Override given properties from a document."""
for path, obj in overrides.items():
try:
_prop, resolved_path, parent = traverse(document, path)
except LookupError:
LOG.debug(
"Override failed.\nPath %s\nObject %s\nDocument %s", path, obj, document
)
LOG.warning("Override with path %s not found, skipping", path)
else:
key = resolved_path[-1]
parent[key] = obj
return document
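# Illustrative sketch (hypothetical overrides, same tuple-path convention as
# above): existing keys are replaced; paths that cannot be resolved are logged
# and skipped.
#
#   document = {"properties": {"Name": "x"}}
#   override_properties(document, {("properties", "Name"): "y"})
#   # document == {"properties": {"Name": "y"}}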
class ResourceClient: # pylint: disable=too-many-instance-attributes
def __init__(
self,
function_name,
endpoint,
region,
schema,
overrides,
inputs=None,
role_arn=None,
timeout_in_seconds="30",
type_name=None,
log_group_name=None,
log_role_arn=None,
docker_image=None,
executable_entrypoint=None,
): # pylint: disable=too-many-arguments
self._schema = schema
self._session = create_sdk_session(region)
self._role_arn = role_arn
self._type_name = type_name
self._log_group_name = log_group_name
self._log_role_arn = log_role_arn
self.region = region
self.account = get_account(
self._session,
get_temporary_credentials(self._session, LOWER_CAMEL_CRED_KEYS, role_arn),
)
self._function_name = function_name
if endpoint.startswith("http://"):
self._client = self._session.client(
"lambda",
endpoint_url=endpoint,
use_ssl=False,
verify=False,
config=Config(
signature_version=UNSIGNED,
# needs to be long if docker is running on a slow machine
read_timeout=5 * 60,
retries={"max_attempts": 0},
region_name=self._session.region_name,
),
)
else:
self._client = self._session.client("lambda", endpoint_url=endpoint)
self._schema = None
self._strategy = None
self._update_strategy = None
self._invalid_strategy = None
self._overrides = overrides
self._update_schema(schema)
self._inputs = inputs
self._timeout_in_seconds = int(timeout_in_seconds)
self._docker_image = docker_image
self._docker_client = docker.from_env() if self._docker_image else None
self._executable_entrypoint = executable_entrypoint
def _properties_to_paths(self, key):
return {fragment_decode(prop, prefix="") for prop in self._schema.get(key, [])}
def _update_schema(self, schema):
# TODO: resolve $ref
self._schema = schema
self._strategy = None
self._update_strategy = None
self._invalid_strategy = None
self.primary_identifier_paths = self._properties_to_paths("primaryIdentifier")
self.read_only_paths = self._properties_to_paths("readOnlyProperties")
self.write_only_paths = self._properties_to_paths("writeOnlyProperties")
self.create_only_paths = self._properties_to_paths("createOnlyProperties")
self.properties_without_insertion_order = self.get_metadata()
additional_identifiers = self._schema.get("additionalIdentifiers", [])
self._additional_identifiers_paths = [
{fragment_decode(prop, prefix="") for prop in identifier}
for identifier in additional_identifiers
]
def has_only_writable_identifiers(self):
return all(
path in self.create_only_paths for path in self.primary_identifier_paths
)
def assert_write_only_property_does_not_exist(self, resource_model):
if self.write_only_paths:
assert not any(
self.key_error_safe_traverse(resource_model, write_only_property)
for write_only_property in self.write_only_paths
), "The model MUST NOT return properties defined as \
writeOnlyProperties in the resource schema"
def get_metadata(self):
try:
properties = self._schema["properties"]
except KeyError:
return set()
else:
return {
prop
for prop in properties.keys()
if "insertionOrder" in properties[prop]
and properties[prop]["insertionOrder"] == "false"
}
@property
def strategy(self):
# an empty strategy (i.e. false-y) is valid
if self._strategy is not None:
return self._strategy
# imported here to avoid hypothesis being loaded before pytest is loaded
from .resource_generator import ResourceGenerator
# make a copy so the original schema is never modified
schema = json.loads(json.dumps(self._schema))
prune_properties(schema, self.read_only_paths)
self._strategy = ResourceGenerator(schema).generate_schema_strategy(schema)
return self._strategy
@property
def invalid_strategy(self):
# an empty strategy (i.e. false-y) is valid
if self._invalid_strategy is not None:
return self._invalid_strategy
# imported here to avoid hypothesis being loaded before pytest is loaded
from .resource_generator import ResourceGenerator
# make a copy so the original schema is never modified
schema = json.loads(json.dumps(self._schema))
self._invalid_strategy = ResourceGenerator(schema).generate_schema_strategy(
schema
)
return self._invalid_strategy
@property
def update_strategy(self):
# an empty strategy (i.e. false-y) is valid
if self._update_strategy is not None:
return self._update_strategy
# imported here to avoid hypothesis being loaded before pytest is loaded
from .resource_generator import ResourceGenerator
# make a copy so the original schema is never modified
schema = json.loads(json.dumps(self._schema))
prune_properties(schema, self.read_only_paths)
prune_properties(schema, self.create_only_paths)
self._update_strategy = ResourceGenerator(schema).generate_schema_strategy(
schema
)
return self._update_strategy
def generate_create_example(self):
if self._inputs:
return self._inputs["CREATE"]
example = self.strategy.example()
return override_properties(example, self._overrides.get("CREATE", {}))
def generate_invalid_create_example(self):
if self._inputs:
return self._inputs["INVALID"]
example = self.invalid_strategy.example()
return override_properties(example, self._overrides.get("CREATE", {}))
def get_unique_keys_for_model(self, create_model):
return {
k: v
for k, v in create_model.items()
if self.is_property_in_path(k, self.primary_identifier_paths)
or any(
map(
lambda additional_identifier_paths, key=k: self.is_property_in_path(
key, additional_identifier_paths
),
self._additional_identifiers_paths,
)
)
}
@staticmethod
def is_property_in_path(key, paths):
for path in paths:
prop = fragment_list(path, "properties")[0]
if prop == key:
return True
return False
def validate_update_example_keys(self, unique_identifiers, update_example):
for primary_identifier in self.primary_identifier_paths:
if primary_identifier in self.create_only_paths:
primary_key = fragment_list(primary_identifier, "properties")[0]
assert update_example[primary_key] == unique_identifiers[primary_key], (
"Any createOnlyProperties specified in update handler input "
"MUST NOT be different from their previous state"
)
def generate_update_example(self, create_model):
if self._inputs:
unique_identifiers = self.get_unique_keys_for_model(create_model)
update_example = self._inputs["UPDATE"]
if self.create_only_paths:
self.validate_update_example_keys(unique_identifiers, update_example)
update_example.update(unique_identifiers)
create_model_with_read_only_properties = (
prune_properties_which_dont_exist_in_path(
create_model, self.read_only_paths
)
)
return {**create_model_with_read_only_properties, **update_example}
overrides = self._overrides.get("UPDATE", self._overrides.get("CREATE", {}))
example = override_properties(self.update_strategy.example(), overrides)
return {**create_model, **example}
def generate_invalid_update_example(self, create_model):
if self._inputs:
return self._inputs["INVALID"]
overrides = self._overrides.get("UPDATE", self._overrides.get("CREATE", {}))
example = override_properties(self.invalid_strategy.example(), overrides)
return {**create_model, **example}
def compare(self, inputs, outputs):
assertion_error_message = (
"All properties specified in the request MUST "
"be present in the model returned, and they MUST"
" match exactly, with the exception of properties"
" defined as writeOnlyProperties in the resource schema"
)
try:
if isinstance(inputs, dict):
for key in inputs:
if isinstance(inputs[key], dict):
self.compare(inputs[key], outputs[key])
elif isinstance(inputs[key], list):
assert len(inputs[key]) == len(outputs[key])
self.compare_list(inputs[key], outputs[key])
else:
assert inputs[key] == outputs[key], assertion_error_message
else:
assert inputs == outputs, assertion_error_message
except Exception as exception:
raise AssertionError(assertion_error_message) from exception
def compare_list(self, inputs, outputs):
for index in range(len(inputs)): # pylint: disable=C0200
self.compare(inputs[index], outputs[index])
@staticmethod
def key_error_safe_traverse(resource_model, write_only_property):
try:
return traverse(
resource_model, fragment_list(write_only_property, "properties")
)[0]
except KeyError:
return None
@staticmethod
def assert_in_progress(status, response):
assert status == OperationStatus.IN_PROGRESS, "status should be IN_PROGRESS"
assert (
response.get("errorCode", 0) == 0
), "IN_PROGRESS events should have no error code set"
assert (
response.get("resourceModels") is None
), "IN_PROGRESS events should not include any resource models"
return response.get("callbackDelaySeconds", 0)
@staticmethod
def assert_success(status, response):
assert status == OperationStatus.SUCCESS, "status should be SUCCESS"
assert (
response.get("errorCode", 0) == 0
), "SUCCESS events should have no error code set"
assert (
response.get("callbackDelaySeconds", 0) == 0
), "SUCCESS events should have no callback delay"
@staticmethod
def assert_failed(status, response):
assert status == OperationStatus.FAILED, "status should be FAILED"
assert "errorCode" in response, "FAILED events must have an error code set"
# raises a KeyError if the error code is invalid
error_code = HandlerErrorCode[response["errorCode"]]
assert (
response.get("callbackDelaySeconds", 0) == 0
), "FAILED events should have no callback delay"
assert (
response.get("resourceModels") is None
), "FAILED events should not include any resource models"
return error_code
@staticmethod
# pylint: disable=R0913
def make_request(
desired_resource_state,
previous_resource_state,
region,
account,
action,
creds,
type_name,
log_group_name,
log_creds,
token,
callback_context=None,
**kwargs,
):
request_body = {
"requestData": {
"callerCredentials": creds,
"resourceProperties": desired_resource_state,
"previousResourceProperties": previous_resource_state,
"logicalResourceId": token,
},
"region": region,
"awsAccountId": account,
"action": action,
"callbackContext": callback_context,
"bearerToken": token,
"resourceType": type_name,
**kwargs,
}
if log_group_name and log_creds:
request_body["requestData"]["providerCredentials"] = log_creds
request_body["requestData"]["providerLogGroupName"] = log_group_name
return request_body
@staticmethod
def generate_token():
return str(uuid4())
def assert_time(self, start_time, end_time, action):
timeout_in_seconds = (
self._timeout_in_seconds
if action in (Action.READ, Action.LIST)
else self._timeout_in_seconds * 2
)
assert end_time - start_time <= timeout_in_seconds, (
"Handler %r timed out." % action
)
@staticmethod
def assert_primary_identifier(primary_identifier_paths, resource_model):
try:
assert all(
traverse(
resource_model, fragment_list(primary_identifier, "properties")
)[0]
for primary_identifier in primary_identifier_paths
), "Every returned model MUST include the primaryIdentifier"
except KeyError as e:
raise AssertionError(
"Every returned model MUST include the primaryIdentifier"
) from e
@staticmethod
def is_primary_identifier_equal(
primary_identifier_path, created_model, updated_model
):
try:
return all(
traverse(
created_model, fragment_list(primary_identifier, "properties")
)[0]
== traverse(
updated_model, fragment_list(primary_identifier, "properties")
)[0]
for primary_identifier in primary_identifier_path
)
except KeyError as e:
raise AssertionError(
"The primaryIdentifier returned in every progress event must\
match the primaryIdentifier passed into the request"
) from e
def _make_payload(self, action, current_model, previous_model=None, **kwargs):
return self.make_request(
current_model,
previous_model,
self.region,
self.account,
action,
get_temporary_credentials(
self._session, LOWER_CAMEL_CRED_KEYS, self._role_arn
),
self._type_name,
self._log_group_name,
get_temporary_credentials(
self._session, LOWER_CAMEL_CRED_KEYS, self._log_role_arn
),
self.generate_token(),
**kwargs,
)
def _call(self, payload):
request_without_write_properties = prune_properties(
payload["requestData"]["resourceProperties"], self.write_only_paths
)
previous_request_without_write_properties = None
if payload["requestData"]["previousResourceProperties"]:
previous_request_without_write_properties = prune_properties(
payload["requestData"]["previousResourceProperties"],
self.write_only_paths,
)
payload_to_log = {
"callbackContext": payload["callbackContext"],
"action": payload["action"],
"requestData": {
"resourceProperties": request_without_write_properties,
"previousResourceProperties": previous_request_without_write_properties,
"logicalResourceId": payload["requestData"]["logicalResourceId"],
},
"region": payload["region"],
"awsAccountId": payload["awsAccountId"],
"bearerToken": payload["bearerToken"],
}
LOG.debug(
"Sending request\n%s",
json.dumps(payload_to_log, ensure_ascii=False, indent=2),
)
payload = json.dumps(payload, ensure_ascii=False, indent=2)
if self._docker_image:
if not self._executable_entrypoint:
raise InvalidProjectError(
"executableEntrypoint not set in .rpdk-config. "
"Have you run cfn generate?"
)
result = (
self._docker_client.containers.run(
self._docker_image,
self._executable_entrypoint + " '" + payload + "'",
)
.decode()
.strip()
)
LOG.debug("=== Handler execution logs ===")
LOG.debug(result)
# pylint: disable=W1401
regex = "__CFN_RESOURCE_START_RESPONSE__([\s\S]*)__CFN_RESOURCE_END_RESPONSE__" # noqa: W605,B950 # pylint: disable=C0301
payload = json.loads(re.search(regex, result).group(1))
else:
result = self._client.invoke(
FunctionName=self._function_name, Payload=payload.encode("utf-8")
)
try:
payload = json.load(result["Payload"])
except json.decoder.JSONDecodeError as json_error:
LOG.debug("Received invalid response\n%s", result["Payload"])
raise ValueError(
"Handler Output is not a valid JSON document"
) from json_error
LOG.debug("Received response\n%s", payload)
return payload
def call_and_assert(
self, action, assert_status, current_model, previous_model=None, **kwargs
):
if not self.has_required_handlers():
raise ValueError("Create/Read/Delete handlers are required")
if assert_status not in [OperationStatus.SUCCESS, OperationStatus.FAILED]:
raise ValueError("Assert status {} not supported.".format(assert_status))
status, response = self.call(action, current_model, previous_model, **kwargs)
if assert_status == OperationStatus.SUCCESS:
self.assert_success(status, response)
error_code = None
else:
error_code = self.assert_failed(status, response)
return status, response, error_code
def call(self, action, current_model, previous_model=None, **kwargs):
request = self._make_payload(action, current_model, previous_model, **kwargs)
start_time = time.time()
response = self._call(request)
self.assert_time(start_time, time.time(), action)
# this throws a KeyError if status isn't present, or if it isn't a valid status
status = OperationStatus[response["status"]]
if action in (Action.READ, Action.LIST):
assert status != OperationStatus.IN_PROGRESS
return status, response
while status == OperationStatus.IN_PROGRESS:
callback_delay_seconds = self.assert_in_progress(status, response)
self.assert_primary_identifier(
self.primary_identifier_paths, response.get("resourceModel")
)
sleep(callback_delay_seconds)
request["requestData"]["resourceProperties"] = response.get("resourceModel")
request["callbackContext"] = response.get("callbackContext")
response = self._call(request)
status = OperationStatus[response["status"]]
# ensure writeOnlyProperties are not returned on final responses
if "resourceModel" in response.keys() and status == OperationStatus.SUCCESS:
self.assert_write_only_property_does_not_exist(response["resourceModel"])
return status, response
def has_update_handler(self):
return "update" in self._schema["handlers"]
def has_required_handlers(self):
try:
has_delete = "delete" in self._schema["handlers"]
has_create = "create" in self._schema["handlers"]
has_read = "read" in self._schema["handlers"]
return has_read and has_create and has_delete
except KeyError:
return False
|
from __future__ import absolute_import
import sys, os, shutil
import forcebalance.parser
import unittest
from __init__ import ForceBalanceTestCase
class TestParser(ForceBalanceTestCase):
def test_parse_inputs_returns_tuple(self):
"""Check parse_inputs() returns type"""
output = forcebalance.parser.parse_inputs('test/files/very_simple.in')
self.assertEqual(type(output), tuple,
msg = "\nExpected parse_inputs() to return a tuple, but got a %s instead" % type(output).__name__)
self.assertEqual(type(output[0]), dict,
msg = "\nExpected parse_inputs()[0] to be an options dictionary, got a %s instead" % type(output).__name__)
self.assertEqual(type(output[1]), list,
msg = "\nExpected parse_inputs()[1] to be a target list, got a %s instead" % type(output[1]).__name__)
self.assertEqual(type(output[1][0]), dict,
msg = "\nExpected parse_inputs()[1][0] to be a target dictionary, got a %s instead" % type(output[1]).__name__)
def test_parse_inputs_generates_default_options(self):
"""Check parse_inputs() without arguments generates dictionary of default options"""
output = forcebalance.parser.parse_inputs()
        defaults = forcebalance.parser.gen_opts_defaults.copy()
defaults.update({'root':os.getcwd()})
defaults.update({'input_file':None})
target_defaults = forcebalance.parser.tgt_opts_defaults
self.assertEqual(output[0], defaults,
            msg="\nparse_inputs() general options do not match those in forcebalance.parser.gen_opts_defaults")
self.assertEqual(output[1][0], target_defaults,
msg="\nparse_inputs() target options do not match those in forcebalance.parser.tgt_opts_defaults")
def test_parse_inputs_yields_consistent_results(self):
"""Check parse_inputs() gives consistent results"""
output1 = forcebalance.parser.parse_inputs('test/files/very_simple.in')
output2 = forcebalance.parser.parse_inputs('test/files/very_simple.in')
self.assertEqual(output1,output2)
os.chdir('test/files')
output3 = forcebalance.parser.parse_inputs('very_simple.in')
output4 = forcebalance.parser.parse_inputs('very_simple.in')
self.assertEqual(output3,output4)
# directory change should lead to different result in output['root']
self.assertNotEqual(output1,output3)
# different parameters from the same file should yield different results
shutil.copyfile('0.energy_force.in', 'test.in')
output5 = forcebalance.parser.parse_inputs('test.in')
shutil.copyfile('1.netforce_torque.in','test.in')
output6 = forcebalance.parser.parse_inputs('test.in')
self.assertNotEqual(output5,output6)
os.remove('test.in')
if __name__ == '__main__':
unittest.main()
|
import json
from collections import Counter
def read_depparse_features(data_path):
with open(data_path, "r") as f:
examples = [json.loads(jsonline) for jsonline in f.readlines()]
dependencies = []
predicted_heads = []
predicted_dependencies = []
for example in examples:
doc_key = example['doc_key']
depparse_features = example['depparse_features']
dependency_features = []
head_features = []
predicted_dependency_features = []
for sent_feature in depparse_features:
temp_dependency = []
temp_predicted_head = []
temp_predicted_dependency = []
for feature in sent_feature:
word_id = feature['id']
head_id = feature['head_id']
deprel = feature['deprel']
temp_dependency.append([deprel, head_id, word_id])
temp_predicted_head.append(head_id)
temp_predicted_dependency.append(deprel)
dependency_features.append(temp_dependency)
head_features.append(temp_predicted_head)
predicted_dependency_features.append(temp_predicted_dependency)
dependencies.append(dependency_features)
predicted_heads.append(head_features)
predicted_dependencies.append(predicted_dependency_features)
print("num of examples {}".format(len(dependencies)))
return dependencies, predicted_heads, predicted_dependencies
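# The fields read above imply each input line is a JSON document shaped roughly
# like the following (illustrative example, not taken from real data):
#
#   {"doc_key": "doc_1",
#    "depparse_features": [
#        [{"id": 1, "head_id": 2, "deprel": "nsubj"},
#         {"id": 2, "head_id": 0, "deprel": "root"}]]}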
def build_dep_tag_vocab(dependencies, vocab_size=1000, min_freq=0):
counter = Counter()
for sent_dependency in dependencies:
for dependency in sent_dependency:
counter.update(dependency)
dep_tags = ['<pad>']
min_freq = max(min_freq, 1)
words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
for word, freq in words_and_frequencies:
if freq < min_freq or len(dep_tags) == vocab_size:
break
if word == '<pad>':
continue
dep_tags.append(word)
tag2id = {tag: i for i, tag in enumerate(dep_tags)}
keys = list(tag2id.keys())
tags = []
for i in range(len(tag2id)):
if i < 2:
tags.append(keys[i])
continue
key = keys[i]
if key == "root":
continue
tags.append(key)
tag2id = {tag: i for i, tag in enumerate(tags)}
return dep_tags, tag2id
if __name__ == '__main__':
language = "english"
total_dependencies = []
for name in ["train", "dev", "test"]:
data_path = "conll_data/{}.{}.depparse.allennlp.jsonlines".format(name, language)
dependencies, predicted_heads, predicted_dependencies = read_depparse_features(data_path)
total_dependencies.extend(predicted_dependencies)
dep_tags, tag2id = build_dep_tag_vocab(total_dependencies)
print("dependency tag size {}".format(len(tag2id)))
with open("conll_data/dep_allennlp_vocab.txt", "w") as f:
for tag, id in tag2id.items():
f.write(tag + " " + str(id) + "\n")
|
import tkinter as tk
import tkinter.ttk as ttk
import Inbound
import MainInventory
import Outbound
from Database import Database
database = Database()
def main():
window = tk.Tk()
window.title("UNCW Football!")
tab_control = ttk.Notebook(window)
inventory = MainInventory.MainInventory(tab_control, database)
tab_control.add(inventory.tab, text="Main Inventory")
inbound = Inbound.Inbound(tab_control, database)
tab_control.add(inbound.tab, text="Inbound Shipments")
outbound = Outbound.Outbound(tab_control, database)
tab_control.add(outbound.tab, text="Outbound Shipments")
tab_control.pack(expand=1, fill="both")
# # TAB2~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# tab2 = ttk.Frame((tabControl))
# tabControl.add(tab2, text="Outbound")
# tabControl.pack(expand=1, fill="both")
# columns = ('id', 'name', 'amount', 'shipdate', 'destination')
# table = ttk.Treeview(tab2, columns=columns, show='headings')
#
# new_item = ttk.Button(tab2, text="Outbound", command=lambda: create_item(table))
# new_item.pack()
#
# table.heading('id', text='ID', anchor='w', command=lambda: sort_table(table, "id", False))
# table.column('id', anchor="w", width=50)
# table.heading('name', text='Product Name', command=lambda: sort_table(table, "name", False))
# table.column('name', anchor='center', width=200)
# table.heading('shipdate', text='Shipping Date', command=lambda: sort_table(table, "shipdate", False))
# table.column('shipdate', anchor='center', width=100)
# table.heading('destination', text='Destination', command=lambda: sort_table(table, "destination", False))
# table.column('destination', anchor='center', width=100)
# update_table(table)
# table.pack()
# # TAB3~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# tab3 = ttk.Frame((tabControl))
# tabControl.add(tab3, text="Inbound")
# tabControl.pack(expand=1, fill="both")
# columns = ('id', 'name', 'amount', 'arrivaldate','tracking')
# table = ttk.Treeview(tab3, columns=columns, show='headings')
#
# new_item = ttk.Button(tab3, text="Inbound", command=lambda: create_item(table))
# new_item.pack()
#
# table.heading('id', text='ID', anchor='w', command=lambda: sort_table(table, "id", False))
# table.column('id', anchor="w", width=50)
# table.heading('name', text='Product Name', command=lambda: sort_table(table, "name", False))
# table.column('name', anchor='center', width=200)
# table.heading('arrivaldate', text='Arrival Date', command=lambda: sort_table(table, "arrivaldate", False))
# table.column('arrivaldate', anchor='center', width=100)
# table.heading('tracking', text='Tracking Number', command=lambda: sort_table(table, "tracking", False))
# table.column('tracking', anchor='center', width=100)
# update_table(table)
# table.pack()
#
# # TAB4~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# tab4 = ttk.Frame((tabControl))
# tabControl.add(tab4, text="Users")
# tabControl.pack(expand=1, fill="both")
# columns = ('id', 'name', 'email', 'shift')
# table = ttk.Treeview(tab4, columns=columns, show='headings')
#
# new_item = ttk.Button(tab4, text="New Item", command=lambda: create_item(table))
# new_item.pack()
#
# table.heading('id', text='ID', anchor='w', command=lambda: sort_table(table, "id", False))
# table.column('id', anchor="w", width=50)
# table.heading('name', text='Employee Name', command=lambda: sort_table(table, "name", False))
# table.column('name', anchor='center', width=200)
# table.heading('email', text='Email', command=lambda: sort_table(table, "email", False))
# table.column('email', anchor='center', width=100)
# table.heading('shift', text='Shift', command=lambda: sort_table(table, "shift", False))
# table.column('shift', anchor='center', width=100)
# update_table(table)
# table.pack()
window.mainloop()
if __name__ == '__main__':
main()
|
import pytest
from admin.maintenance import views
from django.utils import timezone
from django.test import RequestFactory
from django.contrib.auth.models import Permission
from django.core.exceptions import PermissionDenied
import website.maintenance as maintenance
from osf.models import MaintenanceState
from osf_tests.factories import AuthUserFactory
from admin_tests.utilities import setup_view
pytestmark = pytest.mark.django_db
@pytest.fixture
def date():
return timezone.now()
@pytest.fixture()
def maintenance_alert():
maintenance.set_maintenance('')
return maintenance.get_maintenance()
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.fixture()
def req(user):
req = RequestFactory().get('/fake_path')
req.user = user
return req
@pytest.mark.urls('admin.base.urls')
class TestMaintenanceDisplay:
@pytest.fixture()
def plain_view(self):
return views.MaintenanceDisplay
@pytest.fixture()
def view(self, req, plain_view):
view = plain_view()
setup_view(view, req)
return view
def test_has_alert(self, view, maintenance_alert):
data = view.get_context_data()
assert data['current_alert']
def test_has_no_alert(self, view):
data = view.get_context_data()
assert not data.get('current_alert')
def test_no_user_permissions_raises_error(self, req, plain_view):
with pytest.raises(PermissionDenied):
plain_view.as_view()(req)
def test_correct_view_permissions(self, req, user, plain_view):
view_permission = Permission.objects.get(codename='change_maintenancestate')
user.user_permissions.add(view_permission)
user.save()
res = plain_view.as_view()(req)
assert res.status_code == 200
def test_create_maintenance(self, view, req):
message = 'Whooo. Its Custom!'
req.POST = {'start': '2018/01/27 10:24', 'level': 1, 'message': message}
view.post(req)
assert MaintenanceState.objects.first().message == message
@pytest.mark.urls('admin.base.urls')
class TestDeleteMaintenance:
@pytest.fixture()
def plain_view(self):
return views.DeleteMaintenance
@pytest.fixture()
def view(self, req, plain_view):
view = plain_view()
setup_view(view, req)
return view
def test_delete(self, view, req):
res = view.delete(req)
assert res.url == '/maintenance/'
assert res.status_code == 302
assert MaintenanceState.objects.all().count() == 0
def test_no_user_permissions_raises_error(self, req, plain_view):
with pytest.raises(PermissionDenied):
plain_view.as_view()(req)
def test_correct_view_permissions(self, req, user, plain_view):
view_permission = Permission.objects.get(codename='delete_maintenancestate')
user.user_permissions.add(view_permission)
user.save()
res = plain_view.as_view()(req)
assert res.status_code == 200
|
from typing import List
from flask import Response, make_response, jsonify
def item_not_found(item_name: str, field_name: str, field_value: str) -> Response:
return make_response(
jsonify(
{
"error": {
"code": 1,
"msg": f"Could not find {item_name} with "
f'{field_name}="{field_value}"',
}
}
),
404,
)
def could_not_update(item_name: str, field_name: str, field_value: str) -> Response:
return make_response(
jsonify(
{
"error": {
"code": 2,
"msg": f"Could not update {item_name} for "
f'"{field_name}={field_value}"',
}
}
),
404,
)
def could_not_create(item_name: str) -> Response:
return make_response(
jsonify({"error": {"code": 3, "msg": f"Could not create {item_name}"}}), 400
)
def not_authorized() -> Response:
return make_response(
jsonify(
{
"error": {
"code": 4,
"msg": "You are not authorized to perform this operation.",
}
}
),
401,
)
def invalid_join_token() -> Response:
return make_response(
jsonify({"error": {"code": 5, "msg": "The join token provided is invalid."}}),
401,
)
def is_not_json() -> Response:
return make_response(
jsonify(
{
"error": {
"code": 6,
"msg": "The request does not contain a valid JSON body.",
}
}
),
400,
)
def bad_body_arg() -> Response:
return make_response(
jsonify(
{
"error": {
"code": 7,
"msg": "Your request JSON body contains invalid data.",
}
}
),
400,
)
def missing_json_arg(arg_missing: str) -> Response:
return make_response(
jsonify(
{
"error": {
"code": 8,
"msg": f"Your JSON request is missing argument: {arg_missing}.",
}
}
),
400,
)
def invalid_body_arg(bad_arg: str) -> Response:
return make_response(
jsonify(
{
"error": {
"code": 9,
"msg": "Your JSON request passed an incorrect value for "
f"argument {bad_arg}",
}
}
),
400,
)
def invalid_url_arg(bad_arg: str) -> Response:
return make_response(
jsonify(
{
"error": {
"code": 10,
"msg": "Your GET request passed an incorrect value for "
f"argument {bad_arg}",
}
}
),
400,
)
def invalid_url_args_combination(bad_args: List[str]) -> Response:
return make_response(
jsonify(
{
"error": {
"code": 11,
"msg": "Your GET request passed an incorrect combination of "
f"values for the arguments {bad_args}",
}
}
),
400,
)
def missing_url_arg(missing_arg: str) -> Response:
return make_response(
jsonify(
{
"error": {
"code": 12,
"msg": "Your GET request is missing the URL argument "
f"{missing_arg}",
}
}
),
400,
)
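# Illustrative route sketch (hypothetical view function and storage): these
# helpers are meant to be returned directly from Flask views, e.g.
#
#   @app.route("/items/<item_id>")
#   def get_item(item_id):
#       item = items.get(item_id)  # hypothetical lookup
#       if item is None:
#           return item_not_found("item", "id", item_id)
#       return jsonify(item)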
|
from flask import Flask
app = Flask(__name__)
import grpc
import demo_pb2
import demo_pb2_grpc
@app.route('/grpc')
def hello_world_grpc():
with grpc.insecure_channel('127.0.0.1:9090') as channel:
client = demo_pb2_grpc.DemoServiceStub(channel)
response = client.CreateOne(demo_pb2.RequestData(
data="call create one from client",
))
print(response.return_code, response.message, response.data)
return 'Hello, World grpc!'
@app.route('/')
def hello_world():
return 'Hello, World!'
|
from services.log_service import LogService
from entities.word_evaluation import WordEvaluation
import os
import torch
import torch.nn as nn
from datetime import datetime
from typing import Dict, List, Tuple
from overrides import overrides
from entities.models.model_checkpoint import ModelCheckpoint
from entities.metric import Metric
from entities.batch_representation import BatchRepresentation
from enums.metric_type import MetricType
from services.data_service import DataService
from services.arguments.arguments_service_base import ArgumentsServiceBase
class ModelBase(nn.Module):
def __init__(
self,
data_service: DataService,
arguments_service: ArgumentsServiceBase,
log_service: LogService):
super(ModelBase, self).__init__()
self._data_service = data_service
self._arguments_service = arguments_service
self._log_service = log_service
self.do_not_save: bool = False
self.metric_log_key: str = None
def forward(self, batch_representation: BatchRepresentation):
return None
def calculate_accuracies(
self,
batch: BatchRepresentation,
outputs,
output_characters=False) -> Tuple[Dict[MetricType, float], List[str]]:
return ({}, None)
def compare_metric(self, best_metric: Metric, new_metric: Metric) -> bool:
if best_metric.is_new:
return True
current_best = 0
new_result = 0
if self.metric_log_key is not None:
current_best = best_metric.get_accuracy_metric(self.metric_log_key)
new_result = new_metric.get_accuracy_metric(self.metric_log_key)
if current_best == new_result:
best_loss = best_metric.get_current_loss()
current_loss = new_metric.get_current_loss()
result = (best_loss > current_loss) or (not self._arguments_service.consider_equal_results_as_worse and best_loss == current_loss)
else:
result = current_best < new_result
return result
def clip_gradients(self):
torch.nn.utils.clip_grad_norm_(self.parameters(), max_norm=1.0)
def save(
self,
path: str,
epoch: int,
iteration: int,
best_metrics: object,
resets_left: int,
name_prefix: str = None,
save_model_dict: bool = True) -> bool:
self._log_service.log_debug(f'Saving model [epoch: {epoch} | iteration: {iteration} | resets left: {resets_left}]')
assert self._data_service is not None
assert self._arguments_service is not None
model_checkpoint = ModelCheckpoint(
model_dict=self.state_dict() if save_model_dict else {},
epoch=epoch,
iteration=iteration,
best_metrics=best_metrics,
resets_left=resets_left)
checkpoint_name = self._get_model_name(name_prefix)
saved = self._data_service.save_python_obj(
model_checkpoint, path, checkpoint_name)
return saved
def load(
self,
path: str,
name_prefix: str = None,
name_suffix: str = None,
load_model_dict: bool = True,
use_checkpoint_name: bool = True,
checkpoint_name: str = None,
overwrite_args: Dict[str, object] = None) -> ModelCheckpoint:
self._log_service.log_debug(f'Loading model [path: {path} | name_prefix: {name_prefix} | name_suffix: {name_suffix}]')
assert self._data_service is not None
assert self._arguments_service is not None
if checkpoint_name is None:
if not use_checkpoint_name:
checkpoint_name = name_prefix
else:
checkpoint_name = self._arguments_service.resume_checkpoint_name
if checkpoint_name is None:
checkpoint_name = self._get_model_name(name_prefix, name_suffix, overwrite_args)
if not self._data_service.python_obj_exists(path, checkpoint_name):
error_message = f'Model checkpoint "{checkpoint_name}" not found at "{path}"'
self._log_service.log_error(error_message)
raise FileNotFoundError(error_message)
model_checkpoint: ModelCheckpoint = self._data_service.load_python_obj(
path, checkpoint_name)
if model_checkpoint is None:
error_message = 'Model checkpoint is empty'
self._log_service.log_error(error_message)
raise Exception(error_message)
if load_model_dict:
model_dict = model_checkpoint.model_dict
for module_name, module in self.named_modules():
if isinstance(module, ModelBase):
if module.do_not_save:
for parameter_name, parameter_value in module.named_parameters():
model_dict[f'{module_name}.{parameter_name}'] = parameter_value
module.before_load()
self.load_state_dict(model_dict)
for module_name, module in self.named_modules():
if isinstance(module, ModelBase):
module.after_load()
self._log_service.log_debug(f'Loaded model dictionary successfully')
return model_checkpoint
def _get_model_name(self, name_prefix: str = None, name_suffix: str = None, overwrite_args: Dict[str, object] = None) -> str:
result = self._arguments_service.get_configuration_name(overwrite_args)
if name_prefix is not None:
result = f'{name_prefix}_{result}'
if name_suffix is not None:
result = f'{result}{name_suffix}'
return result
def on_convergence(self) -> bool:
self._log_service.log_debug(f'Model converged')
result = self._on_convergence(self)
for _, module in self.named_modules():
result = result or self._on_convergence(module)
return result
def _on_convergence(self, main_module) -> bool:
result = False
for module_name, module in main_module.named_modules():
if module_name == '':
continue
if isinstance(module, ModelBase):
result = result or module.on_convergence()
return result
@overrides
def state_dict(self, destination=None, prefix='', keep_vars=False):
if self.do_not_save:
return None
result = super().state_dict(
destination=destination,
prefix=prefix,
keep_vars=keep_vars)
return result
@property
def keep_frozen(self) -> bool:
return False
def optimizer_parameters(self):
return self.parameters()
def calculate_evaluation_metrics(self) -> Dict[str, float]:
return {}
def finalize_batch_evaluation(self, is_new_best: bool):
pass
def before_load(self):
pass
def after_load(self):
pass
def get_embeddings(self, tokens: List[str], vocab_ids: List[torch.Tensor], skip_unknown: bool = False) -> List[WordEvaluation]:
raise NotImplementedError()
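# Hypothetical subclass sketch (not part of this module): a concrete model
# usually only overrides forward() and, where useful, the metric hooks.
# `embedding_dim` and `batch.word_embeddings` are illustrative names, not
# attributes guaranteed by BatchRepresentation.
#
#   class LinearProbeModel(ModelBase):
#       def __init__(self, data_service, arguments_service, log_service, embedding_dim=768):
#           super().__init__(data_service, arguments_service, log_service)
#           self._classifier = nn.Linear(embedding_dim, 2)
#
#       def forward(self, batch: BatchRepresentation):
#           return self._classifier(batch.word_embeddings)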
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 4 10:38:45 2021
Advent of Code 2021 is here
My goal is to attempt all challenges before the onset of 2022
@author: Rahul Venugopal
"""
#%% --- Day 4: Giant Squid --- Part 1
#test data
number_list = ['7','4','9','5','11','17','23','2','0','14','21','24','10','16','13','6','15','25','12','22','18','20','8','19','3','26','1']
cards = []
cards.append([['22', '13', '17', '11', '0'], ['8', '2', '23', '4', '24'],['21', '9', '14', '16', '7'], ['6', '10', '3', '18', '5'], ['1', '12', '20', '15', '19']])
cards.append([['3', '15', '0', '2', '22'], ['9', '18', '13', '17', '5'],['19', '8','7','25','23'],['20', '11', '10', '24','4'],['14','21','16','12','6']])
cards.append([['14','21','17','24','4'],['10','16','15','9','19'],['18','8','23','26','20'],['22','11','13','6','5'],['2','0','12','3','7']])
# for column bingo check
number_list = ['22','8','21','6','1']
#%% Load data which is in text format
file = open('input.txt','r')
data = file.readlines()
data = [line.rstrip() for line in data]
# extract the bingo name list
number_string = data[0]
number_list = list(number_string.split(","))
# get all the bingo cards
cards = []
for card_no in range(2,601,6):
cards.append(data[card_no:card_no+5])
for card_row in range(len(cards)):
for card_col in range(5):
cards[card_row][card_col] = cards[card_row][card_col].split()
print(len(cards))
# we got 100 bingo cards
import copy
copy_of_cards = copy.deepcopy(cards)
#%% Take each number from the bingo list in turn, check every card for it, and mark hits with 'X'
bingo_nos = 0
lucky_card_all = []
lucky_no_row = []
lucky_no_col = []
while bingo_nos < len(number_list):
for bingo_card in range(len(cards)):
take_a_card = cards[bingo_card]
for rows in range(5):
for columns in range(5):
if take_a_card[rows][columns] == number_list[bingo_nos]:
take_a_card[rows][columns] = 'X'
# checking if any rows has hit bingo
for looper in range(len(take_a_card)):
if all(x == take_a_card[looper][0] for x in take_a_card[looper]):
lucky_card_all.append(bingo_card)
lucky_no_row.append(bingo_nos)
# checking if any columns has hit bingo
for second_looper in range(len(take_a_card)):
column_card = []
for entries in range(len(take_a_card)):
column_card.append([item[entries][:] for item in take_a_card])
if all(y == column_card[second_looper][0] for y in column_card[second_looper]):
lucky_card_all.append(bingo_card)
lucky_no_col.append(bingo_nos)
cards[bingo_card] = take_a_card
bingo_nos += 1
# find the min of first entries of lucky_card_row/col
# that is our lucky card
lucky_card = lucky_card_all[0]
#index of number just called
index_no_just_called = min(lucky_no_row[0], lucky_no_col[0])
# find the min of first entries of lucky_no_row/col
no_just_called = number_list[index_no_just_called]
#%% verifying
# load cards again
cards = copy_of_cards
lucky_bingo_card = cards[lucky_card]
bingo_nos = 0
while bingo_nos < index_no_just_called + 1:
for rows in range(5):
for columns in range(5):
if lucky_bingo_card[rows][columns] == number_list[bingo_nos]:
lucky_bingo_card[rows][columns] = 'X'
bingo_nos += 1
# Replace substring in list of strings
lucky_bingo_card = [[item.replace('X', '0') for item in big_item] for big_item in lucky_bingo_card]
# converting all str to int
int_list = [[int(x) for x in lst] for lst in lucky_bingo_card]
# flatten the nested list
import itertools
flattened_list = list(itertools.chain(*int_list))
final_score_of_board = sum(flattened_list) * int(no_just_called)
print(final_score_of_board)
#%% --- Day 4: Giant Squid --- Part 2
# in previous part we were saving winning card no.s in lucky_card_all variable
# getting unique entries and checking the last entry should work
from collections import OrderedDict
fail_card_no = list(OrderedDict.fromkeys(lucky_card_all))[-1]
fail_card = copy_of_cards[fail_card_no]
bingo_nos = 0
trigger = 0
while trigger != 1:
for rows in range(5):
for columns in range(5):
if fail_card[rows][columns] == number_list[bingo_nos]:
fail_card[rows][columns] = 'X'
# checking if any rows has hit bingo
for looper in range(len(fail_card)):
if all(x == fail_card[looper][0] for x in fail_card[looper]):
trigger = 1
# checking if any columns has hit bingo
for second_looper in range(len(fail_card)):
column_card = []
for entries in range(len(fail_card)):
column_card.append([item[entries][:] for item in fail_card])
if all(y == column_card[second_looper][0] for y in column_card[second_looper]):
trigger = 1
bingo_nos += 1
# getting the final bingo entry
print(bingo_nos-1)
last_no_in_bingo = number_list[bingo_nos-1]
# Replace substring in list of strings
fail_bingo_card = [[item.replace('X', '0') for item in big_item] for big_item in fail_card]
# converting all str to int
int_list = [[int(x) for x in lst] for lst in fail_bingo_card]
# flatten the nested list
import itertools
flattened_list = list(itertools.chain(*int_list))
final_score_of_board_failed = sum(flattened_list) * int(last_no_in_bingo)
print(final_score_of_board_failed)
|
import rfl.core as core
import rfl.tflbridge as tflbridge
import click
import json
class ReporterContext:
def __init__(self):
self._path = None
self._model = None
@property
def path(self):
return self._path
@path.setter
def path(self, new_path: str):
self._path = new_path
@property
def model(self):
return self._model
@model.setter
def model(self, new_model: core.Model):
self._model = new_model
class JSONReporter:
    @staticmethod
    def summarize_tensor(tensor: core.Tensor):
return {
'shape': [int(dim) for dim in tensor.shape],
'bitwidth': {
'value': tensor.bitwidth,
'note': 'The average number of bits used to encode each element (= bit size / element count)'
}
}
    @staticmethod
    def summarize_layer(layer: core.Layer):
compute_cost_report = {
'attr': { k:str(v) for (k, v) in layer.compute_cost.attr.items() },
'expr': layer.compute_cost.expr,
'value': str(layer.compute_cost.value),
}
if layer.compute_cost.note is not None:
compute_cost_report['note'] = layer.compute_cost.note
return {
'origin': layer.origin,
'operation': layer.operation.name,
'compute_cost': compute_cost_report,
'weight': [JSONReporter.summarize_tensor(t) for t in layer.weight],
}
def report(self, ctx: ReporterContext):
path = ctx.path
model = ctx.model
W = sum([l.compute_cost.value for l in model.layers])
model_input_size_in_byte = sum([i.get_size_in_byte() for i in model.inputs])
model_output_size_in_byte = sum([o.get_size_in_byte() for o in model.outputs])
model_weight_size_in_byte = sum([l.weight.get_size_in_byte() for l in model.layers])
Qmin = sum([model_input_size_in_byte, model_output_size_in_byte, model_weight_size_in_byte])
Imax = W / Qmin
meta = {
'path': str(path),
}
summary = {
'W': { 'value': str(W), 'unit': 'OPS', 'note': 'The number of operations' },
'Qmin': {
'value': str(Qmin),
'unit': 'B',
'note': 'The minimal number of byte transfer (Input size + Output size + Weight size)'
},
'Imax': { 'value': str(Imax), 'unit': 'OPS/B', 'note': 'W / Qmin' },
}
return json.dumps({
'meta': meta,
'summary': summary,
'detail': {
'input': [JSONReporter.summarize_tensor(i) for i in model.inputs],
'output': [JSONReporter.summarize_tensor(o) for o in model.outputs],
'layer': [JSONReporter.summarize_layer(l) for l in model.layers],
}
})
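# Worked example of the summary arithmetic above (illustrative numbers only):
# a model with W = 2e9 operations and Qmin = 10 MB (1e7 B) of unavoidable
# traffic has Imax = 2e9 / 1e7 = 200 OPS/B, i.e. 200 operations per byte moved
# is the model's maximum achievable arithmetic intensity.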
@click.command()
@click.argument("path", type=click.Path(exists=True, dir_okay=False))
def cli(path):
# TODO Check file extension and select importer
core_model = tflbridge.import_file_at(path)
# TODO Select reporter with command-line option
reporter = JSONReporter()
ctx = ReporterContext()
ctx.model = core_model
ctx.path = str(path)
report = reporter.report(ctx)
print(report)
cli()
|
from .episodes import * # noqa
from .podcasts import * # noqa
from .progress import * # noqa
from .playlists import * # noqa
|
import os, sys;
sModulePath = os.path.dirname(__file__);
sys.path = [sModulePath] + [sPath for sPath in sys.path if sPath.lower() != sModulePath.lower()];
from fTestDependencies import fTestDependencies;
fTestDependencies();
try: # mDebugOutput use is Optional
import mDebugOutput as m0DebugOutput;
except ModuleNotFoundError as oException:
if oException.args[0] != "No module named 'mDebugOutput'":
raise;
m0DebugOutput = None;
guExitCodeInternalError = 1; # Use standard value;
try:
try:
from mConsole import oConsole;
except:
import sys, threading;
oConsoleLock = threading.Lock();
class oConsole(object):
@staticmethod
def fOutput(*txArguments, **dxArguments):
sOutput = "";
for x in txArguments:
if isinstance(x, str):
sOutput += x;
sPadding = dxArguments.get("sPadding");
if sPadding:
        sOutput = sOutput.ljust(120, sPadding);
oConsoleLock.acquire();
print(sOutput);
sys.stdout.flush();
oConsoleLock.release();
@staticmethod
def fStatus(*txArguments, **dxArguments):
pass;
import sys;
#Import the test subject
import mRegistry;
# Test registry access
print("* Testing Registry access...");sys.stdout.flush();
oTestRegistryValue = mRegistry.cRegistryValue(
sTypeName = "SZ",
xValue = "Test value",
);
oRegistryHiveKeyNamedValue = mRegistry.cRegistryHiveKeyNamedValue(
sHiveName = "HKCU",
sKeyPath = "Software\\SkyLined\\mRegistry",
sValueName = "Test value name",
);
assert oRegistryHiveKeyNamedValue.sValueName == "Test value name", \
"Unexpected registry hive key value name: %s" % oRegistryHiveKeyNamedValue.sValueName;
assert oRegistryHiveKeyNamedValue.sFullPath == r"HKEY_CURRENT_USER\Software\SkyLined\mRegistry\Test value name", \
"Unexpected registry hive key value path: %s" % oRegistryHiveKeyNamedValue.sFullPath;
oRegistryHiveKey = oRegistryHiveKeyNamedValue.oRegistryHiveKey;
assert oRegistryHiveKey.sKeyPath == "Software\\SkyLined\\mRegistry", \
"Unexpected registry hive key name: %s" % oRegistryHiveKey.sKeyPath;
  assert oRegistryHiveKey.sFullPath == r"HKEY_CURRENT_USER\Software\SkyLined\mRegistry", \
"Unexpected registry hive key path: %s" % oRegistryHiveKey.sFullPath;
oRegistryHive = oRegistryHiveKey.oRegistryHive;
assert oRegistryHive.sHiveName == "HKEY_CURRENT_USER", \
"Unexpected registry hive name: %s" % (oRegistryHive.sHiveName);
assert oRegistryHive.sFullPath == "HKEY_CURRENT_USER", \
"Unexpected registry hive path: %s" % (oRegistryHive.sFullPath);
assert oRegistryHive == oRegistryHiveKeyNamedValue.oRegistryHive, \
"Registry hive mismatch: %s vs %s" % (oRegistryHive, oRegistryHiveKeyNamedValue.oRegistryHive);
assert oRegistryHiveKeyNamedValue.foSet(oTestRegistryValue), \
"Could not set named registry value!";
assert oRegistryHiveKeyNamedValue.foGet() == oTestRegistryValue, \
"Could not get named registry value!";
assert oRegistryHiveKey.doValue_by_Name["Test value name"].xValue == oTestRegistryValue.xValue, \
"Unexpected registry value mismatch (%s vs %s)" % \
(oRegistryHiveKey.doValue_by_Name["Test value name"].xValue, oTestRegistryValue.xValue);
assert oRegistryHiveKey.bExists, \
"Expected %s to exist!" % oRegistryHiveKey;
o0ParentHiveKey = oRegistryHiveKey.o0ParentHiveKey;
assert o0ParentHiveKey, \
"Unexpected lack of parent hive key!";
assert o0ParentHiveKey.sFullPath == r"HKEY_CURRENT_USER\Software\SkyLined", \
"Unexpected parent hive key path: %s" % o0ParentHiveKey.sFullPath;
for oParentSubHiveKey in o0ParentHiveKey.aoSubKeys:
if oParentSubHiveKey.sFullPath == oRegistryHiveKey.sFullPath:
break;
else:
raise AssertionError("%s is missing from sub keys (%s)" % (oRegistryHiveKey, o0ParentHiveKey.aoSubKeys));
oSubKey = oRegistryHiveKey.foCreateSubKey("Test key name");
oSubKey2 = oRegistryHiveKey.foGetSubKey("Test key name");
assert oSubKey.sFullPath == oSubKey2.sFullPath, \
"Unexpected sub key path mismatch (%s vs %s)" % (oSubKey.sFullPath, oSubKey2.sFullPath);
assert oSubKey.o0ParentHiveKey, \
"Unexpected lack of parent hive key in sub key!";
assert oSubKey.o0ParentHiveKey.sFullPath == oRegistryHiveKey.sFullPath, \
"Unexpected sub key parent path mismatch (%s vs %s)" % \
(oSubKey.o0ParentHiveKey.sFullPath, oRegistryHiveKey.sFullPath);
assert oRegistryHiveKey.doSubKey_by_sName["Test key name"].sFullPath == oSubKey.sFullPath, \
"Unexpected sub key path mismatch (%s vs %s)" % \
(oRegistryHiveKey.doSubKey_by_sName["Test key name"].sFullPath, oSubKey.sFullPath);
for oNamedValue in oRegistryHiveKey.aoNamedValues:
if oNamedValue.sFullPath == oRegistryHiveKeyNamedValue.sFullPath:
break;
else:
raise AssertionError("%s is missing from named values (%s)" % (oNamedValue, oRegistryHiveKey.aoNamedValues));
assert oRegistryHiveKeyNamedValue.fbDelete(), \
"Could not delete named registry value";
assert oRegistryHiveKeyNamedValue.foGet() is None, \
"Deleting named registry value failed!";
print(oRegistryHiveKey.foSetValueForName(oRegistryHiveKeyNamedValue.sValueName, oTestRegistryValue));
print(oRegistryHiveKey.foGetValueForName(oRegistryHiveKeyNamedValue.sValueName))
print(oRegistryHiveKey.fbDeleteValueForName(oRegistryHiveKeyNamedValue.sValueName))
print(oRegistryHiveKey.sFullPath);
print("+ Done.");
except Exception as oException:
if m0DebugOutput:
m0DebugOutput.fTerminateWithException(oException, guExitCodeInternalError, bShowStacksForAllThread = True);
raise;
|
from pandas import Timestamp
# Chinese holidays are quite irregular because
# 1. some of the holidays are based on the traditional Chinese calendar (lunisolar)
# 2. the government tries very hard to arrange multi-day holidays
#    and is not consistent in how it does so.
# So instead of writing rules for them, it's much cleaner to just use
# all past holidays as ad hoc holidays.
#
# Records start from 1991. SSE was founded at the end of 1990.
all_holidays = [
Timestamp('1991-01-01'),
Timestamp('1991-02-15'),
Timestamp('1991-02-18'),
Timestamp('1991-05-01'),
Timestamp('1991-10-01'),
Timestamp('1991-10-02'),
Timestamp('1992-01-01'),
Timestamp('1992-02-04'),
Timestamp('1992-02-05'),
Timestamp('1992-02-06'),
Timestamp('1992-05-01'),
Timestamp('1992-10-01'),
Timestamp('1992-10-02'),
Timestamp('1993-01-01'),
Timestamp('1993-01-25'),
Timestamp('1993-01-26'),
Timestamp('1993-10-01'),
Timestamp('1994-02-07'),
Timestamp('1994-02-08'),
Timestamp('1994-02-09'),
Timestamp('1994-02-10'),
Timestamp('1994-02-11'),
Timestamp('1994-05-02'),
Timestamp('1994-10-03'),
Timestamp('1994-10-04'),
Timestamp('1995-01-02'),
Timestamp('1995-01-30'),
Timestamp('1995-01-31'),
Timestamp('1995-02-01'),
Timestamp('1995-02-02'),
Timestamp('1995-02-03'),
Timestamp('1995-05-01'),
Timestamp('1995-10-02'),
Timestamp('1995-10-03'),
Timestamp('1996-01-01'),
Timestamp('1996-02-19'),
Timestamp('1996-02-20'),
Timestamp('1996-02-21'),
Timestamp('1996-02-22'),
Timestamp('1996-02-23'),
Timestamp('1996-02-26'),
Timestamp('1996-02-27'),
Timestamp('1996-02-28'),
Timestamp('1996-02-29'),
Timestamp('1996-03-01'),
Timestamp('1996-05-01'),
Timestamp('1996-09-30'),
Timestamp('1996-10-01'),
Timestamp('1996-10-02'),
Timestamp('1997-01-01'),
Timestamp('1997-02-03'),
Timestamp('1997-02-04'),
Timestamp('1997-02-05'),
Timestamp('1997-02-06'),
Timestamp('1997-02-07'),
Timestamp('1997-02-10'),
Timestamp('1997-02-11'),
Timestamp('1997-02-12'),
Timestamp('1997-02-13'),
Timestamp('1997-02-14'),
Timestamp('1997-05-01'),
Timestamp('1997-05-02'),
Timestamp('1997-06-30'),
Timestamp('1997-07-01'),
Timestamp('1997-10-01'),
Timestamp('1997-10-02'),
Timestamp('1997-10-03'),
Timestamp('1998-01-01'),
Timestamp('1998-01-02'),
Timestamp('1998-01-26'),
Timestamp('1998-01-27'),
Timestamp('1998-01-28'),
Timestamp('1998-01-29'),
Timestamp('1998-01-30'),
Timestamp('1998-02-02'),
Timestamp('1998-02-03'),
Timestamp('1998-02-04'),
Timestamp('1998-02-05'),
Timestamp('1998-02-06'),
Timestamp('1998-05-01'),
Timestamp('1998-10-01'),
Timestamp('1998-10-02'),
Timestamp('1999-01-01'),
Timestamp('1999-02-10'),
Timestamp('1999-02-11'),
Timestamp('1999-02-12'),
Timestamp('1999-02-15'),
Timestamp('1999-02-16'),
Timestamp('1999-02-17'),
Timestamp('1999-02-18'),
Timestamp('1999-02-19'),
Timestamp('1999-02-22'),
Timestamp('1999-02-23'),
Timestamp('1999-02-24'),
Timestamp('1999-02-25'),
Timestamp('1999-02-26'),
Timestamp('1999-05-03'),
Timestamp('1999-10-01'),
Timestamp('1999-10-04'),
Timestamp('1999-10-05'),
Timestamp('1999-10-06'),
Timestamp('1999-10-07'),
Timestamp('1999-12-20'),
Timestamp('1999-12-31'),
Timestamp('2000-01-03'),
Timestamp('2000-01-31'),
Timestamp('2000-02-01'),
Timestamp('2000-02-02'),
Timestamp('2000-02-03'),
Timestamp('2000-02-04'),
Timestamp('2000-02-07'),
Timestamp('2000-02-08'),
Timestamp('2000-02-09'),
Timestamp('2000-02-10'),
Timestamp('2000-02-11'),
Timestamp('2000-05-01'),
Timestamp('2000-05-02'),
Timestamp('2000-05-03'),
Timestamp('2000-05-04'),
Timestamp('2000-05-05'),
Timestamp('2000-10-02'),
Timestamp('2000-10-03'),
Timestamp('2000-10-04'),
Timestamp('2000-10-05'),
Timestamp('2000-10-06'),
Timestamp('2001-01-01'),
Timestamp('2001-01-22'),
Timestamp('2001-01-23'),
Timestamp('2001-01-24'),
Timestamp('2001-01-25'),
Timestamp('2001-01-26'),
Timestamp('2001-01-29'),
Timestamp('2001-01-30'),
Timestamp('2001-01-31'),
Timestamp('2001-02-01'),
Timestamp('2001-02-02'),
Timestamp('2001-05-01'),
Timestamp('2001-05-02'),
Timestamp('2001-05-03'),
Timestamp('2001-05-04'),
Timestamp('2001-05-07'),
Timestamp('2001-10-01'),
Timestamp('2001-10-02'),
Timestamp('2001-10-03'),
Timestamp('2001-10-04'),
Timestamp('2001-10-05'),
Timestamp('2002-01-01'),
Timestamp('2002-01-02'),
Timestamp('2002-01-03'),
Timestamp('2002-02-11'),
Timestamp('2002-02-12'),
Timestamp('2002-02-13'),
Timestamp('2002-02-14'),
Timestamp('2002-02-15'),
Timestamp('2002-02-18'),
Timestamp('2002-02-19'),
Timestamp('2002-02-20'),
Timestamp('2002-02-21'),
Timestamp('2002-02-22'),
Timestamp('2002-05-01'),
Timestamp('2002-05-02'),
Timestamp('2002-05-03'),
Timestamp('2002-05-06'),
Timestamp('2002-05-07'),
Timestamp('2002-09-30'),
Timestamp('2002-10-01'),
Timestamp('2002-10-02'),
Timestamp('2002-10-03'),
Timestamp('2002-10-04'),
Timestamp('2002-10-07'),
Timestamp('2003-01-01'),
Timestamp('2003-01-30'),
Timestamp('2003-01-31'),
Timestamp('2003-02-03'),
Timestamp('2003-02-04'),
Timestamp('2003-02-05'),
Timestamp('2003-02-06'),
Timestamp('2003-02-07'),
Timestamp('2003-05-01'),
Timestamp('2003-05-02'),
Timestamp('2003-05-05'),
Timestamp('2003-05-06'),
Timestamp('2003-05-07'),
Timestamp('2003-05-08'),
Timestamp('2003-05-09'),
Timestamp('2003-10-01'),
Timestamp('2003-10-02'),
Timestamp('2003-10-03'),
Timestamp('2003-10-06'),
Timestamp('2003-10-07'),
Timestamp('2004-01-01'),
Timestamp('2004-01-19'),
Timestamp('2004-01-20'),
Timestamp('2004-01-21'),
Timestamp('2004-01-22'),
Timestamp('2004-01-23'),
Timestamp('2004-01-26'),
Timestamp('2004-01-27'),
Timestamp('2004-01-28'),
Timestamp('2004-05-03'),
Timestamp('2004-05-04'),
Timestamp('2004-05-05'),
Timestamp('2004-05-06'),
Timestamp('2004-05-07'),
Timestamp('2004-10-01'),
Timestamp('2004-10-04'),
Timestamp('2004-10-05'),
Timestamp('2004-10-06'),
Timestamp('2004-10-07'),
Timestamp('2005-01-03'),
Timestamp('2005-02-07'),
Timestamp('2005-02-08'),
Timestamp('2005-02-09'),
Timestamp('2005-02-10'),
Timestamp('2005-02-11'),
Timestamp('2005-02-14'),
Timestamp('2005-02-15'),
Timestamp('2005-05-02'),
Timestamp('2005-05-03'),
Timestamp('2005-05-04'),
Timestamp('2005-05-05'),
Timestamp('2005-05-06'),
Timestamp('2005-10-03'),
Timestamp('2005-10-04'),
Timestamp('2005-10-05'),
Timestamp('2005-10-06'),
Timestamp('2005-10-07'),
Timestamp('2006-01-02'),
Timestamp('2006-01-03'),
Timestamp('2006-01-26'),
Timestamp('2006-01-27'),
Timestamp('2006-01-30'),
Timestamp('2006-01-31'),
Timestamp('2006-02-01'),
Timestamp('2006-02-02'),
Timestamp('2006-02-03'),
Timestamp('2006-05-01'),
Timestamp('2006-05-02'),
Timestamp('2006-05-03'),
Timestamp('2006-05-04'),
Timestamp('2006-05-05'),
Timestamp('2006-10-02'),
Timestamp('2006-10-03'),
Timestamp('2006-10-04'),
Timestamp('2006-10-05'),
Timestamp('2006-10-06'),
Timestamp('2007-01-01'),
Timestamp('2007-01-02'),
Timestamp('2007-01-03'),
Timestamp('2007-02-19'),
Timestamp('2007-02-20'),
Timestamp('2007-02-21'),
Timestamp('2007-02-22'),
Timestamp('2007-02-23'),
Timestamp('2007-05-01'),
Timestamp('2007-05-02'),
Timestamp('2007-05-03'),
Timestamp('2007-05-04'),
Timestamp('2007-05-07'),
Timestamp('2007-10-01'),
Timestamp('2007-10-02'),
Timestamp('2007-10-03'),
Timestamp('2007-10-04'),
Timestamp('2007-10-05'),
Timestamp('2007-12-31'),
Timestamp('2008-01-01'),
Timestamp('2008-02-06'),
Timestamp('2008-02-07'),
Timestamp('2008-02-08'),
Timestamp('2008-02-11'),
Timestamp('2008-02-12'),
Timestamp('2008-04-04'),
Timestamp('2008-05-01'),
Timestamp('2008-05-02'),
Timestamp('2008-06-09'),
Timestamp('2008-09-15'),
Timestamp('2008-09-29'),
Timestamp('2008-09-30'),
Timestamp('2008-10-01'),
Timestamp('2008-10-02'),
Timestamp('2008-10-03'),
Timestamp('2009-01-01'),
Timestamp('2009-01-02'),
Timestamp('2009-01-26'),
Timestamp('2009-01-27'),
Timestamp('2009-01-28'),
Timestamp('2009-01-29'),
Timestamp('2009-01-30'),
Timestamp('2009-04-06'),
Timestamp('2009-05-01'),
Timestamp('2009-05-28'),
Timestamp('2009-05-29'),
Timestamp('2009-10-01'),
Timestamp('2009-10-02'),
Timestamp('2009-10-05'),
Timestamp('2009-10-06'),
Timestamp('2009-10-07'),
Timestamp('2009-10-08'),
Timestamp('2010-01-01'),
Timestamp('2010-02-15'),
Timestamp('2010-02-16'),
Timestamp('2010-02-17'),
Timestamp('2010-02-18'),
Timestamp('2010-02-19'),
Timestamp('2010-04-05'),
Timestamp('2010-05-03'),
Timestamp('2010-06-14'),
Timestamp('2010-06-15'),
Timestamp('2010-06-16'),
Timestamp('2010-09-22'),
Timestamp('2010-09-23'),
Timestamp('2010-09-24'),
Timestamp('2010-10-01'),
Timestamp('2010-10-04'),
Timestamp('2010-10-05'),
Timestamp('2010-10-06'),
Timestamp('2010-10-07'),
Timestamp('2011-01-03'),
Timestamp('2011-02-02'),
Timestamp('2011-02-03'),
Timestamp('2011-02-04'),
Timestamp('2011-02-07'),
Timestamp('2011-02-08'),
Timestamp('2011-04-04'),
Timestamp('2011-04-05'),
Timestamp('2011-05-02'),
Timestamp('2011-06-06'),
Timestamp('2011-09-12'),
Timestamp('2011-10-03'),
Timestamp('2011-10-04'),
Timestamp('2011-10-05'),
Timestamp('2011-10-06'),
Timestamp('2011-10-07'),
Timestamp('2012-01-02'),
Timestamp('2012-01-03'),
Timestamp('2012-01-23'),
Timestamp('2012-01-24'),
Timestamp('2012-01-25'),
Timestamp('2012-01-26'),
Timestamp('2012-01-27'),
Timestamp('2012-04-02'),
Timestamp('2012-04-03'),
Timestamp('2012-04-04'),
Timestamp('2012-04-30'),
Timestamp('2012-05-01'),
Timestamp('2012-06-22'),
Timestamp('2012-10-01'),
Timestamp('2012-10-02'),
Timestamp('2012-10-03'),
Timestamp('2012-10-04'),
Timestamp('2012-10-05'),
Timestamp('2013-01-01'),
Timestamp('2013-01-02'),
Timestamp('2013-01-03'),
Timestamp('2013-02-11'),
Timestamp('2013-02-12'),
Timestamp('2013-02-13'),
Timestamp('2013-02-14'),
Timestamp('2013-02-15'),
Timestamp('2013-04-04'),
Timestamp('2013-04-05'),
Timestamp('2013-04-29'),
Timestamp('2013-04-30'),
Timestamp('2013-05-01'),
Timestamp('2013-06-10'),
Timestamp('2013-06-11'),
Timestamp('2013-06-12'),
Timestamp('2013-09-19'),
Timestamp('2013-09-20'),
Timestamp('2013-10-01'),
Timestamp('2013-10-02'),
Timestamp('2013-10-03'),
Timestamp('2013-10-04'),
Timestamp('2013-10-07'),
Timestamp('2014-01-01'),
Timestamp('2014-01-31'),
Timestamp('2014-02-03'),
Timestamp('2014-02-04'),
Timestamp('2014-02-05'),
Timestamp('2014-02-06'),
Timestamp('2014-04-07'),
Timestamp('2014-05-01'),
Timestamp('2014-05-02'),
Timestamp('2014-06-02'),
Timestamp('2014-09-08'),
Timestamp('2014-10-01'),
Timestamp('2014-10-02'),
Timestamp('2014-10-03'),
Timestamp('2014-10-06'),
Timestamp('2014-10-07'),
Timestamp('2015-01-01'),
Timestamp('2015-01-02'),
Timestamp('2015-02-18'),
Timestamp('2015-02-19'),
Timestamp('2015-02-20'),
Timestamp('2015-02-23'),
Timestamp('2015-02-24'),
Timestamp('2015-04-06'),
Timestamp('2015-05-01'),
Timestamp('2015-09-03'),
Timestamp('2015-09-04'),
Timestamp('2015-10-01'),
Timestamp('2015-10-02'),
Timestamp('2015-10-05'),
Timestamp('2015-10-06'),
Timestamp('2015-10-07'),
Timestamp('2016-01-01'),
Timestamp('2016-02-08'),
Timestamp('2016-02-09'),
Timestamp('2016-02-10'),
Timestamp('2016-02-11'),
Timestamp('2016-02-12'),
Timestamp('2016-04-04'),
Timestamp('2016-05-02'),
Timestamp('2016-06-09'),
Timestamp('2016-06-10'),
Timestamp('2016-09-15'),
Timestamp('2016-09-16'),
Timestamp('2016-10-03'),
Timestamp('2016-10-04'),
Timestamp('2016-10-05'),
Timestamp('2016-10-06'),
Timestamp('2016-10-07'),
Timestamp('2017-01-02'),
Timestamp('2017-01-27'),
Timestamp('2017-01-30'),
Timestamp('2017-01-31'),
Timestamp('2017-02-01'),
Timestamp('2017-02-02'),
Timestamp('2017-04-03'),
Timestamp('2017-04-04'),
Timestamp('2017-05-01'),
Timestamp('2017-05-29'),
Timestamp('2017-05-30'),
Timestamp('2017-10-02'),
Timestamp('2017-10-03'),
Timestamp('2017-10-04'),
Timestamp('2017-10-05'),
Timestamp('2017-10-06'),
Timestamp('2018-01-01'),
Timestamp('2018-02-15'),
Timestamp('2018-02-16'),
Timestamp('2018-02-19'),
Timestamp('2018-02-20'),
Timestamp('2018-02-21'),
Timestamp('2018-04-05'),
Timestamp('2018-04-06'),
Timestamp('2018-04-30'),
Timestamp('2018-05-01'),
Timestamp('2018-06-18'),
Timestamp('2018-09-24'),
Timestamp('2018-10-01'),
Timestamp('2018-10-02'),
Timestamp('2018-10-03'),
Timestamp('2018-10-04'),
Timestamp('2018-10-05'),
Timestamp('2018-12-30'),
Timestamp('2018-12-31'),
Timestamp('2019-01-01'),
Timestamp('2019-02-04'),
Timestamp('2019-02-05'),
Timestamp('2019-02-06'),
Timestamp('2019-02-07'),
Timestamp('2019-02-08'),
Timestamp('2019-02-09'),
Timestamp('2019-02-10'),
Timestamp('2019-04-05'),
Timestamp('2019-05-01'),
Timestamp('2019-05-02'),
Timestamp('2019-05-03'),
Timestamp('2019-06-07'),
Timestamp('2019-09-13'),
Timestamp('2019-10-01'),
Timestamp('2019-10-02'),
Timestamp('2019-10-03'),
Timestamp('2019-10-04'),
Timestamp('2019-10-05'),
Timestamp('2019-10-06'),
Timestamp('2019-10-07'),
]
# The following holidays are based on solar terms or the Chinese lunisolar calendar,
# so pre-calculated mappings to the Gregorian calendar are kept here (roughly 1960
# through 2099, as tabulated below)
# Spring Festival
sf_mapping = {
1960: Timestamp('1960-01-28'),
1961: Timestamp('1961-02-15'),
1962: Timestamp('1962-02-05'),
1963: Timestamp('1963-01-25'),
1964: Timestamp('1964-02-13'),
1965: Timestamp('1965-02-02'),
1966: Timestamp('1966-01-21'),
1967: Timestamp('1967-02-09'),
1968: Timestamp('1968-01-30'),
1969: Timestamp('1969-02-17'),
1970: Timestamp('1970-02-06'),
1971: Timestamp('1971-01-27'),
1972: Timestamp('1972-02-15'),
1973: Timestamp('1973-02-03'),
1974: Timestamp('1974-01-23'),
1975: Timestamp('1975-02-11'),
1976: Timestamp('1976-01-31'),
1977: Timestamp('1977-02-18'),
1978: Timestamp('1978-02-07'),
1979: Timestamp('1979-01-28'),
1980: Timestamp('1980-02-16'),
1981: Timestamp('1981-02-05'),
1982: Timestamp('1982-01-25'),
1983: Timestamp('1983-02-13'),
1984: Timestamp('1984-02-02'),
1985: Timestamp('1985-02-20'),
1986: Timestamp('1986-02-09'),
1987: Timestamp('1987-01-29'),
1988: Timestamp('1988-02-17'),
1989: Timestamp('1989-02-06'),
1990: Timestamp('1990-01-27'),
1991: Timestamp('1991-02-15'),
1992: Timestamp('1992-02-04'),
1993: Timestamp('1993-01-23'),
1994: Timestamp('1994-02-10'),
1995: Timestamp('1995-01-31'),
1996: Timestamp('1996-02-19'),
1997: Timestamp('1997-02-07'),
1998: Timestamp('1998-01-28'),
1999: Timestamp('1999-02-16'),
2000: Timestamp('2000-02-05'),
2001: Timestamp('2001-01-24'),
2002: Timestamp('2002-02-12'),
2003: Timestamp('2003-02-01'),
2004: Timestamp('2004-01-22'),
2005: Timestamp('2005-02-09'),
2006: Timestamp('2006-01-29'),
2007: Timestamp('2007-02-18'),
2008: Timestamp('2008-02-07'),
2009: Timestamp('2009-01-26'),
2010: Timestamp('2010-02-14'),
2011: Timestamp('2011-02-03'),
2012: Timestamp('2012-01-23'),
2013: Timestamp('2013-02-10'),
2014: Timestamp('2014-01-31'),
2015: Timestamp('2015-02-19'),
2016: Timestamp('2016-02-08'),
2017: Timestamp('2017-01-28'),
2018: Timestamp('2018-02-16'),
2019: Timestamp('2019-02-05'),
2020: Timestamp('2020-01-25'),
2021: Timestamp('2021-02-12'),
2022: Timestamp('2022-02-01'),
2023: Timestamp('2023-01-22'),
2024: Timestamp('2024-02-10'),
2025: Timestamp('2025-01-29'),
2026: Timestamp('2026-02-17'),
2027: Timestamp('2027-02-06'),
2028: Timestamp('2028-01-26'),
2029: Timestamp('2029-02-13'),
2030: Timestamp('2030-02-03'),
2031: Timestamp('2031-01-23'),
2032: Timestamp('2032-02-11'),
2033: Timestamp('2033-01-31'),
2034: Timestamp('2034-02-19'),
2035: Timestamp('2035-02-08'),
2036: Timestamp('2036-01-28'),
2037: Timestamp('2037-02-15'),
2038: Timestamp('2038-02-04'),
2039: Timestamp('2039-01-24'),
2040: Timestamp('2040-02-12'),
2041: Timestamp('2041-02-01'),
2042: Timestamp('2042-01-22'),
2043: Timestamp('2043-02-10'),
2044: Timestamp('2044-01-30'),
2045: Timestamp('2045-02-17'),
2046: Timestamp('2046-02-06'),
2047: Timestamp('2047-01-26'),
2048: Timestamp('2048-02-14'),
2049: Timestamp('2049-02-02'),
2050: Timestamp('2050-01-23'),
2051: Timestamp('2051-02-11'),
2052: Timestamp('2052-02-01'),
2053: Timestamp('2053-02-19'),
2054: Timestamp('2054-02-08'),
2055: Timestamp('2055-01-28'),
2056: Timestamp('2056-02-15'),
2057: Timestamp('2057-02-04'),
2058: Timestamp('2058-01-24'),
2059: Timestamp('2059-02-12'),
2060: Timestamp('2060-02-02'),
2061: Timestamp('2061-01-21'),
2062: Timestamp('2062-02-09'),
2063: Timestamp('2063-01-29'),
2064: Timestamp('2064-02-17'),
2065: Timestamp('2065-02-05'),
2066: Timestamp('2066-01-26'),
2067: Timestamp('2067-02-14'),
2068: Timestamp('2068-02-03'),
2069: Timestamp('2069-01-23'),
2070: Timestamp('2070-02-11'),
2071: Timestamp('2071-01-31'),
2072: Timestamp('2072-02-19'),
2073: Timestamp('2073-02-07'),
2074: Timestamp('2074-01-27'),
2075: Timestamp('2075-02-15'),
2076: Timestamp('2076-02-05'),
2077: Timestamp('2077-01-24'),
2078: Timestamp('2078-02-12'),
2079: Timestamp('2079-02-02'),
2080: Timestamp('2080-01-22'),
2081: Timestamp('2081-02-09'),
2082: Timestamp('2082-01-29'),
2083: Timestamp('2083-02-17'),
2084: Timestamp('2084-02-06'),
2085: Timestamp('2085-01-26'),
2086: Timestamp('2086-02-14'),
2087: Timestamp('2087-02-03'),
2088: Timestamp('2088-01-24'),
2089: Timestamp('2089-02-10'),
2090: Timestamp('2090-01-30'),
2091: Timestamp('2091-02-18'),
2092: Timestamp('2092-02-07'),
2093: Timestamp('2093-01-27'),
2094: Timestamp('2094-02-15'),
2095: Timestamp('2095-02-05'),
2096: Timestamp('2096-01-25'),
2097: Timestamp('2097-02-12'),
2098: Timestamp('2098-02-01'),
2099: Timestamp('2099-01-21'),
}
# Tomb-sweeping Day, "Qingming"
tsd_mapping = {
1960: Timestamp('1960-04-05'),
1961: Timestamp('1961-04-05'),
1962: Timestamp('1962-04-05'),
1963: Timestamp('1963-04-05'),
1964: Timestamp('1964-04-05'),
1965: Timestamp('1965-04-05'),
1966: Timestamp('1966-04-05'),
1967: Timestamp('1967-04-05'),
1968: Timestamp('1968-04-05'),
1969: Timestamp('1969-04-05'),
1970: Timestamp('1970-04-05'),
1971: Timestamp('1971-04-05'),
1972: Timestamp('1972-04-05'),
1973: Timestamp('1973-04-05'),
1974: Timestamp('1974-04-05'),
1975: Timestamp('1975-04-05'),
1976: Timestamp('1976-04-04'),
1977: Timestamp('1977-04-05'),
1978: Timestamp('1978-04-05'),
1979: Timestamp('1979-04-05'),
1980: Timestamp('1980-04-04'),
1981: Timestamp('1981-04-05'),
1982: Timestamp('1982-04-05'),
1983: Timestamp('1983-04-05'),
1984: Timestamp('1984-04-04'),
1985: Timestamp('1985-04-05'),
1986: Timestamp('1986-04-05'),
1987: Timestamp('1987-04-05'),
1988: Timestamp('1988-04-04'),
1989: Timestamp('1989-04-05'),
1990: Timestamp('1990-04-05'),
1991: Timestamp('1991-04-05'),
1992: Timestamp('1992-04-04'),
1993: Timestamp('1993-04-05'),
1994: Timestamp('1994-04-05'),
1995: Timestamp('1995-04-05'),
1996: Timestamp('1996-04-04'),
1997: Timestamp('1997-04-05'),
1998: Timestamp('1998-04-05'),
1999: Timestamp('1999-04-05'),
2000: Timestamp('2000-04-04'),
2001: Timestamp('2001-04-05'),
2002: Timestamp('2002-04-05'),
2003: Timestamp('2003-04-05'),
2004: Timestamp('2004-04-04'),
2005: Timestamp('2005-04-05'),
2006: Timestamp('2006-04-05'),
2007: Timestamp('2007-04-05'),
2008: Timestamp('2008-04-04'),
2009: Timestamp('2009-04-04'),
2010: Timestamp('2010-04-05'),
2011: Timestamp('2011-04-05'),
2012: Timestamp('2012-04-04'),
2013: Timestamp('2013-04-04'),
2014: Timestamp('2014-04-05'),
2015: Timestamp('2015-04-05'),
2016: Timestamp('2016-04-04'),
2017: Timestamp('2017-04-04'),
2018: Timestamp('2018-04-05'),
2019: Timestamp('2019-04-05'),
2020: Timestamp('2020-04-04'),
2021: Timestamp('2021-04-04'),
2022: Timestamp('2022-04-05'),
2023: Timestamp('2023-04-05'),
2024: Timestamp('2024-04-04'),
2025: Timestamp('2025-04-04'),
2026: Timestamp('2026-04-05'),
2027: Timestamp('2027-04-05'),
2028: Timestamp('2028-04-04'),
2029: Timestamp('2029-04-04'),
2030: Timestamp('2030-04-05'),
2031: Timestamp('2031-04-05'),
2032: Timestamp('2032-04-04'),
2033: Timestamp('2033-04-04'),
2034: Timestamp('2034-04-05'),
2035: Timestamp('2035-04-05'),
2036: Timestamp('2036-04-04'),
2037: Timestamp('2037-04-04'),
2038: Timestamp('2038-04-05'),
2039: Timestamp('2039-04-05'),
2040: Timestamp('2040-04-04'),
2041: Timestamp('2041-04-04'),
2042: Timestamp('2042-04-04'),
2043: Timestamp('2043-04-05'),
2044: Timestamp('2044-04-04'),
2045: Timestamp('2045-04-04'),
2046: Timestamp('2046-04-04'),
2047: Timestamp('2047-04-05'),
2048: Timestamp('2048-04-04'),
2049: Timestamp('2049-04-04'),
2050: Timestamp('2050-04-04'),
2051: Timestamp('2051-04-05'),
2052: Timestamp('2052-04-04'),
2053: Timestamp('2053-04-04'),
2054: Timestamp('2054-04-04'),
2055: Timestamp('2055-04-05'),
2056: Timestamp('2056-04-04'),
2057: Timestamp('2057-04-04'),
2058: Timestamp('2058-04-04'),
2059: Timestamp('2059-04-05'),
2060: Timestamp('2060-04-04'),
2061: Timestamp('2061-04-04'),
2062: Timestamp('2062-04-04'),
2063: Timestamp('2063-04-05'),
2064: Timestamp('2064-04-04'),
2065: Timestamp('2065-04-04'),
2066: Timestamp('2066-04-04'),
2067: Timestamp('2067-04-05'),
2068: Timestamp('2068-04-04'),
2069: Timestamp('2069-04-04'),
2070: Timestamp('2070-04-04'),
2071: Timestamp('2071-04-05'),
2072: Timestamp('2072-04-04'),
2073: Timestamp('2073-04-04'),
2074: Timestamp('2074-04-04'),
2075: Timestamp('2075-04-04'),
2076: Timestamp('2076-04-04'),
2077: Timestamp('2077-04-04'),
2078: Timestamp('2078-04-04'),
2079: Timestamp('2079-04-04'),
2080: Timestamp('2080-04-04'),
2081: Timestamp('2081-04-04'),
2082: Timestamp('2082-04-04'),
2083: Timestamp('2083-04-04'),
2084: Timestamp('2084-04-04'),
2085: Timestamp('2085-04-04'),
2086: Timestamp('2086-04-04'),
2087: Timestamp('2087-04-04'),
2088: Timestamp('2088-04-04'),
2089: Timestamp('2089-04-04'),
2090: Timestamp('2090-04-04'),
2091: Timestamp('2091-04-04'),
2092: Timestamp('2092-04-04'),
2093: Timestamp('2093-04-04'),
2094: Timestamp('2094-04-04'),
2095: Timestamp('2095-04-04'),
2096: Timestamp('2096-04-04'),
2097: Timestamp('2097-04-04'),
2098: Timestamp('2098-04-04'),
2099: Timestamp('2099-04-04'),
}
# Dragon Boat Festival, "Duanwu"
dbf_mapping = {
1960: Timestamp('1960-05-29'),
1961: Timestamp('1961-06-17'),
1962: Timestamp('1962-06-06'),
1963: Timestamp('1963-06-25'),
1964: Timestamp('1964-06-14'),
1965: Timestamp('1965-06-04'),
1966: Timestamp('1966-06-23'),
1967: Timestamp('1967-06-12'),
1968: Timestamp('1968-05-31'),
1969: Timestamp('1969-06-19'),
1970: Timestamp('1970-06-08'),
1971: Timestamp('1971-05-28'),
1972: Timestamp('1972-06-15'),
1973: Timestamp('1973-06-05'),
1974: Timestamp('1974-06-24'),
1975: Timestamp('1975-06-14'),
1976: Timestamp('1976-06-02'),
1977: Timestamp('1977-06-21'),
1978: Timestamp('1978-06-10'),
1979: Timestamp('1979-05-30'),
1980: Timestamp('1980-06-17'),
1981: Timestamp('1981-06-06'),
1982: Timestamp('1982-06-25'),
1983: Timestamp('1983-06-15'),
1984: Timestamp('1984-06-04'),
1985: Timestamp('1985-06-22'),
1986: Timestamp('1986-06-11'),
1987: Timestamp('1987-05-31'),
1988: Timestamp('1988-06-18'),
1989: Timestamp('1989-06-08'),
1990: Timestamp('1990-05-28'),
1991: Timestamp('1991-06-16'),
1992: Timestamp('1992-06-05'),
1993: Timestamp('1993-06-24'),
1994: Timestamp('1994-06-13'),
1995: Timestamp('1995-06-02'),
1996: Timestamp('1996-06-20'),
1997: Timestamp('1997-06-09'),
1998: Timestamp('1998-05-30'),
1999: Timestamp('1999-06-18'),
2000: Timestamp('2000-06-06'),
2001: Timestamp('2001-06-25'),
2002: Timestamp('2002-06-15'),
2003: Timestamp('2003-06-04'),
2004: Timestamp('2004-06-22'),
2005: Timestamp('2005-06-11'),
2006: Timestamp('2006-05-31'),
2007: Timestamp('2007-06-19'),
2008: Timestamp('2008-06-08'),
2009: Timestamp('2009-05-28'),
2010: Timestamp('2010-06-16'),
2011: Timestamp('2011-06-06'),
2012: Timestamp('2012-06-23'),
2013: Timestamp('2013-06-12'),
2014: Timestamp('2014-06-02'),
2015: Timestamp('2015-06-20'),
2016: Timestamp('2016-06-09'),
2017: Timestamp('2017-05-30'),
2018: Timestamp('2018-06-18'),
2019: Timestamp('2019-06-07'),
2020: Timestamp('2020-06-25'),
2021: Timestamp('2021-06-14'),
2022: Timestamp('2022-06-03'),
2023: Timestamp('2023-06-22'),
2024: Timestamp('2024-06-10'),
2025: Timestamp('2025-05-31'),
2026: Timestamp('2026-06-19'),
2027: Timestamp('2027-06-09'),
2028: Timestamp('2028-05-28'),
2029: Timestamp('2029-06-16'),
2030: Timestamp('2030-06-05'),
2031: Timestamp('2031-06-24'),
2032: Timestamp('2032-06-12'),
2033: Timestamp('2033-06-01'),
2034: Timestamp('2034-06-20'),
2035: Timestamp('2035-06-10'),
2036: Timestamp('2036-05-30'),
2037: Timestamp('2037-06-18'),
2038: Timestamp('2038-06-07'),
2039: Timestamp('2039-05-27'),
2040: Timestamp('2040-06-14'),
2041: Timestamp('2041-06-03'),
2042: Timestamp('2042-06-22'),
2043: Timestamp('2043-06-11'),
2044: Timestamp('2044-05-31'),
2045: Timestamp('2045-06-19'),
2046: Timestamp('2046-06-08'),
2047: Timestamp('2047-05-29'),
2048: Timestamp('2048-06-15'),
2049: Timestamp('2049-06-04'),
2050: Timestamp('2050-06-23'),
2051: Timestamp('2051-06-13'),
2052: Timestamp('2052-06-01'),
2053: Timestamp('2053-06-20'),
2054: Timestamp('2054-06-10'),
2055: Timestamp('2055-05-30'),
2056: Timestamp('2056-06-17'),
2057: Timestamp('2057-06-06'),
2058: Timestamp('2058-06-25'),
2059: Timestamp('2059-06-14'),
2060: Timestamp('2060-06-03'),
2061: Timestamp('2061-06-22'),
2062: Timestamp('2062-06-11'),
2063: Timestamp('2063-06-01'),
2064: Timestamp('2064-06-19'),
2065: Timestamp('2065-06-08'),
2066: Timestamp('2066-05-28'),
2067: Timestamp('2067-06-16'),
2068: Timestamp('2068-06-04'),
2069: Timestamp('2069-06-23'),
2070: Timestamp('2070-06-13'),
2071: Timestamp('2071-06-02'),
2072: Timestamp('2072-06-20'),
2073: Timestamp('2073-06-10'),
2074: Timestamp('2074-05-30'),
2075: Timestamp('2075-06-17'),
2076: Timestamp('2076-06-06'),
2077: Timestamp('2077-06-24'),
2078: Timestamp('2078-06-14'),
2079: Timestamp('2079-06-04'),
2080: Timestamp('2080-06-22'),
2081: Timestamp('2081-06-11'),
2082: Timestamp('2082-06-01'),
2083: Timestamp('2083-06-19'),
2084: Timestamp('2084-06-07'),
2085: Timestamp('2085-05-27'),
2086: Timestamp('2086-06-15'),
2087: Timestamp('2087-06-05'),
2088: Timestamp('2088-06-23'),
2089: Timestamp('2089-06-13'),
2090: Timestamp('2090-06-02'),
2091: Timestamp('2091-06-21'),
2092: Timestamp('2092-06-09'),
2093: Timestamp('2093-05-29'),
2094: Timestamp('2094-06-17'),
2095: Timestamp('2095-06-06'),
2096: Timestamp('2096-06-24'),
2097: Timestamp('2097-06-14'),
2098: Timestamp('2098-06-04'),
2099: Timestamp('2099-06-23'),
}
# Mid-autumn Festival, "Zhongqiu"
maf_mapping = {
1960: Timestamp('1960-10-05'),
1961: Timestamp('1961-09-24'),
1962: Timestamp('1962-09-13'),
1963: Timestamp('1963-10-02'),
1964: Timestamp('1964-09-20'),
1965: Timestamp('1965-09-10'),
1966: Timestamp('1966-09-29'),
1967: Timestamp('1967-09-18'),
1968: Timestamp('1968-10-06'),
1969: Timestamp('1969-09-26'),
1970: Timestamp('1970-09-15'),
1971: Timestamp('1971-10-03'),
1972: Timestamp('1972-09-22'),
1973: Timestamp('1973-09-11'),
1974: Timestamp('1974-09-30'),
1975: Timestamp('1975-09-20'),
1976: Timestamp('1976-09-08'),
1977: Timestamp('1977-09-27'),
1978: Timestamp('1978-09-17'),
1979: Timestamp('1979-10-05'),
1980: Timestamp('1980-09-23'),
1981: Timestamp('1981-09-12'),
1982: Timestamp('1982-10-01'),
1983: Timestamp('1983-09-21'),
1984: Timestamp('1984-09-10'),
1985: Timestamp('1985-09-29'),
1986: Timestamp('1986-09-18'),
1987: Timestamp('1987-10-07'),
1988: Timestamp('1988-09-25'),
1989: Timestamp('1989-09-14'),
1990: Timestamp('1990-10-03'),
1991: Timestamp('1991-09-22'),
1992: Timestamp('1992-09-11'),
1993: Timestamp('1993-09-30'),
1994: Timestamp('1994-09-20'),
1995: Timestamp('1995-09-09'),
1996: Timestamp('1996-09-27'),
1997: Timestamp('1997-09-16'),
1998: Timestamp('1998-10-05'),
1999: Timestamp('1999-09-24'),
2000: Timestamp('2000-09-12'),
2001: Timestamp('2001-10-01'),
2002: Timestamp('2002-09-21'),
2003: Timestamp('2003-09-11'),
2004: Timestamp('2004-09-28'),
2005: Timestamp('2005-09-18'),
2006: Timestamp('2006-10-06'),
2007: Timestamp('2007-09-25'),
2008: Timestamp('2008-09-14'),
2009: Timestamp('2009-10-03'),
2010: Timestamp('2010-09-22'),
2011: Timestamp('2011-09-12'),
2012: Timestamp('2012-09-30'),
2013: Timestamp('2013-09-19'),
2014: Timestamp('2014-09-08'),
2015: Timestamp('2015-09-27'),
2016: Timestamp('2016-09-15'),
2017: Timestamp('2017-10-04'),
2018: Timestamp('2018-09-24'),
2019: Timestamp('2019-09-13'),
2020: Timestamp('2020-10-01'),
2021: Timestamp('2021-09-21'),
2022: Timestamp('2022-09-10'),
2023: Timestamp('2023-09-29'),
2024: Timestamp('2024-09-17'),
2025: Timestamp('2025-10-06'),
2026: Timestamp('2026-09-25'),
2027: Timestamp('2027-09-15'),
2028: Timestamp('2028-10-03'),
2029: Timestamp('2029-09-22'),
2030: Timestamp('2030-09-12'),
2031: Timestamp('2031-10-01'),
2032: Timestamp('2032-09-19'),
2033: Timestamp('2033-09-08'),
2034: Timestamp('2034-09-27'),
2035: Timestamp('2035-09-16'),
2036: Timestamp('2036-10-04'),
2037: Timestamp('2037-09-24'),
2038: Timestamp('2038-09-13'),
2039: Timestamp('2039-10-02'),
2040: Timestamp('2040-09-20'),
2041: Timestamp('2041-09-10'),
2042: Timestamp('2042-09-28'),
2043: Timestamp('2043-09-17'),
2044: Timestamp('2044-10-05'),
2045: Timestamp('2045-09-25'),
2046: Timestamp('2046-09-15'),
2047: Timestamp('2047-10-04'),
2048: Timestamp('2048-09-22'),
2049: Timestamp('2049-09-11'),
2050: Timestamp('2050-09-30'),
2051: Timestamp('2051-09-19'),
2052: Timestamp('2052-09-07'),
2053: Timestamp('2053-09-26'),
2054: Timestamp('2054-09-16'),
2055: Timestamp('2055-10-05'),
2056: Timestamp('2056-09-24'),
2057: Timestamp('2057-09-13'),
2058: Timestamp('2058-10-02'),
2059: Timestamp('2059-09-21'),
2060: Timestamp('2060-09-09'),
2061: Timestamp('2061-09-28'),
2062: Timestamp('2062-09-17'),
2063: Timestamp('2063-10-06'),
2064: Timestamp('2064-09-25'),
2065: Timestamp('2065-09-15'),
2066: Timestamp('2066-10-03'),
2067: Timestamp('2067-09-23'),
2068: Timestamp('2068-09-11'),
2069: Timestamp('2069-09-29'),
2070: Timestamp('2070-09-19'),
2071: Timestamp('2071-09-08'),
2072: Timestamp('2072-09-26'),
2073: Timestamp('2073-09-16'),
2074: Timestamp('2074-10-05'),
2075: Timestamp('2075-09-24'),
2076: Timestamp('2076-09-12'),
2077: Timestamp('2077-10-01'),
2078: Timestamp('2078-09-20'),
2079: Timestamp('2079-09-10'),
2080: Timestamp('2080-09-28'),
2081: Timestamp('2081-09-17'),
2082: Timestamp('2082-10-06'),
2083: Timestamp('2083-09-26'),
2084: Timestamp('2084-09-14'),
2085: Timestamp('2085-10-03'),
2086: Timestamp('2086-09-22'),
2087: Timestamp('2087-09-11'),
2088: Timestamp('2088-09-29'),
2089: Timestamp('2089-09-18'),
2090: Timestamp('2090-09-08'),
2091: Timestamp('2091-09-27'),
2092: Timestamp('2092-09-16'),
2093: Timestamp('2093-10-05'),
2094: Timestamp('2094-09-24'),
2095: Timestamp('2095-09-13'),
2096: Timestamp('2096-09-30'),
2097: Timestamp('2097-09-20'),
2098: Timestamp('2098-09-09'),
2099: Timestamp('2099-09-29'),
}
# Buddha Shakyamuni day
bsd_mapping = {
1959: Timestamp('1959-5-15'),
1960: Timestamp('1960-5-3'),
1961: Timestamp('1961-5-22'),
1962: Timestamp('1962-5-11'),
1963: Timestamp('1963-5-1'),
1964: Timestamp('1964-5-19'),
1965: Timestamp('1965-5-8'),
1966: Timestamp('1966-5-27'),
1967: Timestamp('1967-5-16'),
1968: Timestamp('1968-5-4'),
1969: Timestamp('1969-5-23'),
1970: Timestamp('1970-5-12'),
1971: Timestamp('1971-5-2'),
1972: Timestamp('1972-5-20'),
1973: Timestamp('1973-5-10'),
1974: Timestamp('1974-4-29'),
1975: Timestamp('1975-5-18'),
1976: Timestamp('1976-5-6'),
1977: Timestamp('1977-5-25'),
1978: Timestamp('1978-5-14'),
1979: Timestamp('1979-5-3'),
1980: Timestamp('1980-5-21'),
1981: Timestamp('1981-5-11'),
1982: Timestamp('1982-5-1'),
1983: Timestamp('1983-5-20'),
1984: Timestamp('1984-5-8'),
1985: Timestamp('1985-5-27'),
1986: Timestamp('1986-5-16'),
1987: Timestamp('1987-5-5'),
1988: Timestamp('1988-5-23'),
1989: Timestamp('1989-5-12'),
1990: Timestamp('1990-5-2'),
1991: Timestamp('1991-5-21'),
1992: Timestamp('1992-5-10'),
1993: Timestamp('1993-5-28'),
1994: Timestamp('1994-5-18'),
1995: Timestamp('1995-5-7'),
1996: Timestamp('1996-5-24'),
1997: Timestamp('1997-5-14'),
1998: Timestamp('1998-5-3'),
1999: Timestamp('1999-5-22'),
2000: Timestamp('2000-5-11'),
2001: Timestamp('2001-4-30'),
2002: Timestamp('2002-5-19'),
2003: Timestamp('2003-5-8'),
2004: Timestamp('2004-5-26'),
2005: Timestamp('2005-5-15'),
2006: Timestamp('2006-5-5'),
2007: Timestamp('2007-5-24'),
2008: Timestamp('2008-5-12'),
2009: Timestamp('2009-5-2'),
2010: Timestamp('2010-5-21'),
2011: Timestamp('2011-5-10'),
2012: Timestamp('2012-4-28'),
2013: Timestamp('2013-5-17'),
2014: Timestamp('2014-5-6'),
2015: Timestamp('2015-5-25'),
2016: Timestamp('2016-5-14'),
2017: Timestamp('2017-5-3'),
2018: Timestamp('2018-5-22'),
2019: Timestamp('2019-5-12'),
2020: Timestamp('2020-4-30'),
2021: Timestamp('2021-5-19'),
2022: Timestamp('2022-5-8'),
2023: Timestamp('2023-5-26'),
2024: Timestamp('2024-5-15'),
2025: Timestamp('2025-5-5'),
2026: Timestamp('2026-5-24'),
2027: Timestamp('2027-5-13'),
2028: Timestamp('2028-5-2'),
2029: Timestamp('2029-5-20'),
2030: Timestamp('2030-5-9'),
2031: Timestamp('2031-5-28'),
2032: Timestamp('2032-5-16'),
2033: Timestamp('2033-5-6'),
2034: Timestamp('2034-5-25'),
2035: Timestamp('2035-5-15'),
2036: Timestamp('2036-5-3'),
2037: Timestamp('2037-5-22'),
2038: Timestamp('2038-5-11'),
2039: Timestamp('2039-4-30'),
2040: Timestamp('2040-5-18'),
2041: Timestamp('2041-5-7'),
2042: Timestamp('2042-5-26'),
2043: Timestamp('2043-5-16'),
2044: Timestamp('2044-5-5'),
2045: Timestamp('2045-5-24'),
2046: Timestamp('2046-5-13'),
2047: Timestamp('2047-5-2'),
2048: Timestamp('2048-5-20'),
2049: Timestamp('2049-5-9'),
2050: Timestamp('2050-5-28'),
2051: Timestamp('2051-5-17'),
2052: Timestamp('2052-5-6'),
2053: Timestamp('2053-5-25'),
2054: Timestamp('2054-5-15'),
2055: Timestamp('2055-5-4'),
2056: Timestamp('2056-5-22'),
2057: Timestamp('2057-5-11'),
2058: Timestamp('2058-4-30'),
2059: Timestamp('2059-5-19'),
2060: Timestamp('2060-5-7'),
2061: Timestamp('2061-5-26'),
2062: Timestamp('2062-5-16'),
2063: Timestamp('2063-5-5'),
2064: Timestamp('2064-5-23'),
2065: Timestamp('2065-5-12'),
2066: Timestamp('2066-5-1'),
2067: Timestamp('2067-5-20'),
2068: Timestamp('2068-5-9'),
2069: Timestamp('2069-4-28'),
2070: Timestamp('2070-5-17'),
2071: Timestamp('2071-5-7'),
2072: Timestamp('2072-5-25'),
2073: Timestamp('2073-5-14'),
2074: Timestamp('2074-5-3'),
2075: Timestamp('2075-5-22'),
2076: Timestamp('2076-5-10'),
2077: Timestamp('2077-4-30'),
2078: Timestamp('2078-5-19'),
2079: Timestamp('2079-5-8'),
2080: Timestamp('2080-5-26'),
2081: Timestamp('2081-5-16'),
2082: Timestamp('2082-5-5'),
2083: Timestamp('2083-5-24'),
2084: Timestamp('2084-5-12'),
2085: Timestamp('2085-5-1'),
2086: Timestamp('2086-5-20'),
2087: Timestamp('2087-5-10'),
2088: Timestamp('2088-4-28'),
2089: Timestamp('2089-5-17'),
2090: Timestamp('2090-5-7'),
2091: Timestamp('2091-5-25'),
2092: Timestamp('2092-5-13'),
2093: Timestamp('2093-5-3'),
2094: Timestamp('2094-5-21'),
2095: Timestamp('2095-5-11'),
2096: Timestamp('2096-4-30'),
2097: Timestamp('2097-5-19'),
2098: Timestamp('2098-5-8'),
2099: Timestamp('2099-5-27'),
}
# Double Ninth Festival
dnf_mapping = {
1959: Timestamp('1959-10-10'),
1960: Timestamp('1960-10-28'),
1961: Timestamp('1961-10-18'),
1962: Timestamp('1962-10-7'),
1963: Timestamp('1963-10-25'),
1964: Timestamp('1964-10-14'),
1965: Timestamp('1965-10-3'),
1966: Timestamp('1966-10-22'),
1967: Timestamp('1967-10-12'),
1968: Timestamp('1968-10-30'),
1969: Timestamp('1969-10-19'),
1970: Timestamp('1970-10-8'),
1971: Timestamp('1971-10-27'),
1972: Timestamp('1972-10-15'),
1973: Timestamp('1973-10-4'),
1974: Timestamp('1974-10-23'),
1975: Timestamp('1975-10-13'),
1976: Timestamp('1976-10-31'),
1977: Timestamp('1977-10-21'),
1978: Timestamp('1978-10-10'),
1979: Timestamp('1979-10-29'),
1980: Timestamp('1980-10-17'),
1981: Timestamp('1981-10-6'),
1982: Timestamp('1982-10-25'),
1983: Timestamp('1983-10-14'),
1984: Timestamp('1984-10-3'),
1985: Timestamp('1985-10-22'),
1986: Timestamp('1986-10-12'),
1987: Timestamp('1987-10-31'),
1988: Timestamp('1988-10-19'),
1989: Timestamp('1989-10-8'),
1990: Timestamp('1990-10-26'),
1991: Timestamp('1991-10-16'),
1992: Timestamp('1992-10-4'),
1993: Timestamp('1993-10-23'),
1994: Timestamp('1994-10-13'),
1995: Timestamp('1995-11-1'),
1996: Timestamp('1996-10-20'),
1997: Timestamp('1997-10-10'),
1998: Timestamp('1998-10-28'),
1999: Timestamp('1999-10-17'),
2000: Timestamp('2000-10-6'),
2001: Timestamp('2001-10-25'),
2002: Timestamp('2002-10-14'),
2003: Timestamp('2003-10-4'),
2004: Timestamp('2004-10-22'),
2005: Timestamp('2005-10-11'),
2006: Timestamp('2006-10-30'),
2007: Timestamp('2007-10-19'),
2008: Timestamp('2008-10-7'),
2009: Timestamp('2009-10-26'),
2010: Timestamp('2010-10-16'),
2011: Timestamp('2011-10-5'),
2012: Timestamp('2012-10-23'),
2013: Timestamp('2013-10-13'),
2014: Timestamp('2014-10-2'),
2015: Timestamp('2015-10-21'),
2016: Timestamp('2016-10-9'),
2017: Timestamp('2017-10-28'),
2018: Timestamp('2018-10-17'),
2019: Timestamp('2019-10-7'),
2020: Timestamp('2020-10-25'),
2021: Timestamp('2021-10-14'),
2022: Timestamp('2022-10-4'),
2023: Timestamp('2023-10-23'),
2024: Timestamp('2024-10-11'),
2025: Timestamp('2025-10-29'),
2026: Timestamp('2026-10-18'),
2027: Timestamp('2027-10-8'),
2028: Timestamp('2028-10-26'),
2029: Timestamp('2029-10-16'),
2030: Timestamp('2030-10-5'),
2031: Timestamp('2031-10-24'),
2032: Timestamp('2032-10-12'),
2033: Timestamp('2033-10-1'),
2034: Timestamp('2034-10-20'),
2035: Timestamp('2035-10-9'),
2036: Timestamp('2036-10-27'),
2037: Timestamp('2037-10-17'),
2038: Timestamp('2038-10-7'),
2039: Timestamp('2039-10-26'),
2040: Timestamp('2040-10-14'),
2041: Timestamp('2041-10-3'),
2042: Timestamp('2042-10-22'),
2043: Timestamp('2043-10-11'),
2044: Timestamp('2044-10-29'),
2045: Timestamp('2045-10-18'),
2046: Timestamp('2046-10-8'),
2047: Timestamp('2047-10-27'),
2048: Timestamp('2048-10-16'),
2049: Timestamp('2049-10-5'),
2050: Timestamp('2050-10-24'),
2051: Timestamp('2051-10-13'),
2052: Timestamp('2052-10-30'),
2053: Timestamp('2053-10-20'),
2054: Timestamp('2054-10-9'),
2055: Timestamp('2055-10-28'),
2056: Timestamp('2056-10-17'),
2057: Timestamp('2057-10-6'),
2058: Timestamp('2058-10-25'),
2059: Timestamp('2059-10-14'),
2060: Timestamp('2060-10-2'),
2061: Timestamp('2061-10-21'),
2062: Timestamp('2062-10-11'),
2063: Timestamp('2063-10-30'),
2064: Timestamp('2064-10-18'),
2065: Timestamp('2065-10-8'),
2066: Timestamp('2066-10-27'),
2067: Timestamp('2067-10-16'),
2068: Timestamp('2068-10-4'),
2069: Timestamp('2069-10-23'),
2070: Timestamp('2070-10-12'),
2071: Timestamp('2071-10-31'),
2072: Timestamp('2072-10-20'),
2073: Timestamp('2073-10-9'),
2074: Timestamp('2074-10-28'),
2075: Timestamp('2075-10-18'),
2076: Timestamp('2076-10-6'),
2077: Timestamp('2077-10-25'),
2078: Timestamp('2078-10-14'),
2079: Timestamp('2079-10-3'),
2080: Timestamp('2080-10-21'),
2081: Timestamp('2081-10-11'),
2082: Timestamp('2082-10-30'),
2083: Timestamp('2083-10-19'),
2084: Timestamp('2084-10-8'),
2085: Timestamp('2085-10-27'),
2086: Timestamp('2086-10-16'),
2087: Timestamp('2087-10-5'),
2088: Timestamp('2088-10-22'),
2089: Timestamp('2089-10-12'),
2090: Timestamp('2090-10-31'),
2091: Timestamp('2091-10-21'),
2092: Timestamp('2092-10-9'),
2093: Timestamp('2093-10-28'),
2094: Timestamp('2094-10-17'),
2095: Timestamp('2095-10-6'),
2096: Timestamp('2096-10-24'),
2097: Timestamp('2097-10-13'),
2098: Timestamp('2098-10-3'),
2099: Timestamp('2099-10-22')
}
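# Illustration only (not part of the original calendar data): a minimal sketch of how the
# pre-calculated lunisolar mappings above might be consulted. The helper name is a
# hypothetical assumption; the surrounding calendar class is not shown in this file.
def _lunisolar_holiday(year, mapping):
    """Return the pre-calculated Gregorian Timestamp for ``year``, or None if unmapped."""
    return mapping.get(year)

# Examples: _lunisolar_holiday(2024, sf_mapping) -> Timestamp('2024-02-10') (Spring Festival),
#           _lunisolar_holiday(2024, tsd_mapping) -> Timestamp('2024-04-04') (Qingming).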
|
#!/usr/bin/env python
""" generated source for module GdlValidator """
# package: org.ggp.base.util.gdl
# Note: assumes each grammar class lives in a module of the same name (java2python layout),
# so that the bare Symbol/SymbolAtom/SymbolList names used below are actually bound.
from org.ggp.base.util.symbol.grammar.Symbol import Symbol
from org.ggp.base.util.symbol.grammar.SymbolAtom import SymbolAtom
from org.ggp.base.util.symbol.grammar.SymbolList import SymbolList
#
# * The GdlValidator class implements Gdl validation for the GdlFactory class.
# * Its purpose is to validate whether or not a Symbol can be transformed into a
# * Gdl expression without error.
#
class GdlValidator(object):
""" generated source for class GdlValidator """
#
# * Validates whether a Symbol can be transformed into a Gdl expression
# * without error using the following process:
# * <ol>
# * <li>Returns true if the Symbol is a SymbolAtom. Otherwise, treats the
# * Symbol as a SymbolList.</li>
# * <li>Checks that the SymbolList contains no sub-elements that are
# * SymbolLists which do not begin with a SymbolAtom.</li>
# * <li>Checks that neither the SymbolList nor its sub-elements contain the
# * deprecated 'or' keyword.</li>
# * </ol>
# * Note that as implemented, this method is incomplete: it only verifies a
# * subset of the correctness properties of well-formed Gdl. A more thorough
# * implementation is advisable.
# *
# * @param symbol
# * The Symbol to validate.
# * @return True if the Symbol passes validation; false otherwise.
#
def validate(self, symbol):
""" generated source for method validate """
if isinstance(symbol, (SymbolAtom, )):
return True
        elif self.containsAnonymousList(symbol):
            return False
        elif self.containsOr(symbol):
            return False
else:
return True
#
    # * A recursive method that checks whether a Symbol contains a SymbolList that
    # * does not begin with a SymbolAtom.
    # *
    # * @param symbol
    # *            The Symbol to check.
    # * @return True if an anonymous list is found; False otherwise.
#
def containsAnonymousList(self, symbol):
""" generated source for method containsAnonymousList """
if isinstance(symbol, (SymbolAtom, )):
return False
        else:
            # An "anonymous" list is one whose first element is not a SymbolAtom.
            if not isinstance(symbol.get(0), (SymbolAtom, )):
                return True
            else:
                i = 0
                while i < symbol.size():
                    if self.containsAnonymousList(symbol.get(i)):
                        return True
                    i += 1
                return False
#
# * A recursive method that checks whether a Symbol contains the deprecated
# * 'or' keyword.
# *
# * @param symbol
    # *            The Symbol to check.
    # * @return True if the deprecated 'or' keyword is found; False otherwise.
#
    def containsOr(self, symbol):
        """ generated source for method containsOr """
        if isinstance(symbol, (SymbolAtom, )):
            return False
        else:
            # The deprecated 'or' keyword appears as an atom at the head of a list.
            if str(symbol.get(0)).lower() == "or":
                return True
            elif isinstance(symbol, (SymbolList, )):
                i = 0
                while i < symbol.size():
                    if self.containsOr(symbol.get(i)):
                        return True
                    i += 1
            return False
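# Illustration only (not part of the generated module above): a loose, self-contained
# analogue of the checks described in the validate() docstring, expressed on plain nested
# Python lists of strings and independent of the GGP-Base Symbol classes.
def _validate_sexpr(expr):
    """Return True if a nested-list s-expression passes checks analogous to GdlValidator."""
    if isinstance(expr, str):
        return True  # atoms are always valid on their own
    if not expr or not isinstance(expr[0], str):
        return False  # "anonymous" list: does not begin with an atom
    if any(isinstance(e, str) and e.lower() == "or" for e in expr):
        return False  # deprecated 'or' keyword
    return all(_validate_sexpr(e) for e in expr)

# _validate_sexpr(["legal", "?player", "?move"])  -> True
# _validate_sexpr([["x"], "y"])                   -> False (anonymous sub-list)
# _validate_sexpr(["or", "a", "b"])               -> False (deprecated keyword)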
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : 2_BrowserClass.py
@Time : 2020-8-22 20:20:16
@Author : Recluse Xu
@Version : 1.0
@Contact : 444640050@qq.com
@Desc : Browser class basics (pyppeteer)
Official docs: https://miyakogi.github.io/pyppeteer/reference.html#pyppeteer.page.Page.target
'''
# here put the import lib
import asyncio
from pyppeteer import launch
async def main():
    # Create the browser object
    browser = await launch({
        'headless': False,
        'ignoreHTTPSErrors': True,
        'viewport': {'width': 1280, 'height': 800},
        'autoClose': True,
    })
    # Create a new page
    page = await browser.newPage()
    # An incognito browser context could be used instead; pages opened in it do not
    # persist cookies and similar data:
# context = await browser.createIncognitoBrowserContext()
# page = await context.newPage()
# await page.goto('https://example.com')
await page.goto('http://www.baidu.com')
    # List the browser contexts
    c = browser.browserContexts
    print(c)
    # Return the browser process
    c = browser.process
    print(c)
    # Return the browser's user agent string (a coroutine in pyppeteer, so await it)
    c = await browser.userAgent()
    print(c)
    # Get the page objects from the browser
    pages = await browser.pages()
    page = pages[0]
    # Disconnect from the browser (also a coroutine)
    await browser.disconnect()
asyncio.get_event_loop().run_until_complete(main())
|
"""
File Processing (file_processing.py):
-------------------------------------------------------------------------------
Last updated 7/2/2015
This module creates functions for generic file operations.
"""
# Packages
import os
import sys
def get_file(dirct, contains=[""]):
""" Given a directory, this function finds a file that contains each
string in an array of strings. It returns the string
:param dirct: The directory to be searched in.
:param contains: An array of strings, all of which must be contained in
the filename
"""
for filename in os.listdir(dirct):
if all(item in filename for item in contains):
return filename
# Note: move the naics_processing search_ws function here.
# Will require changing every instance it is called in.
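if __name__ == "__main__":
    # Minimal self-check of get_file (illustration only; the file name below is made up
    # and this block is not part of the original module).
    import tempfile
    with tempfile.TemporaryDirectory() as d:
        open(os.path.join(d, "naics_2012.csv"), "w").close()
        print(get_file(d, contains=["naics", ".csv"]))  # -> naics_2012.csv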
|
#!/usr/bin/env python
import platform
import resource
from unicorn import *
import regress
# OS X: OK with 2047 iterations.
# OS X: Crashes at the 2048th iteration ("qemu: qemu_thread_create: Resource temporarily unavailable").
# Linux: No crashes observed.
class ThreadCreateCrash(regress.RegressTest):
def test(self):
        for i in range(2048):
Uc(UC_ARCH_X86, UC_MODE_64)
self.assertTrue(True, "If not reached, then we have a crashing bug.")
if __name__ == '__main__':
regress.main()
|
import os
import compas
from compas.geometry import Point, Polygon
from compas.datastructures import Mesh
from compas.utilities import i_to_red
from compas_assembly.datastructures import Assembly, Block
from compas_view2.app import App
from compas_view2.objects import Object, NetworkObject, MeshObject
from compas_view2.objects import Collection
Object.register(Assembly, NetworkObject)
Object.register(Block, MeshObject)
FILE_I = os.path.join(os.path.dirname(__file__), 'armadillo_assembly.json')
assembly = compas.json_load(FILE_I)
# ==============================================================================
# Flatness
# ==============================================================================
# ==============================================================================
# Export
# ==============================================================================
# ==============================================================================
# Viz
# ==============================================================================
viewer = App()
nodes = []
blocks = []
interfaces = []
sides = []
colors = []
for node in assembly.nodes():
nodes.append(Point(* assembly.node_coordinates(node)))
blocks.append(assembly.node_attribute(node, 'block'))
for node in assembly.nodes():
block = assembly.node_attribute(node, 'block')
faces = sorted(block.faces(), key=lambda face: block.face_area(face))[:-2]
for face in faces:
side = Polygon(block.face_coordinates(face))
mesh = Mesh.from_polygons([side])
sides.append(mesh)
for mesh in sides:
face = list(mesh.faces())[0]
dev = mesh.face_flatness(face)
colors.append(i_to_red(dev, normalize=True))
viewer.add(Collection(blocks), show_faces=False, show_edges=True)
viewer.add(Collection(sides), colors=colors, show_edges=False)
viewer.run()
|
import random
def main():
length = get_length()
chars = parse_chars()
password = construct_pass(length, chars)
print("Your password is: " + password)
input("Press enter to exit.")
def get_length():
print("Weak: 0-7; Moderate: 8-11; Strong: 12+")
while True:
num = input("How many characters long is your password? (-1 to quit) ")
try:
num = int(num)
if num == -1:
exit(1)
elif num <= 0:
print("Please type a positive number.\n")
else:
return num
except ValueError:
print("Please type a number.\n")
def parse_chars():
lowercase = "abcdefghijkmnopqrstuvwxyz" # 'l' as in lambda removed
uppercase = "ABCDEFGHJKLMNPQRSTUVWXYZ" # 'I' as in IGLOO, 'O' as in OPRAH removed
numbers = "123456789" # 'zero' removed
special_characters = "!\"#$%&'()*+,-./:;?@[\\]^_`{|}~"
return lowercase + uppercase + numbers + special_characters
def construct_pass(length, chars):
pw = ""
for i in range(length):
pw += random.choice(chars)
return pw
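# Run the generator when the script is executed directly (main() is otherwise never invoked).
if __name__ == "__main__":
    main()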
|
from .clang_tidy_parser import ClangTidyParser, ClangMessage
|
from typing import MutableMapping
from app.views.handlers.list_action import ListAction
class ListAddQuestion(ListAction):
def is_location_valid(self):
if not super().is_location_valid() or self._current_location.list_item_id:
return False
return True
def handle_post(self):
list_item_id = self.questionnaire_store_updater.add_list_item(
self.parent_block["for_list"]
)
self.questionnaire_store_updater.update_answers(self.form.data, list_item_id)
return super().handle_post()
def _resolve_custom_page_title_vars(self) -> MutableMapping:
# For list add blocks, no list item id is yet available. Instead, we resolve
# `list_item_position` to the position in the list it would be if added.
list_length = len(
self._questionnaire_store.list_store[self._current_location.list_name]
)
return {"list_item_position": list_length + 1}
|
import os
import re
import pandas as pd
from scripts.task import Task
from Constants import METADATA, RESULT
class FindPatternInJudgements(Task):
def __init__(self, input_dir: str, pattern: str, label: str, line_limit: int = 0, limit_lines: bool = False,
store_result: bool = True):
super().__init__()
self.limit_lines = limit_lines
self.line_limit = line_limit
self.store_result = store_result
self.label = label
self.input_dir = input_dir
try:
self.pattern = re.compile(pattern, re.IGNORECASE)
except re.error:
raise Exception({"message": "given pattern invalid"})
def _execute(self):
# metadata = self.shared_resources.get(METADATA, None)
# assert isinstance(metadata, pd.DataFrame)
labels_found = self.shared_resources.get(RESULT, {})
for filename in os.listdir(self.input_dir):
if filename.endswith(".txt"):
judgement = os.path.join(self.input_dir, filename)
cino = filename.split('_')[0]
results = []
line_count = 0
with open(judgement) as f:
for line in f:
line = self.clean_line(line)
if not self.line_limit or line_count <= self.line_limit:
all_found = self.get_patterns(line)
if self.label == "fine_imposed":
for found in all_found:
results.append(found.replace(',', ""))
else:
results.extend(all_found)
line_count += 1
if not self.store_result:
results = True if len(results) else False
else:
current = labels_found.get(cino, {self.label: []}).get(self.label, [])
results = current + results
labels_found.setdefault(cino, {}).update({self.label: results})
self.share_next(RESULT, labels_found)
def get_patterns(self, line):
patterns = []
all_found = self.pattern.findall(line)
for found in all_found:
if type(found) is tuple:
filtered_list = list(filter(None, list(found)))
patterns.extend(filtered_list)
else:
patterns.append(found)
return list(set(patterns))
@staticmethod
def clean_line(line):
return line.\
replace("\n", ' ').\
replace(' ', ' ')
|
import pytest
from rdkit import RDLogger
from prolif.fingerprint import Fingerprint
from prolif.interactions import _INTERACTIONS, Interaction, get_mapindex
import prolif
from .test_base import ligand_mol
from . import mol2factory
# disable rdkit warnings
lg = RDLogger.logger()
lg.setLevel(RDLogger.ERROR)
@pytest.fixture(scope="module")
def mol1(request):
return getattr(mol2factory, request.param)()
@pytest.fixture(scope="module")
def mol2(request):
return getattr(mol2factory, request.param)()
class TestInteractions:
@pytest.fixture(scope="class")
def fingerprint(self):
return Fingerprint()
@pytest.mark.parametrize("func_name, mol1, mol2, expected", [
("cationic", "cation", "anion", True),
("cationic", "anion", "cation", False),
("cationic", "cation", "benzene", False),
("anionic", "cation", "anion", False),
("anionic", "anion", "cation", True),
("anionic", "anion", "benzene", False),
("cationpi", "cation", "benzene", True),
("cationpi", "cation_false", "benzene", False),
("cationpi", "benzene", "cation", False),
("cationpi", "cation", "cation", False),
("cationpi", "benzene", "benzene", False),
("pication", "benzene", "cation", True),
("pication", "benzene", "cation_false", False),
("pication", "cation", "benzene", False),
("pication", "cation", "cation", False),
("pication", "benzene", "benzene", False),
("pistacking", "benzene", "etf", True),
("pistacking", "etf", "benzene", True),
("pistacking", "ftf", "benzene", True),
("pistacking", "benzene", "ftf", True),
("facetoface", "benzene", "ftf", True),
("facetoface", "ftf", "benzene", True),
("facetoface", "benzene", "etf", False),
("facetoface", "etf", "benzene", False),
("edgetoface", "benzene", "etf", True),
("edgetoface", "etf", "benzene", True),
("edgetoface", "benzene", "ftf", False),
("edgetoface", "ftf", "benzene", False),
("hydrophobic", "benzene", "etf", True),
("hydrophobic", "benzene", "ftf", True),
("hydrophobic", "benzene", "chlorine", True),
("hydrophobic", "benzene", "anion", False),
("hydrophobic", "benzene", "cation", False),
("hbdonor", "hb_donor", "hb_acceptor", True),
("hbdonor", "hb_donor", "hb_acceptor_false", False),
("hbdonor", "hb_acceptor", "hb_donor", False),
("hbacceptor", "hb_acceptor", "hb_donor", True),
("hbacceptor", "hb_acceptor_false", "hb_donor", False),
("hbacceptor", "hb_donor", "hb_acceptor", False),
("xbdonor", "xb_donor", "xb_acceptor", True),
("xbdonor", "xb_donor", "xb_acceptor_false_xar", False),
("xbdonor", "xb_donor", "xb_acceptor_false_axd", False),
("xbdonor", "xb_acceptor", "xb_donor", False),
("xbacceptor", "xb_acceptor", "xb_donor", True),
("xbacceptor", "xb_acceptor_false_xar", "xb_donor", False),
("xbacceptor", "xb_acceptor_false_axd", "xb_donor", False),
("xbacceptor", "xb_donor", "xb_acceptor", False),
("metaldonor", "metal", "ligand", True),
("metaldonor", "metal_false", "ligand", False),
("metaldonor", "ligand", "metal", False),
("metalacceptor", "ligand", "metal", True),
("metalacceptor", "ligand", "metal_false", False),
("metalacceptor", "metal", "ligand", False),
], indirect=["mol1", "mol2"])
def test_interaction(self, fingerprint, func_name, mol1, mol2, expected):
interaction = getattr(fingerprint, func_name)
assert interaction(mol1, mol2) is expected
def test_warning_supersede(self):
old = id(_INTERACTIONS["Hydrophobic"])
with pytest.warns(UserWarning,
match="interaction has been superseded"):
class Hydrophobic(Interaction):
def detect(self):
pass
new = id(_INTERACTIONS["Hydrophobic"])
assert old != new
# fix dummy Hydrophobic class being reused in later unrelated tests
class Hydrophobic(prolif.interactions.Hydrophobic):
pass
def test_error_no_detect(self):
class Dummy(Interaction):
pass
with pytest.raises(TypeError,
match="Can't instantiate abstract class Dummy"):
Dummy()
# fix Dummy class being reused in later unrelated tests
del prolif.interactions._INTERACTIONS["Dummy"]
@pytest.mark.parametrize("index", [
0, 1, 3, 42, 78
])
def test_get_mapindex(self, index):
parent_index = get_mapindex(ligand_mol[0], index)
assert parent_index == index
|
# -*- coding: utf-8 -*-
"""
================================
rx_sect_plot (mod: 'vmlib.seis')
================================
basemap
cdp_spacing
distance
"""
__all__ = ['cdp', 'navmerge', 'rcv', 'src', 'traces',
'rx_sect_plot', 'rx_sect_qc', 'segy']
from .cdp import CDP_line
from .navmerge import basemap, elevation, offset_cdp_fold, amplitude_offset
from .navmerge import stacking, fold, cdp_spacing, rms_map, gathers_short
from .rcv import RCV_line
from .rx_sect_plot import section, basemap, cdp_spacing, distances
from .rx_sect_qc import get_info_from_text_header, compare_text_bin_header
from .src import SRC_line
from .traces import Line
from . import plot
from . import qc
from . import segy
|
from django.contrib.auth.models import Group
def get_group_obj(gid):
try:
return Group.objects.get(pk=gid)
except Group.DoesNotExist:
return None
|
from .BayesianLogisticRegression import BayesianLogisticRegression
from .BayesianNetwork import BayesianNetwork, MLP_mcmc
from .MLP import MLP
from .CNN import CNN, Net
from .HMC_GP import HMC_GP
|
import allure
from antiphishme.src.api_modules.ip_module import get_ip, get_ip_details
from antiphishme.tests.test_helpers import (
assert_true,
assert_equal,
assert_none,
assert_type,
assert_not_empty,
info
)
@allure.epic("api_modules")
@allure.parent_suite("Unit tests")
@allure.story('Unit')
@allure.suite("api_modules")
@allure.sub_suite("ip")
class Tests:
@allure.description("""
Test ip api module
Expect proper ip for given domain.
""")
def test_get_ip(self):
domain = "google.com"
info("Requested domain - {}".format(domain))
ip = get_ip(domain)
info("Returned ip: {}".format(ip))
assert_true(ip.count('.') == 3, "Check if returned ip is correct ip")
@allure.description("""
Test ip api module
        Send a nonexistent domain and expect None.
""")
def test_get_ip_unexisting(self):
domain = "."
info("Requested domain - {}".format(domain))
ip = get_ip(domain)
info("Returned ip: {}".format(ip))
assert_none(ip, "Check if result for get_ip is None")
@allure.description("""
Test ip api module
Send correct ip and expect details dict.
""")
def test_get_ip_details(self):
ip = "8.8.8.8"
info("Requested ip - {}".format(ip))
details = get_ip_details(ip)
assert_type(details, dict, "Check if returned results are of type dict")
assert_not_empty(details, "Check if returned results are not empty")
@allure.description("""
Test ip api module
Send reserved ip and expect proper response.
""")
def test_get_ip_details_reserved_range(self):
ip = "127.0.0.1"
info("Requested ip - {}".format(ip))
details = get_ip_details(ip)
assert_type(details, dict, "Check if returned results are of type dict")
assert_not_empty(details, "Check if returned results are not empty")
field = 'status'
expected = 'reserved_range'
assert_equal(details[field], expected, "Check if returned status is equal to expected one")
field = 'ip'
expected = ip
assert_equal(details[field], expected, "Check if returned ip is equal to expected one")
@allure.description("""
Test ip api module
Send wrong ip and expect None.
""")
def test_get_ip_details_wrong_ip(self):
ip = "abc"
info("Requested ip - {}".format(ip))
details = get_ip_details(ip)
assert_none(details, "Check if returned results are empty/None")
|
import datetime
import pytest
real_datetime_class = datetime.datetime
class DatetimeSubclassMeta(type):
@classmethod
def __instancecheck__(mcs, obj):
return isinstance(obj, real_datetime_class)
class BaseMockDatetime(real_datetime_class):
@classmethod
def now(cls):
if hasattr(cls, '_now') and cls._now is not None:
return cls._now
else:
return real_datetime_class.now()
@classmethod
def set_now(cls, datetime_value):
cls._now = datetime_value
MockedDatetime = DatetimeSubclassMeta(
'datetime',
(BaseMockDatetime,),
{}
)
@pytest.fixture
def mock_datetime(monkeypatch):
with monkeypatch.context() as ctx:
ctx.setattr(datetime, 'datetime', MockedDatetime)
yield
    if hasattr(MockedDatetime, '_now'):
        del MockedDatetime._now
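# Illustration only (a hypothetical test, not part of the original fixture module):
def test_frozen_now_example(mock_datetime):
    frozen = real_datetime_class(2020, 1, 1, 12, 0, 0)
    datetime.datetime.set_now(frozen)             # freeze datetime.datetime.now()
    assert datetime.datetime.now() == frozen
    assert isinstance(frozen, datetime.datetime)  # metaclass keeps isinstance() working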
|
# -*- coding: utf-8 -*-
import djchoices
from django.db import models
import schools.models
import users.models
class Discount(models.Model):
class Type(djchoices.DjangoChoices):
SOCIAL = djchoices.ChoiceItem(1, 'Социальная скидка')
PARTNER = djchoices.ChoiceItem(2, 'Скидка от партнёра')
STATE = djchoices.ChoiceItem(3, 'Скидка от государства')
OLYMPIADS = djchoices.ChoiceItem(4, 'Олимпиадная скидка')
ORGANIZATION = djchoices.ChoiceItem(5, 'Частичная оплата от организации')
school = models.ForeignKey(
schools.models.School,
on_delete=models.CASCADE,
related_name='+',
)
user = models.ForeignKey(
users.models.User,
on_delete=models.CASCADE,
related_name='discounts',
)
type = models.PositiveIntegerField(choices=Type.choices, validators=[Type.validator])
    # If amount == 0, the discount is considered to be "under review" rather than granted
amount = models.PositiveIntegerField(
help_text='Размер скидки. Выберите ноль, чтобы скидка считалась «рассматриваемой»')
private_comment = models.TextField(blank=True, help_text='Не показывается школьнику')
public_comment = models.TextField(blank=True, help_text='Показывается школьнику')
@property
def type_name(self):
return self.Type.values[self.type]
def __str__(self):
return "%s %s" % (self.user, self.Type.values[self.type])
|
import blobconverter
import numpy as np
import cv2
import depthai as dai
class TextHelper:
def __init__(self) -> None:
self.bg_color = (0, 0, 0)
self.color = (255, 255, 255)
self.text_type = cv2.FONT_HERSHEY_SIMPLEX
self.line_type = cv2.LINE_AA
def putText(self, frame, text, coords):
cv2.putText(frame, text, coords, self.text_type, 0.5, self.bg_color, 4, self.line_type)
cv2.putText(frame, text, coords, self.text_type, 0.5, self.color, 1, self.line_type)
class_names = ['neutral', 'happy', 'sad', 'surprise', 'anger']
openvinoVersion = "2021.3"
p = dai.Pipeline()
p.setOpenVINOVersion(version=dai.OpenVINO.Version.VERSION_2021_3)
cam = p.create(dai.node.ColorCamera)
cam.setIspScale(2,3)
cam.setInterleaved(False)
cam.setVideoSize(720,720)
cam.setPreviewSize(720,720)
# Send color frames to the host via XLink
cam_xout = p.create(dai.node.XLinkOut)
cam_xout.setStreamName("video")
cam.video.link(cam_xout.input)
# Crop 720x720 -> 300x300
face_det_manip = p.create(dai.node.ImageManip)
face_det_manip.initialConfig.setResize(300, 300)
cam.preview.link(face_det_manip.inputImage)
# NN that detects faces in the image
face_nn = p.create(dai.node.MobileNetDetectionNetwork)
face_nn.setConfidenceThreshold(0.3)
face_nn.setBlobPath(blobconverter.from_zoo("face-detection-retail-0004", shaves=6, version=openvinoVersion))
face_det_manip.out.link(face_nn.input)
# Send ImageManipConfig to host so it can visualize the landmarks
config_xout = p.create(dai.node.XLinkOut)
config_xout.setStreamName("face_det")
face_nn.out.link(config_xout.input)
# Script node takes the face-detection NN output as an input, iterates over the detected
# bounding boxes and sends an ImageManipConfig (plus the frame) to manip_crop for each one
image_manip_script = p.create(dai.node.Script)
face_nn.out.link(image_manip_script.inputs['nn_in'])
cam.preview.link(image_manip_script.inputs['frame'])
image_manip_script.setScript("""
import time
def limit_roi(det):
if det.xmin <= 0: det.xmin = 0.001
if det.ymin <= 0: det.ymin = 0.001
if det.xmax >= 1: det.xmax = 0.999
if det.ymax >= 1: det.ymax = 0.999
while True:
frame = node.io['frame'].get()
face_dets = node.io['nn_in'].get().detections
# node.warn(f"Faces detected: {len(face_dets)}")
for det in face_dets:
limit_roi(det)
# node.warn(f"Detection rect: {det.xmin}, {det.ymin}, {det.xmax}, {det.ymax}")
cfg = ImageManipConfig()
cfg.setCropRect(det.xmin, det.ymin, det.xmax, det.ymax)
cfg.setResize(64, 64)
cfg.setKeepAspectRatio(False)
node.io['manip_cfg'].send(cfg)
node.io['manip_img'].send(frame)
# node.warn(f"1 from nn_in: {det.xmin}, {det.ymin}, {det.xmax}, {det.ymax}")
""")
# This ImageManip will crop the color preview frame based on the NN detections. The resulting
# image will be the cropped face that was detected by the face-detection NN.
manip_crop = p.create(dai.node.ImageManip)
image_manip_script.outputs['manip_img'].link(manip_crop.inputImage)
image_manip_script.outputs['manip_cfg'].link(manip_crop.inputConfig)
manip_crop.initialConfig.setResize(64, 64)
manip_crop.setWaitForConfigInput(True)
# Second NN that detects emotions from the cropped 64x64 face
landmarks_nn = p.createNeuralNetwork()
landmarks_nn.setBlobPath(blobconverter.from_zoo("emotions-recognition-retail-0003", shaves=6, version=openvinoVersion))
manip_crop.out.link(landmarks_nn.input)
landmarks_nn_xout = p.createXLinkOut()
landmarks_nn_xout.setStreamName("emotions")
landmarks_nn.out.link(landmarks_nn_xout.input)
def frame_norm(frame, bbox):
normVals = np.full(len(bbox), frame.shape[0])
normVals[::2] = frame.shape[1]
return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)
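# Example: with the 720x720 frames used here, frame_norm(frame, (0.25, 0.25, 0.75, 0.75))
# returns array([180, 180, 540, 540]) -- the normalized bbox rescaled to pixel coordinates.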
# Pipeline is defined, now we can connect to the device
with dai.Device(p) as device:
videoQ = device.getOutputQueue(name="video", maxSize=1, blocking=False)
faceDetQ = device.getOutputQueue(name="face_det", maxSize=4, blocking=False)
emotionsQ = device.getOutputQueue(name="emotions", maxSize=4, blocking=False)
textHelper = TextHelper()
while True:
if faceDetQ.has():
detections = faceDetQ.get().detections
frame = videoQ.get().getCvFrame()
for detection in detections:
bbox = frame_norm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 255, 255), 1)
                # Each face detection is sent to the emotion estimation model; wait for its result
nndata = emotionsQ.get()
# [print(f"Layer name: {l.name}, Type: {l.dataType}, Dimensions: {l.dims}") for l in nndata.getAllLayers()]
results = np.array(nndata.getFirstLayerFp16())
result_conf = np.max(results)
                if result_conf > 0.3:
name = class_names[np.argmax(results)]
conf = round(100 * result_conf, 1)
textHelper.putText(frame, f"{name}, {conf}%", (bbox[0] + 10, bbox[1] + 20))
cv2.imshow("frame", frame)
# if frame is not None:
if cv2.waitKey(1) == ord('q'):
break
|
#!/usr/bin/env python
'''
Create CFIS photometric images from SKIRT IFS datacube.
Version 0.1
SED to flux conversions use the AB system following the documentation here: http://www.astro.ljmu.ac.uk/~ikb/research/mags-fluxes/
Also found here (though incorrect in places): http://mfouesneau.github.io/docs/pyphot/
Update History:
Version 0.1 - Corrected to explicitly account for the target redshift by stretching the emitted spectrum by the redshift factor and reducing the specific intensities by a factor of (1+z)**5. Now requires a redshift as input in addition to image properties. The output image AB surface brightnesses have already been scaled to account for the target redshift. ObsRealism should not then apply any brightness factor.
'''
import os,sys,string,time
import numpy as np
from astropy.io import fits
def SpecPhotCFIS(inputName,_outputName,wl_filename,cfis_cfg_path,redshift=0.05,bands=['g','r','i'],airmass=0.,overwrite=0):
'''
    Generate idealized mock CFIS photometric images with the same spatial scale as the original datacube. The input is a SKIRT datacube with specific intensity units of W/m2/micron/arcsec2. This code is provided as a companion to the Realism suite, as it produces output in the exact format accepted by the suite.
"_outputName - formattable string: Should be a formattable string which can be updated for each band (e.g. Outputs/photo_{}.fits).
"wl_filename" - string: Path to wavelength file accompanied with SKIRT. The file should show provide the wavelengths for which each specific intensity in the datacubes is defined.
"cfis_cfg_path" - string: Path to CFIS response curve files.
"redshift" - float: Redshift by which the input spectrum is streched and dimmed.
"airmass" - float>=0: Airmass used to determine atmospheric extinction effects on the response curves. The average value for CFIS is 1.13 over all fields. If 0, the unattenuated response curves are used (only telescope/filter/ccd). This should nominally be set to zero because calibrated images account for atmospheric extinction.
"overwrite" - boolean: Overwrite output in output location.
"bands" - list object: Options are 'u','g','r','i','z', but depends on how much of the spectrum is modelled by SKIRT. Additionally, care should also be taken when interpreting 'u' or 'z' band fluxes because response functions in these bands are strongly (and non-uniformly) affected by atmospheric absorption.
'''
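    # Summary of the conversion implemented below (explanatory comment only; notation
    # follows the AB-system reference cited in the module docstring):
    #   lambda_pivot**2 = sum(R * lambda * dlambda) / sum(R * dlambda / lambda)
    #   <f_nu> = (lambda_pivot**2 / c) * sum(lambda * R * I_lambda * dlambda)
    #                                  / sum(lambda * R * dlambda)          [Jy/arcsec2]
    #   m_AB  = -2.5 * log10(<f_nu> / 3631 Jy)                              [mag/arcsec2]
    # with an additional (1+z)**-5 factor applied for the target redshift, as described
    # in the version notes above.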
# useful constants / quantities
speed_of_light = 2.998e8 # [m/s]
speed_of_light = speed_of_light*1e10 # [Angstrom/s]
# wavelengths of datacube [microns] (expand by redshift factor)
wl = np.loadtxt(wl_filename).astype(float)*1e4*(1+redshift) # [Angstrom]
# wavelength bin widths [Angstrom]
dwl = np.median(np.diff(wl)) # [Angstrom]
# read IFU cube header and data
with fits.open(inputName,mode='readonly') as hdul:
# IFU header
header = hdul[0].header
# image spectral flux density in W/m2/micron/arcsec2; convert to [Jy*Hz/Angstrom/arcsec2]
ifu_data = hdul[0].data*1e22 # [Jy*Hz/Angstrom/arcsec2]
# header changes for photometry
header.remove('NAXIS3')
# calibrated flux units; easily converted to nanomaggies
header['BUNIT'] = 'AB mag/arcsec2'
header.set('FILTER', value='', comment='Transmission band')
header.set('WLEFF' , value=np.float32(0.) , comment='Effective WL of response [Angstrom]')
for band in bands:
# filter response function
filter_data = np.loadtxt(cfis_cfg_path+'{}_CFIS.res'.format(band.capitalize()))
# wavelength elements in response curve file
filter_wl = filter_data[:,0]
# filter response
filter_res = filter_data[:,1]
# filter response interpolated onto image wavelength grid
filter_res = np.interp(xp=filter_wl,x=wl,fp=filter_res,left=0,right=0) # unitless
# apply extinction correction to filters: extinction file
kkFile = cfis_cfg_path+'CFIS_extinction.txt'
# unpack wavelengths and extinctions [mag/airmass]
wl_kk,kk = np.loadtxt(kkFile,unpack=True)
# interpolated onto wavelength grid
kk = np.interp(xp=wl_kk,x=wl,fp=kk,left=np.nanmax(kk),right=0)
# 1.25 because CFIS response curves assume 1.25 airmass by default
filter_res *= 10**(0.4*((1.25-airmass)*kk))
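        # explanatory note: attenuation at airmass X is 10**(-0.4*k(lambda)*X), so removing
        # the 1.25 airmasses already baked into the CFIS curves and applying the requested
        # airmass yields the correction factor 10**(0.4*(1.25 - airmass)*k(lambda))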
        # filter-specific pivot wavelength squared [Angstrom2]
wl_pivot2 = np.sum(filter_res*wl*dwl)/np.sum(filter_res*dwl/wl)
# now the mean photon rate density in the filter [Jy*Hz/Angstrom/arcsec2]
f_wl = np.sum(wl*filter_res*ifu_data.T*dwl,axis=2)/np.sum(wl*filter_res*dwl)
# multiplying by wl_pivot2/speed_of_light gives [Jy/arcsec2]
# convert to maggies/arcsec2 (w/ AB zeropoint) using 1 maggy ~ 3631 Jy
# apply (1+z)**-5 redshift degradation to wavelength specific intensities
f_mgys = f_wl*wl_pivot2/speed_of_light/3631.*(1+redshift)**-5 # [maggies/arcsec2]
# convert to mag/arcsec2 surface brightness for numerical ease
with np.errstate(divide='ignore'):
f_sb = -2.5*np.log10(f_mgys) # [mag/arcsec2] AB system
# fits file output
photo_out = _outputName.format(band)
        # create output photometry file and make band-specific header updates
        header['FILTER'] = '{}'.format(band)
        header['WLEFF'] = np.around(np.sqrt(wl_pivot2), decimals=1)
        header['REDSHIFT'] = (redshift, 'Redshift')
        hdu = fits.PrimaryHDU(f_sb, header=header)
        # honour the documented 'overwrite' flag; astropy raises if the file exists and overwrite is False
        hdu.writeto(photo_out, overwrite=bool(overwrite))
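# Hedged usage sketch (not part of the original module): all file and directory names
# below are placeholders; real SKIRT/CFIS inputs are assumed to exist at those paths.
if __name__ == '__main__':
    SpecPhotCFIS(
        inputName='Inputs/skirt_ifu_total.fits',     # hypothetical SKIRT datacube
        _outputName='Outputs/photo_{}.fits',         # one output file per band
        wl_filename='Inputs/skirt_wavelengths.dat',  # hypothetical wavelength grid file
        cfis_cfg_path='cfg/CFIS/',                   # hypothetical response-curve directory
        redshift=0.05,
        bands=['g', 'r'],
        airmass=0.,
        overwrite=1,
    )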
|
#!/usr/bin/env python
"""
Summary
-------
Provides implementation of the Test Problem C Oracle for use in PyMOSO.
"""
from ..chnbase import Oracle
from math import exp, sqrt, sin
class ProbTPC(Oracle):
"""
An Oracle that simulates Test Problem C.
Attributes
----------
num_obj : int, 2
dim : int, 3
density_factor : int, 2
Parameters
----------
rng : prng.MRG32k3a object
See also
--------
chnbase.Oracle
"""
def __init__(self, rng):
self.num_obj = 2
self.dim = 3
self.density_factor = 2
super().__init__(rng)
def g(self, x, rng):
"""
Simulates one replication. PyMOSO requires that all valid
Oracles implement an Oracle.g.
Parameters
----------
x : tuple of int
rng : prng.MRG32k3a object
Returns
-------
isfeas : bool
tuple of float
simulated objective values
"""
df = self.density_factor
xr = range(-5*df, 5*df + 1)
obj1 = None
obj2 = None
isfeas = True
for xi in x:
            if xi not in xr:
isfeas = False
if isfeas:
z1 = rng.normalvariate(0, 1)
z2 = rng.normalvariate(0, 1)
z3 = rng.normalvariate(0, 1)
            # note: this "xi" is the vector of random multipliers (chi-squared(1) variates
            # with mean 1), reusing the name of the loop variable above
            xi = (z1**2, z2**2, z3**2)
x = tuple(i/df for i in x)
s = [sin(i) for i in x]
sum1 = [-10*xi[i]*exp(-0.2*sqrt(x[i]**2 + x[i+1]**2)) for i in [0, 1]]
sum2 = [xi[i]*(pow(abs(x[i]), 0.8) + 5*pow(s[i], 3)) for i in [0, 1, 2]]
obj1 = sum(sum1)
obj2 = sum(sum2)
return isfeas, (obj1, obj2)
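# Hedged usage sketch (not part of the original module; written as comments because the
# relative import above means this file only runs inside the PyMOSO package). It assumes
# PyMOSO provides pymoso.prng.mrg32k3a.MRG32k3a with a default-constructible seed.
#
#   from pymoso.prng.mrg32k3a import MRG32k3a
#   rng = MRG32k3a()
#   oracle = ProbTPC(rng)
#   isfeas, (obj1, obj2) = oracle.g((1, -2, 3), rng)   # (1, -2, 3) is feasible for density_factor=2
#   print(isfeas, obj1, obj2)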
|