'''
The MIT License (MIT)
Copyright (c) 2016 Copyleaks LTD (https://copyleaks.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from enum import IntEnum
import json
class Start:
def get_trigger(self):
'''
A list of scans that you submitted for a check-credits scan and that you would like to submit for a full scan.
'''
return self.trigger
def set_trigger(self, value):
'''
A list of scans that you submitted for a check-credits scan and that you would like to submit for a full scan.
Parameters:
value: String list.
'''
assert value
self.trigger = value
def get_error_handling(self):
'''
When set to ignore (ignore = 1), the trigger scans will start running even if some of them are in error mode; when set to cancel (cancel = 0), the request will be cancelled if any error is found.
'''
return self.errorHandling
def set_error_handling(self, value):
'''
When set to ignore (ignore = 1), the trigger scans will start running even if some of them are in error mode; when set to cancel (cancel = 0), the request will be cancelled if any error is found.
Parameters:
value: `StartErrorHandling`.
'''
assert value in StartErrorHandling
self.errorHandling = value
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
class StartErrorHandling(IntEnum):
Cancel = 0
Ignore = 1
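# A minimal usage sketch (not part of the original module); the scan ids below are
# made-up placeholders for scans previously submitted as check-credits scans.
if __name__ == '__main__':
    start = Start()
    start.set_trigger(['scan-id-1', 'scan-id-2'])
    start.set_error_handling(StartErrorHandling.Ignore)
    print(start.toJSON())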
|
class BaseClient:
pass
|
import datetime
import io
import os
import flask
import subprocess
from app.api import api
@api.route('/config/<name>', methods=['GET'])
def get_config(name: str):
"""
Reads the file with the corresponding name that was passed.
:param name: Configuration file name
:type name: str
:return: Rendered HTML document with content of the configuration file.
:rtype: str
"""
nginx_path = flask.current_app.config['MAIN_CONFIG_PATH']
with io.open(os.path.join(nginx_path, name), 'r') as f:
_file = f.read()
return flask.render_template('config.html', name=name, file=_file), 200
@api.route('/config/<name>', methods=['POST'])
def post_config(name: str):
"""
Accepts the customized configuration and saves it in the configuration file with the supplied name.
:param name: Configuration file name
:type name: str
:return:
:rtype: werkzeug.wrappers.Response
"""
content = flask.request.get_json()
nginx_path = flask.current_app.config['MAIN_CONFIG_PATH']
with io.open(os.path.join(nginx_path, name), 'w') as f:
f.write(content['file'])
return flask.make_response({'success': True}), 200
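# A hedged client-side sketch (not part of the original module), assuming the app
# listens on http://localhost:5000 with this blueprint mounted at the root; the
# file name 'nginx.conf' is a placeholder:
#
#     import requests
#     resp = requests.get('http://localhost:5000/config/nginx.conf')
#     resp = requests.post('http://localhost:5000/config/nginx.conf',
#                          json={'file': 'worker_processes auto;\n'})
#     assert resp.json() == {'success': True}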
@api.route('/env/config/<env>', methods=['GET'])
def get_env_config(env: str):
"""
Reads the main configuration file of the given environment.
:param env: Environment name
:type env: str
:return: Rendered HTML document with content of the configuration file.
:rtype: str
"""
nginx_path = flask.current_app.config['NGINX_PATH']
main_config_dir = flask.current_app.config['MAIN_CONFIG_DIR']
main_config_name = flask.current_app.config['MAIN_CONFIG_NAME']
main_config_path = os.path.join(nginx_path, env, main_config_dir, main_config_name)
with io.open(main_config_path, 'r') as f:
_file = f.read()
return flask.render_template('config.html', env=env, name=main_config_name, file=_file), 200
@api.route('/env/config/<name>', methods=['POST'])
def post_env_config(name: str):
"""
Accepts the customized configuration and saves it in the corresponding environment's configuration file.
:param name: The environment and configuration file name, joined with a hyphen
:type name: str
:return:
:rtype: werkzeug.wrappers.Response
"""
content = flask.request.get_json()
nginx_path = flask.current_app.config['NGINX_PATH']
main_config_dir = flask.current_app.config['MAIN_CONFIG_DIR']
# main_config_name = flask.current_app.config['MAIN_CONFIG_NAME']
env, filename = name.split('-', maxsplit=1)
main_config_path = os.path.join(nginx_path, env, main_config_dir, filename)
with io.open(main_config_path, 'w') as f:
f.write(content['file'])
return flask.make_response({'success': True}), 200
@api.route('/sync/<env>', methods=['POST'])
def sync_conf(env: str):
"""
Syncs the domain configs to the corresponding nginx host.
:param env: environment name
:type env: str
"""
nginx_host = flask.current_app.config['NGINX_HOST_MAP'].get(env)
local_env_dir = os.path.join(flask.current_app.config['NGINX_PATH'], env,
flask.current_app.config['DOMAIN_CONFIG_DIR'])
remote_domain_dir = flask.current_app.config['REMOTE_NGINX_DOMAIN_DIR']
sync_cmd = f"scp {local_env_dir}/*.conf stops@{nginx_host}:{remote_domain_dir}"
# print(sync_cmd)
ret = subprocess.run(sync_cmd, shell=True, stderr=subprocess.PIPE)
if ret.returncode == 0:
return flask.jsonify({'success': True}), 200
else:
return flask.jsonify({'success': False}), 400
@api.route('/reload/<env>', methods=['POST'])
def reload_nginx(env: str):
"""
:param env: environment name
:type env: str
"""
nginx_sbin_path = flask.current_app.config['NGINX_SBIN']
nginx_sbin = os.path.join(nginx_sbin_path, 'nginx')
nginx_host = flask.current_app.config['NGINX_HOST_MAP'].get(env)
check_cmd = f"ssh stops@{nginx_host} 'sudo {nginx_sbin} -t'"
reload_cmd = f"ssh stops@{nginx_host} 'sudo {nginx_sbin} -s reload'"
# ret = subprocess.run(check_cmd, shell=True, capture_output=True)  # capture_output requires Python 3.7+
print(nginx_host, check_cmd)
# Python 3.6 does not support capture_output
ret = subprocess.run(check_cmd, shell=True, stderr=subprocess.PIPE)
if ret.returncode == 0:
subprocess.run(reload_cmd, shell=True)
return flask.jsonify({'success': True}), 200
else:
return flask.jsonify({'success': False}), 400
@api.route('/domains', methods=['GET'])
def get_domains():
"""
Reads all files from the configuration file directory and checks the state of the site configuration.
:return: Rendered HTML document with the domains
:rtype: str
"""
config_path = flask.current_app.config['DOMAIN_CONFIG_PATH']
sites_available = []
sites_enabled = []
for _ in os.listdir(config_path):
if os.path.isfile(os.path.join(config_path, _)):
domain, state = _.rsplit('.', 1)
if state == 'conf':
time = datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(config_path, _)))
sites_available.append({
'name': domain,
'time': time
})
sites_enabled.append(domain)
elif state == 'disabled':
time = datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(config_path, _)))
sites_available.append({
'name': domain.rsplit('.', 1)[0],
'time': time
})
# sort sites by name
sites_available = sorted(sites_available, key=lambda _: _['name'])
return flask.render_template('domains.html', sites_available=sites_available, sites_enabled=sites_enabled), 200
@api.route('/env/domains/<env>', methods=['GET'])
def get_env_domains(env: str):
"""
Reads all files from the environment's configuration file directory and checks the state of the site configuration.
:param env: Environment name
:type env: str
:return: Rendered HTML document with the domains
:rtype: str
"""
nginx_path = flask.current_app.config['NGINX_PATH']
domain_dir = flask.current_app.config['DOMAIN_CONFIG_DIR']
config_path = os.path.join(nginx_path, env, domain_dir)
sites_available = []
sites_enabled = []
for _ in os.listdir(config_path):
if os.path.isfile(os.path.join(config_path, _)):
domain, state = _.rsplit('.', 1)
if state == 'conf':
time = datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(config_path, _)))
sites_available.append({
'name': domain,
'time': time
})
sites_enabled.append(domain)
elif state == 'disabled':
time = datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(config_path, _)))
sites_available.append({
'name': domain.rsplit('.', 1)[0],
'time': time
})
# sort sites by name
sites_available = sorted(sites_available, key=lambda _: _['name'])
return flask.render_template('domains.html', env=env, sites_available=sites_available, sites_enabled=sites_enabled), 200
@api.route('/domain/<name>', methods=['GET'])
def get_domain(name: str):
"""
Takes the name of the domain configuration file and
returns a rendered HTML with the current configuration of the domain.
:param name: The domain name that corresponds to the name of the file.
:type name: str
:return: Rendered HTML document with the domain
:rtype: str
"""
config_path = flask.current_app.config['NGINX_PATH']
_file = ''
enabled = True
for _ in os.listdir(config_path):
if os.path.isfile(os.path.join(config_path, _)):
if _.startswith(name):
domain, state = _.rsplit('.', 1)
if state == 'disabled':
enabled = False
with io.open(os.path.join(config_path, _), 'r') as f:
_file = f.read()
break
return flask.render_template('domain.html', name=name, file=_file, enabled=enabled), 200
@api.route('/domain/<name>', methods=['POST'])
def post_domain(name: str):
"""
Creates the configuration file of the domain.
:param name: The domain name that corresponds to the name of the file.
:type name: str
:return: Returns a status about the success or failure of the action.
"""
config_path = flask.current_app.config['DOMAIN_CONFIG_PATH']
new_domain = flask.render_template('new_domain.j2', name=name)
name = name + '.conf.disabled'
try:
with io.open(os.path.join(config_path, name), 'w') as f:
f.write(new_domain)
response = flask.jsonify({'success': True}), 201
except Exception as ex:
response = flask.jsonify({'success': False, 'error_msg': str(ex)}), 500
return response
@api.route('/domain/<name>', methods=['DELETE'])
def delete_domain(name: str):
"""
Deletes the configuration file of the corresponding domain.
:param name: The domain name that corresponds to the name of the file.
:type name: str
:return: Returns a status about the success or failure of the action.
"""
config_path = flask.current_app.config['DOMAIN_CONFIG_PATH']
removed = False
for _ in os.listdir(config_path):
if os.path.isfile(os.path.join(config_path, _)):
if _.startswith(name):
remove_app_path = os.path.join(config_path, _)
os.rename(remove_app_path, remove_app_path + ".bak")
removed = not os.path.exists(remove_app_path)
break
if removed:
return flask.jsonify({'success': True}), 200
else:
return flask.jsonify({'success': False}), 400
@api.route('/domain/<name>', methods=['PUT'])
def put_domain(name: str):
"""
Updates the configuration file with the corresponding domain name.
:param name: The domain name that corresponds to the name of the file.
:type name: str
:return: Returns a status about the success or failure of the action.
"""
content = flask.request.get_json()
config_path = flask.current_app.config['DOMAIN_CONFIG_PATH']
for _ in os.listdir(config_path):
if os.path.isfile(os.path.join(config_path, _)):
if _.startswith(name):
with io.open(os.path.join(config_path, _), 'w') as f:
f.write(content['file'])
return flask.make_response({'success': True}), 200
@api.route('/domain/<name>/enable', methods=['POST'])
def enable_domain(name: str):
"""
Activates the domain in Nginx so that the configuration is applied.
:param name: The domain name that corresponds to the name of the file.
:type name: str
:return: Returns a status about the success or failure of the action.
"""
content = flask.request.get_json()
config_path = flask.current_app.config['DOMAIN_CONFIG_PATH']
for _ in os.listdir(config_path):
if os.path.isfile(os.path.join(config_path, _)):
if _.startswith(name):
if content['enable']:
new_filename, disable = _.rsplit('.', 1)
os.rename(os.path.join(config_path, _), os.path.join(config_path, new_filename))
else:
os.rename(os.path.join(config_path, _), os.path.join(config_path, _ + '.disabled'))
return flask.make_response({'success': True}), 200
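# A hedged client-side sketch (not part of the original module) showing the JSON
# payload this endpoint expects; the host and domain name are placeholders:
#
#     import requests
#     requests.post('http://localhost:5000/domain/example.com/enable',
#                   json={'enable': True})   # strips the trailing '.disabled'
#     requests.post('http://localhost:5000/domain/example.com/enable',
#                   json={'enable': False})  # renames the file to '<name>.disabled'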
@api.route('/env/domain/<name>', methods=['GET'])
def get_env_domain(name: str):
"""
Takes the name of the domain configuration file and
returns a rendered HTML with the current configuration of the domain.
:param name: The environment and domain name, joined with a hyphen.
:type name: str
:return: Rendered HTML document with the domain
:rtype: str
"""
nginx_path = flask.current_app.config['NGINX_PATH']
domain_dir = flask.current_app.config['DOMAIN_CONFIG_DIR']
env, filename = name.split('-', maxsplit=1)
config_path = os.path.join(nginx_path, env, domain_dir)
_file = ''
enabled = True
for _ in os.listdir(config_path):
if os.path.isfile(os.path.join(config_path, _)):
if _.startswith(filename):
domain, state = _.rsplit('.', 1)
if state == 'disabled':
enabled = False
with io.open(os.path.join(config_path, _), 'r') as f:
_file = f.read()
break
return flask.render_template('domain.html', name=filename, file=_file, enabled=enabled), 200
@api.route('/env/domain/<name>', methods=['POST'])
def post_env_domain(name: str):
"""
Creates the configuration file of the domain.
:param name: The environment and domain name, joined with a hyphen.
:type name: str
:return: Returns a status about the success or failure of the action.
"""
# config_path = flask.current_app.config['DOMAIN_CONFIG_PATH']
nginx_path = flask.current_app.config['NGINX_PATH']
domain_dir = flask.current_app.config['DOMAIN_CONFIG_DIR']
env, filename = name.split('-', maxsplit=1)
config_path = os.path.join(nginx_path, env, domain_dir)
new_domain = flask.render_template('new_domain.j2', name=filename)
filename = filename + '.conf.disabled'
try:
with io.open(os.path.join(config_path, filename), 'w') as f:
f.write(new_domain)
response = flask.jsonify({'success': True}), 201
except Exception as ex:
response = flask.jsonify({'success': False, 'error_msg': str(ex)}), 500
return response
@api.route('/env/domain/<name>', methods=['DELETE'])
def delete_env_domain(name: str):
"""
Deletes the configuration file of the corresponding domain.
:param name: The environment and domain name, joined with a hyphen.
:type name: str
:return: Returns a status about the success or failure of the action.
"""
# config_path = flask.current_app.config['DOMAIN_CONFIG_PATH']
nginx_path = flask.current_app.config['NGINX_PATH']
domain_dir = flask.current_app.config['DOMAIN_CONFIG_DIR']
env, filename = name.split('-', maxsplit=1)
config_path = os.path.join(nginx_path, env, domain_dir)
removed = False
for _ in os.listdir(config_path):
if os.path.isfile(os.path.join(config_path, _)):
if _.startswith(filename):
remove_app_path = os.path.join(config_path, _)
os.rename(remove_app_path, remove_app_path + ".bak")
removed = not os.path.exists(remove_app_path)
break
if removed:
return flask.jsonify({'success': True}), 200
else:
return flask.jsonify({'success': False}), 400
@api.route('/env/domain/<name>', methods=['PUT'])
def put_env_domain(name: str):
"""
Updates the configuration file with the corresponding domain name.
:param name: The environment and domain name, joined with a hyphen.
:type name: str
:return: Returns a status about the success or failure of the action.
"""
content = flask.request.get_json()
# config_path = flask.current_app.config['DOMAIN_CONFIG_PATH']
nginx_path = flask.current_app.config['NGINX_PATH']
domain_dir = flask.current_app.config['DOMAIN_CONFIG_DIR']
env, filename = name.split('-', maxsplit=1)
config_path = os.path.join(nginx_path, env, domain_dir)
for _ in os.listdir(config_path):
if os.path.isfile(os.path.join(config_path, _)):
if _.startswith(filename):
with io.open(os.path.join(config_path, _), 'w') as f:
f.write(content['file'])
return flask.make_response({'success': True}), 200
@api.route('/env/domain/<name>/enable', methods=['POST'])
def enable_env_domain(name: str):
"""
Activates the domain in Nginx so that the configuration is applied.
:param name: The environment and domain name, joined with a hyphen.
:type name: str
:return: Returns a status about the success or failure of the action.
"""
content = flask.request.get_json()
# config_path = flask.current_app.config['DOMAIN_CONFIG_PATH']
nginx_path = flask.current_app.config['NGINX_PATH']
domain_dir = flask.current_app.config['DOMAIN_CONFIG_DIR']
env, filename = name.split('-', maxsplit=1)
config_path = os.path.join(nginx_path, env, domain_dir)
for _ in os.listdir(config_path):
if os.path.isfile(os.path.join(config_path, _)):
if _.startswith(filename):
if content['enable']:
new_filename, disable = _.rsplit('.', 1)
os.rename(os.path.join(config_path, _), os.path.join(config_path, new_filename))
else:
os.rename(os.path.join(config_path, _), os.path.join(config_path, _ + '.disabled'))
return flask.make_response({'success': True}), 200
|
import requests
from stable_baselines.common.env_checker import check_env
from stable_baselines.common.evaluation import evaluate_policy
from io import BytesIO
import PIL.Image
from IPython.display import display,clear_output,HTML
from IPython.display import Image as DisplayImage
import base64
import json
from io import StringIO
import ipywidgets as widgets
import sys
from plark_game import classes
import time
import imageio
import numpy as np
import matplotlib.pyplot as plt
import io
import os, sys
import helper
import logging
from gym_plark.envs import plark_env,plark_env_guided_reward,plark_env_top_left
import datetime
from stable_baselines import DQN, PPO2, A2C, ACKTR
from stable_baselines.bench import Monitor
from stable_baselines.common.vec_env import DummyVecEnv
%matplotlib inline
%load_ext autoreload
%autoreload 2
display(HTML(data="""
<style>
div#notebook-container { width: 95%; }
div#menubar-container { width: 65%; }
div#maintoolbar-container { width: 99%; }
</style>
"""))
def retrain(mean_reward, target_reward, count, env, model):
# Recursively keeps training and re-evaluating until the evaluated mean reward reaches the target threshold.
model.set_env(env)
if mean_reward < target_reward:
count = count + 1
model.learn(50)
mean_reward, n_steps = evaluate_policy(model, env, n_eval_episodes=1, deterministic=False, render=False, callback=None, reward_threshold=None, return_episode_rewards=False)
if mean_reward < target_reward:
retrain(mean_reward, target_reward, count, env, model)
return True
if __name__ == "__main__":
## The basepath of where the models are to be stored.
basepath = '/data/agents/models'
## Define the game configuration file names to be used in the multi-stage training approach.
very_easy_config = "/Components/plark-game/plark_game/game_config/10x10/pelican_very_easy.json"
easy_config = "/Components/plark-game/plark_game/game_config/10x10/pelican_easy.json"
medium_config = "/Components/plark-game/plark_game/game_config/10x10/pelican_medium.json"
## Define the logging level
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
## Define the player type we are training.
modelplayer = "PELICAN"
## Define the type of RL algorithm you are using.
modeltype = "DQN"
## Format used to generate the date stamp.
basicdate = str(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
## Stage thresholds; these are based on mean reward values which are calculated when evaluating the model.
## These will be switched out if using a custom evaluation process.
stage_one_threshold = 5
stage_two_threshold = 7
stage_three_threshold = 10
print("Stage 1 Training Started")
env = plark_env_guided_reward.PlarkEnvGuidedReward(config_file_path=very_easy_config)
model = DQN('CnnPolicy', env)
model.learn(50)
logger.info('STARTING STAGE 1 INITIAL EVALUATION')
stg1_mean_reward, n_steps = evaluate_policy(model, env, n_eval_episodes=1, deterministic=False, render=False, callback=None, reward_threshold=None, return_episode_rewards=False)
logger.info('FINISHING STAGE 1 INITIAL EVALUATION')
stage1result = retrain(stg1_mean_reward, stage_one_threshold, 0 ,env, model)
logger.info("Stage One Threshold Met")
if stage1result == True:
logger.info("Stage 2 Training Started")
env = plark_env_guided_reward.PlarkEnvGuidedReward(config_file_path=easy_config)
model.set_env(env)
model.learn(50)
logger.info('STARTING STAGE 2 INITIAL EVALUATION')
stg2_mean_reward, n_steps = evaluate_policy(model, env, n_eval_episodes=1, deterministic=False, render=False, callback=None, reward_threshold=None, return_episode_rewards=False)
logger.info('FINISHING STAGE 2 INITIAL EVALUATION')
stage2result = retrain(stg2_mean_reward, stage_two_threshold, 0 ,env, model)
logger.info("Stage Two Threshold Met")
if stage2result == True:
logger.info("Stage 3 Training Started")
env = plark_env_guided_reward.PlarkEnvGuidedReward(config_file_path=medium_config)
model.set_env(env)
model.learn(50)
logger.info('STARTING STAGE 3 EVALUATION')
stg3_mean_reward, n_steps = evaluate_policy(model, env, n_eval_episodes=1, deterministic=False, render=False, callback=None, reward_threshold=None, return_episode_rewards=False)
logger.info('FINISHED STAGE 3 EVALUATION')
stage3result = retrain(stg3_mean_reward, stage_three_threshold, 0 ,env, model)
if stage3result == True:
logger.info("Stage Three Threshold Met")
logger.info("Multi-Stage-Training-Complete")
model_path,model_dir, modellabel = helper.save_model_with_env_settings(basepath,model,modeltype,env,basicdate)
|
import pandas
from .constants import DOUBLE, INTEGER, STRING
from .lib import ffi, lib
def encode_odb(df: pandas.DataFrame, f, types: dict = None, rows_per_frame=10000, properties=None, **kwargs):
"""
Encode a pandas dataframe into ODB2 format
:param df: The dataframe to encode
:param f: The file-like object into which to encode the ODB2 data
:param types: An optional (sparse) dictionary. Each key-value pair maps the name of a column to
encode to an ODB2 data type to use to encode it.
:param rows_per_frame: The maximum number of rows to encode per frame. If this number is exceeded,
a sequence of frames will be encoded
:param properties: An optional dictionary of key/value properties to encode into the frame headers
:param kwargs: Accept extra arguments that may be used by the python pyodc encoder.
:return:
"""
if isinstance(f, str):
with open(f, "wb") as freal:
return encode_odb(df, freal, types=types, rows_per_frame=rows_per_frame, properties=properties, **kwargs)
# Some constants that are useful
pmissing_integer = ffi.new("long*")
pmissing_double = ffi.new("double*")
lib.odc_missing_integer(pmissing_integer)
lib.odc_missing_double(pmissing_double)
missing_integer = pmissing_integer[0]
missing_double = pmissing_double[0]
def infer_column_type(arr, override_type):
"""
Given a column of data, infer the encoding type.
:param arr: The column of data to encode
:param override_type:
:return: (return_arr, dtype)
- return_arr is the column of data to encode. This may be of a different internal type/contents
to that supplied to the function, but it will normally not be.
- The ODB2 type to encode with.
"""
return_arr = arr
dtype = override_type
if dtype is None:
if arr.dtype in ("uint64", "int64"):
dtype = INTEGER
elif arr.dtype == "float64":
if not arr.isnull().all() and all(pandas.isnull(v) or float(v).is_integer() for v in arr):
dtype = INTEGER
return_arr = arr.fillna(value=missing_integer).astype("int64")
else:
dtype = DOUBLE
return_arr = arr.fillna(value=missing_double)
elif arr.dtype == "object":
if not arr.isnull().all() and all(s is None or isinstance(s, str) for s in arr):
dtype = STRING
elif arr.isnull().all():
dtype = INTEGER
if arr.dtype == "object":
# Map strings into an array that can be read in C
if dtype == STRING:
return_arr = return_arr.astype("|S{}".format(max(8, 8 * (1 + ((max(len(s) for s in arr) - 1) // 8)))))
elif dtype == INTEGER:
return_arr = return_arr.fillna(value=missing_integer).astype("int64")
if dtype is None:
raise ValueError("Unsupported value type: {}".format(arr.dtype))
return return_arr, dtype
nrows = df.shape[0]
if types is None:
types = {}
encoder = ffi.new("odc_encoder_t**")
lib.odc_new_encoder(encoder)
encoder = ffi.gc(encoder[0], lib.odc_free_encoder)
for k, v in (properties or {}).items():
lib.odc_encoder_add_property(encoder, k.encode("utf-8"), v.encode("utf-8"))
lib.odc_encoder_set_row_count(encoder, nrows)
lib.odc_encoder_set_rows_per_frame(encoder, rows_per_frame)
# We store all of the numpy arrays here. Mostly this is just another reference to an
# existing array, but some of the types require us to create a new (casted) copy, so
# we need to put it somewhere to ensure it stays alive appropriately long.
data_cache = []
for i, (name, data) in enumerate(df.items()):
data, dtype = infer_column_type(data, types.get(name, None))
data_cache.append(data)
lib.odc_encoder_add_column(encoder, name.encode("utf-8"), dtype)
lib.odc_encoder_column_set_data_array(
encoder,
i,
data.dtype.itemsize,
data.array.to_numpy().strides[0],
ffi.cast("void*", data.values.ctypes.data),
)
lib.odc_encode_to_file_descriptor(encoder, f.fileno(), ffi.NULL)
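# A minimal usage sketch (not part of the original module), assuming this file lives
# inside the pyodc package so the relative imports above resolve; the column names
# and output path are placeholders:
#
#     import pandas as pd
#     df = pd.DataFrame({'station': ['ABC', 'DEF'], 'obsvalue': [273.15, 274.2]})
#     # Types are inferred per column unless overridden via the sparse `types` dict.
#     encode_odb(df, 'observations.odb', types={'station': STRING}, rows_per_frame=10000)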
|
# -*- coding: utf-8 -*-
# from .function import coupled_logarithm as log, \
# coupled_exponential as exp, \
# coupled_entropy as entropy, \
# coupled_cross_entropy as cross_entropy, \
# coupled_kl_divergence as kl_divergence, \
# tsallis_entropy, shannon_entropy
|
# Generated by Django 3.1.7 on 2021-03-17 01:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('chat', '0005_auto_20210316_2206'),
]
operations = [
migrations.AlterField(
model_name='user',
name='friends',
field=models.ManyToManyField(blank=True, related_name='friends_set', to='chat.User'),
),
migrations.CreateModel(
name='UnreadMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='unread', to='chat.message')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='unread', to='chat.user')),
],
options={
'db_table': 'unread_message',
},
),
]
|
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from orquesta import exceptions as exc
from orquesta.expressions.functions import workflow as funcs
class ItemFunctionTest(unittest.TestCase):
def test_missing_current_item(self):
self.assertRaises(exc.ExpressionEvaluationException, funcs.item_, {})
def test_item_is_null(self):
context = {'__current_item': None}
self.assertIsNone(funcs.item_(context))
def test_item(self):
context = {'__current_item': 'foobar'}
self.assertEqual(funcs.item_(context), 'foobar')
def test_item_is_dict(self):
context = {'__current_item': {'foo': 'bar'}}
self.assertDictEqual(funcs.item_(context), {'foo': 'bar'})
def test_item_is_not_dict(self):
context = {'__current_item': 'foobar'}
self.assertRaises(exc.ExpressionEvaluationException, funcs.item_, context, key='foo')
def test_item_key(self):
context = {'__current_item': {'foo': 'bar'}}
self.assertEqual(funcs.item_(context, key='foo'), 'bar')
def test_item_bad_key(self):
context = {'__current_item': {'foo': 'bar'}}
self.assertRaises(exc.ExpressionEvaluationException, funcs.item_, context, key='bar')
|
import os
import numpy as np
from skimage.measure import compare_ssim as ssim
from sklearn.metrics import mean_squared_error
import matplotlib as mpl
mpl.use('Agg')
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from keras.optimizers import Adam
from keras import backend as K
from keras.layers import Input, Lambda, concatenate
from keras.models import Model
import matplotlib.pyplot as plt
from utils.MotionCorrection.network_block import encode, encode_shared, decode
from utils.MotionCorrection.customLoss import *
from utils.Unpatching import *
from utils.MotionCorrection.plot import *
def createModel(patchSize, dHyper):
# input corrupted and non-corrupted image
x_ref = Input(shape=(1, patchSize[0], patchSize[1], patchSize[2]))
x_art = Input(shape=(1, patchSize[0], patchSize[1], patchSize[2]))
# create respective encoders
encoded_ref = encode(x_ref, patchSize)
encoded_art = encode(x_art, patchSize)
# concatenate the encoded features together
combined = concatenate([encoded_ref, encoded_art], axis=0)
# create the shared encoder
z, z_mean, z_log_var = encode_shared(combined, patchSize)
# create the decoder
decoded = decode(z, patchSize, dHyper['dropout'])
# separate the concatenated images
decoded_ref2ref = Lambda(lambda input: input[:input.shape[0]//2, :, :, :, :], output_shape=(1, patchSize[0], patchSize[1], patchSize[2]))(decoded)
decoded_art2ref = Lambda(lambda input: input[input.shape[0]//2:, :, :, :, :], output_shape=(1, patchSize[0], patchSize[1], patchSize[2]))(decoded)
# generate the VAE and encoder model
vae = Model([x_ref, x_art], [decoded_ref2ref, decoded_art2ref])
# compute kl loss
loss_kl = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
vae.add_loss(dHyper['kl_weight'] * K.mean(loss_kl))
# compute pixel to pixel loss
loss_ref2ref, loss_art2ref = compute_mse_loss(dHyper, x_ref, decoded_ref2ref, decoded_art2ref)
vae.add_loss(dHyper['mse_weight'] * (dHyper['loss_ref2ref']*loss_ref2ref + dHyper['loss_art2ref']*loss_art2ref))
# add perceptual loss
perceptual_loss_ref2ref, perceptual_loss_art2ref = compute_perceptual_loss(x_ref, decoded_ref2ref, decoded_art2ref, patchSize, dHyper['pl_network'], dHyper['loss_model'])
vae.add_loss(dHyper['perceptual_weight'] * (dHyper['loss_ref2ref']*perceptual_loss_ref2ref + dHyper['loss_art2ref']*perceptual_loss_art2ref))
return vae
def fTrain(dData, dParam, dHyper):
# parse inputs
batchSize = [128] if dParam['batchSize'] is None else dParam['batchSize']
learningRate = [0.001] if dParam['learningRate'] is None else dParam['learningRate']
epochs = 300 if dParam['epochs'] is None else dParam['epochs']
for iBatch in batchSize:
for iLearn in learningRate:
fTrainInner(dData, dParam['sOutPath'], dParam['patchSize'], epochs, iBatch, iLearn, dHyper)
def fTrainInner(dData, sOutPath, patchSize, epochs, batchSize, lr, dHyper):
train_ref = dData['train_ref']
train_art = dData['train_art']
test_ref = dData['test_ref']
test_art = dData['test_art']
train_ref = np.expand_dims(train_ref, axis=1)
train_art = np.expand_dims(train_art, axis=1)
test_ref = np.expand_dims(test_ref, axis=1)
test_art = np.expand_dims(test_art, axis=1)
vae = createModel(patchSize, dHyper)
vae.compile(optimizer=Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0), loss=None)
vae.summary()
print('Training with epochs {} batch size {} learning rate {}'.format(epochs, batchSize, lr))
weights_file = sOutPath + os.sep + 'vae_weight_ps_{}_bs_{}_lr_{}_{}.h5'.format(patchSize[0], batchSize, lr, dHyper['test_patient'])
lossPlot_file = weights_file[:-3] + '.png'
plotLoss = PlotLosses(lossPlot_file)
callback_list = []
# callback_list = [EarlyStopping(monitor='val_loss', patience=5, verbose=1)]
callback_list.append(ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=1, min_lr=0, verbose=1))
callback_list.append(ModelCheckpoint(weights_file, monitor='val_loss', verbose=1, period=1, save_best_only=True, save_weights_only=True))
callback_list.append(plotLoss)
history = vae.fit([train_ref, train_art],
shuffle=True,
epochs=epochs,
batch_size=batchSize,
validation_data=([test_ref, test_art], None),
verbose=1,
callbacks=callback_list)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig(weights_file[:-3] + '.png')
def fPredict(test_ref, test_art, dParam, dHyper):
weights_file = dParam['sOutPath'] + os.sep + '{}.h5'.format(dHyper['bestModel'])
patchSize = dParam['patchSize']
vae = createModel(patchSize, dHyper)
vae.compile(optimizer='adam', loss=None)
vae.load_weights(weights_file)
test_ref = np.expand_dims(test_ref, axis=1)
test_art = np.expand_dims(test_art, axis=1)
predict_ref, predict_art = vae.predict([test_ref, test_art], dParam['batchSize'][0], verbose=1)
test_ref = np.squeeze(test_ref, axis=1)
test_art = np.squeeze(test_art, axis=1)
predict_art = np.squeeze(predict_art, axis=1)
if dHyper['unpatch']:
test_ref = fRigidUnpatchingCorrection3D(dHyper['actualSize'], test_ref, dParam['patchOverlap'])
test_art = fRigidUnpatchingCorrection3D(dHyper['actualSize'], test_art, dParam['patchOverlap'])
predict_art = fRigidUnpatchingCorrection3D(dHyper['actualSize'], predict_art, dParam['patchOverlap'], mode='average')
if dHyper['evaluate']:
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(10, 5), sharex=True, sharey=True)
ax = axes.ravel()
plt.gray()
label = 'MSE: {:.2f}, SSIM: {:.2f}'
for i in range(test_ref.shape[0]):
ax[0].imshow(test_ref[i])
ax[0].set_xlabel(label.format(mean_squared_error(test_ref[i], test_ref[i]), ssim(test_ref[i], test_ref[i], data_range=(test_ref[i].max() - test_ref[i].min()))))
ax[0].set_title('reference image')
ax[1].imshow(test_art[i])
ax[1].set_xlabel(label.format(mean_squared_error(test_ref[i], test_art[i]), ssim(test_ref[i], test_art[i], data_range=(test_art[i].max() - test_art[i].min()))))
ax[1].set_title('motion-affected image')
ax[2].imshow(predict_art[i])
ax[2].set_xlabel(label.format(mean_squared_error(test_ref[i], predict_art[i]), ssim(test_ref[i], predict_art[i], data_range=(predict_art[i].max() - predict_art[i].min()))))
ax[2].set_title('corrected image')
if dParam['lSave']:
plt.savefig(dParam['sOutPath'] + os.sep + 'result' + os.sep + str(i) + '.png')
else:
plt.show()
else:
plt.figure()
plt.gray()
for i in range(predict_art.shape[0]):
plt.imshow(predict_art[i])
if dParam['lSave']:
plt.savefig(dParam['sOutPath'] + os.sep + 'result' + os.sep + str(i) + '.png')
else:
plt.show()
else:
nPatch = predict_art.shape[0]
for i in range(nPatch//4):
fig, axes = plt.subplots(nrows=4, ncols=2)
plt.gray()
cols_title = ['original_art', 'predicted_art']
for ax, col in zip(axes[0], cols_title):
ax.set_title(col)
for j in range(4):
axes[j, 0].imshow(test_art[4*i+j])
axes[j, 1].imshow(predict_art[4*i+j])
if dParam['lSave']:
plt.savefig(dParam['sOutPath'] + os.sep + 'result' + os.sep + str(i) + '.png')
else:
plt.show()
|
import os
"""
Basic config
"""
APP_ID = "seckill-order"
HOST = os.environ.get("SERVER_HOST", None)
PORT = int(os.environ.get("SERVER_PORT", 8505))
TRACE_ALL = os.environ.get("TRACE_ALL", False)
JAEGER_HOST = os.environ.get("JAEGER_HOST", "localhost")
DB_CONFIG = {
"host": os.environ.get("MYSQL_SERVICE_HOST", "localhost"),
"user": os.environ.get("MYSQL_SERVICE_USER", "mysql"),
"password": os.environ.get("MYSQL_SERVICE_PASSWORD", None),
"port": int(os.environ.get("MYSQL_SERVICE_PORT", 3306)),
"database": os.environ.get("MYSQL_SERVICE_DB_NAME", "seckill_order"),
}
SWAGGER = {
"version": "1.0.0",
"title": "SECKILL ORDER API",
"description": "SECKILL ORDER SERVICE API BASED ON SANIC",
"terms_of_service": "Use with caution!",
"termsOfService": ["application/json"],
"contact_email": "shady@camfire.com",
}
ACCESS_CONTROL_ALLOW_ORIGIN = os.environ.get("ACCESS_CONTROL_ALLOW_ORIGIN", "")
ACCESS_CONTROL_ALLOW_HEADERS = os.environ.get("ACCESS_CONTROL_ALLOW_HEADERS", "")
ACCESS_CONTROL_ALLOW_METHODS = os.environ.get("ACCESS_CONTROL_ALLOW_METHODS", "")
CONSUL_AGENT_HOST = os.environ.get("CONSUL_AGENT_HOST", "127.0.0.1")
CONSUL_AGENT_PORT = os.environ.get("CONSUL_AGENT_PORT", 8500)
|
from app.api import blunder, comment, feedback, glob, session, user, pack
|
from keycloak import KeycloakOpenID
from pm4pyws.user_iam.interface.user_management import UserManagement
import traceback
class KeycloakUserManagement(UserManagement):
def __init__(self, ex, parameters=None):
if parameters is None:
parameters = {}
self.ex = ex
self.server_url = parameters["server_url"]
self.client_id = parameters["client_id"]
self.realm_name = parameters["realm_name"]
self.client_secret_key = parameters["client_secret_key"]
self.keycloak_manager = KeycloakOpenID(server_url=self.server_url, client_id=self.client_id,
realm_name=self.realm_name, client_secret_key=self.client_secret_key)
UserManagement.__init__(self, ex)
def do_login(self, user, password):
"""
Logs in a user and returns a session id
Parameters
------------
user
Username
password
Password
Returns
------------
session_id
Session ID
"""
try:
token = self.keycloak_manager.token(user, password)
return token['access_token']
except:
# traceback.print_exc()
pass
return None
def check_session_validity(self, session_id):
"""
Checks the validity of a session
Parameters
------------
session_id
Session ID
Returns
------------
boolean
Boolean value
"""
validity = False
try:
if not (str(session_id) == "null"):
userinfo = self.keycloak_manager.userinfo(session_id)
if type(userinfo["preferred_username"]) is str:
if userinfo["preferred_username"]:
validity = True
except:
# traceback.print_exc()
pass
return validity
def get_user_from_session(self, session_id):
"""
Gets the user from the session
Parameters
------------
session_id
Session ID
Returns
------------
user
User ID
"""
user = None
try:
if not (str(session_id) == "null"):
userinfo = self.keycloak_manager.userinfo(session_id)
if type(userinfo["preferred_username"]) is str:
if userinfo["preferred_username"]:
user = userinfo["preferred_username"]
except:
# traceback.print_exc()
pass
return user
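# A hypothetical usage sketch (not part of the original module); the server URL,
# realm, client id and secret are placeholders, and `ex` is whatever exchange object
# the surrounding pm4pyws framework normally passes in:
#
#     parameters = {
#         'server_url': 'https://keycloak.example.com/auth/',
#         'client_id': 'pm4pyws',
#         'realm_name': 'master',
#         'client_secret_key': '<client-secret>',
#     }
#     manager = KeycloakUserManagement(ex, parameters=parameters)
#     session_id = manager.do_login('alice', 'password')
#     if manager.check_session_validity(session_id):
#         print(manager.get_user_from_session(session_id))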
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask.ext.login import current_user, logout_user
from flask.ext.restful import Resource
# End the Flask-Logins session
class Logout(Resource):
def __init__(self):
super(Logout, self).__init__()
def get(self):
if not current_user.is_authenticated():
return "Must be logged in to log out", 200
logout_user()
return "Logged Out", 200
|
from django.apps import AppConfig
class WyrUsersConfig(AppConfig):
name = 'WYR_Users'
|
def ler_numeros():
numeros = [] # numeros = [ 4.0, 5.0 ]
while True:
numero = input('Digite um número ou aperte enter para finalizar: ')
if numero == '':
break
else:
numero = float(numero)
numeros.append(numero)
return numeros
numeros = ler_numeros()
quantidade_de_numeros = len(numeros) # quantidade_de_numeros = 2
print(f'Quantidade de numeros inseridos: {quantidade_de_numeros}')
if quantidade_de_numeros == 0:
print('Não é possível calcular média e máximo pois não foi inserido nenhum valor')
else:
candidato_a_maximo = numeros[0] # candidato_a_maximo = 2
for n in numeros: # n = 1
if candidato_a_maximo < n: # [1, 2, 1]
candidato_a_maximo = n
print(f'Valor máximo inserido foi {candidato_a_maximo}')
media = sum(numeros) / quantidade_de_numeros # 9 / 2 = 4.5
print(f'A média dos valores inserido é {media}')
|
import sys
sys.path.append('/Users/phanquochuy/Projects/minimind/prototypes/george/build/lib.macosx-10.10-x86_64-2.7')
import numpy as np
import george
from george.kernels import ExpSquaredKernel
# Generate some fake noisy data.
x = 10 * np.sort(np.random.rand(10))
yerr = 0.2 * np.ones_like(x)
y = np.sin(x) + yerr * np.random.randn(len(x))
# Set up the Gaussian process.
kernel = ExpSquaredKernel(1.0)
gp = george.GP(kernel)
# Pre-compute the factorization of the matrix.
gp.compute(x, yerr)
gp.optimize(x, y, verbose=True)
# Compute the log likelihood.
print(gp.lnlikelihood(y))
t = np.linspace(0, 10, 500)
mu, cov = gp.predict(y, t)
std = np.sqrt(np.diag(cov))
|
import discord
from discord.ext import commands
class AntiExe(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, ctx):
x = ctx.attachments
for i in x:
if i.content_type == "application/x-msdos-program" or i.content_type == "application/rar":
await ctx.delete()
try:
await ctx.guild.ban(ctx.author, reason="anti exe file")
except discord.errors.Forbidden:
return
def setup(bot):
bot.add_cog(AntiExe(bot))
|
'''
Test that demo notebook cells execute without errors
Using https://www.thedataincubator.com/blog/2016/06/09/testing-jupyter-notebooks/
'''
import os
from pathlib import Path
import subprocess
import tempfile
import pytest
import nbformat
# Assumes test are called from project root directory
notebook_dir = Path(os.getcwd()) / 'notebooks'
@pytest.mark.parametrize(
'notebook_path',
[
notebook_dir / 'bayesian-network.ipynb',
notebook_dir / 'conditional-probability-tables-with-constraints.ipynb'
]
)
def test_ipynb(notebook_path):
nb, errors = _notebook_run(notebook_path)
assert errors == []
def _notebook_run(path):
"""
Execute a notebook via nbconvert and collect output.
"""
with tempfile.NamedTemporaryFile(suffix=".ipynb") as fout:
args = [
"python", "-m", "nbconvert", "--to", "notebook", "--execute",
"--ExecutePreprocessor.timeout=60",
"--output", fout.name, path
]
subprocess.check_call(args)
fout.seek(0)
nb = nbformat.read(fout, nbformat.current_nbformat)
errors = [
output for cell in nb.cells if "outputs" in cell
for output in cell["outputs"] if output.output_type == "error"
]
return nb, errors
|
import factory
from eth_account import Account
from factory.django import DjangoModelFactory
from gnosis.eth.tests.clients.mocks import sourcify_safe_metadata
from ..models import Contract, ContractAbi
class ContractAbiFactory(DjangoModelFactory):
class Meta:
model = ContractAbi
abi = sourcify_safe_metadata['output']['abi']
description = 'Gnosis Safe v1.2.0 ABI'
relevance = 1
class ContractFactory(DjangoModelFactory):
class Meta:
model = Contract
address = factory.LazyFunction(lambda: Account.create().address)
name = factory.Faker('cryptocurrency_name')
display_name = ''
logo = factory.django.ImageField(color='green')
contract_abi = factory.SubFactory(ContractAbiFactory)
|
#Write a function which accepts an input string consisting of alphabetic
#characters and returns the string with all the spaces removed.
#Do NOT use any string methods for this problem.
def remove_spaces(line):
final_line = ""
for x in line:
if x != " ":
final_line += x
return final_line
print(remove_spaces("The Hobbit es una muy buena pelicula"))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from django.utils.deprecation import MiddlewareMixin
from django.core.cache import cache
from utils.tools import my_response, Logger
# class PermissionCheck(MiddlewareMixin):
# """
# Middleware used to check request permissions
# """
# cookie_time = 2 * 3600
# @staticmethod
# def process_request(request):
# """
# :param request:
# :return:
# """
# # print "start", time.time()
# if "login" in request.path:
# return
# # request.COOKIES["sid"] = "9342c00a6cb65a2d35e2bd48cc2ab163"
# sid = request.COOKIES.get("sid")
# content = cache.get(sid)
# if content:
# username = content.get("username")
# Logger.debug("{0}: request, url is: {1}".format(username, request.path.encode("utf-8")))
# request.COOKIES["username"] = username
# else:
# return my_response(code=-1, msg="登录超时!")
# @staticmethod
# def process_response(request, response):
# sid = request.COOKIES.get("sid")
# if sid and "logout" not in request.path:
# cache.expire(sid, timeout=PermissionCheck.cookie_time)
# response.set_cookie("sid", sid, max_age=PermissionCheck.cookie_time - 10)
# # print "end time", time.time()
# return response
class PrintCheck(MiddlewareMixin):
"""
中间件,用于检查请求权限
"""
cookie_time = 2 * 3600
@staticmethod
def process_request(request):
"""
:param request:
:return:
"""
# print "start", time.time()
if "login" in request.path:
return
# request.COOKIES["sid"] = "9342c00a6cb65a2d35e2bd48cc2ab163"
sid = request.COOKIES.get("sid")
content = cache.get(sid)
if content:
chinese_name = content.get("chinese_name")
Logger.debug("{0}: request, url is: {1}".format(username, request.path.encode("utf-8")))
request.COOKIES["chinese_name"] = chinese_name
else:
return my_response(code=-1, msg="登录超时!")
|
from threading import Timer
import os
import sys
from data_source.static_images import image_source
# import image_acquisition
import load_model
import logging
log = logging.getLogger()
log.setLevel("INFO")
exists=(os.path.isfile("Inception-BN-symbol.json") and os.path.isfile("Inception-BN-0000.params") and os.path.isfile("synset.txt"))
def predict():
model_path = './'
source = image_source.ImageSource()
global_model = load_model.ImagenetModel(model_path + 'synset.txt', model_path + 'Inception-BN')
image = source.get_image()
predictions = global_model.predict_from_image(image, N=1)
predictions = global_model.predict_from_cam()
log.info('predictions: {}'.format(predictions))
result=map(lambda x: {'confidence':str(x[0]),'prediction':str(x[1])}, predictions)
print(predictions)
Timer(5, predict).start()
if not exists:
import urllib.request
urllib.request.urlretrieve ("http://data.mxnet.io/models/imagenet/inception-bn/Inception-BN-symbol.json", "Inception-BN-symbol.json")
urllib.request.urlretrieve ("http://data.mxnet.io/models/imagenet/inception-bn/Inception-BN-0126.params", "Inception-BN-0000.params")
urllib.request.urlretrieve ("http://data.mxnet.io/mxnet/models/imagenet/synset.txt", "synset.txt")
log.info("Models downloaded")
predict()
|
import re
def checkFasta(fastas):
# Returns True if all sequences in fastas have the same length
lenList = set()
for i in fastas:
lenList.add(len(i[1]))
return len(lenList) == 1
def minSequenceLength(fastas):
minLen = 10000
for i in fastas:
if minLen > len(i[1]):
minLen = len(i[1])
return minLen
def minSequenceLengthWithNormalAA(fastas):
minLen = 10000
for i in fastas:
if minLen > len(re.sub('-', '', i[1])):
minLen = len(re.sub('-', '', i[1]))
return minLen
|
from flask import request
from project import db
from project.errors import bp
from project.api.errors import error_response as api_error_response
def wants_json_response():
return request.accept_mimetypes['application/json'] >= \
request.accept_mimetypes['text/html']
@bp.app_errorhandler(404)
def not_found_error(error):
return api_error_response(404)
@bp.app_errorhandler(500)
def internal_error(error):
db.session.rollback()
return api_error_response(500)
|
import cv2
import numpy as np
import os
import pandas as pd
from matplotlib import pyplot as plt
import time
import datetime
# Images Properties
def plt_show(image, title=""):
if len(image.shape) == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.axis("off")
plt.title(title)
plt.imshow(image, cmap="Greys_r")
plt.show()
# face detection
class FaceDetector(object):
def __init__(self, xml_path):
self.classifier = cv2.CascadeClassifier(xml_path)
def detect(self, image, biggest_only=True):
scale_factor = 1.2
min_neighbors = 5
min_size = (75, 75)
biggest_only = True
flags = cv2.CASCADE_FIND_BIGGEST_OBJECT | cv2.CASCADE_DO_ROUGH_SEARCH if biggest_only else cv2.CASCADE_SCALE_IMAGE
faces_coord = self.classifier.detectMultiScale(image,
scaleFactor=scale_factor,
minNeighbors=min_neighbors,
minSize=min_size,
flags=flags)
return faces_coord
# Video Camera
class VideoCamera(object):
def __init__(self, index=1):
self.video = cv2.VideoCapture(index)
self.index = index
print (self.video.isOpened())
def __del__(self):
self.video.release()
def get_frame(self, in_grayscale=False):
_, frame = self.video.read()
if in_grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
return frame
# Crop Images
def cut_faces(image, faces_coord):
faces = []
for (x, y, w, h) in faces_coord:
w_rm = int(0.3 * w / 2)
faces.append(image[y: y + h, x + w_rm: x + w - w_rm])
return faces
# Normalize Images
def normalize_intensity(images):
images_norm = []
for image in images:
is_color = len(image.shape) == 3
if is_color:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
images_norm.append(cv2.equalizeHist(image))
return images_norm
# Resize Images
def resize(images, size=(100, 100)):
images_norm = []
for image in images:
if image.shape < size:
image_norm = cv2.resize(image, size,
interpolation=cv2.INTER_AREA)
else:
image_norm = cv2.resize(image, size,
interpolation=cv2.INTER_CUBIC)
images_norm.append(image_norm)
return images_norm
# Normalize Faces
def normalize_faces(frame, faces_coord):
faces = cut_faces(frame, faces_coord)
faces = normalize_intensity(faces)
faces = resize(faces)
return faces
# Rectangle Line
def draw_rectangle(image, coords):
for (x, y, w, h) in coords:
w_rm = int(0.2 * w / 2)
cv2.rectangle(image, (x + w_rm, y), (x + w - w_rm, y + h),
(102, 255, 0), 1)
# acquire images from dataset
def collect_dataset():
images = []
labels = []
labels_dic = {}
members = [person for person in os.listdir("members/")]
for i, person in enumerate(members): # loop over
labels_dic[i] = person
for image in os.listdir("members/" + person):
images.append(cv2.imread("members/" + person + '/' + image,
0))
labels.append(i)
return (images, np.array(labels), labels_dic)
images, labels, labels_dic = collect_dataset()
# train image (algorithm sets)
rec_eig = cv2.face.EigenFaceRecognizer_create()
rec_eig.train(images, labels)
rec_fisher = cv2.face.FisherFaceRecognizer_create()
rec_fisher.train(images, labels)
rec_lbph = cv2.face.LBPHFaceRecognizer_create()
rec_lbph.train(images, labels)
print ("Models Trained Succesfully")
# cascade face and mask
detector = FaceDetector("xml/frontal_face.xml")
detector_mask = cv2.CascadeClassifier("xml/mask_cascade.xml")
# 0 webcam Laptop
# 1 back cam (usb)
# 2 front cam (usb)
webcam0 = VideoCamera(0)
webcam1 = VideoCamera(1)
ts = time.time()
date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
# Mask detection and Face recognition (in)
a= False
while True:
frame0 = webcam0.get_frame()
mask = detector_mask.detectMultiScale(frame0,
scaleFactor=1.2,
minNeighbors=5,
minSize=(100, 100),
maxSize=(150, 150),
flags=cv2.CASCADE_SCALE_IMAGE)
for(x1,y1,x2,y2) in mask:
cv2.rectangle(frame0,(x1,y1),(x1+x2,y1+y2),(0,255,0),2)
cv2.putText(frame0, 'Using Mask',(x1, y1+y2 + 30), cv2.FONT_HERSHEY_PLAIN, 1.5, (255,255,255), 2)
a= True
faces_coord = detector.detect(frame0, False) # detect more than one face
col_names = ['Name','Date','Time','Mask']
attendance = pd.DataFrame(columns = col_names)
if len(faces_coord):
faces = normalize_faces(frame0, faces_coord) # norm pipeline
for i, face in enumerate(faces): # for each detected face
collector = cv2.face.StandardCollector_create()
rec_lbph.predict_collect(face, collector) # chosen algorithm
conf = collector.getMinDist()
pred = collector.getMinLabel()
threshold = 76 # eigen, fisher, lbph [mean 3375,1175,65] [high lbph 76]
print ("Prediction Entry: " + labels_dic[pred].capitalize() + "\nConfidence Entry: " + str(round(conf))+ "\nMask : " + str(a))
if conf > threshold: # apply threshold
cv2.putText(frame0, labels_dic[pred].capitalize(),
(faces_coord[i][0], faces_coord[i][1] - 20),
cv2.FONT_HERSHEY_DUPLEX, 1.0, (102, 255, 0), 1)
attendance.loc[len(attendance)] = [labels_dic[pred],date,timeStamp,str(a)]
Hour,Minute,Second=timeStamp.split(":")
fileName="attendancein\Attendance_"+labels_dic[pred]+"-"+date+"_"+Hour+"-"+Minute+"-"+Second+".csv" # write to output file (in)
attendance.to_csv(fileName,index=False)
else:
cv2.putText(frame0, "Unknown",
(faces_coord[i][0], faces_coord[i][1] - 10),
cv2.FONT_HERSHEY_DUPLEX, 1.0, (66, 55, 245), 1)
draw_rectangle(frame0, faces_coord) # rectangle around face
cv2.putText(frame0, "ESC to exit", (5, frame0.shape[0] - 5),
cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.imshow("Entry Cam", frame0) # live feed in external
if cv2.waitKey(33) & 0xFF == 27:
cv2.destroyAllWindows()
break
# mask detection and face recognition (out)
frame1 = webcam1.get_frame()
mask = detector_mask.detectMultiScale(frame1,
scaleFactor=1.2,
minNeighbors=5,
minSize=(100, 100),
maxSize=(150, 150),
flags=cv2.CASCADE_SCALE_IMAGE)
for(x1,y1,x2,y2) in mask:
cv2.rectangle(frame1,(x1,y1),(x1+x2,y1+y2),(0,255,0),2)
cv2.putText(frame1, 'Using Mask',(x1, y1+y2 + 30), cv2.FONT_HERSHEY_PLAIN, 1.5, (255,255,255), 2)
faces_coord = detector.detect(frame1, False) # detect more than one face
col_names = ['Name','Date','Time']
attendance = pd.DataFrame(columns = col_names)
if len(faces_coord):
faces = normalize_faces(frame1, faces_coord) # norm pipeline
for i, face in enumerate(faces): # for each detected face
collector = cv2.face.StandardCollector_create()
rec_lbph.predict_collect(face, collector) # chosen algorithm
conf = collector.getMinDist()
pred = collector.getMinLabel()
threshold = 75 # eigen, fisher, lbph [mean 3375,1175,65] [high lbph 76]
print ("Prediction Exit: " + labels_dic[pred].capitalize() + "\nConfidence Exit: " + str(round(conf)))
if conf > threshold: # apply threshold
cv2.putText(frame1, labels_dic[pred].capitalize(),
(faces_coord[i][0], faces_coord[i][1] - 20),
cv2.FONT_HERSHEY_DUPLEX, 1.0, (102, 255, 0), 1)
attendance.loc[len(attendance)] = [labels_dic[pred],date,timeStamp]
Hour,Minute,Second=timeStamp.split(":")
fileName="attendanceout\Attendance_"+labels_dic[pred]+"-"+date+"_"+Hour+"-"+Minute+"-"+Second+".csv" # write to output file (out)
attendance.to_csv(fileName,index=False)
else:
cv2.putText(frame1, "Unknown",
(faces_coord[i][0], faces_coord[i][1] - 10),
cv2.FONT_HERSHEY_DUPLEX, 1.0, (66, 55, 245), 1)
draw_rectangle(frame1, faces_coord) # rectangle around face
cv2.putText(frame1, "ESC to exit", (5, frame1.shape[0] - 5),
cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.imshow("Exit Cam", frame1) # live feed in external
if cv2.waitKey(33) & 0xFF == 27:
cv2.destroyAllWindows()
break
del webcam0
del webcam1
|
"""
21.调整数组顺序使奇数位于偶数前面
时间复杂度:O(n)
空间复杂度:O(n)
"""
# -*- coding:utf-8 -*-
# Straightforward approach; keeps the relative order among odd numbers and among even numbers unchanged
# Time complexity: O(n^2)
class Solution1:
def reOrderArray(self, array):
# write code here
ret = list()
index = 0
for i in range(len(array)):
if array[i] % 2:
ret.insert(index, array[i])
index += 1
else:
ret.append(array[i])
return ret
# Relative order may change; algorithm time complexity: O(n)
class Solution2:
def reOrderArray(self, array):
# write code here
i, j = 0, len(array) - 1
while i < j:
while ((array[i] & 1) == 1):
i += 1
while ((array[j] & 1) == 0):
j -= 1
if i < j:
array[i], array[j] = array[j], array[i]
i += 1
j -= 1
else:
break
return array
if __name__ == "__main__":
array = [1, 3, 6, 1, 2, 5, 7]
s = Solution2()
ret = s.reOrderArray(array)
print(ret)
|
import falcon
import os
class ReputashaEndpoint(object):
def on_get(self, req, resp):
stream = os.popen('reputasha.sh ' + req.params['person'])
output = stream.read()
resp.body = output
api = falcon.API()
reputasha_endpoint = ReputashaEndpoint()
api.add_route('/dosearch', reputasha_endpoint)
|
# -*- coding: utf-8 -*-
import unittest
from outtest import OutputTestCase
import uri_1021
class TestUri1021(OutputTestCase):
def test_output1(self):
self.input(['576.73'])
uri_1021.main()
self.assert_output([
'NOTAS:',
'5 nota(s) de R$ 100.00',
'1 nota(s) de R$ 50.00',
'1 nota(s) de R$ 20.00',
'0 nota(s) de R$ 10.00',
'1 nota(s) de R$ 5.00',
'0 nota(s) de R$ 2.00',
'MOEDAS:',
'1 moeda(s) de R$ 1.00',
'1 moeda(s) de R$ 0.50',
'0 moeda(s) de R$ 0.25',
'2 moeda(s) de R$ 0.10',
'0 moeda(s) de R$ 0.05',
'3 moeda(s) de R$ 0.01'
])
def test_output2(self):
self.input(['4.00'])
uri_1021.main()
self.assert_output([
'NOTAS:',
'0 nota(s) de R$ 100.00',
'0 nota(s) de R$ 50.00',
'0 nota(s) de R$ 20.00',
'0 nota(s) de R$ 10.00',
'0 nota(s) de R$ 5.00',
'2 nota(s) de R$ 2.00',
'MOEDAS:',
'0 moeda(s) de R$ 1.00',
'0 moeda(s) de R$ 0.50',
'0 moeda(s) de R$ 0.25',
'0 moeda(s) de R$ 0.10',
'0 moeda(s) de R$ 0.05',
'0 moeda(s) de R$ 0.01'
])
def test_output3(self):
self.input(['91.01'])
uri_1021.main()
self.assert_output([
'NOTAS:',
'0 nota(s) de R$ 100.00',
'1 nota(s) de R$ 50.00',
'2 nota(s) de R$ 20.00',
'0 nota(s) de R$ 10.00',
'0 nota(s) de R$ 5.00',
'0 nota(s) de R$ 2.00',
'MOEDAS:',
'1 moeda(s) de R$ 1.00',
'0 moeda(s) de R$ 0.50',
'0 moeda(s) de R$ 0.25',
'0 moeda(s) de R$ 0.10',
'0 moeda(s) de R$ 0.05',
'1 moeda(s) de R$ 0.01'
])
if __name__ == '__main__':
unittest.main()
|
# Copyright 2020, 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This program configures and builds Emacs.
It’s an internal implementation detail of the Bazel rules for Emacs Lisp; don’t
use it outside the rules or depend on its behavior."""
import argparse
import pathlib
import shlex
import shutil
import subprocess
import tempfile
def main() -> None:
"""Configures and builds Emacs."""
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument('--source', type=pathlib.Path, required=True)
parser.add_argument('--install', type=pathlib.Path, required=True)
parser.add_argument('--cc', type=pathlib.Path, required=True)
parser.add_argument('--cflags', required=True)
parser.add_argument('--ldflags', required=True)
parser.add_argument('--module-header', type=pathlib.Path)
args = parser.parse_args()
with tempfile.TemporaryDirectory() as temp:
temp = pathlib.Path(temp)
build = temp / 'build'
install = args.install.resolve()
shutil.copytree(args.source, build)
def run(*command: str) -> None:
try:
subprocess.run(command, check=True, cwd=build,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
encoding='utf-8', errors='backslashescape')
except subprocess.CalledProcessError as ex:
print('command', ' '.join(map(shlex.quote, command)),
'failed, output follows:')
print(ex.stdout)
print()
print('config.log follows:')
config_log = build / 'config.log'
print(config_log.read_text('utf-8', 'backslashescape'))
raise
run('./configure', '--prefix=' + str(install),
'--without-all', '--without-ns', '--with-x-toolkit=no',
# Enable threads explicitly to work around
# https://debbugs.gnu.org/cgi/bugreport.cgi?bug=30106 in older
# Emacs versions.
'--with-modules', '--with-threads',
'--disable-build-details',
'CC=' + str(args.cc.resolve()),
'CFLAGS=' + args.cflags,
'LDFLAGS=' + args.ldflags)
run('make', 'install')
# Delete source files that have a corresponding compiled file, as these
# files don’t work well with Coverage (see
# e.g. https://debbugs.gnu.org/cgi/bugreport.cgi?bug=40766).
for compiled in install.glob('share/emacs/*/lisp/**/*.elc'):
compiled.with_suffix('.el').unlink()
# Sanity check to verify that the resulting binary works.
subprocess.run([install / 'bin/emacs', '--quick', '--batch'],
check=True, stdin=subprocess.DEVNULL)
if args.module_header:
# Copy emacs-module.h to the desired location.
shutil.copy(install / 'include/emacs-module.h', args.module_header)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# https://www.hackerrank.com/challenges/py-set-intersection-operation
_ = input().strip()
a = set(map(int, input().strip().split()))
_ = input().strip()
b = set(map(int, input().strip().split()))
print(len(a.intersection(b)))
|
#! /usr/bin/env python
# -*- coding: cp1252 -*-
#############################################################################
# Copyright (C) 2007-2013 German Aerospace Center (DLR/SC)
#
# Created: 2013-03-13 Martin Siggel <martin.siggel@dlr.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
import unittest
import subprocess
from tiglwrapper import *
from tixiwrapper import *
skipSlowFunctions = True # Saves you 14 minutes (if True), but leaves out 8 functions
class TestSimpleCpacs(unittest.TestCase):
def setUp(self):
self.tixi = Tixi()
self.tigl = Tigl()
self.tixi.open('TestData/simpletest.cpacs.xml')
self.tigl.open(self.tixi, '')
def tearDown(self):
self.tigl.close()
self.tixi.close()
def test_tiglGetVersion(self):
print('TIGL-Version: ' + self.tigl.version)
def test_objectCount(self):
self.assertEqual(self.tigl.getWingCount(),1)
self.assertEqual(self.tigl.getFuselageCount(),1)
self.assertEqual(self.tigl.wingGetComponentSegmentCount(1),1)
########## Exports are faster in this class
def test_exportIGES(self):
filenamePtr = "TestData/export/export0.igs"
self.tigl.exportIGES(filenamePtr)
def test_exportFusedWingFuselageIGES(self):
if not skipSlowFunctions:
filenamePtr = "TestData/export/export.igs"
self.tigl.exportFusedWingFuselageIGES(filenamePtr)
def test_exportSTEP(self):
filenamePtr = 'TestData/export/export.step'
self.tigl.exportSTEP(filenamePtr)
def test_exportMeshedWingSTL(self):
if not skipSlowFunctions:
wingIndex = 1
filenamePtr = "TestData/export/export.stl"
deflection = 0.01
self.tigl.exportMeshedWingSTL(wingIndex, filenamePtr, deflection)
def test_exportMeshedGeometrySTL(self):
if not skipSlowFunctions:
filenamePtr = "TestData/export/export3.stl"
deflection = 0.01
self.tigl.exportMeshedGeometrySTL(filenamePtr, deflection)
def test_exportMeshedWingVTKByUID(self):
wingUID = "Wing"
filenamePtr ="TestData/export/export.vtk"
deflection = 0.01
self.tigl.exportMeshedWingVTKByUID(wingUID, filenamePtr, deflection)
def test_exportMeshedGeometryVTK(self):
if not skipSlowFunctions:
filenamePtr = "TestData/export/export4.vtk"
deflection = 0.01
self.tigl.exportMeshedGeometryVTK(filenamePtr, deflection)
def test_exportMeshedWingVTKSimpleByUID(self):
wingUID = "Wing"
filenamePtr = "TestData/export/export5.vtk"
deflection = 0.01
self.tigl.exportMeshedWingVTKSimpleByUID(wingUID, filenamePtr, deflection)
def test_exportMeshedGeometryVTKSimple(self):
if not skipSlowFunctions:
filenamePtr = "TestData/export/export7.vtk"
deflection = 0.01
self.tigl.exportMeshedGeometryVTKSimple(filenamePtr, deflection)
def test_getMaterialUID(self):
compSegmentUID = "WING_CS1"
eta = 0.25
xsi = 0.9
nmaterials = self.tigl.wingComponentSegmentGetMaterialCount(compSegmentUID, TiglStructureType.UPPER_SHELL, eta, xsi )
self.assertEqual(nmaterials, 1)
material = self.tigl.wingComponentSegmentGetMaterialUID(compSegmentUID, TiglStructureType.UPPER_SHELL, eta, xsi, 1)
self.assertEqual(material, 'MyCellMat')
######
# Tests for tigl logging mechanism
class TestTiglLogging(unittest.TestCase):
def get_logs(self, loglevel):
process=subprocess.Popen(['python', 'test_logging.py', "%s" % loglevel], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err=process.communicate()
status=process.returncode
        # separate output lines
out=out.decode("UTF-8").rstrip('\n').split('\n')
err=err.decode("UTF-8").rstrip('\n').split('\n')
return (status, out, err)
def test_test_script(self):
for lev in range(0,4):
self.assertEqual(self.get_logs(lev)[0], 0)
def test_info(self):
status, out, err = self.get_logs(3)
self.assertTrue(len(out), 1)
self.assertTrue(len(err), 4)
self.assertTrue(out[0].startswith('INF'))
self.assertTrue(out[0].strip().endswith('No far-field defined.'))
self.assertTrue(err[0].startswith('WRN'))
self.assertTrue(err[0].strip().endswith('CPACS dataset version is higher than TIGL library version!'))
self.assertTrue(err[1].startswith('WRN'))
self.assertTrue(err[1].strip().endswith("The points in profile PointListExampleAirfoil don't seem to be ordered in a mathematical positive sense."))
self.assertTrue(err[2].startswith('ERR'))
self.assertTrue(err[2].strip().endswith('Error: Invalid uid in tiglWingComponentSegmentPointGetSegmentEtaXsi'))
def test_warning(self):
status, out, err = self.get_logs(2)
self.assertTrue(len(out), 0)
self.assertTrue(len(err), 4)
self.assertTrue(err[0].startswith('WRN'))
self.assertTrue(err[0].strip().endswith('CPACS dataset version is higher than TIGL library version!'))
self.assertTrue(err[1].startswith('WRN'))
self.assertTrue(err[1].strip().endswith("The points in profile PointListExampleAirfoil don't seem to be ordered in a mathematical positive sense."))
self.assertTrue(err[2].startswith('ERR'))
self.assertTrue(err[2].strip().endswith('Error: Invalid uid in tiglWingComponentSegmentPointGetSegmentEtaXsi'))
def test_error(self):
status, out, err = self.get_logs(1)
self.assertTrue(len(out), 0)
self.assertTrue(len(err), 1)
self.assertTrue(err[0].startswith('ERR'))
self.assertTrue(err[0].strip().endswith('Error: Invalid uid in tiglWingComponentSegmentPointGetSegmentEtaXsi'))
def test_silent(self):
status, out, err = self.get_logs(0)
self.assertTrue(len(out), 0)
self.assertTrue(len(err), 0)
def test_file_logging(self):
status, out, err = self.get_logs(0)
f=open("test_logging.log", 'r')
lines=f.readlines()
f.close()
lout=[line.rstrip('\n') for line in lines]
self.assertTrue(len(lout), 6)
self.assertTrue(lout[0].startswith('TiGL log file created at'))
self.assertTrue(lout[1].startswith('WRN'))
self.assertTrue(lout[1].strip().endswith('CPACS dataset version is higher than TIGL library version!'))
self.assertTrue(lout[2].startswith('WRN'))
self.assertTrue(lout[2].strip().endswith("The points in profile PointListExampleAirfoil don't seem to be ordered in a mathematical positive sense."))
self.assertTrue(lout[3].startswith('INF'))
self.assertTrue(lout[3].strip().endswith('No far-field defined.'))
self.assertTrue(lout[4].startswith('ERR'))
self.assertTrue(lout[4].strip().endswith('Error: Invalid uid in tiglWingComponentSegmentPointGetSegmentEtaXsi'))
# ----------------------------------------------------------------------- #
# The following tests only check whether the Python API is correct.
# Correctness of the TIGL functions themselves is not tested here (that is done in the C tests).
class TestTiglApi(unittest.TestCase):
def setUp(self):
self.tigl = Tigl()
self.tixi = Tixi()
self.tigl.logSetVerbosity(TiglLogLevel.TILOG_SILENT)
self.tixi.open('TestData/CPACS_21_D150.xml')
self.tigl.open(self.tixi, 'D150_VAMP')
def tearDown(self):
self.tigl.close()
self.tixi.close()
def test_getWingCount(self):
self.assertEqual(self.tigl.getWingCount(),3)
def test_getFuselageCount(self):
self.assertEqual(self.tigl.getFuselageCount(),1)
def test_wingGetSegmentCount(self):
self.assertEqual(self.tigl.wingGetSegmentCount(1),3)
self.assertEqual(self.tigl.wingGetSegmentCount(2),1)
self.assertEqual(self.tigl.wingGetSegmentCount(3),1)
# check illegal wing index error handling
error = TiglReturnCode.TIGL_SUCCESS
try:
self.tigl.wingGetSegmentCount(-1);
except TiglException as e:
error = e.code
self.assertEqual(error, TiglReturnCode.TIGL_INDEX_ERROR)
def test_wingGetComponentSegmentCount(self):
self.assertEqual(self.tigl.wingGetComponentSegmentCount(1),1)
self.assertEqual(self.tigl.wingGetComponentSegmentCount(2),1)
self.assertEqual(self.tigl.wingGetComponentSegmentCount(3),1)
def test_wingGetComponentSegmentUID(self):
wingCompUID = self.tigl.wingGetComponentSegmentUID(1,1)
self.assertEqual(wingCompUID, 'D150_VAMP_W1_CompSeg1')
wingCompUID = self.tigl.wingGetComponentSegmentUID(2,1)
self.assertEqual(wingCompUID, 'D150_VAMP_HL1_CompSeg1')
wingCompUID = self.tigl.wingGetComponentSegmentUID(3,1)
self.assertEqual(wingCompUID, 'D150_VAMP_SL1_CompSeg1')
def test_wingGetComponentSegmentIndex(self):
compSegIndex = self.tigl.wingGetComponentSegmentIndex(1,'D150_VAMP_W1_CompSeg1')
self.assertEqual(compSegIndex,1)
def test_wingGetUpperPoint(self):
(x, y, z) = self.tigl.wingGetUpperPoint(1,1,0.5,0.5);
def test_wingGetLowerPoint(self):
(x, y, z) = self.tigl.wingGetLowerPoint(1,1,0.5,0.5);
def test_wingGetInnerConnectedSegmentCount(self):
segmentCount = self.tigl.wingGetInnerConnectedSegmentCount(1,1)
self.assertEqual(segmentCount,0)
def test_wingGetOuterConnectedSegmentCount(self):
segmentCount = self.tigl.wingGetOuterConnectedSegmentCount(1,1)
self.assertEqual(segmentCount,1)
def test_wingGetInnerConnectedSegmentIndex(self):
try:
segIndex = self.tigl.wingGetInnerConnectedSegmentIndex(1,1,0)
self.assertEqual(True, False)
except TiglException as e:
self.assertEqual(e.code, TiglReturnCode.TIGL_INDEX_ERROR)
def test_wingGetOuterConnectedSegmentIndex(self):
segIndex = self.tigl.wingGetOuterConnectedSegmentIndex(1,1,1)
self.assertEqual(segIndex,2)
def test_wingGetInnerSectionAndElementIndex(self):
(secindex, elementindex) = self.tigl.wingGetInnerSectionAndElementIndex(1,1)
self.assertEqual(secindex, 1)
self.assertEqual(elementindex,1)
def test_wingGetOuterSectionAndElementIndex(self):
(secindex, elementindex) = self.tigl.wingGetOuterSectionAndElementIndex(1,1)
self.assertEqual(secindex, 2)
self.assertEqual(elementindex,1)
def test_wingGetInnerSectionAndElementUID(self):
(secUID, elementUID) = self.tigl.wingGetInnerSectionAndElementUID(1,1)
self.assertEqual(secUID, 'D150_VAMP_W1_Sec1')
self.assertEqual(elementUID, 'D150_VAMP_W1_Sec1_Elem1')
def test_wingGetOuterSectionAndElementUID(self):
(secUID, elementUID) = self.tigl.wingGetOuterSectionAndElementUID(1,1)
self.assertEqual(secUID, 'D150_VAMP_W1_Sec2')
self.assertEqual(elementUID, 'D150_VAMP_W1_Sec2_Elem1')
def test_wingGetProfileName(self):
profileName = self.tigl.wingGetProfileName(1,1,1)
self.assertEqual(profileName, 'NameD150_VAMP_W_SupCritProf1')
def test_wingGetUID(self):
wingUID = self.tigl.wingGetUID(1)
self.assertEqual(wingUID, 'D150_VAMP_W1')
def test_wingGetIndex(self):
wingIndex = self.tigl.wingGetIndex('D150_VAMP_W1')
self.assertEqual(wingIndex, 1)
def test_wingGetSegmentUID(self):
segmentUID = self.tigl.wingGetSegmentUID(1,1)
self.assertEqual(segmentUID, 'D150_VAMP_W1_Seg1')
def test_wingGetSegmentIndex(self):
segmentIndex, wingIndex = self.tigl.wingGetSegmentIndex('D150_VAMP_W1_Seg1')
self.assertEqual(segmentIndex, 1)
self.assertEqual(wingIndex, 1)
def test_wingGetSectionUID(self):
sectionUID = self.tigl.wingGetSectionUID(1,1)
self.assertEqual(sectionUID, 'D150_VAMP_W1_Sec1')
def test_wingGetSymmetry(self):
symm = self.tigl.wingGetSymmetry(1)
self.assertEqual(symm, TiglSymmetryAxis.TIGL_X_Z_PLANE)
def test_wingComponentSegmentFindSegment(self):
(x, y, z) = self.tigl.wingGetUpperPoint(1,1,0.5,0.5);
(segUID, wingUID) = self.tigl.wingComponentSegmentFindSegment('D150_VAMP_W1_CompSeg1',x,y,z)
self.assertEqual(segUID, 'D150_VAMP_W1_Seg1')
self.assertEqual(wingUID, 'D150_VAMP_W1')
def test_wingComponentSegmentPointGetSegmentEtaXsi(self):
(wingUID, segmentUID, eta, xsi, errorDist) = self.tigl.wingComponentSegmentPointGetSegmentEtaXsi('D150_VAMP_W1_CompSeg1', 0.0, 0.0)
self.assertEqual(wingUID, 'D150_VAMP_W1')
self.assertEqual(segmentUID, 'D150_VAMP_W1_Seg1')
self.assertAlmostEqual(eta, 0.0)
self.assertAlmostEqual(xsi, 0.0)
self.assertAlmostEqual(errorDist, 0.0)
def test_wingSegmentPointGetComponentSegmentEtaXsi(self):
(eta, xsi) = self.tigl.wingSegmentPointGetComponentSegmentEtaXsi('D150_VAMP_W1_Seg1', 'D150_VAMP_W1_CompSeg1', 0.0, 0.0)
self.assertAlmostEqual(eta, 0.0)
self.assertAlmostEqual(xsi, 0.0)
def test_getFuselageCount(self):
fc = self.tigl.getFuselageCount()
self.assertEqual(fc,1)
def test_fuselageGetSegmentCount(self):
sc = self.tigl.fuselageGetSegmentCount(1)
self.assertEqual(sc,59)
def test_fuselageGetPoint(self):
for iseg in range(0,59):
(x,y,z) = self.tigl.fuselageGetPoint(1,iseg+1,0.5,0.5)
#check for illegal index
try:
self.tigl.fuselageGetPoint(1,60,0.5,0.5)
self.assertEqual(False, True)
except TiglException as e:
self.assertEqual(e.code, TiglReturnCode.TIGL_INDEX_ERROR)
def test_fuselageGetPointAngle(self):
wingIDX = 1
segIDX = 40
(x,y,z) = self.tigl.fuselageGetPointAngle(wingIDX, segIDX, 0.5, 90.0)
def test_fuselageGetPointAngleTranslated(self):
fuselIDX = 1
segIDX = 40
ytrans = 0.1;
ztrans = -0.1;
(x,y,z) = self.tigl.fuselageGetPointAngleTranslated(fuselIDX, segIDX, 0.5, 90.0, ytrans, ztrans)
def test_fuselageGetPointOnXPlane(self):
fuselIDX = 1
segIDX = 40
zpos = 0.7
(x,y,z) = self.tigl.fuselageGetPointOnXPlane(fuselIDX, segIDX, 0.5, zpos, 1)
self.assertAlmostEqual(zpos, z)
def test_fuselageGetNumPointsOnXPlane(self):
fuselIDX = 1
segIDX = 40
zpos = 0.7
num = self.tigl.fuselageGetNumPointsOnXPlane(fuselIDX, segIDX, 0.5, zpos)
self.assertGreater(num, 0)
def test_fuselageGetPointOnYPlane(self):
fuselIDY = 1
segIDY = 40
zpos = 0.7
(x,y,z) = self.tigl.fuselageGetPointOnYPlane(fuselIDY, segIDY, 0.5, zpos, 1)
self.assertAlmostEqual(zpos, z)
def test_fuselageGetNumPointsOnYPlane(self):
fuselIDY = 1
segIDY = 40
zpos = 0.7
num = self.tigl.fuselageGetNumPointsOnYPlane(fuselIDY, segIDY, 0.5, zpos)
self.assertGreater(num, 0)
    ############################ BEGIN
def test_fuselageGetCircumference(self):
fuselageIndex = 1
segmentIndex = 1
eta = 0.5
ret = self.tigl.fuselageGetCircumference(fuselageIndex, segmentIndex, eta)
self.assertEqual(isinstance(ret, float),True)
def test_fuselageGetStartConnectedSegmentCount(self):
fuselageIndex = 1
segmentIndex = 1
ret = self.tigl.fuselageGetStartConnectedSegmentCount(fuselageIndex, segmentIndex)
self.assertEqual(ret,0)
def test_fuselageGetEndConnectedSegmentCount(self):
fuselageIndex = 1
segmentIndex = 1
ret = self.tigl.fuselageGetEndConnectedSegmentCount(fuselageIndex, segmentIndex)
self.assertEqual(ret,1)
def test_fuselageGetStartConnectedSegmentIndex(self):
fuselageIndex = 1
segmentIndex = 2
n = 1
ret = self.tigl.fuselageGetStartConnectedSegmentIndex(fuselageIndex, segmentIndex, n)
self.assertEqual(ret,1)
def test_fuselageGetEndConnectedSegmentIndex(self):
fuselageIndex = 1
segmentIndex = 1
n = 1
ret = self.tigl.fuselageGetEndConnectedSegmentIndex(fuselageIndex, segmentIndex, n)
self.assertEqual(ret,2)
def test_fuselageGetStartSectionAndElementUID(self):
fuselageIndex = 1
segmentIndex = 1
n = 1
ret = self.tigl.fuselageGetStartSectionAndElementUID(fuselageIndex, segmentIndex)
self.assertEqual(ret,('D150_VAMP_FL1_Sec1', 'D150_VAMP_FL1_Sec1_Elem1'))
def test_fuselageGetEndSectionAndElementUID(self):
fuselageIndex = 1
segmentIndex = 1
ret = self.tigl.fuselageGetEndSectionAndElementUID(fuselageIndex, segmentIndex)
self.assertEqual(ret,('D150_VAMP_FL1_Sec2', 'D150_VAMP_FL1_Sec2_Elem1'))
def test_fuselageGetStartSectionAndElementIndex(self):
fuselageIndex = 1
segmentIndex = 2
ret = self.tigl.fuselageGetStartSectionAndElementIndex(fuselageIndex, segmentIndex)
self.assertEqual(ret,(2,1))
def test_fuselageGetEndSectionAndElementIndex(self):
fuselageIndex = 1
segmentIndex = 1
ret = self.tigl.fuselageGetEndSectionAndElementIndex(fuselageIndex, segmentIndex)
self.assertEqual(ret,(2,1))
def test_fuselageGetProfileName(self):
fuselageIndex = 1
sectionIndex = 1
elementIndex = 1
ret = self.tigl.fuselageGetProfileName(fuselageIndex, sectionIndex, elementIndex)
self.assertEqual(ret,"Circle")
def test_fuselageGetUID(self):
fuselageIndex = 1
ret = self.tigl.fuselageGetUID(fuselageIndex)
self.assertEqual(ret,'D150_VAMP_FL1')
def test_fuselageGetSegmentUID(self):
fuselageIndex = 1
segmentIndex = 1
ret = self.tigl.fuselageGetSegmentUID(fuselageIndex, segmentIndex)
self.assertEqual(ret,'D150_VAMP_FL1_Seg1')
def test_fuselageGetSectionUID(self):
fuselageIndex = 1
sectionIndex = 1
ret = self.tigl.fuselageGetSectionUID(fuselageIndex, sectionIndex)
self.assertEqual(isinstance(ret,str),True)
def test_fuselageGetSymmetry(self):
fuselageIndex = 1
ret = self.tigl.fuselageGetSymmetry(fuselageIndex)
self.assertEqual(ret,0)
## Raises TiglException: TIGL_ERROR (1) (both give the same Error)
## def test_componentIntersectionPoint(self):
## print("IntersectionPoint")
## componentUidOne = self.tigl.fuselageGetUID(1)
## componentUidTwo = self.tigl.wingGetUID(1)
## lineID = 1
## eta = 0.5
## ret = self.tigl.componentIntersectionPoint(componentUidOne, componentUidTwo, lineID, eta)
## self.assertEqual((len(ret)==3),True)
##
## def test_componentIntersectionLineCount(self):
## print("IntersectionLine")
## componentUidOne = self.tigl.fuselageGetUID(1)
## componentUidTwo = self.tigl.wingGetUID(1)
## ret = self.tigl.componentIntersectionLineCount(componentUidOne, componentUidTwo)
def test_wingGetVolume(self):
wingIndex = 1
ret = self.tigl.wingGetVolume(wingIndex)
self.assertEqual(isinstance(ret,float),True)
def test_wingGetSegmentVolume(self):
wingIndex = 1
segmentIndex = 1
ret = self.tigl.wingGetSegmentVolume(wingIndex, segmentIndex)
self.assertEqual(isinstance(ret,float),True)
def test_fuselageGetSegmentVolume(self):
fuselageIndex = 1
segmentIndex = 1
ret = self.tigl.fuselageGetSegmentVolume(fuselageIndex, segmentIndex)
self.assertEqual(isinstance(ret,float),True)
def test_wingGetSurfaceArea(self):
wingIndex = 1
ret = self.tigl.wingGetSurfaceArea(wingIndex)
self.assertEqual(isinstance(ret,float),True)
def test_wingGetSegmentSurfaceArea(self):
wingIndex = 1
segmentIndex = 1
ret = self.tigl.wingGetSegmentSurfaceArea(wingIndex, segmentIndex)
self.assertEqual(isinstance(ret,float),True)
def test_fuselageGetSegmentSurfaceArea(self):
fuselageIndex = 1
segmentIndex = 1
ret = self.tigl.fuselageGetSegmentSurfaceArea(fuselageIndex, segmentIndex)
self.assertEqual(isinstance(ret,float),True)
def test_componentGetHashCode(self):
componentUID = self.tigl.wingGetUID(1)
ret = self.tigl.componentGetHashCode(componentUID)
self.assertEqual(isinstance(ret,int),True)
def test_configurationGetLength(self):
ret = self.tigl.configurationGetLength()
self.assertEqual(isinstance(ret,float),True)
def test_wingGetSpan(self):
wingUID = self.tigl.wingGetUID(1)
ret = self.tigl.wingGetSpan(wingUID)
self.assertEqual(isinstance(ret,float),True)
    def test_wingGetSurfaceAreaValue(self):
area = self.tigl.wingGetSurfaceArea(1);
self.assertGreater(area, 125.)
self.assertLess(area, 135.)
def test_wingGetReferenceArea(self):
area = self.tigl.wingGetReferenceArea(1, TiglSymmetryAxis.TIGL_X_Y_PLANE);
self.assertGreater(area, 60.)
self.assertLess(area, 70.)
def test_exportMeshedWingVTKByIndex(self):
self.tigl.exportMeshedWingVTKByIndex(1, 'TestData/export/D150modelID_wing1_python.vtp', 0.01)
###### SLOW Function, basically fuselage based due to complex fuselage geometry
def test_MergedTests(self):
if skipSlowFunctions:
return
print( "Slow Functions (14min)")
#exportMeshedFuselageSTL
fuselageIndex = 1
filenamePtr = "TestData/export/export2.stl"
deflection = 0.01
self.tigl.exportMeshedFuselageSTL(fuselageIndex, filenamePtr, deflection)
# Fuselage VTK by Index
fuselageIndex = 1
filenamePtr = "TestData/export/export2.vtk"
deflection = 0.01
self.tigl.exportMeshedFuselageVTKByIndex(fuselageIndex, filenamePtr, deflection)
# Fuselage VTk by UID
fuselageUID = "D150_VAMP_FL1"
filenamePtr = "TestData/export/export3.vtk"
deflection = 0.01
self.tigl.exportMeshedFuselageVTKByUID(fuselageUID, filenamePtr, deflection)
# Fuselage Simple-VTK by UID
fuselageUID = "D150_VAMP_FL1"
filenamePtr ="TestData/export/export6.vtk"
deflection = 0.01
self.tigl.exportMeshedFuselageVTKSimpleByUID(fuselageUID, filenamePtr, deflection)
# Distance to ground
fuselageUID = self.tigl.fuselageGetUID(1)
axisPntX = 0
axisPntY = 0
axisPntZ = 10
axisDirX = 0
axisDirY = 0
axisDirZ = 1
angle = 0
ret = self.tigl.fuselageGetMinumumDistanceToGround(fuselageUID, axisPntX, axisPntY, axisPntZ, axisDirX, axisDirY, axisDirZ, angle)
# Volume
ret = self.tigl.fuselageGetVolume(fuselageIndex)
self.assertEqual(isinstance(ret,float),True)
# Wetted Area
print("50%")
wingUID = self.tigl.wingGetUID(1)
ret = self.tigl.wingGetWettedArea(wingUID)
self.assertEqual(isinstance(ret,float),True)
# Fuselage Surface Area
fuselageIndex = 1
ret = self.tigl.fuselageGetSurfaceArea(fuselageIndex)
self.assertEqual(isinstance(ret,float),True)
print("100%")
# ----------------------------------------------------------------------- #
if __name__ == '__main__':
unittest.main()
|
def tipe_rumah(tipe):
    # Minimum down payment is 20% of the house price.
    dp = 20*(tipe/100)
    print("Minimum down payment: Rp {:,}".format(int(dp)))
    valid = True
    while valid:
        try:
            print("Amount of down payment to pay")
            dp_rumah = int(input("enter without dots or commas, e.g. 6000000 : "))
            if dp_rumah < dp:
                print("The down payment must be at least {}".format(dp))
                valid = True
            else:
                valid = False
        except ValueError:
            print("The down payment must be a number")
    bayar_dp = tipe - dp_rumah
    valid1 = True
    while valid1:
        try:
            anggsuran = int(input("Installment period (11/18/24) months : "))
            if anggsuran == 11 or anggsuran == 18 or anggsuran == 24:
                bayar_anggsuran = bayar_dp / anggsuran
                bayar_anggsuran = round(bayar_anggsuran)
                print("Monthly payment for {} months is Rp {:,}".format(anggsuran, bayar_anggsuran))
                valid1 = False
            else:
                print("{} is not an available installment period".format(anggsuran))
                valid1 = True
        except ValueError:
            print("The installment period must be a number")
print("House Type \t Price")
print("    A      \t 120,000,000")
print("    B      \t 110,000,000")
print("    C      \t 100,000,000")
print()
validasi = True
while validasi:
    type_rumah = input("Choose a house type : ")
    print()
    print("You chose house type {}".format(type_rumah))
    type_a = 120000000
    type_b = 110000000
    type_c = 100000000
    if type_rumah.upper() == "A":
        tipe_rumah(type_a)
        validasi = False
    elif type_rumah.upper() == "B":
        tipe_rumah(type_b)
        validasi = False
    elif type_rumah.upper() == "C":
        tipe_rumah(type_c)
        validasi = False
    else:
        print("That house type is not among the options")
        validasi = True
|
# Generated by Django 3.1.7 on 2021-04-03 08:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parking', '0003_auto_20210328_1041'),
]
operations = [
migrations.AddField(
model_name='parkingspot',
name='destination',
field=models.CharField(blank=True, max_length=30, null=True),
),
]
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data classes for job-related abstractions."""
import abc
from typing import Any, Awaitable, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, TypeVar, Union
import attr
from xmanager.xm import pattern_matching
from xmanager.xm import utils
UserArgs = Union[Mapping, Sequence, 'SequentialArgs']
class SequentialArgs:
"""A sequence of positional and keyword arguments for a binary.
Unix command line arguments are just a list of strings. But it is very common
to simulate keyword arguments in a --key=value form. It is not uncommon to
only have keyword arguments. Therefore we allow providing args as:
Dicts:
{'foo': 'space bar', 'with_magic': True} -> --foo='space bar' --with_magic
Argument order is preserved.
Lists:
['--foo', 'space bar'] -> --foo 'space bar'
  SequentialArgs (which allows representing a mix of the two above):
xm.merge_args({'foo': 'bar'}, ['--'], {'n': 16}) -> --foo=bar -- --n=16
  SequentialArgs provides convenient merging semantics: if a value is given
  for an existing keyword argument, it will be overridden rather than appended,
  which allows specifying default values and overriding them later:
xm.merge_args({'foo': '1', 'bar': '42'}, {'foo': '2'}) -> --foo=2 --bar=42
  SequentialArgs is immutable, but you can get a copy with updated values:
args = xm.merge_args({'foo': '1', 'bar': '42'})
args = xm.merge_args(args, {'foo': '2'})
We only allow appending new arguments (positional and keyword) and overriding
keyword arguments. Removal and inserting to the middle is not supported.
"""
@attr.s(auto_attribs=True)
class _RegularItem:
value: Any
@attr.s(auto_attribs=True)
class _KeywordItem:
name: str
  def __init__(self) -> None:
    """Constructs an empty SequentialArgs.
Prefer using xm.merge_args to construct SequentialArgs objects.
"""
self._items: List[Union[SequentialArgs._RegularItem,
SequentialArgs._KeywordItem]] = []
self._kwvalues: Dict[str, Any] = {}
def _ingest_regular_item(self, value: Any) -> None:
self._items.append(SequentialArgs._RegularItem(value))
def _ingest_keyword_item(self, name: str, value: Any) -> None:
if name not in self._kwvalues:
self._items.append(SequentialArgs._KeywordItem(name))
self._kwvalues[name] = value
def _merge_from(self, args: 'SequentialArgs') -> None:
"""Merges another instance of SequentialArgs into self."""
def import_regular_item(item: SequentialArgs._RegularItem):
self._ingest_regular_item(item.value)
def import_keyword_item(item: SequentialArgs._KeywordItem):
self._ingest_keyword_item(item.name, args._kwvalues[item.name]) # pylint: disable=protected-access
matcher = pattern_matching.match(
import_regular_item,
import_keyword_item,
)
for item in args._items: # pylint: disable=protected-access
matcher(item)
@staticmethod
def from_collection(collection: Optional[UserArgs]) -> 'SequentialArgs':
"""Populates a new instance from a given collection."""
result = SequentialArgs()
if collection is None:
return result
def check_for_string(args: str) -> None:
raise ValueError(
f'Tried to construct xm.SequentialArgs from a string: {args!r}. '
f'Wrap it in a list: [{args!r}] to make it a single argument.')
def import_sequential_args(args: SequentialArgs) -> None:
result._merge_from(args) # pylint: disable=protected-access
def import_mapping(collection: Mapping[Any, Any]) -> None:
for key, value in collection.items():
result._ingest_keyword_item(str(key), value) # pylint: disable=protected-access
def import_sequence(collection: Sequence[Any]) -> None:
for value in collection:
result._ingest_regular_item(value) # pylint: disable=protected-access
matcher = pattern_matching.match(check_for_string, import_sequential_args,
import_mapping, import_sequence)
matcher(collection)
return result
def to_list(
self,
escaper: Callable[[Any], str],
kwargs_joiner: Callable[[str, str], str] = utils.trivial_kwargs_joiner
) -> List[str]:
"""Exports items as a list ready to be passed into the command line."""
def export_regular_item(item: SequentialArgs._RegularItem) -> str:
return escaper(item.value)
def export_keyword_item(item: SequentialArgs._KeywordItem) -> str:
value = self._kwvalues[item.name]
if isinstance(value, bool):
return escaper(f"--{'' if value else 'no'}{item.name}")
else:
return kwargs_joiner(escaper(f'--{item.name}'), escaper(value))
matcher = pattern_matching.match(
export_regular_item,
export_keyword_item,
)
return [matcher(item) for item in self._items]
def to_dict(self, kwargs_only: bool = False) -> Dict[str, Any]:
"""Exports items as a dictionary.
Args:
kwargs_only: Whether to skip positional arguments.
Returns:
The sought dictionary.
"""
if kwargs_only:
return self._kwvalues
def export_regular_item(
item: SequentialArgs._RegularItem) -> Tuple[str, Any]:
return (str(item.value), True)
def export_keyword_item(
item: SequentialArgs._KeywordItem) -> Tuple[str, Any]:
return (item.name, self._kwvalues[item.name])
matcher = pattern_matching.match(
export_regular_item,
export_keyword_item,
)
return dict([matcher(item) for item in self._items])
def __eq__(self, other) -> bool:
return isinstance(other, SequentialArgs) and all([
self._items == other._items,
self._kwvalues == other._kwvalues,
])
def __repr__(self) -> str:
return f"[{', '.join(self.to_list(repr))}]"
def merge_args(*operands: Union[SequentialArgs, UserArgs]) -> SequentialArgs:
"""Merges several arguments collections into one left-to-right."""
result = SequentialArgs()
for operand in operands:
if not isinstance(operand, SequentialArgs):
operand = SequentialArgs.from_collection(operand)
result._merge_from(operand) # pylint: disable=protected-access
return result
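# Illustrative sketch of the merging semantics described in SequentialArgs
# (assuming the default kwargs joiner renders --key=value, as the docstring
# examples above indicate; `str` is used as a trivial escaper for display):
#   args = merge_args({'batch_size': 16, 'verbose': True}, ['--'], {'batch_size': 32})
#   args.to_list(str)  # -> ['--batch_size=32', '--verbose', '--']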
class ExecutableSpec(abc.ABC):
"""Executable specification describes what code / computation to run.
Use one of the functions declared in xm/packagables.py to create a spec:
* xm.binary - a prebuilt executable program.
* xm.bazel_binary - an executable built with Bazel.
* xm.container - a prebuilt Docker container.
* xm.bazel_container - a Docker container built with Bazel.
* xm.python_container - a Docker container running python code.
* xm.dockerfile_container - a Docker container built with dockerfile.
An executable spec must be turned into an Executable using
Experiment.package() in order to be used in a Job.
WARNING: `ExecutableSpec`s are supposed to be implementation-agnostic. That
means there should be no backend-specific class inheriting `ExecutableSpec`.
"""
@property
@abc.abstractmethod
def name(self) -> str:
raise NotImplementedError
@attr.s(auto_attribs=True)
class Executable(abc.ABC):
"""Executable describes the final location of a packaged executable spec.
An executable depends on the executable specification and the executor
specification. Experiment's implementation knows how to handle each type of
executable.
Attributes:
name: An automatically populated name for the executable. Used for assigning
default names to `Job`s.
"""
name: str
class ExecutorSpec(abc.ABC):
"""Executor spec describes the location of the runtime environment.
For a list of supported ExecutorSpecs see a list of executors below.
"""
class Executor(abc.ABC):
"""Executor describes the runtime environment of a Job.
Concrete supported executors are listed in xm_local/executors.py:
* xm_local.Local
* xm_local.Vertex
* xm_local.Kubernetes
"""
@classmethod
@abc.abstractmethod
def Spec(cls) -> ExecutorSpec: # pylint: disable=invalid-name
raise NotImplementedError
@attr.s(auto_attribs=True)
class Packageable:
"""Packageable describes what to build and its static parameters."""
executable_spec: ExecutableSpec
executor_spec: ExecutorSpec
args: SequentialArgs = attr.ib(
factory=list, converter=SequentialArgs.from_collection) # pytype: disable=annotation-type-mismatch
env_vars: Dict[str, str] = attr.ib(converter=dict, default=attr.Factory(dict))
class Constraint(abc.ABC):
"""Constraint describes the requirements for where a job group can run.
Some examples of constraints include:
* same virtual machine;
* same virtual private Cloud subnetwork;
* same network fabric;
* same geographic location.
"""
# Job generators are async functions returning None.
# Pylint doesn't distinguish async and sync contexts so Optional[Awaitable] has
# to be used to accommodate both cases.
JobGeneratorType = Callable[..., Optional[Awaitable]]
JobType = Union['Job', 'JobGroup', JobGeneratorType]
@attr.s(auto_attribs=True)
class Job:
"""Job describes a unit of computation to be run.
Attributes:
executable: What to run -- one of `xm.Experiment.package` results.
executor: Where to run -- one of `xm.Executor` subclasses.
name: Name of the job. Must be unique within the context (work unit). By
default it is constructed from the executable. Used for naming related
entities such as newly created containers.
args: Command line arguments to pass. This can be dict, list or
xm.SequentialArgs. Dicts are most convenient for keyword flags.
{'batch_size': 16} is passed as --batch_size=16. If positional arguments
are needed one can use a list or xm.SequentialArgs.
env_vars: Environment variables to apply.
"""
executable: Executable
executor: Executor
name: Optional[str] = None
args: SequentialArgs = attr.ib(
factory=list, converter=SequentialArgs.from_collection) # pytype: disable=annotation-type-mismatch
env_vars: Dict[str, str] = attr.Factory(dict)
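# Illustrative sketch of constructing a Job (names here are hypothetical; a real
# `executable` comes from Experiment.package() and `executor` from an Executor
# subclass such as those listed in xm_local/executors.py):
#   job = Job(executable=executable, executor=executor,
#             args={'batch_size': 16}, env_vars={'SEED': '1'})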
class JobGroup:
"""JobGroup describes a set of jobs that run under shared constraints.
Use named arguments to give jobs meaningful names:
```
JobGroup(
learner=Job(learner_executable, executor),
actor=Job(actor_executable, executor),
)
```
JobGroups provide the gang scheduling concept: Jobs inside them would be
scheduled / descheduled simultaneously. Note that schedulers may not always be
able to enforce that.
  JobGroups may include more fine-grained constraints:
```
JobGroup(
learner=Job(tpu_learner_executable, executor),
preprocessor=Job(preprocessor_executable, executor),
constraints=[xm_impl.SameMachine()],
)
```
To express sophisticated requirements JobGroups can be nested:
```
JobGroup(
eval=Job(eval_executable, executor),
colocated_learner_and_actor=JobGroup(
learner=Job(tpu_learner_executable, executor),
actor=Job(actor_executable, executor),
constraints=[xm_impl.SameMachine()],
),
)
```
Attributes:
jobs: A mapping of names to jobs.
constraints: A list of additional scheduling constraints.
"""
jobs: Dict[str, JobType]
constraints: List[Constraint]
def __init__(self,
*,
constraints: Optional[Sequence[Constraint]] = None,
**jobs: JobType) -> None:
"""Builds a JobGroup.
Args:
constraints: List of additional scheduling constraints. Keyword only arg.
**jobs: Jobs / job groups that constitute the group passed as kwargs.
"""
self.jobs = jobs
self.constraints = list(constraints) if constraints else []
JobTypeVar = TypeVar('JobTypeVar', Job, JobGroup, JobGeneratorType)
|
import json
import logging
import os
import re
import string
import time
import matplotlib.pyplot as pyplot
import nltk
from nltk.stem.snowball import SnowballStemmer
from sklearn.cluster import KMeans
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE
# from aarhus.aarhus import custom_stopwords
logging.basicConfig(format='%(asctime)s : %(levelname)s :: %(message)s', level=logging.DEBUG)
def strip_proppers(text):
    # first tokenize by sentence, then by word to ensure that punctuation is caught as its own token
tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)
if word.islower()]
return "".join(
[" " + i if not i.startswith("'") and i not in string.punctuation else i for i in tokens]).strip()
def tokenize_and_stem(text):
    # first tokenize by sentence, then by word to ensure that punctuation is caught as its own token
tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
result = [stemmer.stem(t) for t in filtered_tokens]
return result
def clean_address(arg_value):
result = str(arg_value)
for token in clean_address_tokens:
if token in result:
result = result.replace(token, ' ')
return result.lower().strip()
start_time = time.time()
stemmer = SnowballStemmer("english")
# touchup_list = custom_stopwords.get_specific_stopwords()
# https://groups.google.com/forum/#!topic/microsoft.public.outlookexpress.general/oig7-xNFISg
clean_address_tokens = ['=?us-ascii?Q?', '=0D=0A_=28', '=?utf-8?Q?', '=29?=', '=0D=0A']
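# The script reads its settings from sklearn_kmeans_clustering.json; a
# hypothetical example with illustrative values (keys match the reads below):
# {
#     "input_folder": "/data/text_corpus",
#     "max_file_count": 1000,
#     "cluster_count": 20,
#     "random_state": 42,
#     "max_df": 0.8,
#     "min_df": 2,
#     "max_features": 10000,
#     "n_components": 50
# }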
with open('./sklearn_kmeans_clustering.json') as data_file:
data = json.load(data_file)
logging.debug(data)
input_folder = data['input_folder']
max_file_count = data['max_file_count']
# model_file_name = data['model_file_name']
cluster_count = data['cluster_count']
random_state = data['random_state']
max_df = data['max_df']
min_df = data['min_df']
max_features = data['max_features']
n_components = data['n_components']
target_encoding = 'utf-8'
# stopwords = custom_stopwords.get_stopwords()
file_names = [os.path.join(root, current) for root, subdirectories, files in os.walk(input_folder) for current in files]
logging.debug('we have %d files', len(file_names))
# truncate
if max_file_count < len(file_names) and max_file_count != -1:
file_names = file_names[:max_file_count]
logging.debug('we are using %d files', len(file_names))
documents = [open(file_name, 'r').read() for file_name in file_names]
tfidf_vectorizer = TfidfVectorizer(max_df=max_df, max_features=max_features, min_df=min_df, stop_words='english',
use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 2), decode_error='ignore',
strip_accents='ascii')
tfidf_matrix = tfidf_vectorizer.fit_transform(documents)
X_reduced = TruncatedSVD(n_components=n_components, random_state=random_state).fit_transform(tfidf_matrix)
X_embedded = TSNE(n_components=2, perplexity=40, verbose=2).fit_transform(X_reduced)
fig = pyplot.figure(figsize=(10, 10))
ax = pyplot.axes(frameon=False)
pyplot.setp(ax, xticks=(), yticks=())
pyplot.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=0.9,
wspace=0.0, hspace=0.0)
pyplot.scatter(X_embedded[:, 0], X_embedded[:, 1], marker="x")
pyplot.show()
model = KMeans(n_clusters=cluster_count, random_state=random_state)
model.fit(tfidf_matrix)
#
# # todo write terms to a file for later viewing
# terms = tfidf_vectorizer.get_feature_names()
# logging.debug('we have %d terms/feature names' % len(terms))
# terms_out_file = 'sklearn_kmeans_terms.csv'
# with open(terms_out_file, 'w') as terms_out_fp:
# for item in terms:
# terms_out_fp.write("%s\n" % item)
# logging.debug('wrote terms to %s' % terms_out_file)
#
# clusters = model.labels_.tolist()
#
# logging.debug('files: %d cluster values: %d' % (len(file_names), len(clusters)))
# cluster_counter = Counter()
# for item in clusters:
# cluster_counter[item] += 1
#
# logging.debug(cluster_counter)
# logging.debug('smallest cluster has %d items; largest cluster has %d items' % (
# min(cluster_counter.values()), max(cluster_counter.values())))
#
|
import asyncio
from inspect import isawaitable
import pytest
from sanic_routing.exceptions import NotFound
from sanic import Blueprint
from sanic.exceptions import InvalidSignal, SanicException
def test_add_signal(app):
def sync_signal(*_):
...
app.add_signal(sync_signal, "foo.bar.baz")
assert len(app.signal_router.routes) == 1
def test_add_signal_decorator(app):
@app.signal("foo.bar.baz")
def sync_signal(*_):
...
@app.signal("foo.bar.baz")
async def async_signal(*_):
...
assert len(app.signal_router.routes) == 2
assert len(app.signal_router.dynamic_routes) == 1
@pytest.mark.parametrize(
"signal",
(
"<foo>.bar.bax",
"foo.<bar>.baz",
"foo",
"foo.bar",
"foo.bar.baz.qux",
),
)
def test_invalid_signal(app, signal):
with pytest.raises(InvalidSignal, match=f"Invalid signal event: {signal}"):
@app.signal(signal)
def handler():
...
@pytest.mark.asyncio
async def test_dispatch_signal_triggers_multiple_handlers(app):
counter = 0
@app.signal("foo.bar.baz")
def sync_signal(*_):
nonlocal counter
counter += 1
@app.signal("foo.bar.baz")
async def async_signal(*_):
nonlocal counter
counter += 1
app.signal_router.finalize()
await app.dispatch("foo.bar.baz")
assert counter == 2
@pytest.mark.asyncio
async def test_dispatch_signal_triggers_triggers_event(app):
counter = 0
@app.signal("foo.bar.baz")
def sync_signal(*args):
nonlocal app
nonlocal counter
group, *_ = app.signal_router.get("foo.bar.baz")
for signal in group:
counter += signal.ctx.event.is_set()
app.signal_router.finalize()
await app.dispatch("foo.bar.baz")
assert counter == 1
@pytest.mark.asyncio
async def test_dispatch_signal_triggers_dynamic_route(app):
counter = 0
@app.signal("foo.bar.<baz:int>")
def sync_signal(baz):
nonlocal counter
counter += baz
app.signal_router.finalize()
await app.dispatch("foo.bar.9")
assert counter == 9
@pytest.mark.asyncio
async def test_dispatch_signal_triggers_with_requirements(app):
counter = 0
@app.signal("foo.bar.baz", condition={"one": "two"})
def sync_signal(*_):
nonlocal counter
counter += 1
app.signal_router.finalize()
await app.dispatch("foo.bar.baz")
assert counter == 0
await app.dispatch("foo.bar.baz", condition={"one": "two"})
assert counter == 1
@pytest.mark.asyncio
async def test_dispatch_signal_triggers_with_context(app):
counter = 0
@app.signal("foo.bar.baz")
def sync_signal(amount):
nonlocal counter
counter += amount
app.signal_router.finalize()
await app.dispatch("foo.bar.baz", context={"amount": 9})
assert counter == 9
@pytest.mark.asyncio
async def test_dispatch_signal_triggers_with_context_fail(app):
counter = 0
@app.signal("foo.bar.baz")
def sync_signal(amount):
nonlocal counter
counter += amount
app.signal_router.finalize()
with pytest.raises(TypeError):
await app.dispatch("foo.bar.baz", {"amount": 9})
@pytest.mark.asyncio
async def test_dispatch_signal_triggers_on_bp(app):
bp = Blueprint("bp")
app_counter = 0
bp_counter = 0
@app.signal("foo.bar.baz")
def app_signal():
nonlocal app_counter
app_counter += 1
@bp.signal("foo.bar.baz")
def bp_signal():
nonlocal bp_counter
bp_counter += 1
app.blueprint(bp)
app.signal_router.finalize()
await app.dispatch("foo.bar.baz")
assert app_counter == 1
assert bp_counter == 1
await bp.dispatch("foo.bar.baz")
assert app_counter == 1
assert bp_counter == 2
@pytest.mark.asyncio
async def test_dispatch_signal_triggers_event(app):
app_counter = 0
@app.signal("foo.bar.baz")
def app_signal():
...
async def do_wait():
nonlocal app_counter
await app.event("foo.bar.baz")
app_counter += 1
app.signal_router.finalize()
await app.dispatch("foo.bar.baz")
waiter = app.event("foo.bar.baz")
assert isawaitable(waiter)
fut = asyncio.ensure_future(do_wait())
await app.dispatch("foo.bar.baz")
await fut
assert app_counter == 1
@pytest.mark.asyncio
async def test_dispatch_signal_triggers_event_on_bp(app):
bp = Blueprint("bp")
bp_counter = 0
@bp.signal("foo.bar.baz")
def bp_signal():
...
async def do_wait():
nonlocal bp_counter
await bp.event("foo.bar.baz")
bp_counter += 1
app.blueprint(bp)
app.signal_router.finalize()
signal_group, *_ = app.signal_router.get(
"foo.bar.baz", condition={"blueprint": "bp"}
)
await bp.dispatch("foo.bar.baz")
waiter = bp.event("foo.bar.baz")
assert isawaitable(waiter)
fut = asyncio.ensure_future(do_wait())
for signal in signal_group:
signal.ctx.event.set()
await fut
assert bp_counter == 1
def test_bad_finalize(app):
counter = 0
@app.signal("foo.bar.baz")
def sync_signal(amount):
nonlocal counter
counter += amount
with pytest.raises(
RuntimeError, match="Cannot finalize signals outside of event loop"
):
app.signal_router.finalize()
assert counter == 0
@pytest.mark.asyncio
async def test_event_not_exist(app):
with pytest.raises(NotFound, match="Could not find signal does.not.exist"):
await app.event("does.not.exist")
@pytest.mark.asyncio
async def test_event_not_exist_on_bp(app):
bp = Blueprint("bp")
app.blueprint(bp)
with pytest.raises(NotFound, match="Could not find signal does.not.exist"):
await bp.event("does.not.exist")
@pytest.mark.asyncio
async def test_event_not_exist_with_autoregister(app):
app.config.EVENT_AUTOREGISTER = True
try:
await app.event("does.not.exist", timeout=0.1)
except asyncio.TimeoutError:
...
@pytest.mark.asyncio
async def test_dispatch_signal_triggers_non_exist_event_with_autoregister(app):
@app.signal("some.stand.in")
async def signal_handler():
...
app.config.EVENT_AUTOREGISTER = True
app_counter = 0
app.signal_router.finalize()
async def do_wait():
nonlocal app_counter
await app.event("foo.bar.baz")
app_counter += 1
fut = asyncio.ensure_future(do_wait())
await app.dispatch("foo.bar.baz")
await fut
assert app_counter == 1
@pytest.mark.asyncio
async def test_dispatch_not_exist(app):
@app.signal("do.something.start")
async def signal_handler():
...
app.signal_router.finalize()
await app.dispatch("does.not.exist")
def test_event_on_bp_not_registered():
bp = Blueprint("bp")
@bp.signal("foo.bar.baz")
def bp_signal():
...
with pytest.raises(
SanicException,
match="<Blueprint bp> has not yet been registered to an app",
):
bp.event("foo.bar.baz")
@pytest.mark.parametrize(
"event,expected",
(
("foo.bar.baz", True),
("server.init.before", False),
("http.request.start", False),
("sanic.notice.anything", True),
),
)
def test_signal_reservation(app, event, expected):
if not expected:
with pytest.raises(
InvalidSignal,
match=f"Cannot declare reserved signal event: {event}",
):
app.signal(event)(lambda: ...)
else:
app.signal(event)(lambda: ...)
|
from __future__ import unicode_literals
from . import BaseProvider
from . import date_time
import random
import hashlib
class Provider(BaseProvider):
language_codes = ('cn', 'de', 'en', 'es', 'fr', 'it', 'pt', 'ru')
@classmethod
def boolean(cls, chance_of_getting_true=50):
return random.randint(1, 100) <= chance_of_getting_true
@classmethod
def null_boolean(cls):
return {
0: None,
1: True,
-1: False
}[random.randint(-1, 1)]
@classmethod
def md5(cls, raw_output=False):
"""
        Calculates the md5 hash of a random string
:example 'cfcd208495d565ef66e7dff9f98764da'
"""
res = hashlib.md5(str(random.random()).encode('utf-8'))
if raw_output:
return res.digest()
return res.hexdigest()
@classmethod
def sha1(cls, raw_output=False):
"""
        Calculates the sha1 hash of a random string
:example 'b5d86317c2a144cd04d0d7c03b2b02666fafadf2'
"""
res = hashlib.sha1(str(random.random()).encode('utf-8'))
if raw_output:
return res.digest()
return res.hexdigest()
@classmethod
def sha256(cls, raw_output=False):
"""
        Calculates the sha256 hash of a random string
:example '85086017559ccc40638fcde2fecaf295e0de7ca51b7517b6aebeaaf75b4d4654'
"""
res = hashlib.sha256(str(random.random()).encode('utf-8'))
if raw_output:
return res.digest()
return res.hexdigest()
def locale(self):
return self.language_code() + '_' + self.country_code()
@classmethod
def country_code(cls):
return cls.random_element(date_time.Provider.countries)['code']
@classmethod
def language_code(cls):
return cls.random_element(cls.language_codes)
|
# -*- coding: utf-8 -*-
import io
from os.path import abspath, dirname, join
from django.apps import apps
from django.template import TemplateDoesNotExist
from django.template.loaders.base import Loader as BaseLoader
class AppTemplateLoader(BaseLoader):
"""
    Template loader which allows specifying the application label from which
    the template should be loaded (for example when extending).
Usage: specify template path as ``<app_label>:<template_path>``
App label should be specified in Django AppConfig. Check
https://docs.djangoproject.com/en/1.8/ref/applications/ for more details
Example ::
{% extends "ralph_admin:admin/base.html" %}
To use this loader add it in your settings ::
TEMPLATES = [
{
...
'OPTIONS': {
...
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'ralph.lib.template.loaders.AppTemplateLoader',
],
...
},
...
},
]
Loader is based on this snippet: https://djangosnippets.org/snippets/1376/
"""
is_usable = True
def get_template_path(self, template_name, template_dirs=None):
"""
Try to split template name by ':' to get app_label
"""
template_parts = template_name.split(":", 1)
if len(template_parts) != 2:
raise TemplateDoesNotExist()
app_label, template_name = template_parts
app = apps.get_app_config(app_label)
app_dir = dirname(app.module.__file__)
template_dir = abspath(join(app_dir, 'templates'))
return join(template_dir, template_name)
def load_template_source(self, template_name, template_dirs=None):
filepath = self.get_template_path(template_name, template_dirs)
try:
with io.open(filepath, encoding=self.engine.file_charset) as fp:
return fp.read(), filepath
except IOError:
raise TemplateDoesNotExist(template_name)
|
import theano
import theano.tensor as tensor
import numpy
import math
import random
import scipy
import time
import os
import denet.common as common
import denet.common.logging as logging
import denet.common.theano_util as theano_util
from denet.layer import AbstractLayer, InitialLayer, get_train
from denet.layer.convolution import ConvLayer
#used for timing info
detect_time=0
detect_num=0
#import c_code optimizations
c_code = common.import_c(os.path.join(os.path.dirname(__file__), "denet_detect.cc"))
assert c_code is not None
class DeNetDetectLayer(AbstractLayer):
type_name = "denet-detect"
def __init__(self, layers, class_num=10, overlap_threshold = 0.5, cost_factor=1.0, bbox_factor=0.0, indfit_factor=0.0,
use_jointfit=False, use_bounded_iou=False, json_param={}):
super().__init__(layer_index=len(layers))
#passthrough layer
self.input = layers[-1].output
self.input_shape = layers[-1].output_shape
self.output = layers[-1].output
self.output_shape = layers[-1].output_shape
self.cost_factor = json_param.get("costFactor", cost_factor)
self.bbox_factor = json_param.get("bboxFactor", bbox_factor)
self.class_num = json_param.get("classNum", class_num)
self.overlap_threshold = json_param.get("overlapThreshold", overlap_threshold)
#Joint Fitness and Bounded IoU additions
self.use_jointfit = json_param.get("useJointFitness", use_jointfit)
self.use_bounded_iou = json_param.get("useBoundedIoU", use_bounded_iou)
self.indfit_factor = json_param.get("fitnessFactor", indfit_factor)
self.use_indfit = (self.indfit_factor > 0.0)
assert not (self.use_indfit and self.use_jointfit), "Cannot enable both fitness methods at once!"
#find sparse / instcount / compare layer
self.sparse_layer = common.find_layers(layers, "denet-sparse", False)
assert self.sparse_layer != None, "Error: Requires denet-sparse layer to be specified before denet-detect layer!"
self.use_bbox_reg = (self.bbox_factor > 0.0)
self.batch_size = self.sparse_layer.batch_size
self.sample_num = self.sparse_layer.sample_num
self.null_class = self.class_num
if self.use_jointfit:
self.fitness_num = 5
self.null_class = self.class_num*self.fitness_num
s0 = self.class_num*self.fitness_num+1
else:
self.fitness_num = 6
self.null_class = self.class_num
s0 = self.class_num+1
#
s1 = 4 if self.use_bbox_reg else 0
s2 = self.fitness_num if self.use_indfit else 0
self.layers = [ConvLayer([InitialLayer(self.input, self.input_shape)], (s0+s1+s2, self.input_shape[1], 1, 1), (1,1), True, "valid", 0.0)]
#class assignment log(Pr(X | sample_j, sample_i)) where X is either the class distribution or the class-fitness distribution for joint fitness
self.det_shape = (self.batch_size, s0, self.sample_num, self.sample_num)
self.det_lh = self.layers[-1].output[:, :s0 ,...]
self.det_pr = theano_util.log_softmax(self.det_lh, axis=[1])
self.det_pr_target = theano.shared(numpy.zeros(self.det_shape, dtype=numpy.float32))
#bbox regression
if self.use_bbox_reg:
self.bbox_shape = (self.batch_size, s1, self.sample_num, self.sample_num)
self.bbox_reg = self.layers[-1].output[:, s0:(s0+s1), ...]
#updated sample bounding boxes
sample_cx = 0.5*(self.sparse_layer.sample_bbox[:,:,:,0] + self.sparse_layer.sample_bbox[:,:,:,2])
sample_cy = 0.5*(self.sparse_layer.sample_bbox[:,:,:,1] + self.sparse_layer.sample_bbox[:,:,:,3])
sample_w = self.sparse_layer.sample_bbox[:,:,:,2] - self.sparse_layer.sample_bbox[:,:,:,0]
sample_h = self.sparse_layer.sample_bbox[:,:,:,3] - self.sparse_layer.sample_bbox[:,:,:,1]
predict_cx = self.bbox_reg[:,0,:,:]*sample_w + sample_cx
predict_cy = self.bbox_reg[:,1,:,:]*sample_h + sample_cy
predict_w = tensor.exp(self.bbox_reg[:,2,:,:])*sample_w
predict_h = tensor.exp(self.bbox_reg[:,3,:,:])*sample_h
predict_x0 = predict_cx - predict_w*0.5
predict_y0 = predict_cy - predict_h*0.5
predict_x1 = predict_cx + predict_w*0.5
predict_y1 = predict_cy + predict_h*0.5
self.bbox_predict = tensor.concatenate([predict_x0[...,None], predict_y0[...,None], predict_x1[...,None], predict_y1[...,None]], axis=len(self.bbox_shape)-1)
#independent fitness distribution
if self.use_indfit:
self.indfit_shape = (self.batch_size, s2, self.sample_num, self.sample_num)
self.indfit_lh = self.layers[-1].output[:,(s0+s1):(s0+s1+s2), ...]
self.indfit_pr = theano_util.log_softmax(self.indfit_lh, axis=[1])
self.indfit_pr_target = theano.shared(numpy.zeros(self.indfit_shape, dtype=numpy.float32))
self.detect_func = None
self.nms_func = None
logging.verbose("Adding", self)
    # parse layer descriptor
def parse_desc(layers, name, tags, params):
if name != "DND":
return False
overlap_threshold = params.get(0, 0.5)
cost_factor = params.get(1, 1.0)
bbox_factor = params.get(2, 0.0)
indfit_factor = params.get(3, 0.0)
use_jointfit = "J" in tags
use_bounded_iou = "B" in tags
layers.append(DeNetDetectLayer(layers, params.get("classNum"), overlap_threshold, cost_factor, bbox_factor, indfit_factor,
use_jointfit, use_bounded_iou))
return True
def import_json(self, json_param):
super().import_json(json_param)
#backward compatibility
if "conv" in json_param:
self.layers[0].import_json(json_param["conv"])
def export_json(self):
json=super().export_json()
json.update({"costFactor" : self.cost_factor,
"bboxFactor" : self.bbox_factor,
"fitnessFactor" : self.indfit_factor,
"useJointFitness" : self.use_jointfit,
"useBoundedIoU" : self.use_bounded_iou,
"classNum": self.class_num,
"overlapThreshold": self.overlap_threshold})
return json
def get_target(self, model, samples, metas):
timer = common.Timer()
#build sample
det_pr = numpy.zeros(self.det_shape, dtype=numpy.float32)
det_pr[:, self.null_class, ...] = 1.0
if self.use_bbox_reg:
bbox_valid = numpy.zeros((self.batch_size, self.sample_num, self.sample_num), dtype=numpy.float32)
bbox_reg = numpy.zeros((self.batch_size, 8, self.sample_num, self.sample_num), dtype=numpy.float32)
bbox_reg[:, 2, ...]=1.0
bbox_reg[:, 3, ...]=1.0
bbox_reg[:, 6, ...]=1.0
bbox_reg[:, 7, ...]=1.0
if self.use_indfit:
indfit_pr = numpy.zeros(self.indfit_shape, dtype=numpy.float32)
indfit_pr[:,0,...] = 1.0
for b, meta in enumerate(metas):
samples = [bbox for _,bbox in self.sparse_layer.sample_bbox_list[b]]
if len(meta["bbox"]) > 0 and len(samples) > 0:
overlap = theano_util.get_overlap_iou(meta["bbox"], samples)
bbox_indexs, sample_indexs = numpy.where(overlap > self.overlap_threshold[0])
for obj,index in zip(bbox_indexs.tolist(), sample_indexs.tolist()):
sample_i = index % self.sparse_layer.sample_num
sample_j = index // self.sparse_layer.sample_num
sample_cls = meta["class"][obj]
sample_bbox = samples[index]
sample_f = (overlap[obj, index] - self.overlap_threshold[0]) / (1.0 - self.overlap_threshold[0])
if self.use_jointfit:
f = max(0, min(int(self.fitness_num*sample_f), self.fitness_num-1))
det_pr[b, sample_cls*self.fitness_num + f, sample_j, sample_i] = 1.0
det_pr[b, self.null_class, sample_j, sample_i] = 0.0
else:
det_pr[b, sample_cls, sample_j, sample_i] = 1.0
det_pr[b, self.null_class, sample_j, sample_i] = 0.0
if self.use_indfit:
f = 1 + int(math.floor((self.fitness_num-1)*sample_f))
f = max(1, min(f, self.fitness_num-1))
indfit_pr[b, 0, sample_j, sample_i] = 0.0
indfit_pr[b, f, sample_j, sample_i] = 1.0
if self.use_bbox_reg:
overlap_max = overlap.argmax(axis=0)
for index in range(len(samples)):
obj = overlap_max[index]
if overlap[obj, index] <= self.overlap_threshold[1]:
continue
sample = samples[index]
target = meta["bbox"][obj]
sample_i = index % self.sparse_layer.sample_num
sample_j = index // self.sparse_layer.sample_num
bbox_valid[b, sample_j, sample_i] = 1.0
bbox_reg[b, 0, sample_j, sample_i] = 0.5*(target[0]+target[2])
bbox_reg[b, 1, sample_j, sample_i] = 0.5*(target[1]+target[3])
bbox_reg[b, 2, sample_j, sample_i] = target[2] - target[0]
bbox_reg[b, 3, sample_j, sample_i] = target[3] - target[1]
bbox_reg[b, 4, sample_j, sample_i] = 0.5*(sample[0]+sample[2])
bbox_reg[b, 5, sample_j, sample_i] = 0.5*(sample[1]+sample[3])
bbox_reg[b, 6, sample_j, sample_i] = sample[2] - sample[0]
bbox_reg[b, 7, sample_j, sample_i] = sample[3] - sample[1]
#normalize probabilities
det_pr /= det_pr.sum(axis=1)[:,None,...]
if self.use_indfit:
indfit_pr /= indfit_pr.sum(axis=1)[:,None,...]
#normalize by number of samples
nfactor = self.sample_num*self.sample_num
det_pr /= nfactor
if self.use_bbox_reg:
bbox_valid /= nfactor
if self.use_indfit:
indfit_pr /= nfactor
#pack indices / values
yt_value = det_pr.flatten()
if self.use_bbox_reg:
yt_value = numpy.concatenate((yt_value, bbox_valid.flatten(), bbox_reg.flatten()))
if self.use_indfit:
yt_value = numpy.concatenate((yt_value, indfit_pr.flatten()))
return numpy.array([], dtype=numpy.int64), yt_value
#
def get_errors(self, yt_index, yt_value):
#unpack indexs and values
shapes = [self.det_shape]
if self.use_bbox_reg:
shapes += [(self.batch_size, self.sample_num, self.sample_num), (self.batch_size, 8, self.sample_num, self.sample_num)]
if self.use_indfit:
shapes += [self.indfit_shape]
v = common.ndarray_unpack(yt_value, shapes)
det_pr = v[0]
index = 1
if self.use_bbox_reg:
bbox_valid, bbox_reg = v[index:(index+2)]
index += 2
if self.use_indfit:
            indfit_pr = v[index]
#Detection Cost:
det_errors = -tensor.sum(det_pr*self.det_pr, axis=1) / math.log(self.det_shape[1])
#Bounding Box Regression Cost:
bbox_errors = None
if self.use_bbox_reg and self.bbox_factor > 0.0:
bbox_target = bbox_reg[:,0:4,...]
bbox_sample = bbox_reg[:,4:8,...]
bbox_errors = tensor.zeros((self.batch_size, self.sample_num, self.sample_num), dtype=numpy.float32)
if self.use_bounded_iou:
target_x = bbox_target[:,0,:,:]
target_y = bbox_target[:,1,:,:]
target_w = bbox_target[:,2,:,:]
target_h = bbox_target[:,3,:,:]
predict_x = 0.5*(self.bbox_predict[:,:,:,0] + self.bbox_predict[:,:,:,2])
predict_y = 0.5*(self.bbox_predict[:,:,:,1] + self.bbox_predict[:,:,:,3])
predict_w = self.bbox_predict[:,:,:,2] - self.bbox_predict[:,:,:,0]
predict_h = self.bbox_predict[:,:,:,3] - self.bbox_predict[:,:,:,1]
dx = target_x - predict_x
dy = target_y - predict_y
eps = 0.001
                #The original paper used 4*dx; the proper implementation is 2*dx
cost_x = tensor.switch(dx >= 0.0, 2*dx / (target_w + dx + eps), -2*dx / (target_w - dx + eps))
cost_y = tensor.switch(dy >= 0.0, 2*dy / (target_h + dy + eps), -2*dy / (target_h - dy + eps))
cost_w = 1.0 - tensor.minimum(target_w / (predict_w + eps), predict_w / (target_w + eps))
cost_h = 1.0 - tensor.minimum(target_h / (predict_h + eps), predict_h / (target_h + eps))
cost = tensor.concatenate([cost_x[:,None,:,:], cost_y[:,None,:,:], cost_w[:,None,:,:], cost_h[:,None,:,:]], axis=1)
bbox_errors += self.bbox_factor*bbox_valid*tensor.sum(theano_util.smooth_L1(cost), axis=1)
else:
#standard Fast R-CNN style cost
tx = (bbox_target[:, 0, ...] - bbox_sample[:, 0, ...]) / bbox_sample[:, 2, ...]
ty = (bbox_target[:, 1, ...] - bbox_sample[:, 1, ...]) / bbox_sample[:, 3, ...]
tw = tensor.log(bbox_target[:, 2, ...] / bbox_sample[:, 2, ...])
th = tensor.log(bbox_target[:, 3, ...] / bbox_sample[:, 3, ...])
t = tensor.concatenate([tx[:,None, ...], ty[:,None, ...], tw[:,None, ...], th[:,None, ...]], axis=1)
dt = t - self.bbox_reg
bbox_errors += self.bbox_factor*bbox_valid*tensor.sum(theano_util.smooth_L1(dt), axis=1)
indfit_errors = None
if self.use_indfit:
indfit_errors = -tensor.sum(indfit_pr*self.indfit_pr, axis=1) / math.log(self.fitness_num)
return det_errors, bbox_errors, indfit_errors
#return negative log-likelihood training cost
def cost(self, yt_index, yt_value):
det_errors, bbox_errors, indfit_errors = self.get_errors(yt_index, yt_value)
cost = tensor.as_tensor_variable(0.0)
if not det_errors is None:
cost += theano.printing.Print('DET Cost:')(self.cost_factor * det_errors.sum() / self.batch_size)
if not bbox_errors is None:
cost += theano.printing.Print('BBOX Cost:')(self.bbox_factor * bbox_errors.sum() / self.batch_size)
if not indfit_errors is None:
cost += theano.printing.Print('Ind. Fitness Cost:')(self.indfit_factor * indfit_errors.sum() / self.batch_size)
return cost
#returns a list of most likely (class,bounding boxes)
def get_detections(self, model, data_x, data_m, params):
pr_threshold = params.get("prThreshold", 0.01)
nms_threshold = params.get("nmsThreshold", 0.5)
corner_threshold = params.get("cornerThreshold", self.sparse_layer.corner_threshold)
corner_max = params.get("cornerMax", 1024)
use_soft_nms = params.get("useSoftNMS", 0) == 1
t = (pr_threshold, nms_threshold, corner_threshold, corner_max)
logging.verbose("Using detection params - pr threshold: %f, nms threshold: %f, corner_threshold: %f, corner_max: %i"%t)
first_detect = False
if self.detect_func is None:
#get all model outputs
outputs=[]
if self.use_jointfit:
det_fit = self.det_pr
det_fit_null = det_fit[:, self.null_class, :, :]
det_fit = det_fit[:,:self.class_num*self.fitness_num, :, :]
det_fit = det_fit.reshape((self.batch_size, self.class_num, self.fitness_num, self.sample_num, self.sample_num))
det_fit_pr = tensor.exp(det_fit)
m = tensor.max(det_fit, axis=2)
det_pr = m + tensor.log(tensor.sum(tensor.exp(det_fit - m[:,:,None,:,:]), axis=2))
det_pr = tensor.concatenate([det_pr, det_fit_null[:,None,:,:]], axis=1)
outputs.append(det_pr)
val = [self.overlap_threshold[0] + i*(1.0 - self.overlap_threshold[0])/self.fitness_num for i in range(self.fitness_num)]
fitness_val = theano.shared(numpy.array(val, dtype=numpy.float32))
fitness = tensor.log(tensor.sum(det_fit_pr*fitness_val[None,None,:,None,None], axis=2))
outputs.append(fitness)
else:
outputs.append(self.det_pr)
if self.use_bbox_reg:
outputs.append(self.bbox_predict)
if self.use_indfit:
outputs.append(tensor.exp(self.indfit_pr))
logging.info("Building detection function")
self.detect_func = theano.function([model.input], outputs, givens=[(get_train(), tensor.cast(0, 'int8'))], on_unused_input='ignore')
logging.verbose("Exporting graph...")
with open("detect_graph.txt", "w") as f:
theano.printing.debugprint(self.detect_func, file=f, print_type=True)
first_detect = True
        #get sampling bounding boxes
logging.verbose("Detecting sample bboxs (%.2f)"%corner_threshold)
timer = common.Timer()
sample_bboxs = self.sparse_layer.get_samples(data_x, train=False, store_shared=True)
timer.mark()
logging.verbose("Found sample bboxs: {}".format([len(bbox) for bbox in sample_bboxs]))
        #upload sampling bounding boxes
bboxs = self.sparse_layer.set_samples(sample_bboxs)
timer.mark()
        #classify sampling bounding boxes
r = list(self.detect_func(data_x))
#get outputs
if self.use_jointfit:
det_pr = r[0]
fitness = r[1]
r_index = 2
else:
det_pr = r[0]
fitness = numpy.copy(det_pr)
r_index = 1
if self.use_bbox_reg:
bboxs = r[r_index]
r_index += 1
else:
bboxs = self.sparse_layer.get_bbox_array(sample_bboxs)
if self.use_indfit:
indfit_pr = r[r_index]
fitness_val = numpy.array([0.0] + [self.overlap_threshold[0] + i * (1.0 - self.overlap_threshold[0])/(self.fitness_num-1) for i in range(self.fitness_num-1)])
fitness_exp = numpy.sum(indfit_pr*fitness_val[None,:,None,None], axis=1).astype(numpy.float32)
fitness += numpy.log(fitness_exp)[:,None,:,:]
r_index += 1
timer.mark()
sample_bbox_num = [len(s) for s in sample_bboxs]
detlists = c_code.build_detections_nms(pr_threshold, nms_threshold, use_soft_nms, det_pr, fitness, bboxs, sample_bbox_num)
timer.mark()
logging.verbose("Found detections:", [len(detlist) for detlist in detlists])
logging.verbose("FPS=%.1f, Timing (ms) - get samples: %i, upload: %i, classify: %i, build+nms %i"%tuple([self.batch_size / timer.current()] + timer.deltas_ms()))
if not first_detect:
global detect_time, detect_num
detect_time += timer.current()
detect_num += self.batch_size
logging.info("Average FPS=%.1f"%(detect_num / detect_time))
#results format
results=[]
for i, detlist in enumerate(detlists):
results.append({"detections":detlist, "meta":data_m[i]})
return results
|
import os
import base64
import plugin.databaseConnect as database
from pathlib import Path
from PIL import Image
##Class which is used to handle profile pictures##
class imageHandler:
def __init__(self, user_id):
self.user_id = user_id
self.p = Path(__file__).parents[1]
self.default_path = (str(self.p) + "\\resources\\profiles\\noData.jpg")
self.path = (str(self.p) + "\\resources\\profiles\\"+user_id+".jpg")
    ##Read an image file from a path; if the picture is accepted, write it to the database##
    def readImageFile(self, path):
        with open(path, 'rb') as image_file:
            blob_value = image_file.read()
        blob_data = base64.encodebytes(blob_value)
        size = os.path.getsize(path)
        im = Image.open(path)
        dimension = im.size
if(size > 200000):
return "ERRORSIZE"
if(dimension[0] < 225 or dimension[0] > 235 or dimension[1] < 275 or dimension[1]> 285):
return "ERRORDIMENSION"
else:
return self.writeData(blob_data)
    ##Write our picture data to our database##
def writeData(self, blob):
db = database.databaseUser()
imageWrite = db.editProfilePicture(blob, self.user_id)
db.disconnect()
return imageWrite
    ##Create an image file from the data stored in the database##
    def createImageFile(self):
        db = database.databaseUser()
        blob = db.getProfilePicture(self.user_id)
        if blob[0].picture is not None:
            with open(self.path, "wb") as image_file:
                image_file.write(base64.decodebytes(blob[0].picture))
        else:
            self.path = self.default_path
##We delete data as soon as we use it to protect user data and privacy##
def deleteData(self):
try:
self.path = (str(self.p) + "\\resources\\profiles\\"+self.user_id+".jpg")
os.remove(self.path)
except FileNotFoundError:
pass
    ##Get the file location of the profile picture##
def getPath(self):
return self.path
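##Illustrative usage sketch (not part of the class): the user id and source image path below are hypothetical placeholders##
if __name__ == "__main__":
    handler = imageHandler("user123")                       # hypothetical user id
    status = handler.readImageFile("C:\\temp\\photo.jpg")   # hypothetical source image
    if status not in ("ERRORSIZE", "ERRORDIMENSION"):
        # materialise the stored picture on disk, use it, then remove it again
        handler.createImageFile()
        print(handler.getPath())
        handler.deleteData()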
|
from __future__ import unicode_literals
from django.core.management.base import NoArgsCommand
from codebase.actions import stop_eclipse
class Command(NoArgsCommand):
help = "Stop Eclipse"
def handle_noargs(self, **options):
stop_eclipse()
|
class ISyntaxInterpreter(object):
def get_class_definition(self, string_content):
raise NotImplementedError()
def has_entry_points(self, string_content):
raise NotImplementedError()
def get_methods_definitions(self, string_content):
raise NotImplementedError()
def get_properties(self, string_content):
raise NotImplementedError()
|
from django.db import models
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
class PostQueryset(models.QuerySet):
def valid(self):
return self.filter(valid=True)
def in_valid(self):
return self.filter(valid=False)
class PostManager(models.Manager):
# def get_queryset(self):
# return super().get_queryset()
def get_queryset(self):
return PostQueryset(self.model, using=self._db)
def valid(self):
return self.get_queryset().valid()
def in_valid(self):
return self.get_queryset().in_valid()
class Post(models.Model):
title = models.CharField(max_length=20)
valid = models.BooleanField(default=False)
objects = PostManager()
def __str__(self):
return self.title
def post_model_post_save_receiver(sender, *args, **kwargs):
print("The save method was called")
post_save.connect(post_model_post_save_receiver, sender=Post)
@receiver(post_delete)
def post_model_post_delete_receiver(sender, *args, **kwargs):
print("The delete method was called")
|
# Copyright 2021 Coastal Carolina University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
import pathlib
class TransactionLock:
    def __init__(self, lockfile):
        self.path = pathlib.PosixPath(lockfile)
        self.in_transaction = False
#
def check_lock(self):
result = False
if self.path.exists():
this_pid = os.getpid()
check_pid = 0
with open(self.path, 'r') as fh:
check_pid = int(fh.read().strip())
#
if this_pid == check_pid:
# We already hold the lock
result = True
else:
# See if we have a stale lock
procpath = pathlib.PosixPath('/proc').joinpath(str(check_pid))
if procpath.exists():
data = ''
with open(procpath.joinpath('status')) as fh:
data = fh.read()
#
if 'python' not in data:
# pid has been reused, so it should be safe to clean up the lock file
result = True
self.path.unlink(missing_ok=True)
#
else:
# Stale lock: clean it up
result = True
self.path.unlink(missing_ok=True)
#####
else:
# Not locked, so we're good to proceed
result = True
#
return result
#
def lock(self):
result = False
if self.check_lock():
with open(self.path, 'w') as fh:
pid = os.getpid()
fh.write(str(pid) + '\n')
#
# We really shouldn't have a close race condition with this application, since it is anticipated that the
# system administrator will run it manually (or perhaps via cron, but even then not especially frequently).
result = self.check_lock()
if result:
self.in_transaction = True
#
#
return result
#
def unlock(self):
result = False
# Check that we're the process holding the lock
if self.check_lock():
self.path.unlink(missing_ok=True)
result = True
self.in_transaction = False
#
return result
#
#
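# Illustrative usage sketch (the lock file path below is an arbitrary placeholder):
# lock() before starting transactional work and unlock() when done; check_lock()
# transparently cleans up stale locks left behind by dead processes.
if __name__ == '__main__':
    tl = TransactionLock('/tmp/example-transaction.lock')   # hypothetical path
    if tl.lock():
        try:
            pass   # ... perform the transactional work here ...
        finally:
            tl.unlock()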
|
from contentbase.auditor import (
AuditFailure,
audit_checker,
)
term_mapping = {
"head": "UBERON:0000033",
"limb": "UBERON:0002101",
"salivary gland": "UBERON:0001044",
"male accessory sex gland": "UBERON:0010147",
"testis": "UBERON:0000473",
"female gonad": "UBERON:0000992",
"digestive system": "UBERON:0001007",
"arthropod fat body": "UBERON:0003917",
"antenna": "UBERON:0000972",
"adult maxillary segment": "FBbt:00003016",
"female reproductive system": "UBERON:0000474",
"male reproductive system": "UBERON:0000079",
"nucleus": "GO:0005634",
"cytosol": "GO:0005829",
"chromatin": "GO:0000785",
"membrane": "GO:0016020",
"mitochondria": "GO:0005739",
"nuclear matrix": "GO:0016363",
"nucleolus": "GO:0005730",
"nucleoplasm": "GO:0005654",
"polysome": "GO:0005844",
"insoluble cytoplasmic fraction": "NTR:0002594"
}
@audit_checker('biosample', frame='object')
def audit_biosample_term(value, system):
'''
Biosample_term_id and biosample_term_name
and biosample_type should all be present.
This should be handled by schemas.
Biosample_term_id should be in the ontology.
Biosample_term_name should match biosample_term_id.
'''
if value['status'] in ['deleted']:
return
if 'biosample_term_id' not in value:
return
ontology = system['registry']['ontology']
term_id = value['biosample_term_id']
term_name = value.get('biosample_term_name')
if term_id.startswith('NTR:'):
detail = 'Biosample {} has a New Term Request {} - {}'.format(
value['@id'],
term_id,
term_name)
raise AuditFailure('NTR biosample', detail, level='DCC_ACTION')
if term_id not in ontology:
detail = 'Biosample {} has biosample_term_id of {} which is not in ontology'.format(
value['@id'],
term_id)
        raise AuditFailure('term_id not in ontology', detail, level='DCC_ACTION')
ontology_term_name = ontology[term_id]['name']
if ontology_term_name != term_name and term_name not in ontology[term_id]['synonyms']:
detail = 'Biosample {} has a mismatch between biosample_term_id "{}" and biosample_term_name "{}"'.format(
value['@id'],
term_id,
term_name,
)
raise AuditFailure('mismatched biosample_term', detail, level='DCC_ACTION')
@audit_checker('biosample', frame='object')
def audit_biosample_culture_date(value, system):
'''
A culture_harvest_date should not precede
a culture_start_date.
This should move to the schema.
'''
if value['status'] in ['deleted']:
return
if ('culture_start_date' not in value) or ('culture_harvest_date' not in value):
return
if value['culture_harvest_date'] <= value['culture_start_date']:
detail = 'Biosample {} has a culture_harvest_date {} which precedes the culture_start_date {}'.format(
value['@id'],
value['culture_harvest_date'],
value['culture_start_date'])
raise AuditFailure('invalid dates', detail, level='ERROR')
@audit_checker('biosample', frame=['organism', 'donor', 'donor.organism', 'donor.mutated_gene', 'donor.mutated_gene.organism'])
def audit_biosample_donor(value, system):
'''
A biosample should have a donor.
The organism of donor and biosample should match.
Pooled_from biosamples do not need donors??
'''
if value['status'] in ['deleted']:
return
if ('donor' not in value) and (value['pooled_from']):
return
if ('donor' not in value) and (not value['pooled_from']):
detail = 'Biosample {} requires a donor'.format(value['@id'])
raise AuditFailure('missing donor', detail, level='ERROR')
return
donor = value['donor']
if value['organism']['name'] != donor['organism']['name']:
detail = 'Biosample {} is organism {}, yet its donor {} is organism {}. Biosamples require a donor of the same species'.format(
value['@id'],
value['organism']['name'],
donor['@id'],
donor['organism']['name'])
raise AuditFailure('mismatched organism', detail, level='ERROR')
if 'mutated_gene' not in donor:
return
if value['organism']['name'] != donor['mutated_gene']['organism']['name']:
detail = 'Biosample {} is organism {}, but its donor {} mutated_gene is in {}. Donor mutated_gene should be of the same species as the donor and biosample'.format(
value['@id'],
value['organism']['name'],
donor['@id'],
donor['mutated_gene']['organism']['name'])
raise AuditFailure('mismatched mutated_gene organism', detail, level='ERROR')
for i in donor['mutated_gene']['investigated_as']:
if i in ['histone modification', 'tag', 'control', 'recombinant protein', 'nucleotide modification', 'other post-translational modification']:
detail = 'Donor {} has an invalid mutated_gene {}. Donor mutated_genes should not be tags, controls, recombinant proteins or modifications'.format(
donor['@id'],
donor['mutated_gene']['name'])
raise AuditFailure('invalid donor mutated_gene', detail, level='ERROR')
@audit_checker('biosample', frame='object')
def audit_biosample_subcellular_term_match(value, system):
'''
The subcellular_fraction_term_name and subcellular_fraction_term_id
should be concordant. This should be a calculated field
If one exists the other should. This should be handled in the schema.
'''
if value['status'] in ['deleted']:
return
if ('subcellular_fraction_term_name' not in value) or ('subcellular_fraction_term_id' not in value):
return
    expected_term_id = term_mapping[value['subcellular_fraction_term_name']]
    if expected_term_id != value['subcellular_fraction_term_id']:
detail = 'Biosample {} has a mismatch between subcellular_fraction_term_name "{}" and subcellular_fraction_term_id "{}"'.format(
value['@id'],
value['subcellular_fraction_term_name'],
value['subcellular_fraction_term_id'])
raise AuditFailure('mismatched subcellular_fraction_term', detail, level='ERROR')
@audit_checker('biosample', frame='object')
def audit_biosample_depleted_term_match(value, system):
'''
    The depleted_in_term_name and depleted_in_term_id
    should be concordant. This should be a calculated field.
If one exists, the other should. This should be handled in the schema.
'''
if value['status'] == 'deleted':
return
if 'depleted_in_term_name' not in value:
return
if len(value['depleted_in_term_name']) != len(value['depleted_in_term_id']):
detail = 'Biosample {} has a depleted_in_term_name array and depleted_in_term_id array of differing lengths'.format(
value['@id'])
raise AuditFailure('mismatched depleted_in_term length', detail, level='ERROR')
return
for i, dep_term in enumerate(value['depleted_in_term_name']):
if (term_mapping[dep_term]) != (value['depleted_in_term_id'][i]):
detail = 'Biosample {} has a mismatch between {} and {}'.format(
value['@id'],
dep_term,
value['depleted_in_term_id'][i])
raise AuditFailure('mismatched depleted_in_term', detail, level='ERROR')
@audit_checker('biosample', frame='object')
def audit_biosample_transfection_type(value, system):
'''
A biosample with constructs or rnais should have a
transfection_type
'''
if value['status'] == 'deleted':
return
if (value['rnais']) and ('transfection_type' not in value):
detail = 'Biosample {} with a value for RNAi requires transfection_type'.format(value['@id'])
raise AuditFailure('missing transfection_type', detail, level='ERROR')
if (value['constructs']) and ('transfection_type' not in value):
detail = 'Biosample {} with a value for construct requires transfection_type'.format(value['@id'])
raise AuditFailure('missing transfection_type', detail, level='ERROR')
|
from ldm.modules.losses.contperceptual import LPIPSWithDiscriminator
|
#!/usr/bin/env python
"""This script identify all the unique splice junctions in one or more transcript
annotations, and exports these as a `BED`_ file with one splice junction per line
Optionally, this script can also export junctions as a `Tophat`_ ``.juncs`` file.
If a splice junction appears multiple times (e.g. used by more than one transcript),
only the first occurrence of the junction will be reported. Scores, if present,
are exported unaltered in `BED`_ output.
Examples:
.. code-block:: shell
# identify splice junctions from a transcript annotation supplied in GTF2
# creates output file 'annotation.bed'
$ findjuncs my_annotation --annotation_format GTF2 \\
--annotation_files transcripts.gtf
# merge unique annotations from annotation.bed and newly_discovered.bed,
# export only unique junctions to 'merged_unique.bed'
$ findjuncs merged_unique --annotation_format BED \\
--annotation_files annotation.bed newly_discovered.bed
See also
--------
:mod:`plastid.bin.slidejuncs`
Script that makes richer comparisons between discovered and annotated
junctions, using genomic sequence and :py:mod:`plastid.bin.crossmap`
results to classify junctions
"""
"""
From the Tophat specification for `.juncs` files:
Junctions are specified one per line, in a tab-delimited format. Records
look like::
<chrom> <left> <right> <+/->
left and right are zero-based coordinates, and specify the last character of
the left sequence to be spliced to the first character of the right sequence,
inclusive. That is, the last and the first positions of the flanking exons.
Users can convert junctions.bed (one of the TopHat outputs) to this format
using bed_to_juncs < junctions.bed > new_list.juncs where bed_to_juncs can
be found under the same folder as tophat
See http://ccb.jhu.edu/software/tophat/index.shtml for more information.
"""
import sys
import argparse
import inspect
import warnings
from plastid.genomics.roitools import SegmentChain
from plastid.util.scriptlib.argparsers import AnnotationParser, BaseParser
from plastid.util.io.filters import NameDateWriter
from plastid.util.io.openers import argsopener, get_short_name
from plastid.util.scriptlib.help_formatters import format_module_docstring
warnings.simplefilter("once")
printer = NameDateWriter(get_short_name(inspect.stack()[-1][1]))
_ANNOTATION_INPUT_CHOICES = ["BED", "BigBed", "GTF2", "GFF3", "PSL"]
_ANNOTATION_DISABLED = ["add_three", "annotation_file"]
def main(argv=sys.argv[1:]):
"""Command-line program
Parameters
----------
argv : list, optional
A list of command-line arguments, which will be processed
as if the script were called from the command line if
:py:func:`main` is called directly.
Default: sys.argv[1:] (actually command-line arguments)
"""
ap = AnnotationParser(input_choices=_ANNOTATION_INPUT_CHOICES)
annotation_file_parser = ap.get_parser()
bp = BaseParser()
base_parser = bp.get_parser()
# yapf: disable
parser = argparse.ArgumentParser(
description = format_module_docstring(__doc__),
formatter_class = argparse.RawDescriptionHelpFormatter,
parents =[base_parser, annotation_file_parser]
)
parser.add_argument(
"--export_tophat",
default =False,
action ="store_true",
help ="Export tophat `.juncs` file in addition to BED output"
)
parser.add_argument("outbase", type=str, help="Basename for output files")
# yapf: enable
args = parser.parse_args(argv)
bp.get_base_ops_from_args(args)
transcripts = ap.get_transcripts_from_args(args, printer=printer, return_type=SegmentChain)
with argsopener("%s.bed" % args.outbase, args, "w") as bed_out:
if args.export_tophat == True:
tophat_out = open("%s.juncs" % args.outbase, "w")
printer.write("params: " + " ".join(argv))
printer.write("Detecting & comparing junctions...")
ex_pairs = {}
c = 0
u = 0
for chain in transcripts:
if len(chain) > 1: # if multi-exon
chrom = chain.chrom
strand = chain.strand
try:
ep = ex_pairs[(chrom, strand)]
except KeyError:
ex_pairs[(chrom, strand)] = []
ep = ex_pairs[(chrom, strand)]
for i in range(0, len(chain) - 1):
seg1 = chain[i]
seg2 = chain[i + 1]
if c % 1000 == 0 and c > 0:
printer.write("Processed %s junctions. Found %s unique..." % (c, u))
c += 1
key = (seg1.end, seg2.start)
if key not in ep:
ep.append(key)
u += 1
new_chain = SegmentChain(seg1, seg2)
bed_out.write(new_chain.as_bed())
if args.export_tophat == True:
my_junc = (chrom, seg1.end - 1, seg2.start, strand)
tophat_out.write("%s\t%s\t%s\t%s\n" % my_junc)
del new_chain
del seg1
del seg2
del chain
printer.write("Processed %s total junctions. Found %s unique." % (c, u))
bed_out.close()
if args.export_tophat == True:
tophat_out.close()
printer.write("Done.")
if __name__ == "__main__":
main()
|
# Also seen on pramp as - Basic Regex Parser
'''
Given an input string (s) and a pattern (p), implement regular expression matching with support for '.' and '*' where:
'.' Matches any single character.
'*' Matches zero or more of the preceding element.
The matching should cover the entire input string (not partial).
'''
# Ignoring the Kleene star, our algorithm can be implemented recursively like so...
def isMatch(text, pattern):
# base case where pattern is empty str
if pattern == "":
if text == "":
# text matches pattern
return True
else:
return False
# compare first char of text/pattern match and recurse on the rest
firstMatch = False if text == "" else pattern[0] in {'.', text[0]} # first char of pattern either wildcard or matches text
# recurse on substrs
# RECALL: slicing outside the bounds of a sequence (at least for built-ins) doesn't cause an error
return firstMatch and isMatch(text[1:], pattern[1:])
print('Wildcard only func test:', isMatch('abc', 'a.c'))
print('Wildcard only func test:', isMatch('abc', 'a..c'))
'''
p = .*b
|
t = ab
|
True and
'''
# Now include Kleene Star into recursive solution for Regex Parsing
# Extend the recursion so that a '*' following a character matches zero or more of that character
def isMatch2(text, pattern):
# base case where pattern is empty str
# print(text,pattern)
if pattern == "":
if text == "":
# text matches pattern
return True
else:
# print('not match', text, pattern)
return False
# compare first char of text/pattern match and recurse on the rest
firstMatch = bool(text) and pattern[0] in {'.', text[0]} # first char of pattern either wildcard or matches text
# Recurse on substrs
# NEW - Need to consider if the next char is a Kleene Star
if len(pattern) > 1 and pattern[1] == '*':
# can branch into 2 recursive checks
# either take first match and constrain text
# or ignore pattern[0]
return (firstMatch and isMatch2(text[1:], pattern)) or isMatch2(text, pattern[2:])
else:
# RECALL: slicing outside the bounds of a sequence (at least for built-ins) doesn't cause an error
return firstMatch and isMatch2(text[1:], pattern[1:])
print('Regex only func test:', isMatch2('abc', 'a*c'))
print('Regex only func test:', isMatch2('abc', 'a.*c'))
print('Regex only func test:', isMatch2('abdfsfsdfc', 'a.*c'))
print('Regex only func test:', isMatch2('abdfsfsdfc', 'a.*e'))
'''
a.*c
|
abc
|
'''
'''
[DYNAMIC PROGRAMMING APPROACH]
Finally, we can observe that the problem has an optimal substructure
Optimal substructure means the optimal solution to the problem can be built from optimal solutions to its subproblems
Natural to cache intermediate results - easiest to memoize rather than tabulate
dp(i,j) - asks the question whether text[i:] and pattern[j:] match?
We can describe our answer in terms of answers to questions involving smaller things
Recursive + Memoize: Top-down DP!!!
Time: O(TP)
Work for every call dp(i,j) is done once and is O(1) work
    Space: O(TP) boolean entries in our memo + recursive stack frames
'''
def isMatch3(text, pattern):
memo = {} # store tuple (i,j) as the key
def dp(i,j):
# check if already computed
if (i,j) not in memo:
# if reach end of pattern
# print('i,j: ',i, j)
if j >= len(pattern):
ans = i == len(text)
else:
firstMatch = i < len(text) and pattern[j] in {text[i], '.'}
if j < len(pattern) - 1 and pattern[j+1] == '*':
ans = dp(i,j+2) or (firstMatch and dp(i+1,j))
else:
ans = firstMatch and dp(i+1,j+1)
memo[i,j] = ans
return memo[i,j]
return dp(0,0)
print('DP func test:', isMatch3('abc', 'a*c'))
print('DP func test:', isMatch3('abdfsfsdfc', 'a.*c'))
print('DP func test:', isMatch3('abdfsfsdfc', 'a.*e'))
print('DP func test:', isMatch3('abdfsfsdfc', 'a.*'))
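'''
[BOTTOM-UP VARIANT - illustrative sketch, not part of the original solutions above]
The same recurrence can be tabulated instead of memoized: fill dp[i][j] from the
ends of text/pattern backwards, where dp[i][j] answers "does text[i:] match
pattern[j:]?". Same O(TP) time and space as isMatch3, but no recursion depth to
worry about.
'''
def isMatch4(text, pattern):
    T, P = len(text), len(pattern)
    # dp[i][j] is True iff text[i:] matches pattern[j:]
    dp = [[False] * (P + 1) for _ in range(T + 1)]
    dp[T][P] = True  # empty pattern matches empty text
    for i in range(T, -1, -1):
        for j in range(P - 1, -1, -1):
            firstMatch = i < T and pattern[j] in {text[i], '.'}
            if j + 1 < P and pattern[j + 1] == '*':
                # either skip the "x*" block entirely, or consume one text char and stay on it
                dp[i][j] = dp[i][j + 2] or (firstMatch and dp[i + 1][j])
            else:
                dp[i][j] = firstMatch and dp[i + 1][j + 1]
    return dp[0][0]
print('Tabulation func test:', isMatch4('abc', 'a*c'))
print('Tabulation func test:', isMatch4('abdfsfsdfc', 'a.*c'))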
|
from StockInfo import StockInfo
import logging
logging_enabled = True
class Stock:
"""An object representing a single stock"""
instances_started = 0
def __init__(self, name, purchase_date, shares=0, price=0):
self.name = name
self.purchase_date = purchase_date
self.shares = shares
self.price = price
# Initiate, but don't set
self.stockinfo = None
self.total_cost = 0
self.ticker = ''
self.current_price = 0
self.isin = ''
self.update_price = False
if logging_enabled:
self.logger = logging.getLogger(f'Stockolio.{__name__}')
self.logger.debug(f"Stock object {name} created {purchase_date}, {shares}, {price}")
Stock.instances_started += 1
def add_transaction(self, date, time, shares, price, tx_cost=0):
self.shares += shares
        self.total_cost += (shares * price) - tx_cost  # tx_cost is given as a negative value, so subtracting it adds the cost
def get_total_cost(self):
return self.total_cost
def get_shares(self):
return self.shares
def get_total_cost_per_share(self):
return self.total_cost / self.shares
def set_ticker(self, ticker):
self.ticker = ticker.upper()
self.stockinfo = StockInfo(self.ticker)
def get_price(self):
if self.ticker:
if self.current_price and not self.update_price:
return self.current_price
else:
self.current_price = self.stockinfo.getPrice()
return self.current_price
else:
return 0.0
def set_price(self, current_price):
self.current_price = current_price
def set_isin(self, isin):
self.isin = isin
def set_update_price(self, update_price):
if logging_enabled:
self.logger.debug(f"set_update_price {update_price} for {self.ticker}")
self.update_price = update_price
def json(self):
dict_stock = {
"name": {self.name},
"ticker": {self.ticker},
"purchase date": {self.purchase_date},
"shares": {self.shares},
"average purchase price": {self.price},
"total cost": {self.total_cost},
"current price": {self.current_price},
"ISIN": {self.isin}
}
return dict_stock
def __repr__(self):
return f'Stock({self.name}, {self.purchase_date}, {self.shares}, {self.price})'
def __str__(self):
return f'Stock({self.name} ({self.ticker}), {self.shares}, {self.price})'
|
"""Generic Collection class
"""
from typing import Any, Dict
from .collection import Collection
from .collection_utils import register_collection
class Generic(Collection):
"""
This is a QCA GenericCollection class. This class behaves similarly to a dictionary, but
can be serialized and saved to a QCFractal server.
Attributes
----------
client : client.FractalClient
A FractalClient connected to a server
"""
class DataModel(Collection.DataModel):
"""
Internal Data structure base model typed by PyDantic.
This structure validates input, allows server-side validation and data security,
and will create the information to pass back and forth between server and client.
"""
data: Dict[str, Any] = {}
class Config:
extra = "forbid"
def _pre_save_prep(self, client):
pass
def __setitem__(self, key, item):
self.data.data[key] = item
def __getitem__(self, key):
return self.data.data[key]
def get_data(self, copy: bool = True):
"""Returns a copy of the underlying data object.
Parameters
----------
copy : bool, optional
Whether to copy the object or not
Returns
-------
DataModel
The underlying DataModel
"""
if copy:
return self.data.copy(deep=True)
else:
return self.data
register_collection(Generic)
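# Illustrative usage sketch (assumptions: the collection name, the way a
# FractalClient is obtained, and the constructor/save signatures are taken from
# the Collection base class and may differ; treat the lines below as a hedged
# outline rather than the definitive API):
#
#   client = FractalClient("localhost:7777")            # hypothetical server address
#   coll = Generic("my_generic_data", client=client)    # assumed base-class constructor
#   coll["alpha"] = 1.23                                 # dict-like access via __setitem__
#   print(coll["alpha"])                                 # ... and __getitem__
#   coll.save()                                          # persist to the server (base-class method)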
|
"""Schema and Properties"""
from expiringdict import ExpiringDict
from renogy_rover import config
from renogy_rover.driver import client
class Property:
"""Simple Address Property"""
def __init__(self, addr, length=1):
self.addr = addr
self.length = length
def get_value(self):
"""Get Value from Driver"""
return client.read(self.addr, self.length)
def set_value(self, x):
"""Write Value to Driver"""
raise NotImplementedError("TODO: Write RAW Value, No Processing")
class Numeric(Property):
"""Read Only Numeric Property with Multiplier"""
def __init__(self, addr, length=1, multiplier=1, unit=None):
super().__init__(addr, length=length)
self.multiplier = multiplier
self.unit = unit if unit is not None else ''
def get_value(self):
data = super().get_value()[0]
# Make sure floats only go to 2 digits
return round(data * self.multiplier, 2)
def set_value(self, x):
raise NotImplementedError("Set not supported")
class SchemaBase(type):
"""Metaclass for Renogy Schemas"""
def __new__(mcs, name, bases, dct):
cls = super().__new__(mcs, name, bases, dct)
cls._properties = {}
for key, value in dct.items():
if issubclass(value.__class__, Property):
cls._properties[key] = value
setattr(cls, key, property(
mcs._proxy_cached_getter(cls, key),
mcs._proxy_cached_setter(cls, key),
))
return cls
@classmethod
def _proxy_cached_getter(mcs, owner, key):
"""Get cached property value"""
def get_value(self):
try:
return self._cache[key]
except KeyError:
val = self._properties[key].get_value()
self._cache[key] = val
return val
return get_value
@classmethod
def _proxy_cached_setter(mcs, owner, key):
"""Set property value and clear cache"""
def set_value(self, x):
self._properties[key].set_value(x)
try:
del self._cache[key]
except KeyError:
pass
return set_value
class Schema(metaclass=SchemaBase):
"""Base Schema with Cache"""
def __init__(self):
self._cache = ExpiringDict(
max_len=100,
max_age_seconds=config.cache_ttl,
)
def dump(self):
"""Dump all properties to a dictionary"""
return {x: getattr(self, x) for x in self._properties}
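# Illustrative sketch of how a concrete schema is declared (the register
# addresses, multipliers and units below are hypothetical placeholders, not real
# Renogy Rover registers). Each class attribute that is a Property instance is
# turned into a cached instance property by SchemaBase.
class ExampleRoverSchema(Schema):
    battery_voltage = Numeric(0x0101, multiplier=0.1, unit='V')   # hypothetical address
    battery_soc = Numeric(0x0100, unit='%')                       # hypothetical address
# Reading ExampleRoverSchema().battery_voltage goes through the driver once and
# is then served from the ExpiringDict cache until config.cache_ttl expires;
# ExampleRoverSchema().dump() returns all declared properties as a dictionary.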
|
"""Unit tests for the reference module in the output package."""
import argparse
import unittest
from next_action import todotxt
from next_action.output import reference
class ReferenceTest(unittest.TestCase):
"""Unit tests for the reference method."""
def setUp(self):
"""Set up the namespace with default arguments for all unit tests."""
self.namespace = argparse.Namespace()
self.namespace.line_number = False
self.namespace.reference = "multiple"
self.filename = "todo.txt"
self.line_number = 42
self.namespace.file = [self.filename]
self.task = todotxt.Task("Todo", self.filename, self.line_number)
def test_line_number(self):
"""Test that the line number is added if line_number is true."""
self.namespace.line_number = True
self.assertEqual(f"Todo [{self.line_number}]", reference(self.task, self.namespace))
def test_line_number_and_filename(self):
"""Test that the line number and file name are added if line_number is true and reference is always."""
self.namespace.line_number = True
self.namespace.reference = "always"
self.assertEqual(f"Todo [{self.filename}:{self.line_number}]", reference(self.task, self.namespace))
def test_always(self):
"""Test that the source filename is added."""
self.namespace.reference = "always"
self.assertEqual(f"Todo [{self.filename}]", reference(self.task, self.namespace))
def test_never(self):
"""Test that the source filename is not added."""
self.namespace.reference = "never"
self.assertEqual("Todo", reference(self.task, self.namespace))
def test_multiple(self):
"""Test that the source filename is added."""
self.namespace.file.append("project.txt")
self.assertEqual(f"Todo [{self.filename}]", reference(self.task, self.namespace))
|
from __future__ import division
import numpy as np
import tensorflow as tf
from SIDLoader import SIDLoader
from ModelBuilder import ModelBuilder
from Experiment import Experiment
import time,datetime,os,glob
path_prefix = '.'
checkpoint_dir = path_prefix+'/chk'
dataset_dir = path_prefix+'/dataset'
black_level = 512
seed = 1337
tensorboard_dir = path_prefix+'/tensorboard/'
#Set initial seed
np.random.seed(seed)
#Load flat matrix
dataset = SIDLoader(dataset_dir, patch_fn=None,keep_raw=False,keep_gt=True, set_id='test')
#Set up experiments
expList = []
expList.append(Experiment(name='Sony',model_fn={'fn':ModelBuilder.build_loadable_cchen},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir='../checkpoint',dataset=dataset))
#expList.append(Experiment(name='cchen_sony_noflip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_sony_noflip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='deep_isp_noflip',model_fn={'fn':ModelBuilder.build_deep_isp_exp},device="/device:GPU:2",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_resize_sony_noflip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp_resize},device="/device:GPU:3",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_resize_sony_noflip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp_resize},device="/device:GPU:4",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_sony_flip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_sony_flip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='deep_isp_flip',model_fn={'fn':ModelBuilder.build_deep_isp_exp},device="/device:GPU:2",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_resize_sony_flip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp_resize},device="/device:GPU:3",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_resize_sony_flip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp_resize},device="/device:GPU:4",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_self_amp2',model_fn={'fn':ModelBuilder.build_unet_self_scale},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_amp_infer2',model_fn={'fn':ModelBuilder.build_unet_amp_infer},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
epoch = 0
dataset.start()
try:
#test loop
for exp in expList:
exp.create_test_writer()
while(epoch < 1):
#Get batch from batchloader
(x,y,r) = dataset.get_batch()
#start running training step on each GPU
for exp in expList:
exp.test_action(x,y,r)
#Wait for all to finish
for exp in expList:
exp.finish_test_action()
epoch = dataset.readEpoch
if(dataset.readC == 0): #It is the end of the epoch
for exp in expList:
exp.end_of_epoch_test()
except KeyboardInterrupt:
print('Keyboard interrupt accepted')
finally:
print("Stopping dataset")
dataset.stop()
for exp in expList:
exp.model['sess'].close()
|
import json
import boto3
from model import get_latest_model_path
from datetime import datetime
import re
import os
from prepare_data import convert_a2i_to_augmented_manifest
from urllib.parse import urlparse
BUCKET = os.environ['BUCKET']
PIPELINE = os.environ['PIPELINE']
MODEL_GROUP = os.environ['MODEL_GROUP']
body = "body"
detail = "detail"
humanLoopName = "humanLoopName"
s3_path = "s3-image-path"
loop_status = "HumanLoopStatus"
string_value = "stringValue"
a2i = boto3.client('sagemaker-a2i-runtime')
sm_client = boto3.client('sagemaker')
s3_client = boto3.client('s3')
s3 = boto3.resource('s3')
completed_human_loops = []
def lambda_handler(event, context):
    print(json.dumps(event))
    # reset the module-level list so results do not accumulate across warm Lambda invocations
    completed_human_loops.clear()
records = event['Records']
for record in records:
if body in record:
bodyjson = json.loads(record[body])
if detail in bodyjson:
resp = a2i.describe_human_loop(HumanLoopName=bodyjson[detail][humanLoopName])
if resp[loop_status] == "Completed":
completed_human_loops.append(resp)
if len(completed_human_loops)>0:
output=[]
training_file = 'meta_train.csv'
path = "/tmp/{}".format(training_file)
now = datetime.now()
timestamp = datetime.timestamp(now)
prefix = "a2i-result/{}".format(str(timestamp))
print(prefix)
with open(path, 'w') as outfile:
outfile.write("Filename,Label,Remark\n")
for resp in completed_human_loops:
splitted_string = re.split('s3://' + BUCKET + '/', resp['HumanLoopOutput']['OutputS3Uri'])
output_bucket_key = splitted_string[1]
response = s3_client.get_object(Bucket=BUCKET, Key=output_bucket_key)
content = response["Body"].read()
json_output = json.loads(content)
print(json_output)
# convert using the function
augmented_manifest, s3_path = convert_a2i_to_augmented_manifest(json_output)
o = urlparse(s3_path, allow_fragments=False)
obucket = o.netloc
okey = o.path
of = okey.split('/')[-1]
copy_source = {
'Bucket':obucket,
'Key': okey[1:]
}
tbucket = s3.Bucket(BUCKET)
print(copy_source, "{}/train/{}".format(prefix, of))
tbucket.copy(copy_source, "{}/train/{}".format(prefix, of))
outfile.write(augmented_manifest)
outfile.write('\n')
s3_client.upload_file(path, Bucket=BUCKET, Key="{}/{}".format(prefix, training_file))
s3_path = "s3://{}/{}".format(BUCKET, prefix)
last_model_path = get_latest_model_path(MODEL_GROUP)
parameters = [
{
'Name':'TrainData',
'Value': s3_path
},
{
'Name':'ValidationData',
'Value': s3_path
},
{
'Name':'ModelData',
'Value': last_model_path
},
]
response = sm_client.start_pipeline_execution( PipelineName = PIPELINE, PipelineParameters=parameters)
return {
'statusCode': 200,
# 'body': json.dumps(completed_human_loops)
'body': 'finished'
}
|
import gevent
from gevent import monkey
from gevent.coros import Semaphore
# patches stdlib (including socket and ssl modules) to cooperate with other greenlets
monkey.patch_all()
import urllib2
import memcache
import _mysql
import redis
import unittest
n = 0
sem = Semaphore()
class SyncTest(unittest.TestCase):
def testMySQL(self):
(_, v) = do_mysql()
self.assertEqual('1', v)
def testMemcache(self):
(_, v) = do_memcache("irr", "http://irrlab.com/")
self.assertEqual("http://irrlab.com/", v)
def testURL(self):
(_, v) = do_url("http://irrlab.com/")
self.assertTrue(v > 8192)
def testRedis(self):
(_, v) = do_redis("irr", "http://irrlab.com/")
self.assertEqual("http://irrlab.com/", v)
def testZ(self):
global n
print("Semaphore=%d" % n)
self.assertEqual(n, 12)
def register():
global sem, n
try:
sem.acquire()
n = n + 1
return n
finally:
sem.release()
def do_mysql():
db = _mysql.connect("localhost", "root", "mysql", "mysql")
db.query("SELECT 1")
res = db.store_result()
row = res.fetch_row()[0]
print("MySQL: %s" % row)
db.close()
return (register(), row[0])
def do_memcache(key, url):
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
mc.set(key, url)
res = mc.get(key)
print("Memcache: %s=%s" % (key, res))
mc.disconnect_all()
return (register(), res)
def do_url(url):
data = urllib2.urlopen(url).read()
size = len(data)
print('URL: %s: %s bytes' % (url, size))
return (register(), size)
def do_redis(key, url):
r = redis.StrictRedis(host='localhost', port=6379, db=0)
r.set(key, url)
res = r.get(key)
print("Redis: %s=%s" % (key, res))
r = None
return (register(), res)
def do_jobs(val):
(key, url) = val
gevent.joinall([gevent.spawn(do_mysql),
gevent.spawn(do_memcache, key, url),
gevent.spawn(do_url, url),
gevent.spawn(do_redis, key, url)])
if __name__ == "__main__":
urls = [('google', 'http://www.google.com'),
('python', 'http://www.python.org')]
gevent.joinall([gevent.spawn(do_jobs, url) for url in urls])
print("n=%d" % n)
unittest.main()
|
from typing import ByteString
import logging
import onnxruntime as rt
from onnxGrpcServer_pb2 import Features
logging.basicConfig(level=logging.INFO)
def build_float_input(f_features):
return [
f_features.engines,
f_features.passenger_capacity,
f_features.crew,
f_features.company_rating,
f_features.review_scores_rating
]
def build_categorical_input(c_features):
return [
c_features.d_check_complete,
c_features.moon_clearance_complete,
c_features.iata_approved
]
class CTCVClassifier(object):
session = None
float_input_name = None
categorical_input_name = None
label_name = None
def __init__(
self,
model_file_path: str = None
):
logging.info("Initializing model...")
self.model_file_path = model_file_path
self.load_model()
    def _read_model_from_s3(self) -> ByteString:
        # Note: unused by load_model(); assumes s3_client, s3_bucket and s3_key
        # attributes are provided elsewhere (they are not set in __init__).
        return self.s3_client.get_object(
            Bucket=self.s3_bucket,
            Key=self.s3_key)["Body"]
def _build_session(self) -> rt.InferenceSession:
logging.info("Fetching model file from Local...")
logging.info(f"model_file_path: {self.model_file_path}")
return rt.InferenceSession(self.model_file_path)
def load_model(self) -> None:
logging.info("Loading model...")
logging.info("Building session...")
self.session = self._build_session()
logging.info("Session build Done.")
self.float_input_name = self.session.get_inputs()[0].name
self.categorical_input_name = self.session.get_inputs()[1].name
self.label_name = self.session.get_outputs()[1].name
logging.info("Model load Done.")
def predict(self, data: Features) -> float:
inputs = {
self.float_input_name: [build_float_input(data.float_features)],
self.categorical_input_name: [build_categorical_input(data.categorical_features)],
}
return self.session.run([self.label_name], inputs)[0][0][1]
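# Illustrative usage sketch (assumptions: "model.onnx" is a placeholder path and
# the example feature values are arbitrary; field names are taken from
# build_float_input/build_categorical_input above).
if __name__ == "__main__":
    classifier = CTCVClassifier(model_file_path="model.onnx")   # hypothetical model path
    request = Features()
    request.float_features.engines = 2
    request.float_features.passenger_capacity = 100
    request.float_features.crew = 5
    request.float_features.company_rating = 1
    request.float_features.review_scores_rating = 90
    request.categorical_features.d_check_complete = True
    request.categorical_features.moon_clearance_complete = True
    request.categorical_features.iata_approved = True
    logging.info(f"Predicted probability: {classifier.predict(request)}")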
|
from unittest import TestCase
from osbot_utils.utils.Dev import Dev
from osbot_utils.utils.Files import Files
from osbot_graphsv.api.Indexes_Build import Indexes_Build
from osbot_graphsv.demo.Demo_Data import Demo_Data
class test_Indexes_Build(TestCase):
def setUp(self):
self.file_system = Demo_Data().file_system()
#self.issues = Issues(self.file_system)
self.indexes_build = Indexes_Build(self.file_system)
self.result = None
def tearDown(self):
if self.result is not None:
Dev.pprint(self.result)
def test__init__(self):
assert Files.exists(self.indexes_build.file_system.folder_root)
def test_create_all(self):
self.indexes_build.create_all()
def test_create__by_key(self):
self.result = self.indexes_build.create__by_key()
def test_create__by_link_type(self):
self.indexes_build.create__by_link_type()
self.result = self.indexes_build.get__by_link_type()
# def test_create__by_fields_and_values(self):
# self.name = 'AAA BBB'
# self.issue_type = Issue_Type(self.file_system, self.name).setup()
#
# self.issue_type.issue_add({'Summary': 'abc'})
# self.issue_type.issue_add({'Summary': Misc.random_string_and_numbers()})
# self.issue_type.issue_add({'Summary': Misc.random_string_and_numbers()})
# self.issue_type.issue_add({'Summary': Misc.random_string_and_numbers()})
# self.issue_type.issue_add({'Summary': 'abc'})
#
#
# self.indexes_build.create__by_key()
#
# assert self.indexes_build.create__by_fields_and_values().get('Summary').get('abc') == ['AAA_BBB-1', 'AAA_BBB-5']
# assert self.indexes_build.get__by_fields_and_values() .get('Summary').get('abc') == ['AAA_BBB-1', 'AAA_BBB-5']
#
# self.issue_type.delete()
# def test_create__by_values(self):
# self.result = self.indexes_build.create__by_values()
|
from Arknights.ArknightsShell import ArknightsShell
from Arknights.base import ArknightsHelper
|
#!/usr/bin/python3
import sys
from collections import Counter
# One program at bottom supports entire tower.
# List of programs:
# Program name (weight) [-> programs above them]
# NB: programs above not required to be listed before!
# Which program is at the bottom?
def buildStructure(programs):
structure = {}
for program in programs:
line = program.split()
structure[line[0]] = {
'weight': int(line[1].strip('[()]')),
'children': [x.strip(', \n') for x in line[3:]],
'root': ''
}
for k in list(structure.keys()):
for child in structure[k]['children']:
structure[child]['root'] = k
return structure
def findRoot(structure):
root = [x for x in structure if structure[x]['root'] == ''][0]
return root
def weight(structure, node):
if node in structure:
if 'fullweight' not in structure[node]:
structure[node]['fullweight'] = structure[node]['weight'] + sum([weight(structure, x) for x in structure[node]['children']])
return structure[node]['fullweight']
else:
return 0
def findImbalance(structure, node):
fullweight = weight(structure, node)
freq = Counter([weight(structure, child) for child in structure[node]['children']]).most_common()
if len(freq) == 1:
# children are balanced
# therefore issue is this node
# it is not the same weight as its peers
parent = structure[node]['root']
peerfreq = Counter([weight(structure, peer) for peer in structure[parent]['children']]).most_common()
# return the weight it should be to balance
return structure[node]['weight'] + (peerfreq[0][0]-fullweight)
else:
# children are not balanced
# issue is further down
target = freq[1][0]
for child in structure[node]['children']:
if weight(structure, child) == target:
return findImbalance(structure, child)
# Unit test: build the structure, find the root, and check findImbalance's result.
tt = {'x': (['pbga (66)\n', 'xhth (57)\n', 'ebii (61)\n', 'havc (66)\n', 'ktlj (57)\n', 'fwft (72) -> ktlj, cntj, xhth\n', 'qoyq (66)\n', 'padx (45) -> pbga, havc, qoyq\n', 'tknk (41) -> ugml, padx, fwft\n', 'jptl (61)\n', 'ugml (68) -> gyxo, ebii, jptl\n', 'gyxo (61)\n', 'cntj (57)\n']
, 60)}
for k, v in tt.items():
structure = buildStructure(v[0])
root = findRoot(structure)
result = findImbalance(structure, root)
if result != v[1]:
print("FAIL: input ", v[0], ": expected ", v[1], ", got ", result, sep="")
# The input is not checked for sanity, just existence.
programs = sys.stdin.readlines()
if len(programs) == 0:
print("programs missing!")
sys.exit(1)
structure = buildStructure(programs)
root = findRoot(structure)
print(findImbalance(structure, root))
|
# ============================================================================
#
# Copyright (c) 2007-2010 Integral Technology Solutions Pty Ltd,
# All Rights Reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE
# LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# FOR FURTHER INFORMATION PLEASE SEE THE INTEGRAL TECHNOLOGY SOLUTIONS
# END USER LICENSE AGREEMENT (EULA).
#
# ============================================================================
from java.io import File,FileWriter,BufferedWriter
from jarray import array, zeros
serverModule = '1.2.1'
log.debug('Loading module [server.py] version [' + serverModule + ']')
#=======================================================================================
# Configure servers
#=======================================================================================
def configureServers(resourcesProperties, domainProperties):
servers=domainProperties.getProperty('wls.servers')
if not servers is None and len(servers) > 0:
serverList = servers.split(',')
for server in serverList:
arguments = domainProperties.getProperty('wls.server.' + server + '.serverstart.arguments')
name = domainProperties.getProperty('wls.server.' + server + '.name')
replaceName = domainProperties.getProperty('wls.server.' + server + '.replace.name')
autoMigrationEnabled = domainProperties.getProperty('wls.server.' + server + '.auto.migration.enabled')
migrationMachine = domainProperties.getProperty('wls.server.' + server + '.migration.machine')
defaultStoreDir = domainProperties.getProperty('wls.server.' + server + '.default.store.dir')
rootDir = domainProperties.getProperty('wls.server.' + server + '.root.dir')
if not replaceName is None:
name = replaceName
if not arguments is None:
cd ('/Servers/' + str(name) + '/ServerStart/' + str(name))
log.info('Setting server start arguments for ' + str(name))
set('Arguments',arguments)
if not rootDir is None:
cd ('/Servers/' + str(name) + '/ServerStart/' + str(name))
log.info('Setting root directory for ' + str(name))
set('RootDirectory',rootDir)
if not defaultStoreDir is None:
file = File(defaultStoreDir)
if not file.exists():
if file.mkdirs():
log.info('Default store directory [' + str(defaultStoreDir) + '] has been created successfully.')
log.info('Setting default store directory [' + str(defaultStoreDir) + '] for server [' + str(name) + '].')
cd('/Servers/' + str(name) + '/DefaultFileStore/' + str(name))
cmo.setDirectory(defaultStoreDir)
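#=======================================================================================
# Illustrative example (not from the original script) of the domain properties
# consumed by configureServers above; server names, arguments and paths are
# hypothetical placeholders.
#
#   wls.servers=ms1,ms2
#   wls.server.ms1.name=ManagedServer1
#   wls.server.ms1.serverstart.arguments=-Xms512m -Xmx1024m
#   wls.server.ms1.root.dir=/u01/domains/mydomain/servers/ManagedServer1
#   wls.server.ms1.default.store.dir=/u01/domains/mydomain/stores/ManagedServer1
#=======================================================================================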
#=======================================================================================
# Configure cluster
#=======================================================================================
def configureClusters(resourcesProperties, domainProperties):
clusters=domainProperties.getProperty('wls.clusters')
if not clusters is None and len(clusters) > 0:
clusterList = clusters.split(',')
for cluster in clusterList:
clusterName=domainProperties.getProperty('wls.cluster.' + str(cluster) + '.name')
frontendHost=domainProperties.getProperty('wls.cluster.' + str(cluster) + '.frontend.host')
frontendHttpPort=domainProperties.getProperty('wls.cluster.' + str(cluster) + '.frontend.http.port')
frontendHttpsPort=domainProperties.getProperty('wls.cluster.' + str(cluster) + '.frontend.https.port')
cluster = lookup(str(clusterName), 'Cluster')
try:
if not frontendHost is None:
log.info('Setting front end host [' + str(frontendHost) + '].' )
cluster.setFrontendHost(str(frontendHost))
if not frontendHttpPort is None:
log.info('Setting front end HTTP port [' + str(frontendHttpPort) + '].' )
cluster.setFrontendHTTPPort(int(frontendHttpPort))
if not frontendHttpsPort is None:
log.info('Setting front end HTTPS port [' + str(frontendHttpsPort) + '].' )
cluster.setFrontendHTTPSPort(int(frontendHttpsPort))
except Exception, error:
log.error(str(error))
#=======================================================================================
# Create managed servers if defined.
#=======================================================================================
def __createServers(online, configProperties):
servers=configProperties.getProperty('wls.servers')
if not servers is None and len(servers) > 0:
serverList = servers.split(',')
for server in serverList:
__createServer(server, online, configProperties)
#=======================================================================================
# Create a server in domain.
#=======================================================================================
def __createServer(server, online, configProperties):
#=======================================================================================
# Read managed server properties.
#=======================================================================================
domainName=configProperties.getProperty('wls.domain.name')
serverName=configProperties.getProperty('wls.server.' + str(server) + '.name')
replaceName=configProperties.getProperty('wls.server.' + str(server) + '.replace.name')
listenAddress=configProperties.getProperty('wls.server.' + str(server) + '.listener.address')
listenPort=configProperties.getProperty('wls.server.' + str(server) + '.listener.port')
enableSSL=configProperties.getProperty('wls.server.' + str(server) + '.listener.enableSSL')
sslPort=configProperties.getProperty('wls.server.' + str(server) + '.listener.sslPort')
enableTunneling=configProperties.getProperty('wls.server.' + str(server) + '.enableTunneling')
machine=configProperties.getProperty('wls.server.' + str(server) + '.machine')
enableHostnameVerification=configProperties.getProperty('wls.server.' + str(server) + '.enableHostnameVerification')
autoRestartOnFailure=configProperties.getProperty('wls.server.' + str(server) + '.autoRestart')
wlsVersion=configProperties.getProperty('wls.version')
coherenceUnicastAddress=configProperties.getProperty('wls.server.'+str(server)+'.coherence.UnicastListenAddress')
#=======================================================================================
# Configure the Managed Server and SSL port.
#=======================================================================================
serverInstance = None
serverInstanceExist = 0
try:
cd('/')
log.info('Lookup server [' + str(serverName) + ']')
if online:
serverInstance = lookup(str(serverName), 'Server')
else:
try:
cd('Servers/' + str(serverName))
serverInstance = cmo
except Exception, error:
log.info('Unable to find server [' + str(serverName) + ']. The server will be created.')
except WLSTException, error:
log.info('Unable to find server [' + str(serverName) + ']. The server will be created.')
#=======================================================================================
# Assign admin server to machine.
#=======================================================================================
machineReplaceName = None
if not machine is None and len(machine)>0 and configProperties.getProperty('wls.admin.name') == serverName:
machineName=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.name')
machineReplaceName=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.replace.name')
if not machineReplaceName is None:
machineName=machineReplaceName
try:
cd('/')
if online:
machineInstance = lookup(machineName, 'Machine')
log.info('Setting machine [' + str(machineName) + '] to admin server [' + str(serverName) + '].')
serverInstance.setMachine(machineInstance)
else:
log.info('Assigning machine [' + str(machineName) + '] to admin server [' + str(serverName) + '].')
assign('Server', serverName, 'Machine', machineName)
except Exception, error:
raise ScriptError, 'Unable to assign server [' + str(serverName) + '] to machine [' + str(machineName) + ']: ' + str(error)
if serverInstance is None:
cd('/')
serverInstance = create(str(serverName), 'Server')
targetCluster=configProperties.getProperty('wls.server.' + str(server) + '.cluster')
targetClusterName=configProperties.getProperty('wls.cluster.' + str(targetCluster) + '.name')
__targetServerToCluster(server, serverName, online, targetCluster, targetClusterName)
else:
serverInstanceExist = 1
log.info('Server [' + str(serverName) + '] already exists, checking REPLACE flag.')
#=======================================================================================
# Enable Auto restart functionality
#=======================================================================================
cd('/Servers/' + str(serverName))
log.info('Setting Auto Restart [' + str(autoRestartOnFailure) + '].')
if not autoRestartOnFailure is None and autoRestartOnFailure.upper()=='FALSE':
set('AutoRestart', 'False')
else:
set('AutoRestart', 'True')
if not serverInstanceExist or isReplaceRequired(configProperties.getProperty('REPLACE')) or (not replaceName is None and not serverName is None):
if serverInstanceExist and isReplaceRequired(configProperties.getProperty('REPLACE')):
log.info('REPLACE flag is specified, start replacing Server [' + str(serverName) + '] properties.')
if not replaceName is None:
log.info('Changing ' + serverInstance.getName() + ' to ' + replaceName + '.')
serverInstance.setName(replaceName)
serverName = replaceName
if configProperties.getProperty('wls.admin.name') != serverName:
cd('/')
targetCluster=configProperties.getProperty('wls.server.' + str(server) + '.cluster')
targetClusterName=configProperties.getProperty('wls.cluster.' + str(targetCluster) + '.name')
__targetServerToCluster(server, serverName, online, targetCluster, targetClusterName)
log.info('Setting listen port [' + str(listenPort) + '].')
if listenPort is None or listenPort == '':
log.error('Listen port is not set for server \'' + str(server) + '\'. Please make sure that the property \'wls.server.' + str(server) + '.listener.port\' exists and is valid.')
sys.exit()
serverInstance.setListenPort(int(listenPort))
log.info('Setting listen address [' + str(listenAddress) + '].')
serverInstance.setListenAddress(listenAddress)
log.info('Setting SSL enable [' + str(enableSSL) + '].')
if not enableSSL is None and enableSSL.upper()=='TRUE':
if online:
ssl = serverInstance.getSSL()
else:
cd('/Servers/' + str(serverName))
ssl = create(str(serverName),'SSL')
ssl.setEnabled(1)
log.info('Setting SSL port [' + str(sslPort) + '].')
ssl.setListenPort(int(sslPort))
else:
if not enableSSL is None and enableSSL.upper()=='FALSE':
if online:
ssl = serverInstance.getSSL()
else:
cd('/Servers/' + str(serverName))
ssl = create(str(serverName),'SSL')
ssl.setEnabled(0)
#=======================================================================================
# Configure tunneling.
#=======================================================================================
cd('/Servers/' + str(serverName))
log.info('Setting Tunneling Enabled [' + str(enableTunneling) + '].')
if not enableTunneling is None and enableTunneling.upper()=='TRUE':
set('TunnelingEnabled', 'True')
else:
set('TunnelingEnabled', 'False')
#=======================================================================================
# Configure logging properties.
#=======================================================================================
try:
customLog=configProperties.getProperty('wls.server.' + str(server) + '.log.custom')
logFileName=configProperties.getProperty('wls.server.' + str(server) + '.log.filename')
limitNumOfFile=configProperties.getProperty('wls.server.' + str(server) + '.log.limitNumOfFile')
fileToRetain=configProperties.getProperty('wls.server.' + str(server) + '.log.fileToRetain')
rotateOnStartup=configProperties.getProperty('wls.server.' + str(server) + '.log.rotateLogOnStartup')
logFileSeverity=configProperties.getProperty('wls.server.' + str(server) + '.log.logFileSeverity')
broadcastSeverity=configProperties.getProperty('wls.server.' + str(server) + '.log.broadcastSeverity')
memoryBufferSeverity=configProperties.getProperty('wls.server.' + str(server) + '.log.memoryBufferSeverity')
rotationType=configProperties.getProperty('wls.server.' + str(server) + '.log.rotationType')
fileMinSize=configProperties.getProperty('wls.server.' + str(server) + '.log.fileMinSize')
rotationTime=configProperties.getProperty('wls.server.' + str(server) + '.log.rotationTime')
fileTimeSpan=configProperties.getProperty('wls.server.' + str(server) + '.log.fileTimeSpan')
rotationDir=configProperties.getProperty('wls.server.' + str(server) + '.log.rotationDir')
if not customLog is None and customLog.upper()=='TRUE':
cd('/')
if online:
logObj = serverInstance.getLog()
else:
cd('/Servers/' + str(serverName))
logObj = create(serverName, 'Log')
log.info('Setting log filename [' + str(logFileName) + '].')
logObj.setFileName(logFileName)
log.info('Setting limit number of file [' + str(limitNumOfFile) + '].')
if not limitNumOfFile is None and limitNumOfFile.upper()=='TRUE':
logObj.setNumberOfFilesLimited(true)
log.info('Setting number of file [' + str(fileToRetain) + '].')
logObj.setFileCount(int(fileToRetain))
log.info('Setting rotation on startup [' + str(rotateOnStartup) + '].')
if not rotateOnStartup is None and rotateOnStartup.upper()=='TRUE':
logObj.setRotateLogOnStartup(1)
else:
logObj.setRotateLogOnStartup(0)
log.info('Setting log file severity [' + str(logFileSeverity) + '].')
logObj.setLogFileSeverity(logFileSeverity)
log.info('Setting domain log broadcast severity [' + str(broadcastSeverity) + '].')
logObj.setDomainLogBroadcastSeverity(broadcastSeverity)
log.info('Setting memory buffer severity [' + str(memoryBufferSeverity) + '].')
logObj.setMemoryBufferSeverity(memoryBufferSeverity)
log.info('Setting log rotation type [' + str(rotationType) + '].')
logObj.setRotationType(rotationType)
if rotationType.upper()=='BYTIME':
logObj.setRotationTime(rotationTime)
logObj.setFileTimeSpan(int(fileTimeSpan))
else:
if rotationType.upper()=='BYSIZE':
logObj.setFileMinSize(int(fileMinSize))
log.info('Setting log rotation directory [' + str(rotationDir) + '].')
logObj.setLogFileRotationDir(rotationDir)
except Exception, error:
raise ScriptError, 'Unable to configure logging properties on managed server [' + str(serverName) + '] : ' + str(error)
try:
if online:
webserver = serverInstance.getWebServer()
httplog = webserver.getWebServerLog()
else:
webserver = create(str(serverName), 'WebServer')
cd('WebServer/' + str(serverName))
httplog = create(str(serverName), 'WebServerLog')
httpLogEnable=configProperties.getProperty('wls.server.' + str(server) + '.httplog.enable')
log.info('Setting http log enable [' + str(httpLogEnable) + '].')
if not httpLogEnable is None and httpLogEnable.upper()=='TRUE':
httplog.setLoggingEnabled(1)
httpLogFileName=configProperties.getProperty('wls.server.' + str(server) + '.httplog.filename')
httpLimitNumOfFile=configProperties.getProperty('wls.server.' + str(server) + '.httplog.limitNumOfFile')
httpFileToRetain=configProperties.getProperty('wls.server.' + str(server) + '.httplog.fileToRetain')
httpRotateOnStartup=configProperties.getProperty('wls.server.' + str(server) + '.httplog.rotateLogOnStartup')
httpRotationType=configProperties.getProperty('wls.server.' + str(server) + '.httplog.rotationType')
httpFileMinSize=configProperties.getProperty('wls.server.' + str(server) + '.httplog.fileMinSize')
httpRotationTime=configProperties.getProperty('wls.server.' + str(server) + '.httplog.rotationTime')
httpFileTimeSpan=configProperties.getProperty('wls.server.' + str(server) + '.httplog.fileTimeSpan')
httpRotationDir=configProperties.getProperty('wls.server.' + str(server) + '.httplog.rotationDir')
httpFormat=configProperties.getProperty('wls.server.' + str(server) + '.httplog.format')
log.info('Setting http log filename [' + str(httpLogFileName) + '].')
httplog.setFileName(httpLogFileName)
log.info('Setting http limit number of file [' + str(httpLimitNumOfFile) + '].')
if not httpLimitNumOfFile is None and httpLimitNumOfFile.upper()=='TRUE':
httplog.setNumberOfFilesLimited(true)
log.info('Setting http number of file [' + str(httpFileToRetain) + '].')
httplog.setFileCount(int(httpFileToRetain))
else:
httplog.setNumberOfFilesLimited(false)
log.info('Setting http log rotate on startup [' + str(httpRotateOnStartup) + '].')
if not httpRotateOnStartup is None and httpRotateOnStartup.upper()=='TRUE':
httplog.setRotateLogOnStartup(1)
else:
httplog.setRotateLogOnStartup(0)
log.info('Setting http log format [' + str(httpFormat) + '].')
httplog.setELFFields(httpFormat)
log.info('Setting http log rotation type [' + str(httpRotationType) + '].')
httplog.setRotationType(httpRotationType)
if httpRotationType.upper()=='BYTIME':
httplog.setRotationTime(httpRotationTime)
log.info('Setting http log time span [' + str(httpFileTimeSpan) + '].')
httplog.setFileTimeSpan(int(httpFileTimeSpan))
else:
if httpRotationType.upper()=='BYSIZE':
log.info('Setting http log min size [' + str(httpFileMinSize) + '].')
httplog.setFileMinSize(int(httpFileMinSize))
log.info('Setting http log rotation directory [' + str(httpRotationDir) + '].')
httplog.setLogFileRotationDir(httpRotationDir)
else:
if not httpLogEnable is None and httpLogEnable.upper()=='FALSE':
httplog.setLoggingEnabled(0)
except Exception, error:
raise ScriptError, 'Unable to configure http logging properties on managed server [' + str(serverName) + '] : ' + str(error)
domainPath=configProperties.getProperty('wls.domain.dir')
domainName=configProperties.getProperty('wls.domain.name')
domainUsername=configProperties.getProperty('wls.admin.username')
domainPassword=configProperties.getProperty('wls.admin.password')
rootDir=configProperties.getProperty('wls.server.' + str(server) + '.root.dir')
securityDir = File(domainPath + File.separator + domainName + File.separator + 'servers' + File.separator + str(serverName) + File.separator + 'security')
if not securityDir.exists() and rootDir is None:
log.info('Creating directory ' + str(securityDir))
securityDir.mkdirs()
bootFile = File(securityDir.getAbsolutePath() + File.separator + 'boot.properties')
# TODO: Use flag no.managed.server.boot.properties=true/false instead of checking domain called osb_domain (H@CK)
if not bootFile.exists() and rootDir is None and not domainName == 'osb_domain':
log.info('Creating boot.properties for server [' + str(serverName) + '].')
bootFile.createNewFile()
fileWriter = FileWriter(bootFile)
bufWriter = BufferedWriter(fileWriter)
bufWriter.write('username=' + str(domainUsername))
bufWriter.newLine()
bufWriter.write('password=' + str(domainPassword))
bufWriter.close()
else:
log.info('Ignoring boot.properties creation for [' + str(serverName) + '].')
#=======================================================================================
# Configure additional managed server properties
#=======================================================================================
__configureAdditionalManagedServerProperties(serverName, enableHostnameVerification)
__configureCoherenceManagedServerProperties(serverName,coherenceUnicastAddress,wlsVersion)
servergroups=configProperties.getProperty('wls.server.'+str(server)+'.servergroups')
if servergroups is not None:
servergroupslist=servergroups.split(',')
if servergroupslist is not None and len(servergroupslist)>0:
__configureServerGroups(serverName,servergroupslist,wlsVersion)
#=======================================================================================
# Assign managed server to machine.
#=======================================================================================
if not machine is None and len(machine)>0:
machineName=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.name')
machineReplaceName=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.replace.name')
if not machineReplaceName is None:
machineName=machineReplaceName
try:
cd('/')
if online:
machineInstance = lookup(machineName, 'Machine')
log.info('Setting machine [' + str(machineName) + '] to server [' + str(serverName) + '].')
serverInstance.setMachine(machineInstance)
else:
log.info('Assigning machine [' + str(machineName) + '] to server [' + str(serverName) + '].')
assign('Server', serverName, 'Machine', machineName)
except Exception, error:
raise ScriptError, 'Unable to assign server [' + str(serverName) + '] to machine [' + str(machineName) + ']: ' + str(error)
#=======================================================================================
# Configure channel.
#=======================================================================================
srvName=configProperties.getProperty('wls.server.' + str(server) + '.name')
channelName=configProperties.getProperty('wls.server.' + str(server) + '.channel.name')
channelProtocol=configProperties.getProperty('wls.server.' + str(server) + '.channel.protocol')
channelListenerAddr=configProperties.getProperty('wls.server.' + str(server) + '.channel.listener.address')
channelListenerPort=configProperties.getProperty('wls.server.' + str(server) + '.channel.listener.port')
channelPublicListenerAddr=configProperties.getProperty('wls.server.' + str(server) + '.channel.listener.publicAddress')
channelPublicListenerPort=configProperties.getProperty('wls.server.' + str(server) + '.channel.listener.publicPort')
httpEnable=configProperties.getProperty('wls.server.' + str(server) + '.channel.httpEnable')
if not channelName is None and len(channelName)>0:
__configureChannel(srvName, channelName, online, channelProtocol, channelListenerAddr, channelListenerPort, channelPublicListenerAddr, channelPublicListenerPort, httpEnable)
def __configureCoherenceManagedServerProperties(serverName,coherenceUnicastAddress,wlsVersion ):
if wlsVersion == '12':
log.debug("Weblogic 12c....Setting up Unicast Listen address for Coherence server")
try:
cd('/')
cd('Server/'+str(serverName))
create('member_config', 'CoherenceMemberConfig')
cd('CoherenceMemberConfig/member_config')
if coherenceUnicastAddress is not None and str(coherenceUnicastAddress)=="localhost":
coherencelistenAddress="127.0.0.1"
else:
coherencelistenAddress=str(coherenceUnicastAddress)
log.debug("Setting Coherence Unicast listen aaddress to"+coherencelistenAddress)
set('UnicastListenAddress', coherencelistenAddress)
except Exception, error:
log.info("Coherence Property set up failed, Managed server may fail to start up with out this configuration")
def __configureAdditionalManagedServerProperties(serverName, enableHostnameVerification):
#=======================================================================================
# Configure additional managed server properties
#=======================================================================================
if not enableHostnameVerification is None:
try:
cd('/Servers/' + str(serverName))
ssl = create('SSL','SSL')
log.info('Setting Hostname Verification [' + str(enableHostnameVerification) + '].')
if enableHostnameVerification.upper()=='TRUE':
ssl.setHostnameVerificationIgnored(0)
else:
ssl.setHostnameVerificationIgnored(1)
except Exception, error:
cd('/Servers/' + str(serverName))
ssl = create('SSL','SSL')
log.info('Setting Hostname Verification [' + str(enableHostnameVerification) + '].')
if enableHostnameVerification.upper()=='TRUE':
ssl.setHostnameVerificationIgnored(0)
else:
ssl.setHostnameVerificationIgnored(1)
def __configureServerGroups(serverName,servergroupslist,wlsVersion):
if wlsVersion == '12':
if not servergroupslist is None:
log.debug(servergroupslist)
try:
log.debug("setting server groups for "+serverName)
setServerGroups(serverName, servergroupslist)
except Exception, error:
log.info("setting server groups failed for "+serverName)
def __targetServerToCluster(server, serverName, online, targetCluster, targetClusterName):
if not targetCluster is None and len(targetCluster)>0:
log.info('Assigning Managed Server [' + str(serverName) + '] to Cluster [' + str(targetClusterName) + ']')
try:
cd('/')
if online:
serverInstance = lookup(str(serverName), 'Server')
clusterInstance = lookup(targetClusterName, 'Cluster')
serverInstance.setCluster(clusterInstance)
else:
cd('/Servers/' + str(serverName))
assign('Server', str(serverName), 'Cluster', str(targetClusterName))
log.info('Managed Server [' + str(serverName) + '] has been assigned to Cluster [' + str(targetClusterName) + '] successfully.')
except Exception, error:
raise ScriptError, 'Unable to assign server [' + str(serverName) + '] to cluster [' + str(targetClusterName) + ']: ' + str(error)
else:
log.info('################################################################################')
log.info('# WARNING: Managed Server [' + str(serverName) + '] is not targeted to any cluster.')
log.info('################################################################################')
#=======================================================================================
# Create machines
#=======================================================================================
def __createMachines(online, configProperties):
domainMachines=configProperties.getProperty('wls.domain.machines')
if not domainMachines is None and len(domainMachines)>0:
machineList = domainMachines.split(',')
for machine in machineList:
__createMachine(machine, online, configProperties)
#=======================================================================================
# Create a machine
#=======================================================================================
def __createMachine(machine, online, configProperties):
machineName=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.name')
machineReplaceName=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.replace.name')
command_name=configProperties.getProperty('create_domain')
#log.info(command_name)
machineType=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.type')
machineBindGID=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.postBindGID')
machineBindGIDEnable=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.postBindGIDEnabled')
machineBindUID=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.postBindUID')
machineBindUIDEnable=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.postBindUIDEnabled')
nodeType=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.nodemanager.type')
nodeAddress=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.nodemanager.address')
nodePort=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.nodemanager.port')
nodeHome=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.nodemanager.nodeManagerHome')
nodeShellCmd=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.nodemanager.shellCommand')
nodeDebugEnable=configProperties.getProperty('wls.domain.machine.' + str(machine) + '.nodemanager.debugEnabled')
#log.debug('inside __createMachine')
try:
if not machineName is None:
machine = None
machineExist = 0
try:
#log.debug('in try')
cd('/')
if online:
log.debug('in online')
cd('Machines')
machine = lookup(str(machineName), 'Machine')
else:
try:
log.debug('in offline')
#Disabling mbean checks in offline mode for Oracle FMW 12.2 support
#cd('Machines')
#cd(str(machineName))
#machine = cmo
except Exception, error:
log.info('Unable to lookup machine [' + str(machineName) + ']. The machine will be created in offline mode' )
pass
except WLSTException, error:
log.info('Unable to lookup machine [' + str(machineName) + ']. The machine will be created.')
if machine is None:
if online:
cd('/')
if machineType.upper()=='UNIX':
machine = cmo.createUnixMachine(str(machineName))
else:
machine = create(str(machineName),'Machine')
else:
cd('/')
if machineType.upper()=='UNIX':
machine = create(str(machineName), 'UnixMachine')
else:
machine = create(str(machineName),'Machine')
log.debug('machine created ------------'+str(machineName))
else:
if not machineReplaceName is None and machineType.upper()=='UNIX':
cd('/')
delete(str(machineName),'Machine')
machine = create(str(machineReplaceName), 'UnixMachine')
oldMachineName = machineName
machineName = machineReplaceName
log.info('Removed ' + oldMachineName + ' to be replaced with ' + machineName)
else:
machineExist = 1
log.info('Machine [' + str(machineName) + '] already exists, checking REPLACE flag.')
if not machineExist or isReplaceRequired(configProperties.getProperty('REPLACE')) or not machineReplaceName is None:
if machineExist and isReplaceRequired(configProperties.getProperty('REPLACE')):
log.info('REPLACE flag is specified, start replacing machine [' + str(machineName) + '] properties.' )
if not machineReplaceName is None and not machineType.upper()=='UNIX':
log.info('Changing ' + machineName + ' to ' + machineReplaceName + '.')
oldMachineName = machineName
machineName = machineReplaceName
machine.setName(machineName)
if machineType.upper()=='UNIX':
log.info('Setting Post Bind GID Enabled [' + str(machineBindGIDEnable) + '].')
if not machineBindGIDEnable is None and machineBindGIDEnable.upper()=='TRUE':
machine.setPostBindGIDEnabled(1)
log.info('Setting Post Bind GID [' + str(machineBindGID) + '].')
machine.setPostBindGID(machineBindGID)
else:
machine.setPostBindGIDEnabled(0)
log.info('Setting Post Bind UID Enabled [' + str(machineBindUIDEnable) + '].')
if not machineBindUIDEnable is None and machineBindUIDEnable.upper()=='TRUE':
machine.setPostBindUIDEnabled(1)
log.info('Setting Post Bind UID [' + str(machineBindUID) + '].')
machine.setPostBindUID(machineBindUID)
else:
machine.setPostBindUIDEnabled(0)
if not nodeType is None and len(nodeType)>0:
if online:
nodeManager = machine.getNodeManager()
else:
try:
cd('/Machines/' + str(machineName))
except Exception, error:
try:
cd('AnyMachine/' + str(machineName))
except:
pass
try:
log.debug("machine name " + str(machineName))
cd('/Machines/' + str(machineName))
nodeManager = create('NodeManager','NodeManager')
#nodeManager = cmo
except Exception, error:
log.info('Node manager does not exist, creating new one.')
try:
cd('/Machines/' + str(machineName))
nodeManager = create('NodeManager','NodeManager')
except Exception, error:
raise ScriptError, 'Unable to create node manager :' + str(error)
log.info('Setting node manager type [' + str(nodeType) + '].')
nodeManager.setNMType(nodeType)
log.info('Setting node manager address [' + str(nodeAddress) + '].')
nodeManager.setListenAddress(nodeAddress)
log.info('Setting node manager port [' + str(nodePort) + '].')
nodeManager.setListenPort(int(nodePort))
if not nodeHome is None and len(nodeHome)>0:
log.info('Setting node manager home [' + str(nodeHome) + '].')
nodeManager.setNodeManagerHome(nodeHome)
if not nodeShellCmd is None and len(nodeShellCmd)>0:
log.info('Setting node manager shell command [' + str(nodeShellCmd) + '].')
nodeManager.setShellCommand(nodeShellCmd)
log.info('Setting node manager debug [' + str(nodeDebugEnable) + '].')
if not nodeDebugEnable is None and nodeDebugEnable.upper()=='TRUE':
nodeManager.setDebugEnabled(1)
else:
if not nodeDebugEnable is None and nodeDebugEnable.upper()=='FALSE':
nodeManager.setDebugEnabled(0)
else:
log.info('Debug Enable is not specified, skipping.')
else:
log.info('Could not create machine [' + str(machineName) + '].')
except Exception, error:
raise ScriptError, 'Unable to create machine [' + str(machineName) + ']: ' + str(error)
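# Illustrative only: a hypothetical machine definition using the property
# keys read above (values are examples, not defaults):
#
#   wls.domain.machines=machine1
#   wls.domain.machine.machine1.name=Machine1
#   wls.domain.machine.machine1.type=unix
#   wls.domain.machine.machine1.nodemanager.type=SSL
#   wls.domain.machine.machine1.nodemanager.address=10.0.0.11
#   wls.domain.machine.machine1.nodemanager.port=5556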
#=======================================================================================
# Configure channel
#=======================================================================================
def __configureChannel(serverName, channelName, online, channelProtocol, channelHost, channelPort, channelPublicAddress, channelPublicPort, httpEnable):
try:
cd('/Servers/' + str(serverName))
except WLSTException, error:
raise ScriptError, 'Unable to find server [' + str(serverName) + '], please check ' + getDomainFileName() + ' and try again.'
channel = None
channelExist = 0
try:
if online:
cd('/Servers/' + str(serverName))
channel = lookup(channelName, 'NetworkAccessPoint')
else:
try:
cd('/Servers/' + str(serverName) + '/NetworkAccessPoint/' + str(channelName))
except Exception, error:
log.info('Unable to find channel [' + str(channelName) + '], trying to create new one.')
except WLSTException, error:
log.info('Unable to find channel [' + str(channelName) + '], trying to create new one.')
if channel is None:
cd('/Servers/' + str(serverName))
log.info('Creating channel [' + str(channelName) + '].')
channel = create(channelName, 'NetworkAccessPoint')
else:
channelExist = 1
log.info('Channel [' + str(channelName) + '] already exists, checking REPLACE flag.')
if not channelExist or isReplaceRequired(configProperties.getProperty('REPLACE')):
if channelExist and isReplaceRequired(configProperties.getProperty('REPLACE')):
log.info('REPLACE flag is specified, start replacing Channel [' + str(channelName) + '] properties.' )
log.info('Setting listen address [' + str(channelHost) + '].')
channel.setListenAddress(channelHost)
log.info('Setting listen port [' + str(channelPort) + '].')
channel.setListenPort(int(channelPort))
log.info('Setting protocol [' + str(channelProtocol) + '].')
channel.setProtocol(channelProtocol)
log.info('Setting http enable [' + str(httpEnable) + '].')
if not httpEnable is None and httpEnable.upper()=='TRUE':
channel.setHttpEnabledForThisProtocol(1)
log.info('Setting public address [' + str(channelPublicAddress) + '].')
channel.setPublicAddress(channelPublicAddress)
log.info('Setting public port [' + str(channelPublicPort) + '].')
channel.setPublicPort(int(channelPublicPort))
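# Illustrative only: hypothetical channel properties matching the
# wls.server.<id>.channel.* keys read in __createServer:
#
#   wls.server.ms1.channel.name=t3-public
#   wls.server.ms1.channel.protocol=t3
#   wls.server.ms1.channel.listener.address=10.0.0.11
#   wls.server.ms1.channel.listener.port=7021
#   wls.server.ms1.channel.listener.publicAddress=apps.example.com
#   wls.server.ms1.channel.listener.publicPort=80
#   wls.server.ms1.channel.httpEnable=true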
#=======================================================================================
# Create clusters.
#=======================================================================================
def __createClusters(online, configProperties):
clusters=configProperties.getProperty('wls.clusters')
if not clusters is None and len(clusters) > 0:
clusterList = clusters.split(',')
for cluster in clusterList:
__createCluster(cluster, online, configProperties)
#=======================================================================================
# Create a cluster in domain.
#=======================================================================================
def __createCluster(cluster, online, configProperties):
clusterName=configProperties.getProperty('wls.cluster.' + str(cluster) + '.name')
clusterAddress=configProperties.getProperty('wls.cluster.' + str(cluster) + '.address')
multicastAddress=configProperties.getProperty('wls.cluster.' + str(cluster) + '.multicast.address')
multicastPort=configProperties.getProperty('wls.cluster.' + str(cluster) + '.multicast.port')
algorithm=configProperties.getProperty('wls.cluster.' + str(cluster) + '.defaultLoadAlgorithm')
cd('/')
clusterExist = 0
try:
cluster = None
if online:
cluster = lookup(str(clusterName), 'Cluster')
else:
try:
#Disabling mbean checks in offline mode for Oracle FMW 12.2 support
#cd('/Clusters')
#cd(str(clusterName))
log.debug('Unable to lookup cluster [' + str(clusterName) + ']. The cluster will be created.')
except Exception, error:
log.info('Unable to lookup cluster [' + str(clusterName) + ']. The cluster will be created.')
except WLSTException, error:
log.info('Unable to lookup cluster [' + str(clusterName) + ']. The cluster will be created.')
if cluster is None:
cd('/')
log.info('Creating cluster [' + str(clusterName) + '].')
cluster = create(str(clusterName), 'Cluster')
log.info('Cluster [' + str(clusterName) + '] has been created.')
else:
clusterExist = 1
log.info('Cluster [' + str(clusterName) + '] already exists.')
if not clusterExist or isReplaceRequired(configProperties.getProperty('REPLACE')):
if clusterExist and isReplaceRequired(configProperties.getProperty('REPLACE')):
log.info('REPLACE flag is specified, start replacing cluster [' + str(clusterName) + '] properties.')
if not clusterAddress is None:
log.info('Setting cluster address [' + str(clusterAddress) + '].' )
cluster.setClusterAddress(clusterAddress)
if not multicastAddress is None:
log.info('Setting multicast address [' + str(multicastAddress) + '].')
cluster.setMulticastAddress(multicastAddress)
if not multicastPort is None:
log.info('Setting multicast port [' + str(multicastPort) + '].')
cluster.setMulticastPort(int(multicastPort))
if not algorithm is None:
log.info('Setting default load algorithm [' + str(algorithm) + '].')
cluster.setDefaultLoadAlgorithm(algorithm)
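# Illustrative only: hypothetical cluster properties for the keys read above:
#
#   wls.clusters=cluster1
#   wls.cluster.cluster1.name=Cluster1
#   wls.cluster.cluster1.address=10.0.0.11:7011,10.0.0.12:7011
#   wls.cluster.cluster1.multicast.address=239.192.0.1
#   wls.cluster.cluster1.multicast.port=7001
#   wls.cluster.cluster1.defaultLoadAlgorithm=round-robin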
#=======================================================================================
# Assign servers to cluster.
#=======================================================================================
def __assignServersToCluster(configProperties):
#=======================================================================================
# Assign managed server to cluster.
#=======================================================================================
servers=configProperties.getProperty('wls.servers')
if not servers is None and len(servers) > 0:
serverList = servers.split(',')
for server in serverList:
serverName=configProperties.getProperty('wls.server.' + str(server) + '.name')
targetCluster=configProperties.getProperty('wls.server.' + str(server) + '.cluster')
targetClusterName=configProperties.getProperty('wls.cluster.' + str(targetCluster) + '.name')
cd('/')
serverInstance = lookup(serverName, 'Server')
clusterInstance = serverInstance.getCluster()
if not clusterInstance is None:
log.info('Server [' + str(serverName) + '] already exists in cluster [' + str(targetClusterName) + '], skipping.')
else:
clusterInstance = lookup(targetClusterName, 'Cluster')
try:
serverInstance.setCluster(clusterInstance)
except Exception, error:
raise ScriptError, 'Unable to assign server [' + str(serverName) + '] to cluster [' + str(targetClusterName) + ']: ' + str(error)
|
from measurement.base import MeasureBase
__all__ = [
'Time',
]
class Time(MeasureBase):
"""Time measurements (generally for multidimensional measures).
Please do not use this for handling durations of time unrelated to
measure classes -- python's built-in datetime module has much better
functionality for handling intervals of time than this class provides.
"""
STANDARD_UNIT = 's'
UNITS = {
's': 1.0,
'min': 60.0,
'hr': 3600.0,
'day': 86400.0
}
ALIAS = {
'second': 's',
'sec': 's', # For backward compatibility
'minute': 'min',
'hour': 'hr',
'day': 'day'
}
SI_UNITS = ['s']
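# Minimal usage sketch, assuming MeasureBase exposes units as constructor
# keywords and attributes (as the `measurement` package does); not part of
# the class itself:
#
#   >>> Time(hr=1).s
#   3600.0
#   >>> Time(day=2).hr
#   48.0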
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QWidget
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from data.gsd import GSD_LUT, get_gsd
from os import path as osp
from libs.utils import distance
import sys
DEFAULT_LINE_COLOR = QColor(0, 255, 0, 128)
DEFAULT_FILL_COLOR = QColor(255, 0, 0, 128)
DEFAULT_SELECT_LINE_COLOR = QColor(255, 255, 255)
DEFAULT_SELECT_FILL_COLOR = QColor(0, 128, 255, 155)
DEFAULT_VERTEX_FILL_COLOR = QColor(0, 255, 0, 255)
DEFAULT_HVERTEX_FILL_COLOR = QColor(255, 0, 0)
class Shape(QWidget):
P_SQUARE, P_ROUND = range(2)
MOVE_VERTEX, NEAR_VERTEX = range(2)
# The following class variables influence the drawing
# of _all_ shape objects.
line_color = DEFAULT_LINE_COLOR
fill_color = DEFAULT_FILL_COLOR
select_line_color = DEFAULT_SELECT_LINE_COLOR
select_fill_color = DEFAULT_SELECT_FILL_COLOR
vertex_fill_color = DEFAULT_VERTEX_FILL_COLOR
h_vertex_fill_color = DEFAULT_HVERTEX_FILL_COLOR
point_type = P_ROUND
point_size = 8
scale = 1.0
label_font_size = 8
def __init__(self, label=None, img_name=None, line_color=None, difficult=False, parent=None):
super(Shape, self).__init__(parent=parent)
self.label = label
self.points = []
self.fill = False
self.selected = False
self.difficult = difficult
self.img_name = img_name
self._highlight_index = None
self._highlight_mode = self.NEAR_VERTEX
self._highlight_settings = {
self.NEAR_VERTEX: (4, self.P_ROUND),
self.MOVE_VERTEX: (1.5, self.P_SQUARE),
}
self._closed = False
self.area = None
if line_color is not None:
# Override the class line_color attribute
# with an object attribute. Currently this
# is used for drawing the pending line with a different color.
self.line_color = line_color
def close(self):
self._closed = True
def reach_max_points(self):
if len(self.points) >= 4:
return True
return False
def add_point(self, point):
if not self.reach_max_points():
self.points.append(point)
def pop_point(self):
if self.points:
return self.points.pop()
return None
def is_closed(self):
return self._closed
def set_open(self):
self._closed = False
def paint(self, painter):
if self.points:
color = self.select_line_color if self.selected else self.line_color
pen = QPen(color)
# Use a fractional cosmetic pen width; setWidthF accepts float values.
pen.setWidthF(0.5)
painter.setPen(pen)
line_path = QPainterPath()
vertex_path = QPainterPath()
line_path.moveTo(self.points[0])
# Uncommenting the following line will draw 2 paths
# for the 1st vertex, and make it non-filled, which
# may be desirable.
# self.drawVertex(vertex_path, 0)
for i, p in enumerate(self.points):
line_path.lineTo(p)
self.draw_vertex(vertex_path, i)
if self.is_closed():
line_path.lineTo(self.points[0])
painter.drawPath(line_path)
painter.drawPath(vertex_path)
painter.fillPath(vertex_path, self.vertex_fill_color)
# Draw text at the top-left
if len(self.points) != 4:
pass
else:
width = abs(round(self.points[2].x()-self.points[0].x(), 1))
height = abs(round(self.points[2].y()-self.points[0].y(), 1))
font = QFont()
font_size = width/15 if width < height else height/15
if font_size < 3:
font_size = 3
font.setPointSize(font_size)
font.setBold(False)
painter.setFont(font)
font_x = self.points[0].x() if self.points[0].x() < self.points[2].x() else self.points[2].x()
font_y = self.points[0].y() - 1 if self.points[0].y() < self.points[2].y() else self.points[2].y()
try:
gsd = get_gsd(self.img_name)
width = width * gsd['width']
height = height * gsd['height']
self.area = width * height
if self.parent().is_show_box_size:
painter.drawText(font_x, font_y, '{:.1f} x {:.1f} m'.format(width, height))
except Exception as e:
# If GSD is not given, set 99999 to not delete this shape
self.area = 99999
if self.parent().is_show_box_size:
painter.drawText(font_x, font_y, '{:.1f} x {:.1f} pix.'.format(width, height))
if self.fill:
color = self.select_fill_color if self.selected else self.fill_color
painter.fillPath(line_path, color)
def draw_vertex(self, path, i):
d = self.point_size / self.scale
shape = self.point_type
point = self.points[i]
if i == self._highlight_index:
size, shape = self._highlight_settings[self._highlight_mode]
d *= size
if self._highlight_index is not None:
self.vertex_fill_color = self.h_vertex_fill_color
else:
self.vertex_fill_color = Shape.vertex_fill_color
if shape == self.P_SQUARE:
path.addRect(point.x() - d / 2, point.y() - d / 2, d, d)
elif shape == self.P_ROUND:
path.addEllipse(point, d / 2.0, d / 2.0)
else:
assert False, "unsupported vertex shape"
def nearest_vertex(self, point, epsilon):
for i, p in enumerate(self.points):
if distance(p - point) <= epsilon:
return i
return None
def contains_point(self, point):
return self.make_path().contains(point)
def make_path(self):
path = QPainterPath(self.points[0])
for p in self.points[1:]:
path.lineTo(p)
return path
def bounding_rect(self):
return self.make_path().boundingRect()
def move_by(self, offset):
self.points = [p + offset for p in self.points]
def move_vertex_by(self, i, offset):
self.points[i] = self.points[i] + offset
def highlight_vertex(self, i, action):
self._highlight_index = i
self._highlight_mode = action
def highlight_clear(self):
self._highlight_index = None
def copy(self):
shape = Shape("%s" % self.label)
shape.points = [p for p in self.points]
shape.fill = self.fill
shape.selected = self.selected
shape._closed = self._closed
if self.line_color != Shape.line_color:
shape.line_color = self.line_color
if self.fill_color != Shape.fill_color:
shape.fill_color = self.fill_color
shape.difficult = self.difficult
return shape
def __len__(self):
return len(self.points)
def __getitem__(self, key):
return self.points[key]
def __setitem__(self, key, value):
self.points[key] = value
|
#!/usr/bin/env python3
"""
Usage:
dev <command> [-- <extra_args>...]
dev cd [<repository>]
dev clone <repository_or_url>
dev init <shell>
dev open <target>
dev update
dev [-hvct]
Options:
-h, --help Show this screen
-v, --version Show version
-c, --commands List all commands
-t, --tasks List all tasks
"""
import os
import sys
from docopt import docopt
from dev.config import config
from dev.console import console, error_console
from dev.exceptions import CommandNotFoundError, NonZeroReturnCodeError, TaskNotFoundError
from dev.helpers import load_local_taks, task_to_class
from dev.helpers.parent_shell import ParentShellHelper
from dev.tasks.internal import HelpCommand, HelpTask
from dev.version import __version__
from . import sys_path # noqa
args = docopt(__doc__ or '')
load_local_taks()
def main(args: dict = args) -> None:
command = args['<command>']
extra_args = args['<extra_args>']
try:
if args['--version'] is True:
console.print(f'dev {__version__} - Running in Python {sys.version}')
sys.exit(0)
if args['--tasks'] is True:
HelpTask()
sys.exit(0)
if not command or args['--commands'] is True:
HelpCommand()
sys.exit(0)
warn_when_using_bare(command)
for direction, tasks in config.resolve_tasks(command, extra_args).items():
for task in tasks:
task_to_class(task.name)(args=task.args, extra_args=extra_args, direction=direction)
except CommandNotFoundError:
HelpCommand(command)
except TaskNotFoundError as e:
HelpTask(e.task)
except NonZeroReturnCodeError as e:
console.print(
f'Failed to run [b]{command}[/]:',
f'Command [b]{e.command}[/] returned with exit code {e.code}',
style='red',
)
finally:
ParentShellHelper.send_queued_commands()
def warn_when_using_bare(command: str) -> None:
if command == 'init':
# We do not need the shell wrapper when initializing the shell environment.
return
if os.environ.get('INVOKED_VIA_SHELL') == '1':
# We are already running in a shell wrapper.
return
shell = os.path.basename(os.environ.get('SHELL', 'bash'))
error_console.print(
'Warning: You are running [b]dev-bare[/] directly. For all features to work '
'properly, you need to call [b]dev[/].\n'
'If [b]dev[/] is not available in your shell, add the following to your shell config:\n\n'
f' eval "$({sys.argv[0]} init {shell})"\n',
style='yellow',
)
if __name__ == "__main__":
main()
|
import numpy as np
import cv2
def func(path):
frame = cv2.imread(path)
frame = cv2.resize(frame,(96,96))
# downsize it to reduce processing time
#cv2.imshow("original",frame)
converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Convert from BGR to HSV
#print(frame.shape)
#tuned settings
lowerBoundary = np.array([0,40,30],dtype="uint8")
upperBoundary = np.array([43,255,254],dtype="uint8")
skinMask = cv2.inRange(converted, lowerBoundary, upperBoundary)
# apply a series of erosions and dilations to the mask using an elliptical kernel
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
skinMask = cv2.erode(skinMask, kernel, iterations = 2)
skinMask = cv2.dilate(skinMask, kernel, iterations = 2)
lowerBoundary = np.array([170,80,30],dtype="uint8")
upperBoundary = np.array([180,255,250],dtype="uint8")
skinMask2 = cv2.inRange(converted, lowerBoundary, upperBoundary)
skinMask = cv2.addWeighted(skinMask,0.5,skinMask2,0.5,0.0)
#print(skinMask.flatten())
#print(skinMask.shape)
# blur the mask to help remove noise, then apply the
# mask to the frame
skinMask = cv2.medianBlur(skinMask, 5)
skin = cv2.bitwise_and(frame, frame, mask = skinMask)
frame = cv2.addWeighted(frame,1.5,skin,-0.5,0)
skin = cv2.bitwise_and(frame, frame, mask = skinMask)
#cv2.imshow("masked",skin) # Everything apart from skin is shown to be black
h,w = skin.shape[:2]
bw_image = cv2.cvtColor(skin, cv2.COLOR_BGR2GRAY) # Convert the masked image from BGR to grayscale
bw_image = cv2.GaussianBlur(bw_image,(5,5),0) # Smooth the image to reduce noise before thresholding
threshold = 1
for i in range(h):
for j in range(w):
if bw_image[i][j] > threshold:
bw_image[i][j] = 0
else:
bw_image[i][j] = 255
#cv2.imshow("thresholded",bw_image)
#cv2.waitKey(0)
cv2.destroyAllWindows()
return bw_image
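# Example usage sketch (the file name is hypothetical; any readable image
# path will do):
#
#   mask = func('hand_sample.jpg')
#   cv2.imwrite('hand_mask.png', mask)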
|
import torch
import torch.nn as nn
from torch.utils.model_zoo import load_url
from collections import OrderedDict
__all__ = ['MNasNet', 'mnasnet0_5', 'mnasnet0_75', 'mnasnet1_0',
'mnasnet1_3']
model_urls = {
"mnasnet0_5":
"https://download.pytorch.org/models/mnasnet0.5_top1_67.592-7c6cb539b9.pth",
"mnasnet0_75": None,
"mnasnet1_0":
"https://download.pytorch.org/models/mnasnet1.0_top1_73.512-f206786ef8.pth",
"mnasnet1_3": None}
_BN_MOMENTUM = 1. - 0.9997
class InvertedResidual(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size,
stride, expansion, bn_momentum=0.1):
super(InvertedResidual, self).__init__()
assert stride in [1, 2]
assert kernel_size in [3, 5]
channels = in_channels * expansion
self.apply_residual = (stride == 1) and \
(in_channels == out_channels)
self.layers = nn.Sequential(
# pointwise convolution
nn.Conv2d(in_channels, channels, 1, bias=False),
nn.BatchNorm2d(channels, momentum=bn_momentum),
nn.ReLU(inplace=True),
# depthwise separable convolution
nn.Conv2d(channels, channels, kernel_size, stride,
padding=kernel_size // 2, groups=channels,
bias=False),
nn.BatchNorm2d(channels, momentum=bn_momentum),
nn.ReLU(inplace=True),
# pointwise convolution
nn.Conv2d(channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels, momentum=bn_momentum))
def forward(self, x):
if self.apply_residual:
return x + self.layers(x)
else:
return self.layers(x)
def _make_divisible(val, divisor, min_ratio=0.9):
new_val = max(divisor, int(val + divisor / 2)) // divisor * divisor
# make sure that round down does not go down by more than 10%
if new_val < min_ratio * val:
new_val += divisor
return new_val
def _scale_depths(depths, alpha):
return [_make_divisible(depth * alpha, 8) for depth in depths]
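# Worked example of the scaling above (computed by hand): with alpha=0.5 the
# base depths [24, 40, 80, 96, 192, 320] scale to [12, 20, 40, 48, 96, 160],
# and _make_divisible rounds each to a multiple of 8 without dropping more
# than 10% below the target, giving [16, 24, 40, 48, 96, 160].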
def _stack(in_channels, out_channels, kernel_size, stride,
expansion, repeats, bn_momentum):
layers = [InvertedResidual(
in_channels, out_channels, kernel_size, stride,
expansion, bn_momentum=bn_momentum)]
for _ in range(1, repeats):
layers += [InvertedResidual(
out_channels, out_channels, kernel_size, 1,
expansion, bn_momentum=bn_momentum)]
return nn.Sequential(*layers)
class MNasNet(nn.Module):
def __init__(self, alpha, num_classes=None, dropout=0.2):
super(MNasNet, self).__init__()
depths = _scale_depths([24, 40, 80, 96, 192, 320], alpha)
self.layers = nn.Sequential(
# first layer
nn.Conv2d(3, 32, 3, 2, padding=1, bias=False),
nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True),
# depthwise separable convolution
nn.Conv2d(32, 32, 3, 1, padding=1, groups=32, bias=False),
nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True),
nn.Conv2d(32, 16, 1, 1, padding=0, bias=False),
nn.BatchNorm2d(16, momentum=_BN_MOMENTUM),
# MNasNet blocks
_stack(16, depths[0], 3, 2, 3, 3, _BN_MOMENTUM),
_stack(depths[0], depths[1], 5, 2, 3, 3, _BN_MOMENTUM),
_stack(depths[1], depths[2], 5, 2, 6, 3, _BN_MOMENTUM),
_stack(depths[2], depths[3], 3, 1, 6, 2, _BN_MOMENTUM),
_stack(depths[3], depths[4], 5, 2, 6, 4, _BN_MOMENTUM),
_stack(depths[4], depths[5], 3, 1, 6, 1, _BN_MOMENTUM),
# last layer
nn.Conv2d(depths[5], 1280, 1, 1, padding=0, bias=False),
nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM))
# build classifier if num_classes is specified
if num_classes is not None:
self.classifier = nn.Sequential(
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(),
nn.Dropout(p=dropout, inplace=True),
nn.Linear(1280, num_classes))
else:
self.classifier = None
# initialize weights
self.init_weights()
def forward(self, x):
x = self.layers(x)
if self.classifier:
x = self.classifier(x)
return x
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0.0, std=0.01)
nn.init.zeros_(m.bias)
def mnasnet0_5(pretrained=False, **kwargs):
model = MNasNet(0.5, **kwargs)
if pretrained:
model.load_state_dict(
load_url(model_urls['mnasnet0_5'], progress=True),
strict=False)
return model
def mnasnet0_75(pretrained=False, **kwargs):
model = MNasNet(0.75, **kwargs)
if pretrained:
model.load_state_dict(
load_url(model_urls['mnasnet0_75'], progress=True),
strict=False)
return model
def mnasnet1_0(pretrained=False, **kwargs):
model = MNasNet(1.0, **kwargs)
if pretrained:
model.load_state_dict(
load_url(model_urls['mnasnet1_0'], progress=True),
strict=False)
return model
def mnasnet1_3(pretrained=False, **kwargs):
model = MNasNet(1.3, **kwargs)
if pretrained:
model.load_state_dict(
load_url(model_urls['mnasnet1_3'], progress=True),
strict=False)
return model
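# Minimal smoke-test sketch (assumption: this module is run as a script; the
# output shape follows from the strides in MNasNet.layers and the classifier
# head):
if __name__ == "__main__":
    net = mnasnet1_0(num_classes=1000)
    out = net(torch.randn(1, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([1, 1000])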
|
import json
import os
import platform
import stat
import textwrap
import unittest
import six
from mock import patch
from requests.packages.urllib3.exceptions import ConnectionError
from conans import DEFAULT_REVISION_V1
from conans.client.tools import environment_append
from conans.client.tools.files import untargz
from conans.model.manifest import FileTreeManifest
from conans.model.package_metadata import PackageMetadata
from conans.model.ref import ConanFileReference, PackageReference
from conans.paths import CONANFILE, CONANINFO, CONAN_MANIFEST, EXPORT_TGZ_NAME
from conans.test.utils.cpp_test_files import cpp_hello_conan_files, cpp_hello_source_files
from conans.test.utils.test_files import temp_folder, uncompress_packaged_files
from conans.test.utils.tools import (NO_SETTINGS_PACKAGE_ID, TestClient, TestRequester, TestServer,
GenConanfile)
from conans.test.utils.mocks import MockedUserIO, TestBufferConanOutput
from conans.util.files import load, mkdir, save
class BadConnectionUploader(TestRequester):
fail_on = 1
def __init__(self, *args, **kwargs):
super(BadConnectionUploader, self).__init__(*args, **kwargs)
self.counter_fail = 0
def put(self, *args, **kwargs):
self.counter_fail += 1
if self.counter_fail == self.fail_on:
raise ConnectionError("Can't connect because of the evil mock")
else:
return super(BadConnectionUploader, self).put(*args, **kwargs)
class TerribleConnectionUploader(BadConnectionUploader):
def put(self, *args, **kwargs):
raise ConnectionError("Can't connect because of the evil mock")
class FailPairFilesUploader(BadConnectionUploader):
def put(self, *args, **kwargs):
self.counter_fail += 1
if self.counter_fail % 2 == 1:
raise ConnectionError("Pair file, error!")
else:
return super(BadConnectionUploader, self).put(*args, **kwargs)
class FailOnReferencesUploader(BadConnectionUploader):
fail_on = ["lib1", "lib3"]
def __init__(self, *args, **kwargs):
super(BadConnectionUploader, self).__init__(*args, **kwargs)
def put(self, *args, **kwargs):
if any(ref in args[0] for ref in self.fail_on):
raise ConnectionError("Connection fails with lib2 and lib4 references!")
else:
return super(BadConnectionUploader, self).put(*args, **kwargs)
@unittest.skipIf(TestClient().cache.config.revisions_enabled,
"We cannot know the folder of the revision without knowing the hash of "
"the contents")
class UploadTest(unittest.TestCase):
def _get_client(self, requester=None):
servers = {}
# Everyone can write (to avoid authentication until we mock user_io)
self.test_server = TestServer([("*/*@*/*", "*")], [("*/*@*/*", "*")],
users={"lasote": "mypass"})
servers["default"] = self.test_server
return TestClient(servers=servers, users={"default": [("lasote", "mypass")]},
requester_class=requester)
def setUp(self):
self.client = self._get_client()
self.ref = ConanFileReference.loads("Hello/1.2.1@frodo/stable#%s" % DEFAULT_REVISION_V1)
self.pref = PackageReference(self.ref, "myfakeid", DEFAULT_REVISION_V1)
reg_folder = self.client.cache.package_layout(self.ref).export()
self.client.run('upload %s' % str(self.ref), assert_error=True)
self.assertIn("ERROR: Recipe not found: '%s'" % str(self.ref), self.client.out)
files = cpp_hello_source_files(0)
fake_metadata = PackageMetadata()
fake_metadata.recipe.revision = DEFAULT_REVISION_V1
fake_metadata.packages[self.pref.id].revision = DEFAULT_REVISION_V1
self.client.save({"metadata.json": fake_metadata.dumps()},
path=self.client.cache.package_layout(self.ref).base_folder())
self.client.save(files, path=reg_folder)
self.client.save({CONANFILE: GenConanfile().with_name("Hello").with_version("1.2.1"),
"include/math/lib1.h": "//copy",
"my_lib/debug/libd.a": "//copy",
"my_data/readme.txt": "//copy",
"my_bin/executable": "//copy"}, path=reg_folder)
mkdir(self.client.cache.package_layout(self.ref).export_sources())
manifest = FileTreeManifest.create(reg_folder)
manifest.time = '123123123'
manifest.save(reg_folder)
self.test_server.server_store.update_last_revision(self.ref)
self.server_pack_folder = self.test_server.server_store.package(self.pref)
package_folder = self.client.cache.package_layout(self.ref).package(self.pref)
save(os.path.join(package_folder, "include", "lib1.h"), "//header")
save(os.path.join(package_folder, "lib", "my_lib", "libd.a"), "//lib")
save(os.path.join(package_folder, "res", "shares", "readme.txt"),
"//res")
save(os.path.join(package_folder, "bin", "my_bin", "executable"), "//bin")
save(os.path.join(package_folder, CONANINFO),
"""[recipe_hash]\n%s""" % manifest.summary_hash)
FileTreeManifest.create(package_folder).save(package_folder)
self.test_server.server_store.update_last_package_revision(self.pref)
os.chmod(os.path.join(package_folder, "bin", "my_bin", "executable"),
os.stat(os.path.join(package_folder, "bin", "my_bin", "executable")).st_mode |
stat.S_IRWXU)
expected_manifest = FileTreeManifest.create(package_folder)
expected_manifest.save(package_folder)
self.server_reg_folder = self.test_server.server_store.export(self.ref)
self.assertFalse(os.path.exists(self.server_reg_folder))
self.assertFalse(os.path.exists(self.server_pack_folder))
def test_try_upload_bad_recipe(self):
files = cpp_hello_conan_files("Hello0", "1.2.1")
self.client.save(files)
self.client.run("export . frodo/stable")
ref = ConanFileReference.loads("Hello0/1.2.1@frodo/stable")
os.unlink(os.path.join(self.client.cache.package_layout(ref).export(), CONAN_MANIFEST))
with six.assertRaisesRegex(self, Exception, "Command failed"):
self.client.run("upload %s" % str(ref))
self.assertIn("Cannot upload corrupted recipe", self.client.out)
def test_upload_with_pattern(self):
for num in range(5):
files = cpp_hello_conan_files("Hello%s" % num, "1.2.1")
self.client.save(files)
self.client.run("export . frodo/stable")
self.client.run("upload Hello* --confirm")
for num in range(5):
self.assertIn("Uploading Hello%s/1.2.1@frodo/stable" % num, self.client.out)
self.client.run("upload Hello0* --confirm")
self.assertIn("Uploading Hello0/1.2.1@frodo/stable",
self.client.out)
self.assertIn("Recipe is up to date, upload skipped", self.client.out)
self.assertNotIn("Hello1", self.client.out)
self.assertNotIn("Hello2", self.client.out)
self.assertNotIn("Hello3", self.client.out)
def test_upload_error(self):
"""Cause an error in the transfer and see some message"""
# Check for the default behaviour
client = self._get_client(BadConnectionUploader)
files = cpp_hello_conan_files("Hello0", "1.2.1", build=False)
client.save(files)
client.run("export . frodo/stable")
client.run("upload Hello* --confirm")
self.assertIn("Can't connect because of the evil mock", client.out)
self.assertIn("Waiting 5 seconds to retry...", client.out)
# This fails on the first file PUT; since 3 files must be uploaded
# (conanmanifest, conanfile and tgz), the upload succeeds after 2 retries
client = self._get_client(BadConnectionUploader)
files = cpp_hello_conan_files("Hello0", "1.2.1", build=False)
client.save(files)
client.run("export . frodo/stable")
client.run("upload Hello* --confirm --retry-wait=0")
self.assertIn("Can't connect because of the evil mock", client.out)
self.assertIn("Waiting 0 seconds to retry...", client.out)
# but not with 0
client = self._get_client(BadConnectionUploader)
files = cpp_hello_conan_files("Hello0", "1.2.1", build=False)
client.save(files)
client.run("export . frodo/stable")
client.run("upload Hello* --confirm --retry 0 --retry-wait=1", assert_error=True)
self.assertNotIn("Waiting 1 seconds to retry...", client.out)
self.assertIn("ERROR: Hello0/1.2.1@frodo/stable: Upload recipe to 'default' failed: "
"Execute upload again to retry upload the failed files: "
"conan_export.tgz. [Remote: default]", client.out)
# Try with broken connection even with 10 retries
client = self._get_client(TerribleConnectionUploader)
files = cpp_hello_conan_files("Hello0", "1.2.1", build=False)
client.save(files)
client.run("export . frodo/stable")
client.run("upload Hello* --confirm --retry 10 --retry-wait=0", assert_error=True)
self.assertIn("Waiting 0 seconds to retry...", client.out)
self.assertIn("ERROR: Hello0/1.2.1@frodo/stable: Upload recipe to 'default' failed: "
"Execute upload again to retry upload the failed files", client.out)
# Each file fails on the first attempt and succeeds on the second
client = self._get_client(FailPairFilesUploader)
files = cpp_hello_conan_files("Hello0", "1.2.1", build=False)
client.save(files)
client.run("export . frodo/stable")
client.run("install Hello0/1.2.1@frodo/stable --build")
client.run("upload Hello* --confirm --retry 3 --retry-wait=0 --all")
self.assertEqual(str(client.out).count("ERROR: Pair file, error!"), 6)
def test_upload_error_with_config(self):
"""Cause an error in the transfer and see some message"""
# This fails on the first file PUT; since 3 files must be uploaded
# (conanmanifest, conanfile and tgz), the upload succeeds after 2 retries
client = self._get_client(BadConnectionUploader)
files = cpp_hello_conan_files("Hello0", "1.2.1", build=False)
client.save(files)
client.run("export . frodo/stable")
client.run('config set general.retry_wait=0')
client.run("upload Hello* --confirm")
self.assertIn("Can't connect because of the evil mock", client.out)
self.assertIn("Waiting 0 seconds to retry...", client.out)
# but not with 0
client = self._get_client(BadConnectionUploader)
files = cpp_hello_conan_files("Hello0", "1.2.1", build=False)
client.save(files)
client.run("export . frodo/stable")
client.run('config set general.retry=0')
client.run('config set general.retry_wait=1')
client.run("upload Hello* --confirm", assert_error=True)
self.assertNotIn("Waiting 1 seconds to retry...", client.out)
self.assertIn("ERROR: Hello0/1.2.1@frodo/stable: Upload recipe to 'default' failed: "
"Execute upload again to retry upload the failed files: "
"conan_export.tgz. [Remote: default]", client.out)
# Try with broken connection even with 10 retries
client = self._get_client(TerribleConnectionUploader)
files = cpp_hello_conan_files("Hello0", "1.2.1", build=False)
client.save(files)
client.run("export . frodo/stable")
client.run('config set general.retry=10')
client.run('config set general.retry_wait=0')
client.run("upload Hello* --confirm", assert_error=True)
self.assertIn("Waiting 0 seconds to retry...", client.out)
self.assertIn("ERROR: Hello0/1.2.1@frodo/stable: Upload recipe to 'default' failed: "
"Execute upload again to retry upload the failed files", client.out)
        # Each file will fail the first time and succeed the second time
client = self._get_client(FailPairFilesUploader)
files = cpp_hello_conan_files("Hello0", "1.2.1", build=False)
client.save(files)
client.run("export . frodo/stable")
client.run("install Hello0/1.2.1@frodo/stable --build")
client.run('config set general.retry=3')
client.run('config set general.retry_wait=0')
client.run("upload Hello* --confirm --all")
self.assertEqual(str(client.out).count("ERROR: Pair file, error!"), 6)
def test_upload_parallel_error(self):
"""Cause an error in the parallel transfer and see some message"""
client = TestClient(requester_class=FailOnReferencesUploader, default_server_user=True)
client.save({"conanfile.py": GenConanfile()})
client.run('user -p password -r default user')
for index in range(4):
client.run('create . lib{}/1.0@user/channel'.format(index))
client.run('upload lib* --parallel -c --all -r default', assert_error=True)
self.assertIn("Connection fails with lib2 and lib4 references!", client.out)
self.assertIn("Execute upload again to retry upload the failed files", client.out)
def test_upload_parallel_success(self):
"""Upload 2 packages in parallel with success"""
client = TestClient(default_server_user=True)
client.save({"conanfile.py": GenConanfile()})
client.run('create . lib0/1.0@user/channel')
self.assertIn("lib0/1.0@user/channel: Package '{}' created".format(NO_SETTINGS_PACKAGE_ID),
client.out)
client.run('create . lib1/1.0@user/channel')
self.assertIn("lib1/1.0@user/channel: Package '{}' created".format(NO_SETTINGS_PACKAGE_ID),
client.out)
client.run('user -p password -r default user')
client.run('upload lib* --parallel -c --all -r default')
self.assertIn("Uploading lib0/1.0@user/channel to remote 'default'", client.out)
self.assertIn("Uploading lib1/1.0@user/channel to remote 'default'", client.out)
client.run('search lib0/1.0@user/channel -r default')
self.assertIn("lib0/1.0@user/channel", client.out)
client.run('search lib1/1.0@user/channel -r default')
self.assertIn("lib1/1.0@user/channel", client.out)
def test_upload_parallel_fail_on_interaction(self):
"""Upload 2 packages in parallel and fail because non_interactive forced"""
client = TestClient(default_server_user=True)
client.save({"conanfile.py": GenConanfile()})
num_references = 2
for index in range(num_references):
client.run('create . lib{}/1.0@user/channel'.format(index))
self.assertIn("lib{}/1.0@user/channel: Package '{}' created".format(
index,
NO_SETTINGS_PACKAGE_ID),
client.out)
client.run('user -c')
client.run('upload lib* --parallel -c --all -r default', assert_error=True)
self.assertIn("ERROR: lib0/1.0@user/channel: Upload recipe to 'default' failed: "
"Conan interactive mode disabled. [Remote: default]", client.out)
def test_recipe_upload_fail_on_generic_exception(self):
# Make the upload fail with a generic Exception
client = TestClient(default_server_user=True)
conanfile = textwrap.dedent("""
import os
from conans import ConanFile
class Pkg(ConanFile):
exports = "*"
def package(self):
self.copy("*")
""")
client.save({"conanfile.py": conanfile,
"myheader.h": "",
"conan_export.tgz/dummy": ""})
client.run('create . lib/1.0@user/channel')
client.run('upload lib* -c --all -r default', assert_error=True)
self.assertIn("ERROR: lib/1.0@user/channel: Upload recipe to 'default' failed:", client.out)
self.assertIn("ERROR: Errors uploading some packages", client.out)
def test_package_upload_fail_on_generic_exception(self):
# Make the upload fail with a generic Exception
client = TestClient(default_server_user=True)
conanfile = textwrap.dedent("""
import os
from conans import ConanFile
class Pkg(ConanFile):
exports = "*"
def package(self):
os.makedirs(os.path.join(self.package_folder, "conan_package.tgz"))
self.copy("*")
""")
client.save({"conanfile.py": conanfile,
"myheader.h": ""})
client.run('create . lib/1.0@user/channel')
client.run('upload lib* -c --all -r default', assert_error=True)
self.assertNotIn("os.remove(tgz_path)", client.out)
self.assertNotIn("Traceback", client.out)
self.assertIn("ERROR: lib/1.0@user/channel:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9: "
"Upload package to 'default' failed:", client.out)
self.assertIn("ERROR: Errors uploading some packages", client.out)
with environment_append({"CONAN_VERBOSE_TRACEBACK": "True"}):
client.run('upload lib* -c --all -r default', assert_error=True)
self.assertIn("os.remove(tgz_path)", client.out)
self.assertIn("Traceback", client.out)
self.assertIn("ERROR: lib/1.0@user/channel:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9: "
"Upload package to 'default' failed:", client.out)
self.assertIn("ERROR: Errors uploading some packages", client.out)
def test_beat_character_long_upload(self):
client = TestClient(default_server_user=True)
slow_conanfile = textwrap.dedent("""
from conans import ConanFile
class MyPkg(ConanFile):
exports = "*"
def package(self):
self.copy("*")
""")
client.save({"conanfile.py": slow_conanfile,
"hello.cpp": ""})
client.run("create . pkg/0.1@user/stable")
client.run("user user --password=password")
with patch("conans.util.progress_bar.TIMEOUT_BEAT_SECONDS", -1):
with patch("conans.util.progress_bar.TIMEOUT_BEAT_CHARACTER", "%&$"):
client.run("upload pkg/0.1@user/stable --all")
out = "".join(str(client.out).splitlines())
self.assertIn("Compressing package...%&$%&$Uploading conan_package.tgz -> "
"pkg/0.1@user/stable:5ab8", out)
self.assertIn("%&$Uploading conan_export.tgz", out)
self.assertIn("%&$Uploading conaninfo.txt", out)
def test_upload_with_pattern_and_package_error(self):
files = cpp_hello_conan_files("Hello1", "1.2.1")
self.client.save(files)
self.client.run("export . frodo/stable")
self.client.run("upload Hello* --confirm -p 234234234", assert_error=True)
self.assertIn("-p parameter only allowed with a valid recipe reference",
self.client.out)
def test_check_upload_confirm_question(self):
user_io = MockedUserIO({"default": [("lasote", "mypass")]}, out=TestBufferConanOutput())
files = cpp_hello_conan_files("Hello1", "1.2.1")
self.client.save(files)
self.client.run("export . frodo/stable")
user_io.request_string = lambda _: "y"
self.client.run("upload Hello*", user_io=user_io)
self.assertIn("Uploading Hello1/1.2.1@frodo/stable", self.client.out)
files = cpp_hello_conan_files("Hello2", "1.2.1")
self.client.save(files)
self.client.run("export . frodo/stable")
user_io.request_string = lambda _: "n"
self.client.run("upload Hello*", user_io=user_io)
self.assertNotIn("Uploading Hello2/1.2.1@frodo/stable", self.client.out)
def test_upload_same_package_dont_compress(self):
# Create a manifest for the faked package
pack_path = self.client.cache.package_layout(self.pref.ref).package(self.pref)
package_path = self.client.cache.package_layout(self.pref.ref).package(self.pref)
expected_manifest = FileTreeManifest.create(package_path)
expected_manifest.save(pack_path)
self.client.run("upload %s --all" % str(self.ref))
self.assertIn("Compressing recipe", self.client.out)
self.assertIn("Compressing package", str(self.client.out))
self.client.run("upload %s --all" % str(self.ref))
self.assertNotIn("Compressing recipe", self.client.out)
self.assertNotIn("Compressing package", str(self.client.out))
self.assertIn("Package is up to date", str(self.client.out))
def test_upload_with_no_valid_settings(self):
        # Check if upload still works even if the specified setting is not valid.
        # If this test fails, it will fail on Linux/OSX
conanfile = """
from conans import ConanFile
class TestConan(ConanFile):
name = "Hello"
version = "1.2"
settings = {"os": ["Windows"]}
"""
files = {CONANFILE: conanfile}
self.client.save(files)
self.client.run("export . lasote/stable")
self.assertIn("WARN: Conanfile doesn't have 'license'", self.client.out)
self.client.run("upload Hello/1.2@lasote/stable")
self.assertIn("Uploading conanmanifest.txt", self.client.out)
def test_single_binary(self):
""" basic installation of a new conans
"""
# Try to upload an package without upload conans first
self.client.run('upload %s -p %s' % (self.ref, str(self.pref.id)))
self.assertIn("Uploaded conan recipe '%s'" % str(self.ref), self.client.out)
def test_simple(self):
# Upload package
self.client.run('upload %s' % str(self.ref))
self.server_reg_folder = self.test_server.server_store.export(self.ref)
self.assertTrue(os.path.exists(self.server_reg_folder))
if not self.client.cache.config.revisions_enabled:
self.assertFalse(os.path.exists(self.server_pack_folder))
# Upload package
self.client.run('upload %s -p %s' % (str(self.ref), str(self.pref.id)))
self.server_pack_folder = self.test_server.server_store.package(self.pref)
self.assertTrue(os.path.exists(self.server_reg_folder))
self.assertTrue(os.path.exists(self.server_pack_folder))
        # Check the files in the uploaded recipe and package
files = ['CMakeLists.txt',
'my_lib/debug/libd.a',
'hello.cpp',
'hello0.h',
CONANFILE,
CONAN_MANIFEST,
'main.cpp',
'include/math/lib1.h',
'my_data/readme.txt',
'my_bin/executable']
self.assertTrue(os.path.exists(os.path.join(self.server_reg_folder, CONANFILE)))
self.assertTrue(os.path.exists(os.path.join(self.server_reg_folder, EXPORT_TGZ_NAME)))
tmp = temp_folder()
untargz(os.path.join(self.server_reg_folder, EXPORT_TGZ_NAME), tmp)
for f in files:
if f not in (CONANFILE, CONAN_MANIFEST):
self.assertTrue(os.path.exists(os.path.join(tmp, f)))
else:
self.assertFalse(os.path.exists(os.path.join(tmp, f)))
folder = uncompress_packaged_files(self.test_server.server_store, self.pref)
self.assertTrue(os.path.exists(os.path.join(folder,
"include",
"lib1.h")))
self.assertTrue(os.path.exists(os.path.join(folder,
"lib",
"my_lib/libd.a")))
self.assertTrue(os.path.exists(os.path.join(folder,
"res",
"shares/readme.txt")))
if platform.system() != "Windows":
self.assertEqual(os.stat(os.path.join(folder,
"bin",
"my_bin/executable")).st_mode &
stat.S_IRWXU, stat.S_IRWXU)
def test_upload_all(self):
"""Upload conans and package together"""
# Try to upload all conans and packages
self.client.run('user -p mypass -r default lasote')
self.client.run('upload %s --all' % str(self.ref))
lines = [line.strip() for line in str(self.client.out).splitlines()
if line.startswith("Uploading")]
self.assertEqual(lines, ["Uploading to remote 'default':",
"Uploading Hello/1.2.1@frodo/stable to remote 'default'",
"Uploading conan_export.tgz -> Hello/1.2.1@frodo/stable",
"Uploading conanfile.py -> Hello/1.2.1@frodo/stable",
"Uploading conanmanifest.txt -> Hello/1.2.1@frodo/stable",
"Uploading package 1/1: myfakeid to 'default'",
"Uploading conan_package.tgz -> Hello/1.2.1@frodo/stable:myfa",
"Uploading conaninfo.txt -> Hello/1.2.1@frodo/stable:myfa",
"Uploading conanmanifest.txt -> Hello/1.2.1@frodo/stable:myfa",
])
if self.client.cache.config.revisions_enabled:
layout = self.client.cache.package_layout(self.ref)
rev = layout.recipe_revision()
self.ref = self.ref.copy_with_rev(rev)
prev = layout.package_revision(self.pref)
self.pref = self.pref.copy_with_revs(rev, prev)
server_reg_folder = self.test_server.server_store.export(self.ref)
server_pack_folder = self.test_server.server_store.package(self.pref)
self.assertTrue(os.path.exists(server_reg_folder))
self.assertTrue(os.path.exists(server_pack_folder))
def test_force(self):
        # Tries to upload a recipe exported later than the remote version.
# Upload all recipes and packages
self.client.run('upload %s --all' % str(self.ref))
if self.client.cache.config.revisions_enabled:
layout = self.client.cache.package_layout(self.ref)
rev = layout.recipe_revision()
self.ref = self.ref.copy_with_rev(rev)
prev = layout.package_revision(self.pref)
self.pref = self.pref.copy_with_revs(rev, prev)
self.server_reg_folder = self.test_server.server_store.export(self.ref)
self.server_pack_folder = self.test_server.server_store.package(self.pref)
self.assertTrue(os.path.exists(self.server_reg_folder))
self.assertTrue(os.path.exists(self.server_pack_folder))
# Fake datetime from exported date and upload again
old_digest = self.client.cache.package_layout(self.ref).recipe_manifest()
old_digest.file_sums["new_file"] = "012345"
fake_digest = FileTreeManifest(2, old_digest.file_sums)
fake_digest.save(self.client.cache.package_layout(self.ref).export())
self.client.run('upload %s' % str(self.ref), assert_error=True)
self.assertIn("Remote recipe is newer than local recipe", self.client.out)
self.client.run('upload %s --force' % str(self.ref))
self.assertIn("Uploading %s" % str(self.ref),
self.client.out)
# Repeat transfer, to make sure it is uploading again
self.client.run('upload %s --force' % str(self.ref))
self.assertIn("Uploading conan_export.tgz", self.client.out)
self.assertIn("Uploading conanfile.py", self.client.out)
def test_upload_json(self):
conanfile = """
from conans import ConanFile
class TestConan(ConanFile):
name = "test"
version = "0.1"
def package(self):
self.copy("mylib.so", dst="lib")
"""
client = self._get_client()
client.save({"conanfile.py": conanfile,
"mylib.so": ""})
client.run("create . danimtb/testing")
# Test conflict parameter error
client.run("upload test/0.1@danimtb/* --all -p ewvfw --json upload.json", assert_error=True)
json_path = os.path.join(client.current_folder, "upload.json")
self.assertTrue(os.path.exists(json_path))
json_content = load(json_path)
output = json.loads(json_content)
self.assertTrue(output["error"])
self.assertEqual(0, len(output["uploaded"]))
# Test invalid reference error
client.run("upload fake/0.1@danimtb/testing --all --json upload.json", assert_error=True)
json_path = os.path.join(client.current_folder, "upload.json")
self.assertTrue(os.path.exists(json_path))
json_content = load(json_path)
output = json.loads(json_content)
self.assertTrue(output["error"])
self.assertEqual(0, len(output["uploaded"]))
# Test normal upload
client.run("upload test/0.1@danimtb/testing --all --json upload.json")
self.assertTrue(os.path.exists(json_path))
json_content = load(json_path)
output = json.loads(json_content)
output_expected = {"error": False,
"uploaded": [
{
"recipe": {
"id": "test/0.1@danimtb/testing",
"remote_url": "unknown",
"remote_name": "default",
"time": "unknown"
},
"packages": [
{
"id": NO_SETTINGS_PACKAGE_ID,
"time": "unknown"
}
]
}
]}
self.assertEqual(output_expected["error"], output["error"])
self.assertEqual(len(output_expected["uploaded"]), len(output["uploaded"]))
for i, item in enumerate(output["uploaded"]):
self.assertEqual(output_expected["uploaded"][i]["recipe"]["id"], item["recipe"]["id"])
self.assertEqual(output_expected["uploaded"][i]["recipe"]["remote_name"],
item["recipe"]["remote_name"])
for j, subitem in enumerate(item["packages"]):
self.assertEqual(output_expected["uploaded"][i]["packages"][j]["id"],
subitem["id"])
|
def cardinality_assessment(df, X):
    # Report the number of unique values per column; if a column's cardinality
    # exceeds the threshold X, also print its full value counts.
    for col in df.columns:
        print("-----------------------------")
        print("{} : {} unique values".format(col, len(df[col].unique())))
        if len(df[col].unique()) > X:
            print(df[col].value_counts())
    print("-----------------------------")
|
"""
Given a list of primes (p1, p2, p3 ... pn)
generate composite numbers of their multiples
in increasing order.
"""
import heapq
def generate(primes):
# assume primes is sorted
h = [] # min-heap.
# logic: seed the heap with the primes, pop and generate from the heap.
# for every popped element v, add (v * p) for all p in primes.
# we are guaranteed to generate all composites in order.
for p in primes: heapq.heappush(h, p)
while True:
value = heapq.heappop(h)
yield value
for p in primes:
# attempt to insert all (value * p)
# Optimization:
# however, we can optimize inserting value * p if
# value * p | q = 0, q < p since
# (value * p / q) is generated beforehand (since p / q < 1)
# and (value * p / q) * q = (value * p)
# would've been inserted when (value * p / q) is generated.
should_insert = True
new_insert_value = value * p
for q in primes:
if q >= p: break
if new_insert_value % q == 0:
should_insert = False
break
if should_insert: heapq.heappush(h, new_insert_value)
for i, v in enumerate(generate([2, 3, 5])):
if i == 100: break
    print(v)
for i, v in enumerate(generate([3, 11])):
if i == 100: break
    print(v)
|
# -*- coding: utf-8 -*-
import math
from six.moves import range
__all__ = ('bucket_sort', 'DEFAULT_NUM_BUCKETS')
DEFAULT_NUM_BUCKETS = 5
def num_buckets(value_min, value_max, bucket_size):
return int(math.floor((value_max - value_min) / bucket_size)) + 1
def bucket_sort(array, num_buckets=DEFAULT_NUM_BUCKETS):
""" https://en.wikipedia.org/wiki/Bucket_sort
    Interesting when the input array is expected to be uniformly distributed
over a range.
"""
value_min, value_max = min(array), max(array)
bucket_length = (value_max - value_min) / float(num_buckets)
buckets = [[] for i in range(num_buckets)]
for element in array:
buckets_away = math.ceil((element - value_min)/bucket_length)
bucket_idx = min(int(buckets_away), num_buckets - 1)
buckets[bucket_idx].append(element)
for bucket_idx, bucket in enumerate(buckets):
buckets[bucket_idx] = sorted(bucket)
return sum(buckets, [])
""" Example: 5 buckets
min value = 1, max = 15 (y)
ymax-ymin = 14 (length of the ruler)
divide 14 into 5 buckets, each of length 2.8
1st: [1, 3.8]
2nd: [3.8, 6.6]
3rd: [6.6, 9.4]
4th: [9.4, 12.2]
5th: [12.2, 15]
Given an input element 5, figure out in which interval to put it.
max is 15, min is 1, length is 14.
5 is distance 5-min = 4 from minimum.
distance / length = 4 / 2.8 = 1.42 buckets away forward.
ceil is 2.
Same logic for 9: 2.85 buckets away, thus bucket number 3 (ceil)
Same logic for 14: 14-min = 13 (dist). distance / length = 4.6428 buckets
away forward, ceil would be 5 (that'd mean index 6 , wrong).
This can be fixed if we clamp with min(ceil, num_buckets - 1).
Same reasoning could be used for a function that calculates the number of
buckets away from the maximum value.
"""
|
from setuptools import setup
setup(
name='precise_lite_runner',
version='0.4.0',
packages=['precise_lite_runner'],
url='https://github.com/OpenVoiceOS/precise_lite_runner',
license='Apache-2.0',
install_requires=["tflite-runtime",
"sonopy==0.1.2",
"pyaudio"],
author='jarbas',
author_email='jarbasai@mailfence.com',
description=''
)
|
from interactions.base.picker_interaction import PickerSuperInteraction
from sims import sim_spawner
from sims.pets.breed_tuning import all_breeds_gen
from sims.sim_info_types import SpeciesExtended
from sims4.localization import TunableLocalizedString, LocalizationHelperTuning
from sims4.tuning.tunable import TunableMapping, TunableEnumEntry
from sims4.tuning.tunable_base import GroupNames
from sims4.utils import flexmethod
from ui.ui_dialog_picker import TunablePickerDialogVariant, ObjectPickerTuningFlags, BasePickerRow
import sims4
logger = sims4.log.Logger('BreedPickerSuperInteraction')
class BreedPickerSuperInteraction(PickerSuperInteraction):
INSTANCE_TUNABLES = {'picker_dialog': TunablePickerDialogVariant(description='\n The item picker dialog.\n ', available_picker_flags=ObjectPickerTuningFlags.ITEM, default='item_picker', tuning_group=GroupNames.PICKERTUNING), 'species_name': TunableMapping(description="\n If specified, for a particular species, include this text in the\n breed's name.\n ", key_type=TunableEnumEntry(tunable_type=SpeciesExtended, default=SpeciesExtended.HUMAN, invalid_enums=(SpeciesExtended.INVALID,)), value_type=TunableLocalizedString(), tuning_group=GroupNames.PICKERTUNING)}
def _run_interaction_gen(self, timeline):
self._show_picker_dialog(self.sim)
return True
yield
@flexmethod
def picker_rows_gen(cls, inst, target, context, **kwargs):
if inst is not None:
breed_species = []
species = inst.interaction_parameters['species']
for species_extended in SpeciesExtended:
if species_extended == SpeciesExtended.INVALID:
continue
if SpeciesExtended.get_species(species_extended) == species:
breed_species.append(species_extended)
else:
breed_species = (None,)
for _breed_species in breed_species:
for breed in all_breeds_gen(species=_breed_species):
name = breed.breed_display_name
name = LocalizationHelperTuning.NAME_VALUE_PARENTHESIS_PAIR_STRUCTURE(name, cls.species_name[_breed_species])
row = BasePickerRow(name=name, row_description=breed.breed_description, tag=breed)
yield row
def on_choice_selected(self, choice_tag, **kwargs):
breed = choice_tag
if breed is not None:
position = self.context.pick.location
actor_sim_info = self.sim.sim_info
params = self.interaction_parameters
age = params['age']
gender = params['gender']
species = breed.breed_species
sim_creator = sim_spawner.SimCreator(age=age, gender=gender, species=species, additional_tags=(breed.breed_tag,))
(sim_info_list, _) = sim_spawner.SimSpawner.create_sim_infos((sim_creator,), account=actor_sim_info.account, zone_id=actor_sim_info.zone_id, creation_source='cheat: BreedPickerSuperInteraction')
sim_info = sim_info_list[0]
sim_spawner.SimSpawner.spawn_sim(sim_info, sim_position=position, is_debug=True)
|
from tests.TestMyTest import TestMyTest
from tests.TestICMP import TestICMP
from tests.TestRoutingHeader import TestRoutingHeader
from tests.TestOverlappingFragments import TestOverlappingFragments
from tests.TestHeaderChain import TestHeaderChain
from tests.TestTinyFragments import TestTinyFragments
from tests.TestTinyFragmentsTimeout import TestTinyFragmentsTimeout
from tests.TestExcessiveHBHOptions import TestExcessiveHBHOptions
from tests.TestPadNCovertChannel import TestPadNCovertChannel
from tests.TestAddressScopes import TestAddressScopes
from PyQt4 import QtCore
import sys
class TestManager(QtCore.QThread):
def __init__(self, app=None):
QtCore.QThread.__init__(self)
self.exiting = False
self.tests_list = None
self.test_settings = None
self.s = None
self.app = app
self.tests = dict()
# create test classes and register them with the application
# tICMP = TestMyTest(1, "ICMP Test", "ICMP Test", self.test_settings, app)
# self.registerTest(tICMP)
tICMP = TestICMP(1, "ICMPv6 Filtering", "The ICMP Test", self.test_settings, app)
self.registerTest(tICMP)
tRoutingHeader = TestRoutingHeader(2, "Routing Header Test", "The Routing Header Test", self.test_settings, app)
self.registerTest(tRoutingHeader)
tHeaderChain = TestHeaderChain(3, "Header Chain Test", "The Header Chain Test", self.test_settings, app)
self.registerTest(tHeaderChain)
tOverlappingFragments = TestOverlappingFragments(4, "Overlapping Fragments Test", "The Overlapping Fragments Test", self.test_settings, app)
self.registerTest(tOverlappingFragments)
tTinyFragments = TestTinyFragments(5, "Tiny Fragments Test", "The Tiny Fragments Test", self.test_settings, app)
self.registerTest(tTinyFragments)
tTinyFragmentsTimeout = TestTinyFragmentsTimeout(6, "Tiny Fragments Timeout", "The Tiny Fragments Timeout Test", self.test_settings, app)
self.registerTest(tTinyFragmentsTimeout)
tExcessiveHBH = TestExcessiveHBHOptions(7, "Excessive Extension Options Test" , "The Excessive Hop-By-Hop and Destination Options Test", self.test_settings, app)
self.registerTest(tExcessiveHBH)
tPadNCovertChannel = TestPadNCovertChannel(8, "PadN Covert Channel Test", "The PadN Covert Channel Test", self.test_settings, app)
self.registerTest(tPadNCovertChannel)
tAddressScopes = TestAddressScopes(9, "Address Scopes Test", "The Address Scopes Test", self.test_settings, app)
self.registerTest(tAddressScopes)
self.tinyFragments = tTinyFragments.id
self.tinyFragmentsTimeout = tTinyFragmentsTimeout.id
def __del__(self):
self.exiting = True
self.wait()
def getTest(self, id):
return self.tests[id]
def teardown(self):
self.s.send("BYE")
self.s.close()
print "disconnected"
def registerTest(self, test):
self.tests[test.id] = test
def updateStatus(self, message):
self.app.update_status.emit(message)
def run(self):
self.s.send("InfoLinkLocal")
self.test_settings.target_ll = self.s.recv(1024).strip()[14:]
print "Found that the server's link local address is %s" % self.test_settings.target_ll
for key in self.tests_list:
if key == self.tinyFragmentsTimeout and self.tests[self.tinyFragments].state != "Success":
self.tests[key].setState("Warning")
self.tests[key].setDescription("Warning")
self.tests[key].addDetail("Information: This test was skipped as Tiny Fragments seem to be dropped completely. So no point in waiting")
self.app.trigger.emit(key)
continue
# signal the server that we're about to start the test
self.s.send("StartTest %i" % key)
response = self.s.recv(1024).strip()
if response != "StartTest %d ACKNOWLEDGED" % key:
print "Uh-Oh! While waiting for the server to respond to 'StartTest %d' we got the following reply:" % key
print response
sys.exit("Exiting")
# now we know the server is ready for us to send the test packets
self.app.update_status.emit("Executing test: %s %s" % (self.tests[key].name, self.tests[key].statusbar_detail))
self.tests[key].setTestSettings(self.test_settings)
self.tests[key].execute()
self.app.update_status.emit("Waiting for the server's resuls for: %s" % self.tests[key].name)
# That's it. Signal the server that we're done with the test packets and would now like to receive the result
self.s.send("EndTest %i" % key)
response = self.s.recv(1024).strip()
if response != "EndTest %i ACKNOWLEDGED" % key:
print "Uh-Oh! While waiting for the server to respond to 'StartTest %d' we got the following reply:" % key
print response
sys.exit("Exiting")
# receive the result
response = self.s.recv(1024).strip()
if response != "StartResult %i" % key:
print "Uh-Oh! While waiting for the server to send the result for test %d we got the following reply:" % key
print response
sys.exit("Exiting")
result_total = ""
done = False
while not done:
if ("EndResult %d" % key) in result_total:
done = True
break
else:
result_total = result_total + self.s.recv(4096)#.strip()
results = [line.strip().split(':') for line in result_total.split('\n') if line.strip()]
# check the 'aggregate state of the test': if there is at least one 'FAILURE' then the state of the whole test is 'FAILURE'
# if there is at least one 'WARNING' then the state of the whole test is 'WARNING'. If neither is the case, the
# state is 'SUCCESS'
state = "Success"
for result in results:
if "Warning!" in (result[0])[:8]:
state = "Warning"
if "Failure!" in (result[0])[:8]:
state = "Failure"
if state == "Failure":
break
self.tests[key].setState(state)
self.tests[key].setDescription(state)
for result in results:
if (result[0])[:9] != "EndResult":
self.tests[key].addDetail(result[0])
# tell the UI that the test is finished
self.app.trigger.emit(key)
self.teardown()
self.app.tests_finished.emit()
|
#!/usr/bin/env python3
import sys # import sys library
from datetime import datetime as dt # importing date time and aliasing it as dt
print(sys.version)
print(dt.now())
# Advanced strings
my_name = "Heath"
print(my_name[0])
print(my_name[-1])
sentence = "This is indeed a sentence"
print(sentence[:4])
print(sentence.split())
sentence_split = sentence.split()
sentence_join = ' '.join(sentence_split)
quote = "He said, \"Give me all your money\""
print(quote)
too_much_space = " hello"
print(too_much_space.strip())
print ("a" in "Apple")
letter = "A"
word = "Apple"
print(letter.lower() in word.lower())
movie = "The Hangover"
print("My favorite movie is {}".format(movie))
# Dictionaries
drinks = {"White Russian":7, "Old Fashion":18, "Lemon Drop":8} #drink is key, price is value
print(drinks)
employees = {"Finance":["Bob", "Linda", "Tina"], "IT":["Gorge", "Louis", "Teddy"], "HR":["Jimmy Jr.", "Mort"]}
print(employees)
employees['Legal'] = ['Mr. Frond'] # add key value pair
print(employees)
employees.update({"Sales":["Andie", "Ollie"]}) # add new key value pair
print(employees)
print(drinks.get("White Russian"))
|
from ..sampling import *
class UPSteps(Sampling):
slug = 'sampling:up_steps'
name = 'Sampling: UP Steps'
def __init__(self, steps, *args, **kwargs):
self.steps = steps
self.min = kwargs['min']
self.max = self.min << steps
super().__init__(self.max, *args, **kwargs)
def get_count(self, backdoor, results=()):
count = len(results)
bd_count = backdoor.task_count()
if count == 0:
return min(self.min, bd_count)
elif count < bd_count and count < self.max:
if sum([not r[4] for r in results if r]) == 0:
bound = min(count << 1, self.max, bd_count)
return max(0, bound - count)
return 0
def __info__(self):
return {
**super().__info__(),
'min': self.min,
'steps': self.steps,
}
__all__ = [
'UPSteps'
]
|
def minimal_number_of_coins(coins, price):
bal = price
index = len(coins)
count = 0
while bal > 0:
if bal >= max(coins[:index]):
bal -= max(coins[:index])
count += 1
else:
index -= 1
return count
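# Note: the greedy loop above is only guaranteed to be minimal for canonical
# coin systems such as [1, 2, 10]; for arbitrary coin sets it can overshoot.
# A hedged dynamic-programming sketch (the helper name is hypothetical):
def minimal_number_of_coins_dp(coins, price):
    INF = float('inf')
    best = [0] + [INF] * price  # best[v] = fewest coins summing exactly to v
    for value in range(1, price + 1):
        for coin in coins:
            if coin <= value and best[value - coin] + 1 < best[value]:
                best[value] = best[value - coin] + 1
    return best[price] if best[price] != INF else -1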
if __name__ == '__main__':
coins = [1, 2, 10]
price = 28
print(minimal_number_of_coins(coins, price))
|
"""
Customized Query, RawQuery (like django.db.models.sql.query)
"""
from django.conf import settings
from django.db.models import Count
from django.db.models.sql import Query, RawQuery, constants
from salesforce.backend import DJANGO_20_PLUS
from salesforce.dbapi.driver import arg_to_soql
class SalesforceRawQuery(RawQuery):
pass
# def clone(self, using):
# return SalesforceRawQuery(self.sql, using, params=self.params)
#
# def get_columns(self):
# if self.cursor is None:
# self._execute_query()
# converter = connections[self.using].introspection.table_name_converter
# if self.cursor.rowcount > 0:
# return [converter(col) for col in self.cursor.first_row.keys() if col != 'attributes']
# # TODO hy: A more general fix is desirable with rewriting more code.
# return ['Id'] # originally [SF_PK] before Django 1.8.4
#
# def _execute_query(self):
# self.cursor = connections[self.using].cursor()
# self.cursor.prepare_query(self)
# self.cursor.execute(self.sql, self.params)
#
# def __repr__(self):
# return "<SalesforceRawQuery: %s; %r>" % (self.sql, tuple(self.params))
#
# def __iter__(self):
# for row in super(SalesforceRawQuery, self).__iter__():
# yield [row[k] for k in self.get_columns()]
class SalesforceQuery(Query):
"""
Override aggregates.
"""
def __init__(self, *args, **kwargs):
super(SalesforceQuery, self).__init__(*args, **kwargs)
self.is_query_all = False
self.max_depth = 1
def __str__(self):
sql, params = self.sql_with_params()
return sql % tuple(arg_to_soql(x) for x in params)
def sql_with_params(self):
"""
        Return the query as a SOQL string and the parameters.
"""
sf_alias = getattr(settings, 'SALESFORCE_DB_ALIAS', 'salesforce')
return self.get_compiler(sf_alias).as_sql()
def clone(self, klass=None, memo=None): # pylint: disable=arguments-differ
if DJANGO_20_PLUS:
query = Query.clone(self)
else:
query = Query.clone(self, klass, memo) # pylint: disable=too-many-function-args
query.is_query_all = self.is_query_all
return query
def has_results(self, using):
q = self.clone()
compiler = q.get_compiler(using=using) # pylint: disable=no-member
return bool(compiler.execute_sql(constants.SINGLE))
def set_query_all(self):
self.is_query_all = True
def get_count(self, using):
# TODO maybe can be removed soon
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count('pk'), alias='x_sf_count', is_summary=True) # pylint: disable=no-member
number = obj.get_aggregation(using, ['x_sf_count'])['x_sf_count'] # pylint: disable=no-member
if number is None:
number = 0
return number
|
'''
Copyright (C) 2019 Simon D. Levy
MIT License
'''
from gym.envs.registration import register
register(
id='Lander-v0',
entry_point='gym_copter.envs:Lander2D',
max_episode_steps=2000
)
register(
id='Lander3D-v0',
entry_point='gym_copter.envs:Lander3D',
max_episode_steps=2000
)
register(
id='Lander3D-v1',
entry_point='gym_copter.envs:TargetedLander3D',
max_episode_steps=2000
)
register(
id='Distance-v0',
entry_point='gym_copter.envs:Distance',
max_episode_steps=1000
)
register(
id='Takeoff-v0',
entry_point='gym_copter.envs:Takeoff',
max_episode_steps=1000
)
|
import sys
sys.path.append("libraries/")
from fonction_compteur import *
from fonction_compteur_datagenerator import *
from fonction_compteur_segmentation import *
from fonction_compteur_affiche import *
from skimage.measure import label
from skimage.measure import regionprops
from skimage import img_as_uint
from skimage.io import imread, imsave
from matplotlib import pyplot as plt
image_name = 'G/9994A323901.tif'
image = plt.imread(image_name)
image_mask = decoupe_mask(image, verbose='all')
zoro=np.ma.masked_where(image_mask==0,image_mask)
labeled_list=Hough_by_thres(image, zoro, cells_mean=60, verbose=True,condition=['seuil',1,0.9,1],
edges=None,labeled_list=[], exemple=[False])
labeled_list=recon_image(labeled_list,verbose='all')
labeled=labeled_list[0]
# test if region is too small or too big
classe, amas_cells=test_region(labeled, cells_mean=60, threshold='hough_iter', bord=True)
result=affiche(image, labeled,classe, title=" premier filtrage",boxes=["ellipse","blue"])
save=result[1]
save=np.array(save*255, dtype='uint8')
save=draw_ellipse_perso(save, classe)
save=draw_ellipse_perso(save, amas_cells)
labeled_conv=complet_cells(classe+amas_cells, labeled, verbose='True')
classe=regionprops(labeled_conv)
labeled_conv = img_as_uint(labeled_conv)
imsave('out/'+"im1_labeling.png", labeled_conv)
# def hough_segmentation()
|
#!/usr/bin/python
'''Graphs role dependencies in roles/ as a graphviz digraph'''
import os
import yaml
import sys
print 'digraph {'
for role in os.listdir('./roles'):
try:
with open('./roles/%s/meta/main.yml' % role) as meta:
data = yaml.load(meta)
except Exception as exc:
print >>sys.stderr, 'Skipping %s: %r' % (role, exc)
continue
try:
deps = data['dependencies']
except Exception as exc:
print >>sys.stderr, 'Skipping %s: %r' % (role, exc)
continue
print '\t"%s" -> {' % role,
for dep in deps:
print >>sys.stderr, 'dep:', dep
name = dep['role']
print '"%s"' % name,
print '}'
print '}'
|
# Generated by Django 3.2.6 on 2021-08-11 20:48
from django.conf import settings
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='note',
name='links',
field=django.contrib.postgres.fields.ArrayField(base_field=models.TextField(blank=True), default=list, size=None),
),
migrations.AddField(
model_name='note',
name='owner',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='notes', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='note',
name='step_one_iterations',
field=django.contrib.postgres.fields.ArrayField(base_field=models.TextField(blank=True), default=list, size=None),
),
migrations.AddField(
model_name='note',
name='step_three',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='note',
name='step_two_iterations',
field=django.contrib.postgres.fields.ArrayField(base_field=models.TextField(blank=True), default=list, size=None),
),
migrations.AddField(
model_name='note',
name='title',
field=models.CharField(blank=True, max_length=64),
),
migrations.AddField(
model_name='note',
name='understand',
field=models.BooleanField(default=False),
),
]
|
import os
from copy import deepcopy
# TODO GitPython topology
def get_topology(tree):
tree['topology'] = None
raise NotImplementedError
def parse_spec(file_name):
# The purpose of this method is to get a more computer-readable commit tree
with open(file_name) as spec_file:
spec = spec_file.read()
commits = [] # List of (commit_name, [parents], [branches], [tags])
all_branches = set()
all_tags = set()
for line in spec.split('\n'):
if len(line) == 0 or line[0] == '#':
# Last line or comment
continue
line = line.replace(' ', '')
if '(' in line:
commit_str = line[:line.find('(')].strip()
ref_str = line[line.find('(')+1:-1].strip().replace(' ', '')
else:
commit_str = line.strip()
ref_str = ''
if ':' not in commit_str:
# Implicit parent, use previous commit
if len(commits) == 0:
parents = []
else:
parents = [commits[len(commits)-1][0]]
commit_name = commit_str
else:
# Find parent
commit_name, parent_str = commit_str.split(':')
commit_name = commit_name.strip()
parent_str = parent_str.strip()
if parent_str:
parents = parent_str.split(' ')
else:
parents = []
# We know the commit name and parents now
assert ' ' not in commit_name # There should never be more than one change or a space in a name
# Process references
if ref_str:
refs = ref_str.split(',')
else:
refs = []
branches = []
tags = []
for ref in refs:
if ref[:4] == 'tag:':
tag = ref[4:]
assert tag not in all_tags
tags.append(tag)
all_tags.add(tag)
else:
branch = ref
assert branch not in all_branches
branches.append(branch)
all_branches.add(branch)
commits.append((commit_name, parents, branches, tags))
head = commits[-1][0]
del commits[-1]
return commits, head
def level_json(commits, head):
# We've formally replicated the input string in memory
level = {
'topology': [],
'branches': {},
'tags': {},
'commits': {},
'HEAD': {},
}
all_branches = []
all_tags = []
for commit_name, parents, branches_here, tags_here in commits:
level['topology'].append(commit_name)
level['commits'][commit_name] = {
'parents': parents,
'id': commit_name
}
all_branches.extend(branches_here)
all_tags.extend(tags_here)
for branch in branches_here:
level['branches'][branch] = {
'target': commit_name,
'id': branch
}
for tag in tags_here:
level['tags'][tag] = {
'target': commit_name,
'id': tag
}
level['HEAD'] = {
'target': head,
'id': 'HEAD'
}
return level
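# Illustrative result for a single root commit named '1' on branch 'master':
#   level_json([('1', [], ['master'], [])], head='1') ->
#   {'topology': ['1'],
#    'branches': {'master': {'target': '1', 'id': 'master'}},
#    'tags': {},
#    'commits': {'1': {'parents': [], 'id': '1'}},
#    'HEAD': {'target': '1', 'id': 'HEAD'}}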
def test_skill(skill, test):
# We don't know the names of merges, so we match them with their test names
# TODO Only works when merges don't have other merges as parents
# TODO Topological sort merge commits
skill = deepcopy(skill)
merge_name_map = {}
for commit_name in skill['commits']:
skill_commit = skill['commits'][commit_name]
if len(skill_commit['parents']) >= 2: # TODO Stop here to get list of merges
for test_commit_name in test['commits']: # TODO Do this iteration in an intelligent manner
test_commit = test['commits'][test_commit_name]
parents_equal = True
skill_parents = skill_commit['parents']
test_parents = test_commit['parents']
for skill_parent, test_parent in zip(skill_parents, test_parents):
if skill_parent != test_parent:
parents_equal = False
break
if len(skill_parents) == len(test_parents) and parents_equal:
merge_name_map[test_commit_name] = commit_name
# TODO Update parents to reference merge commits by new name
# Check commits
if len(test['commits']) != len(skill['commits']):
return False
for commit_name in test['commits']:
test_commit = test['commits'][commit_name]
if commit_name not in skill['commits']:
if merge_name_map[commit_name] in skill['commits']:
# It's a known merge
skill_commit = skill['commits'][merge_name_map[commit_name]]
else:
return False
else:
skill_commit = skill['commits'][commit_name]
# Commits must have the same number of parents and be in the same order
if len(skill_commit['parents']) != len(test_commit['parents']):
return False
for skill_parent, test_parent in zip(skill_commit['parents'], test_commit['parents']):
if skill_parent != test_parent:
return False
# Check branches
if len(test['branches']) != len(skill['branches']):
return False
for branch_name in test['branches']:
if branch_name not in skill['branches']:
return False
if skill['branches'][branch_name]['target'] != test['branches'][branch_name]['target']:
if merge_name_map[test['branches'][branch_name]['target']] != skill['branches'][branch_name]['target']:
return False # It's also not a known merge
# Check tags
if len(test['tags']) != len(skill['tags']):
return False
for tag_name in test['tags']:
if tag_name not in skill['tags']:
return False
if skill['tags'][tag_name]['target'] != test['tags'][tag_name]['target']:
return False
# Check HEAD
if skill['HEAD']['target'] != test['HEAD']['target']:
return False
return True
class NamedList:
# names is a list populated with type str, items is a list populated with any type
def __init__(self, names, items):
assert len(names) == len(items)
self._name_dict = {name: index for index, name in enumerate(names)}
self._items = items
def __getitem__(self, query):
if isinstance(query, str):
if query.isnumeric():
if 0 < int(query) <= len(self):
return self._items[int(query) - 1]
else:
raise KeyError
return self._items[self._name_dict[query]]
else:
raise KeyError
def __iter__(self):
return self._items.__iter__()
def __len__(self):
return len(self._items)
def __setitem__(self, key, item):
if isinstance(key, str):
self._name_dict[key] = len(self._items)
self._items.append(item)
else:
raise TypeError
def __contains__(self, item):
return item in self._items
def values(self):
return self._items
def keys(self):
set_indices = { str(i) for i in range(1, len(self) + 1) }
set_names = set(self._name_dict.keys())
return set_indices | set_names
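# Hedged usage sketch (names and values are illustrative only):
#   nl = NamedList(['first', 'second'], [10, 20])
#   nl['first']  -> 10
#   nl['1']      -> 10   (1-based numeric-string lookup)
#   len(nl)      -> 2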
class AllSkills(NamedList):
def __init__(self, skills):
super().__init__([skill.name for skill in skills], skills)
last_level = None
for skill in self:
for level in skill:
if last_level is not None:
last_level.next_level = level
last_level = level
class Skill(NamedList):
def __init__(self, name, levels):
super().__init__([level.name for level in levels], levels)
self.name = name
for level in levels:
level.skill = self
class Level:
def __init__(self, name):
self.name = name
self.skill = None
self.next_level = None
def __repr__(self):
return "<{class_name}: '{full_name}'>".format(
class_name=type(self).__name__,
full_name=self.full_name()
)
def full_name(self):
return '{} {}'.format(self.skill.name, self.name)
def setup(self, file_operator):
pass
def instructions(self):
pass
def goal(self):
pass
def test(self, file_operator):
pass
def print_all_complete():
print("Wow! You've complete every level, congratulations!")
print("If you want to keep learning git, why not try contributing"
" to git-gud by forking the project at https://github.com/benthayer/git-gud/")
print("We're always looking for contributions and are more than"
" happy to accept both pull requests and suggestions!")
class BasicLevel(Level):
def __init__(self, name, path):
super().__init__(name)
self.path = path
self.setup_spec_path = os.path.join(self.path, 'setup.spec')
self.instructions_path = os.path.join(self.path, 'instructions.txt')
self.goal_path = os.path.join(self.path, 'goal.txt')
self.test_spec_path = os.path.join(self.path, 'test.spec')
def _setup(self, file_operator):
commits, head = parse_spec(self.setup_spec_path)
file_operator.create_tree(commits, head)
latest_commit = '0'
for commit_name, _, _, _ in commits:
try:
if int(commit_name) > int(latest_commit):
latest_commit = commit_name
except ValueError:
                pass # Commit is a merge and doesn't have a number
file_operator.write_last_commit(latest_commit)
def setup(self, file_operator):
print('Setting up level: "{}"'.format(self.full_name()))
self._setup(file_operator)
print('Setup complete')
print()
print("Simulating: git gud goal")
self.goal()
print()
print("Type \"git gud instructions\" to view full instructions")
print("Type \"git gud test\" to test for level completion")
print("Type \"git gud help\" for more help")
print()
def instructions(self):
print('Printing instructions for level: "{}"'.format(self.full_name()))
print()
with open(self.instructions_path) as instructions_file:
for line in instructions_file:
if line[:3] == '>>>':
input('>>>')
else:
print(line.strip())
def goal_str(self):
with open(self.goal_path) as goal_file:
return goal_file.read()
def goal(self):
print(self.goal_str())
def _test(self, file_operator):
commits, head = parse_spec(self.test_spec_path)
test_tree = level_json(commits, head)
level_tree = file_operator.get_current_tree()
return test_skill(level_tree, test_tree)
def test(self, file_operator):
print('Testing completion for level: "{}"'.format(self.full_name()))
print()
if self._test(file_operator):
try:
if self.next_level.skill != self.skill:
print("Level complete, you've completed all levels in this skill!")
print('"git gud progress" to advance to the next skill')
print("Next skill is: {}".format(self.next_level.skill.name))
else:
print('Level complete! "git gud progress" to advance to the next level')
print('Next level is: {}'.format(self.next_level.full_name()))
except AttributeError:
print_all_complete()
else:
print('Level not complete, keep trying. "git gud reset" to start from scratch.')
|
# Generated by Django 2.2.6 on 2019-10-20 11:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalog', '0005_reviews'),
]
operations = [
migrations.AddField(
model_name='reviews',
name='book',
field=models.ForeignKey(default=111, on_delete=django.db.models.deletion.CASCADE, to='catalog.Book'),
preserve_default=False,
),
]
|
import os
import pandas as pd
import pymongo
import redis
import socket
import tempfile
import cPickle
import subprocess
import shutil
import vislab
def zero_results(collection, query):
return collection.find(query).limit(1).count() == 0
def exclude_ids_in_collection(image_ids, collection):
"""
Exclude ids already stored in the collection.
Useful for submitting map jobs.
"""
computed_image_ids = [
x['image_id'] for x in collection.find(fields=['image_id'])
]
print 'len(computed_image_ids)', len(computed_image_ids)
num_ids = len(image_ids)
not_computed_ids = image_ids = list(set(image_ids) - set(computed_image_ids))
print("Cut down on {} existing out of {} total image ids.".format(
num_ids - len(not_computed_ids), num_ids))
return not_computed_ids
def load_or_generate_df(filename, generator_fn, force=False, args=None):
"""
If filename does not already exist, gather data with generator_fn,
and write to filename.
If filename does exist, load from it.
"""
print 'load_or_generate_df(force={}): {}'.format(force, filename)
if not force and os.path.exists(filename):
df = pd.read_hdf(filename, 'df')
else:
print 'Generating'
df = generator_fn(args)
df.to_hdf(filename, 'df', mode='w')
return df
def running_on_icsi():
"""
Return True if this script is running on the ICSI cluster.
"""
return socket.gethostname().endswith('ICSI.Berkeley.EDU')
def get_mongodb_client():
"""
Establish connection to MongoDB.
"""
try:
host, port = vislab.config['servers']['mongo']
connection = pymongo.MongoClient(host, port)
except pymongo.errors.ConnectionFailure:
raise Exception(
"Need a MongoDB server running on {}, port {}".format(host, port))
return connection
def get_mozila_request_header():
user_agent = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0"
headers = {'User-Agent': user_agent}
return headers
def print_collection_counts():
"""
Print all collections and their counts for all databases in MongoDB.
"""
client = get_mongodb_client()
for db_name in client.database_names():
for coll_name in client[db_name].collection_names():
print('{} |\t\t{}: {}'.format(
db_name, coll_name, client[db_name][coll_name].count()))
def get_redis_client(host=None, port=None):
if host is None or port is None:
host, port = vislab.config['servers']['redis']
try:
connection = redis.Redis(host, port)
connection.ping()
except redis.ConnectionError:
raise Exception(
"Need a Redis server running on {}, port {}".format(host, port))
return connection
def pickle_function_call(func_name, args):
f, temp_filename = tempfile.mkstemp()
with open(temp_filename, 'w') as f:
cPickle.dump((func_name, args), f)
c = "import os; import cPickle;"
c += "f = open('{0}'); func, args = cPickle.load(f); f.close();"
c += "os.remove('{0}'); func(*args)"
c = c.format(temp_filename)
return c
def run_through_bash_script(cmds, filename=None, verbose=False, num_workers=1):
"""
Write out given commands to a bash script file and execute it.
This is useful when the commands to run include pipes, or are chained.
subprocess is not too easy to use in those cases.
Parameters
----------
cmds: list of string
filename: string or None [None]
If None, a temporary file is used and deleted after.
verbose: bool [False]
If True, output the commands that will be run.
num_workers: int [1]
If > 1, commands are piped through parallel -j num_workers
"""
assert(num_workers > 0)
remove_file = False
if filename is None:
f, filename = tempfile.mkstemp()
remove_file = True
if num_workers > 1:
contents = "echo \"{}\" | parallel --env PATH -j {}".format(
'\n'.join(cmds), num_workers)
else:
contents = '\n'.join(cmds)
with open(filename, 'w') as f:
f.write(contents + '\n')
if verbose:
print("Contents of script file about to be run:")
print(contents)
p = subprocess.Popen(['bash', filename])
out, err = p.communicate()
if remove_file:
os.remove(filename)
if not p.returncode == 0:
print(out)
print(err)
raise Exception("Script exited with code {}".format(p.returncode))
def run_shell_cmd(cmd, echo=True):
"""
Run a command in a sub-shell, capturing stdout and stderr
to temporary files that are then read.
"""
_, stdout_f = tempfile.mkstemp()
_, stderr_f = tempfile.mkstemp()
print("Running command")
print(cmd)
p = subprocess.Popen(
'{} >{} 2>{}'.format(cmd, stdout_f, stderr_f), shell=True)
p.wait()
with open(stdout_f) as f:
stdout = f.read()
os.remove(stdout_f)
with open(stderr_f) as f:
stderr = f.read()
os.remove(stderr_f)
if echo:
print("stdout:")
print(stdout)
print("stderr:")
print(stderr)
return stdout, stderr
def makedirs(dirname):
if os.path.exists(dirname):
return dirname
try:
os.makedirs(dirname)
except OSError:
pass
except:
raise
return dirname
def cleardirs(dirname):
if os.path.exists(dirname):
shutil.rmtree(dirname)
return makedirs(dirname)
|
"""Faça um programa que peça o tamanho de um arquivo para download (em MB) e a velocidade de um link de Internet (em Mbps),
calcule e informe o tempo aproximado de download do arquivo usando este link (em minutos)."""
Download = float(input("Digite o tamanho do arquivo em MB:"));
Velocidade = float(input("Digite a velocidade de um link de Internet em Mbps: "));
Convertido = Velocidade /8;
Minuto = Convertido * 60;
Tempo = Download / Minuto
print("O download vai demorar {} minutos".format(Tempo));
|
# This code generates images of size 256 x 256 px that contains either an open or closed contour.
# This contour consists of straight lines that form polygons.
# author: Christina Funke
import numpy as np
import math
import csv
import os
from PIL import Image, ImageDraw
from pathlib import Path
# -------------------------------------------------------
# code to determine overlap
# -------------------------------------------------------
# that's the distance of two non-overlapping points (=10 px)
dist = 40
# intersection
def ccw(A, B, C):
return (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0])
def intersect(A, B, C, D):
return ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(A, B, D)
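# Illustrative check: crossing diagonals intersect, disjoint parallel segments do not.
#   intersect((0, 0), (2, 2), (0, 2), (2, 0)) -> True
#   intersect((0, 0), (1, 0), (0, 1), (1, 1)) -> False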
def test_overlap_point(lines, point, dist):
# point=(0,0)
# lines=[(1,1),(2,2),(3,3)]
return point_to_multipleline_dist(lines, point) < dist
def test_overlap_line(lines, new_line, dist):
# new_line=[(10,10),(20,20),(30,30)]
# lines=[(1,1),(2,2),(3,3)]
# overlap
for i in range(len(new_line)):
point = new_line[i : i + 1][0]
if test_overlap_point(lines, point, dist):
return True
for i in range(len(lines)):
point = lines[i : i + 1][0]
if test_overlap_point(new_line, point, dist):
return True
# intersection
for i in range(len(new_line) - 1):
line1 = new_line[i : i + 2]
A = np.array(line1[0])
B = np.array(line1[1])
for i in range(len(lines) - 1):
line2 = lines[i : i + 2]
C = np.array(line2[0])
D = np.array(line2[1])
if intersect(A, B, C, D):
return True
return False
def test_overlap_all_lines(all_lines, new_line, dist):
# new_line=[(10,10),(20,20),(30,30)]
# all_lines=[[(1,1),(2,2),(3,3)],[(10,10),(20,20),(30,30)]]
for lines in all_lines:
if test_overlap_line(lines, new_line, dist):
return True
return False
def point_to_multipleline_dist(lines, point):
min_dist = +np.inf
for i in range(len(lines) - 1):
line = lines[i : i + 2]
min_dist = min(min_dist, point_to_line_dist(line, point))
return min_dist
def point_to_line_dist(line, point):
x1, y1 = line[0]
x2, y2 = line[1]
x3, y3 = point
px = x2 - x1
py = y2 - y1
something = px * px + py * py
u = ((x3 - x1) * px + (y3 - y1) * py) / float(something)
if u > 1:
u = 1
elif u < 0:
u = 0
x = x1 + u * px
y = y1 + u * py
dx = x - x3
dy = y - y3
dist = math.sqrt(dx * dx + dy * dy)
return dist
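# Quick check of the projection and clamping above (illustrative values):
#   point_to_line_dist([(0, 0), (10, 0)], (5, 5))  -> 5.0  (closest point is (5, 0))
#   point_to_line_dist([(0, 0), (10, 0)], (15, 0)) -> 5.0  (u clamps to 1, endpoint (10, 0))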
# -------------------------------------------------------
# code to make contours and flanker
# -------------------------------------------------------
def make_contour(polygon_points):
"""
method 1
"""
# position vertices on circle and add noise
r = np.random.uniform(128, 256)
noise = r / 3
points = []
for n in range(polygon_points):
phi = 2 * np.pi / polygon_points * n
x = r * np.cos(phi)
y = r * np.sin(phi)
x += np.random.random() * noise * 2 - noise
y += np.random.random() * noise * 2 - noise
points.append((x, y))
points.append(points[0])
return points
def get_random_angles(polygon_points):
a = np.random.random(polygon_points)
a /= a.sum()
return np.cumsum(a * 2 * np.pi)
def make_flanker(r, r2, pos, number_of_segments):
points = []
angle = np.random.rand() * 2 * np.pi
points.append((np.sin(angle) * r + pos[0], np.cos(angle) * r + pos[1]))
points.append(pos)
if number_of_segments == 2:
angle2 = angle + np.random.rand() * 3 / 4 * np.pi + 1 / 4 * np.pi
points.append((np.sin(angle2) * r2 + pos[0], np.cos(angle2) * r2 + pos[1]))
return points
def move_inside(img_size, points, width):
if test_inside(img_size, points, width):
return True, points
for n in range(50):
pos = [np.random.uniform(0, img_size), np.random.uniform(0, img_size)]
points = shift_to_pos(points, pos)
if test_inside(img_size, points, width):
return True, points
return False, points
def test_inside(img_size, points, width):
margin = 20
for point in points:
if (
point[0] < width / 2 + margin
or point[0] > img_size - width / 2 - margin
or point[1] < width / 2 + margin
or point[1] > img_size - width / 2 - margin
):
return False
return True
def test_visible(img_size, points):
for point in points:
if img_size > point[0] > 0 and img_size > point[1] > 0:
return True
return False
# -------------------------------------------------------
# code to transform closed to open contour
# -------------------------------------------------------
def remove_one_line(line):
"""
method a
"""
start = np.random.randint(0, len(line) - 1)
line = line[start:-1] + line[0:start]
return line, start
def dist_angle(a, b, c):
# a, b, c are three points.
# return angle at position b, r is the distance between a, b
a = np.array(a)
b = np.array(b)
c = np.array(c)
f = b - a
e = b - c
f_u = f / np.linalg.norm(f)
e_u = e / np.linalg.norm(e)
ang2 = np.arctan2(f[0], f[1])
ang1 = np.arctan2(e[0], e[1])
angle = (ang1 - ang2) % (2 * np.pi)
r = np.linalg.norm(f)
return angle, r
def shuffle_line(line):
"""
method b
"""
# get angle and radii
angles = []
radii = []
line.append(line[1])
for i in range(len(line) - 2):
angle, radius = dist_angle(line[i], line[i + 1], line[i + 2])
angles.append(angle)
radii.append(radius)
# shuffle
np.random.shuffle(angles)
np.random.shuffle(radii)
# reconstruct
new_a = [(512, 512)]
overlap = False
for i in range(len(angles)):
if i == 0:
old_angle = np.random.rand() * 2 * np.pi
else:
x0_diff = new_a[-1][0] - new_a[-2][0]
y0_diff = new_a[-1][1] - new_a[-2][1]
old_angle = np.arctan2(x0_diff, y0_diff)
angle = old_angle - (np.pi - angles[i])
r = radii[i]
new_x_diff = np.sin(angle) * r
new_y_diff = np.cos(angle) * r
new_point = (new_a[-1][0] + new_x_diff, new_a[-1][1] + new_y_diff)
new_a.append(new_point)
overlap = max(overlap, test_overlap_line(new_a[0:-2], new_a[-2:], dist=dist))
return new_a, overlap
def make_contour_both(polygon_points):
"""
method c
"""
    diff = np.random.rand() * 15 * 4 + 10 * 4 # difference between 20px and 50px
minr = 0
radius = 256 - minr
while 1:
radii = np.random.random(polygon_points) * radius + minr
radii_closed = np.copy(radii)
phis = get_random_angles(polygon_points)
# add point
phis = np.append(phis, phis[0])
radii = np.append(radii, radii[0])
s = np.random.randint(0, 2)
radii[0] += diff * (-1) ** s
radii[-1] -= diff * (-1) ** s
ov = False
start_angle = np.random.rand() * 2 * np.pi
points = []
for n in range(polygon_points + 1):
r = radii[n]
phi = phis[n] + start_angle
x = r * np.cos(phi)
y = r * np.sin(phi)
points.append((x, y))
ov = (
ov
or test_overlap_line(points[0:-2], points[-2:], dist)
or test_overlap_point(points[0:-1], points[-1], dist)
        ) # test the points
ov = ov or test_overlap_point(
points[1:], points[0], dist
        ) # test the first point
points_closed = []
for n in range(polygon_points):
r = radii_closed[n]
phi = phis[n] + start_angle
x = r * np.cos(phi)
y = r * np.sin(phi)
points_closed.append((x, y))
ov = (
ov
or test_overlap_line(points_closed[0:-2], points_closed[-2:], dist)
or test_overlap_point(points[0:-1], points[-1], dist)
)
points_closed.append(points_closed[0])
ov = (
ov
or test_overlap_line(points_closed[1:-2], points_closed[-2:], dist)
or test_overlap_point(points_closed[0:-1], points[-1], dist)
)
ov = ov or test_overlap_point(points[1:], points[0], dist)
# if polygon_points==3:
# ov=ov or (test_overlap_point([points_closed[2],points_closed[1]], points_closed[0], dist) or
# test_overlap_point([points_closed[3],points_closed[2]], points_closed[1], dist) or
# test_overlap_point([points_closed[1],points_closed[3]], points_closed[2], dist))
if not ov:
break
return points, points_closed
def shift_to_pos(new_a, pos):
x = np.array(new_a)[:, 0]
y = np.array(new_a)[:, 1]
pos_x = x.mean()
pos_y = y.mean()
x -= pos_x - pos[0]
y -= pos_y - pos[1]
new_a_m = []
for i in range(len(new_a)):
new_a_m.append((x[i], y[i]))
return new_a_m
def shift_to_pos2(new_a, ref_a, set_num, anchor=2):
x = np.array(new_a)[:, 0]
y = np.array(new_a)[:, 1]
if set_num == 9 or set_num == 7:
x_diff = new_a[0][0] - ref_a[anchor][0]
y_diff = new_a[0][1] - ref_a[anchor][1]
else:
x_diff = new_a[anchor][0] - ref_a[anchor][0]
y_diff = new_a[anchor][1] - ref_a[anchor][1]
x -= x_diff
y -= y_diff
new_a_m = []
for i in range(len(new_a)):
new_a_m.append((x[i], y[i]))
return new_a_m
# -------------------------------------------------------
# define and draw image
# -------------------------------------------------------
def define_image(polygon_points, set_num):
img_size = 1024
width = 10
# draw main contour
if polygon_points == 0:
open_lines = []
closed_lines = []
number_of_flankers = 25
else:
inside = False
while not inside:
points_contour_open, points_contour_closed = make_contour_both(
polygon_points=polygon_points
)
if set_num == 9 or set_num == 7:
points_contour_closed = make_contour(polygon_points=polygon_points)
points_contour_open, anchor = remove_one_line(points_contour_closed)
if set_num == 7:
points_contour_open = points_contour_open[0:-1]
else:
anchor = 2
# shift to position
pos = [np.random.uniform(0, img_size), np.random.uniform(0, img_size)]
points_contour_closed = shift_to_pos(points_contour_closed, pos)
points_contour_open = shift_to_pos2(
points_contour_open,
points_contour_closed,
set_num=set_num,
anchor=anchor,
)
inside = test_inside(img_size, points_contour_open, width) and test_inside(
img_size, points_contour_closed, width
)
open_lines = []
closed_lines = []
open_lines.append(points_contour_open)
closed_lines.append(points_contour_closed)
if set_num == 6:
number_of_flankers = 0
else:
number_of_flankers = np.random.randint(10, 26)
# closed_lines_main is introduced to reduce computation time:
    # When checking for overlap (in the next for loop) closed_lines_main is used to avoid looping over all flankers twice.
closed_lines_main = np.copy(closed_lines)
# draw flanker
for flanker_num in range(number_of_flankers):
number_of_segments = np.random.randint(1, 3)
r = np.random.uniform(128, 256)
if set_num == 24:
r2 = np.random.uniform(32, 64)
number_of_segments = 2
else:
r2 = np.copy(r)
visible = False
overlap = True
while overlap or not visible:
pos = (np.random.uniform(0, img_size), np.random.uniform(0, img_size))
new_line = make_flanker(
r=r, r2=r2, pos=pos, number_of_segments=number_of_segments
)
visible = test_visible(img_size, new_line)
if (
visible
): # to save time, only test for overlap if visible (if not visible flanker will be resampled anyway)
overlap = test_overlap_all_lines(
closed_lines_main, new_line, dist
) or test_overlap_all_lines(open_lines, new_line, dist)
# inside = test_inside(img_size, new_line, width)
open_lines.append(new_line)
closed_lines.append(new_line)
return open_lines, closed_lines
def draw_image(all_lines, set_num):
img_size = 1024
if set_num == 2:
width = 5
elif set_num == 3:
width = 18
elif set_num == 8:
width = 30
else:
width = 10
img = Image.new("RGB", [img_size, img_size], "white")
draw = ImageDraw.Draw(img)
c = 0
for line in all_lines:
c += 1
num_lines = range(len(line) - 1)
color = (0, 0, 0)
for i in num_lines:
draw.line(line[i] + line[i + 1], width=width, fill=color)
        for point in line: # so that there are no holes at the corners
draw.ellipse(
(
point[0] - width / 2,
point[1] - width / 2,
point[0] + width / 2,
point[1] + width / 2,
),
fill=color,
)
scale = 4
img_anti = img.resize((img_size // scale, img_size // scale), Image.ANTIALIAS)
return img_anti
def draw_image_bwb(all_lines):
img_size = 1024
width = 18
widthw = 6
img = Image.new("RGB", [img_size, img_size], "white")
draw = ImageDraw.Draw(img)
c = 0
for line in all_lines:
c += 1
num_lines = range(len(line) - 1)
color = (0, 0, 0)
for i in num_lines:
draw.line(line[i] + line[i + 1], width=width, fill=color)
        for point in line: # so that there are no holes at the corners
draw.ellipse(
(
point[0] - width / 2,
point[1] - width / 2,
point[0] + width / 2,
point[1] + width / 2,
),
fill=color,
)
c = 0
for line in all_lines:
c += 1
num_lines = range(len(line) - 1)
color = (255, 255, 255)
for i in num_lines:
draw.line(line[i] + line[i + 1], width=widthw, fill=color)
        for point in line: # so that there are no holes at the corners
draw.ellipse(
(
point[0] - widthw / 2,
point[1] - widthw / 2,
point[0] + widthw / 2,
point[1] + widthw / 2,
),
fill=color,
)
scale = 4
img_anti = img.resize((img_size // scale, img_size // scale), Image.ANTIALIAS)
return img_anti
def set_seed_rep(method):
"""
Set seed and number of repetitions. Both depend on the type of the data set. For example a different seed is used
for test and training set
:param method: type of the data set [string]
:return: number of [int]
"""
if method.endswith("val"):
np.random.seed(0)
num_rep = 200
elif method.endswith("test"):
np.random.seed(1)
num_rep = 400
elif method.endswith("train"):
np.random.seed(2)
num_rep = 2000
return num_rep
# -------------------------------------------------------
# main function that combines the calculations
# -------------------------------------------------------
def set1_otf(closedness):
"""
This function can be used to generate a training image of set1 on-the-fly
:param closedness: label, either 0 (closed) or 1 (open) [int]
:return: line-drawing
"""
polygon_points = np.random.randint(3, 10)
open_lines, closed_lines = define_image(polygon_points, 1)
if closedness == 0:
return draw_image(closed_lines, 1)
elif closedness == 1:
return draw_image(open_lines, 1)
def make_full_dataset(top_dir, set_num, debug):
"""
generate and save the full data set for a specified variation
:param top_dir: where to save the images [string]
:param set_num: number that specifies the variation [one of: 1-13, 24, 25]
:param debug: generate only seven images [bool]
"""
save = True
stim_folder = top_dir + "set" + str(set_num) + "/linedrawing/"
if set_num == 1:
methods = ["val", "test", "train"] # remove "train" from list to not generate training set
else:
methods = ["test"]
for method in methods:
print(method)
num_rep = set_seed_rep(method)
if debug:
num_rep = 1
        number = 0 # number of the last existing image (0 otherwise)
# make folder
new_folder = os.path.join(stim_folder, method)
print(new_folder)
if not Path(new_folder).is_dir():
print("make new folder")
os.makedirs(new_folder)
if save:
with open(os.path.join(stim_folder, method, method + ".csv"), "a") as f:
writer = csv.writer(f)
writer.writerow(["image_name", "points", "closed_contour"])
# make folders: open, closed
new_folder = os.path.join(stim_folder, method, "open")
if not Path(new_folder).is_dir():
os.mkdir(new_folder)
new_folder = os.path.join(stim_folder, method, "closed")
if not Path(new_folder).is_dir():
os.mkdir(new_folder)
for rep in range(num_rep):
print(rep, end="", flush=True)
if set_num == 10:
polygon_pointss = [3, 3, 3, 3, 3, 3, 3]
elif set_num == 11:
polygon_pointss = [6, 6, 6, 6, 6, 6, 6]
elif set_num == 12:
polygon_pointss = [9, 9, 9, 9, 9, 9, 9]
elif set_num == 13:
polygon_pointss = [12, 12, 12, 12, 12, 12, 12]
else:
polygon_pointss = [3, 4, 5, 6, 7, 8, 9]
for polygon_points in polygon_pointss:
open_lines, closed_lines = define_image(polygon_points, set_num)
for closed_contour in [True, False]:
number += 1
if closed_contour:
if set_num == 5:
res = draw_image_bwb(closed_lines)
else:
res = draw_image(closed_lines, set_num)
else:
if set_num == 5:
res = draw_image_bwb(open_lines)
else:
res = draw_image(open_lines, set_num)
if set_num == 25:
res = res.convert("1")
res = res.convert("RGB")
if save:
filename = method + str(number)
with open(
os.path.join(stim_folder, method, method + ".csv"), "a"
) as f:
writer = csv.writer(f)
writer.writerow(
[filename, str(polygon_points), str(closed_contour)]
)
if closed_contour:
folder = "closed"
else:
folder = "open"
res.save(
os.path.join(stim_folder, method, folder, filename + ".png")
)
|
# Author: Arnaud Dartois
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import time
import os
import sickbeard
import ftplib as ftp
import glob
from sickbeard import logger
class SentFTPChecker():
def run(self):
if sickbeard.USE_TORRENT_FTP:
# upload all torrent file to remote FTP
logger.log("Sending torrent file to FTP", logger.DEBUG)
self._sendToFTP("*.torrent", sickbeard.TORRENT_DIR)
def _sendToFTP(self, filter, dir):
"""
Send all of the specified filtered files (eg "*.torrent") to the appropriate FTP.
"""
iCount = self.count_files(dir, filter)
logger.log(u"Files Found (" + filter + "): " + str(iCount), logger.DEBUG)
if (iCount>0):
MyFTP = ftp.FTP()
logger.log(u"Initializing FTP Session", logger.DEBUG)
MyFTP.connect(sickbeard.FTP_HOST, sickbeard.FTP_PORT, sickbeard.FTP_TIMEOUT)
# Connect to the FTP server
MyFTP.login(sickbeard.FTP_LOGIN, sickbeard.FTP_PASSWORD, '')
# Assign passive mode
logger.log(u"Assign Session Passive Mode", logger.DEBUG)
MyFTP.set_pasv(sickbeard.FTP_PASSIVE)
# change remote directory
try:
logger.log(u"Set Remote Directory : %s" % sickbeard.FTP_DIR, logger.DEBUG)
MyFTP.cwd(sickbeard.FTP_DIR)
except Exception, e:
logger.log(u"Change directory failed :" + e.message, logger.ERROR)
for fileName in glob.glob(os.path.join(dir,filter)):
file_handler = open(fileName, 'rb')
# Send the file
logger.log(u"Send local file : " + fileName, logger.DEBUG)
MyFTP.set_debuglevel(1)
MyFTP.storbinary('STOR %s' % os.path.basename(fileName), file_handler)
MyFTP.set_debuglevel(0)
file_handler.close()
# delete local file after uploading
logger.log(u"Deleting local file : " + fileName, logger.DEBUG)
os.remove(fileName)
# Close FTP session
logger.log(u"Close FTP Session", logger.DEBUG)
MyFTP.quit()
logger.log(u"It's working ... hop a beer !", logger.DEBUG)
else:
logger.log(u"No local files found.", logger.DEBUG)
    def count_files(self, path, filter):
        """Count the local files in path that match the given filter pattern (eg "*.torrent")."""
        return len(glob.glob(os.path.join(path, filter)))
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 02:09:11 2020
@author: max
"""
import requests
import re
def get_m3u8(URL):
response = requests.get('http://www.99kubo.tv'+URL)
payload=response.text
try:
video_link=re.findall(r"http.{1,100}index.m3u8", payload)[0]
video_link=re.sub(r"\\","",video_link)
video_link=re.sub(r"/index.m3u8","/1000k/hls",video_link)
video_link=video_link+"/index.m3u8"
    except IndexError:
        video_link='not exist'
'''
m3u8_file = requests.get(video_link+"/index.m3u8", allow_redirects=True)
print(m3u8_file.content[0:200])
dot=re.compile(b',\n')
new_m3u8_file=re.sub(dot,bytes(',\n'+video_link,"utf-8"),m3u8_file.content)
open('index.m3u8', 'wb').write(new_m3u8_file)
'''
return video_link
if __name__=="__main__":
    URL='/vod-play-id-151292-sid-0-pid-1-ck.html' # path only; get_m3u8 prepends the host
g=get_m3u8(URL)
print(g)
|
# -*- coding: utf-8 -*-
import math
import numpy as np
import pandas as pd
from functools import reduce
from operator import mul
from sys import float_info as sflt
def combination(n:int, r:int):
"""https://stackoverflow.com/questions/4941753/is-there-a-math-ncr-function-in-python"""
if r < 0:
return None
r = min(n, n - r)
if r == 0:
return 1
numerator = reduce(mul, range(n, n - r, -1), 1)
denominator = reduce(mul, range(1, r + 1), 1)
return numerator // denominator
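# Quick sanity check (illustrative values, not part of the module): combination
# counts the ways to choose r items from n without regard to order, e.g.
# combination(5, 2) == 10 and combination(5, 0) == 1; a negative r returns None.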
def dropna(df:pd.DataFrame):
"""Drop rows with 'Nan' values"""
df = df[df < math.exp(709)] # big number
df = df[df != 0.0]
df = df.dropna()
return df
def ema_depreciated(series:pd.Series, periods:int):
"""Modified EMA with an SMA
Rolled into ema when kwargs['presma'] = True
"""
series = verify_series(series)
sma = series.rolling(window=periods, min_periods=periods).mean()[:periods]
rest = series[periods:]
return pd.concat([sma, rest]).ewm(span=periods, adjust=False).mean()
def fibonacci(n:int, weighted=True, **kwargs):
"""Fibonacci Sequence as a numpy array"""
zero = kwargs.pop('zero', True)
if zero:
a, b = 0, 1
else:
n -= 1
a, b = 1, 1
result = np.array([a])
for i in range(0, n):
a, b = b, a + b
result = np.append(result, a)
if weighted:
fib_sum = np.sum(result)
return result / fib_sum
else:
return result
def get_drift(x:int):
"""Returns an int if not zero, otherwise defaults to one."""
return int(x) if x and x != 0 else 1
def get_offset(x:int):
"""Returns an int, otherwise defaults to zero."""
return int(x) if x else 0
def multichoose(n:int, r:int):
"""https://en.wikipedia.org/wiki/Binomial_coefficient"""
return combination(n + r - 1, r)
def pascals_triangle(n:int, weighted=True, **kwargs):
"""Pascal's Triangle
Returns a numpy array of the nth row of Pascal's Triangle.
"""
if n < 0: return None
sink = kwargs.pop('all', False)
# Calculation
triangle = np.array([combination(n, i) for i in range(0, n + 1)])
# Variations and Properties
max_ = np.max(triangle)
inverted = max_ - triangle
triangle_sum = np.sum(triangle)
triangle_avg = np.average(triangle)
weights = triangle / triangle_sum
inv_weights = inverted / triangle_sum
if sink:
return triangle, triangle_sum, triangle_avg, inverted, weights, inv_weights, triangle_avg
if weighted:
return weights
else:
return triangle
def signed_series(series:pd.Series, initial:int = None):
"""Returns a Signed Series with or without an initial value"""
series = verify_series(series)
sign = series.diff(1)
sign[sign > 0] = 1
sign[sign < 0] = -1
sign.iloc[0] = initial
return sign
def verify_series(series:pd.Series):
"""If a Pandas Series return it."""
if series is not None and isinstance(series, pd.core.series.Series):
return series
def zero(x):
"""If the value is close to zero, then return zero. Otherwise return the value."""
return 0 if -sflt.epsilon < x and x < sflt.epsilon else x
|
from workspace.utils import shortest_id
def test_shortest_id():
assert shortest_id('apple', ['orange', 'banana']) == 'a'
assert shortest_id('apple', ['apricot', 'banana']) == 'app'
assert shortest_id('apple', ['apple seed', 'banana']) == 'apple'
assert shortest_id('apple', ['apple', 'banana']) == 'a'
|
#!/usr/bin/env python3
import socket
import sys
import os
import time
import re
HOST = os.environ["FTP_HOST"]
PORT = int(os.environ["FTP_PORT"])
NAME = os.environ["NAME"]
PASS = os.environ["PASS"]
def send_text(s, text):
print('[CLIENT] Sending: {}'.format(text))
s.sendall('{}\r\n'.format(text).encode())
def receive_data(s):
buff = bytearray()
data = s.recv(1024)
while data and len(data) > 0:
buff.extend(data)
data = s.recv(1024)
print('[DATA] Received: {}'.format(buff))
return buff
def receive_text(s):
buff = bytearray()
text = None
while text is None:
data = s.recv(1024)
if data and len(data) > 0:
buff.extend(data)
if buff.decode(errors='ignore').find('\r\n') >= 0:
text = buff.decode()
print('[CLIENT] Received: {}'.format(text))
return text
def check_ok(text, code):
if not text.startswith(str(code)):
raise Exception('Response code not OK. Expected: {}. Text Received: {}'.format(code, text))
def parse_port(response):
    m = re.search(r'^.*\(\d+,\d+,\d+,\d+,(\d+),(\d+)\).*$', response)
if m is None or len(m.groups()) != 2:
raise Exception('Response to PASV does not include connection port. Text Received: {}. {}'.format(response, m))
return int(m.group(1))*256 + int(m.group(2))
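# Worked example (values assumed for illustration): for a PASV reply such as
# '227 Entering Passive Mode (192,168,1,10,19,141)' the regex captures ('19', '141'),
# so the data-connection port is 19 * 256 + 141 = 5005.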
def do_list_sequence(s, ip, list_cmd):
send_text(s, 'PASV')
response = receive_text(s)
check_ok(response, 227)
passive_port = parse_port(response)
print('Passive port: {}'.format(passive_port))
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s_data:
send_text(s, list_cmd)
s_data.connect((ip, passive_port))
print('[DATA] Socket connected')
response = receive_text(s)
response_data = receive_data(s_data)
check_ok(response, 150)
response = receive_text(s)
check_ok(response, 226)
def main():
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
print('[CLIENT] Socket created')
ip = socket.gethostbyname(HOST)
s.connect((ip, PORT))
print('[CLIENT] Socket connected')
response = receive_text(s)
check_ok(response, 220)
send_text(s, 'USER {}'.format(NAME))
response = receive_text(s)
check_ok(response, 331)
send_text(s, 'PASS {}'.format(PASS))
response = receive_text(s)
check_ok(response, 230)
do_list_sequence(s, ip, 'LIST')
do_list_sequence(s, ip, 'LIST /home')
do_list_sequence(s, ip, 'LIST /home/user1')
except Exception as e:
print('[CLIENT] Error detected. Error: {}'.format(e))
if __name__ == '__main__':
main()
|
import string
class Base():
feature_bare = string.Template(
"""
Feature: $component
"""
)
feature_base = string.Template(
"""
Feature: $component
$scenario
"""
)
class Scenarios():
stride = {
'spoofing': string.Template(
"""
Scenario: Spoofing
Given $process causes data to flow from $source in $sourceZone to $destination in $destinationZone
When $source attempts to impersonate something or someone else related to $process
Then implement and validate digital identity by ...
# Mitigation details
# https://github.com/OWASP/ASVS/blob/master/4.0/en/0x11-V2-Authentication.md
# https://github.com/OWASP/ASVS/blob/master/4.0/en/0x12-V3-Session-management.md
# https://owasp.org/www-project-proactive-controls/v3/en/c6-digital-identity
"""
),
'tampering': string.Template(
"""
Scenario: Tampering
Given $process causes data to flow from less trusted $source in $sourceZone to more trusted $destination in $destinationZone
When $source modifies or otherwise tampers with data related to $process
            Then treat all input as malicious and handle it safely by ...
# Mitigation details
# https://github.com/OWASP/ASVS/blob/master/4.0/en/0x18-V10-Malicious.md
"""
),
'repudiation': string.Template(
"""
Scenario: Repudiation
Given $process has spoofing and tampering threats
When $source claims to not have taken an action related to $process
Then securely log all actions by ...
# Mitigation details
# https://github.com/OWASP/ASVS/blob/master/4.0/en/0x15-V7-Error-Logging.md
"""
),
'informationDisclosure': string.Template(
"""
Scenario: Information Disclosure
Given $process causes data to flow from a more trusted $source in $sourceZone to a less trusted $destination in $destinationZone
When $source attempts to gain access to information it is not authorized to see related to $process
Then ensure data is protected everywhere by ...
# Mitigation details
# https://github.com/OWASP/ASVS/blob/master/4.0/en/0x14-V6-Cryptography.md
# https://github.com/OWASP/ASVS/blob/master/4.0/en/0x16-V8-Data-Protection.md
"""
),
'denialOfService': string.Template(
"""
Scenario: Denial of Service
Given $process causes data to originate from $source outside of our control in $sourceZone
When $source attempts to deny or degrade service related to $process
Then ensure service resources are protected and all errors are handled by ...
# Mitigation details
# https://github.com/OWASP/ASVS/blob/master/4.0/en/0x19-V11-BusLogic.md
# https://github.com/OWASP/ASVS/blob/master/4.0/en/0x20-V12-Files-Resources.md
"""
),
'elevationOfPrivilege': string.Template(
"""
Scenario: Elevation of Privilege
Given $process causes data to flow from less trusted $source in $sourceZone to more trusted $destination in $destinationZone
            When $source attempts to gain additional capabilities without authorization related to $process
            Then ensure access control is enforced by ...
# Mitigation details
# https://github.com/OWASP/ASVS/blob/master/4.0/en/0x12-V4-Access-Control.md
"""
)
}
|
import pytest
import ckan.plugins.toolkit as tk
import ckan.tests.factories as factories
from ckan import model as model
from ckanext.hdx_pages.tests import USER, SYSADMIN, ORG, LOCATION
_get_action = tk.get_action
NotAuthorized = tk.NotAuthorized
@pytest.fixture()
def setup_user_data():
factories.User(name=USER, email='some_user@hdx.hdxtest.org')
factories.User(name=SYSADMIN, email='some_sysadmin@hdx.hdxtest.org', sysadmin=True)
syadmin_obj = model.User.get('some_sysadmin@hdx.hdxtest.org')
syadmin_obj.apikey = 'SYSADMIN_API_KEY'
model.Session.commit()
user_obj = model.User.get('some_user@hdx.hdxtest.org')
user_obj.apikey = 'USER_API_KEY'
model.Session.commit()
group = factories.Group(name=LOCATION)
factories.Organization(
name=ORG,
title='ORG NAME FOR PAGES',
users=[
{'name': USER, 'capacity': 'editor'},
],
hdx_org_type='donor',
org_url='https://hdx.hdxtest.org/'
)
@pytest.fixture(scope='module')
def keep_db_tables_on_clean():
model.repo.tables_created_and_initialised = True
|
"""
:mod:`optimal_pytorch` is a package implementing various coin-betting algorithms,
and test functions for optimization.
"""
__version__ = "0.1.0"
|
'''
Created on 10/03/2015
@author: Steven
Module for routines and _frameworks that intelligently integrate the real-space
correlation function
'''
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as _spline
from scipy.integrate import simps
from .halo_model import HaloModel
from hmf._cache import cached_quantity, parameter
from .halo_exclusion import dblsimps
from hmf.cosmo import Cosmology as csm
import warnings
class ProjectedCF(HaloModel):
def __init__(self, rp_min=0.01, rp_max=50.0, rp_num=30, rp_log=True, proj_limit=None, **kwargs):
# Set default rnum
if "rnum" not in kwargs:
kwargs['rnum'] = 5*rp_num
super(ProjectedCF, self).__init__(**kwargs)
self.proj_limit = proj_limit
self.rp_min = rp_min
self.rp_max = rp_max
self.rp_num = rp_num
self.rp_log = rp_log
@parameter("switch")
def rp_min(self, val):
return val
@parameter("option")
def rp_log(self, val):
return bool(val)
@parameter("res")
def rp_max(self, val):
return val
@parameter("res")
def rp_num(self, val):
if val < 0:
raise ValueError("rp_num must be > 0")
return int(val)
@parameter("switch")
def proj_limit(self, val):
return val
@cached_quantity
def rp(self):
if type(self.rp_min) == list or type(self.rp_min) == np.ndarray:
rp = np.array(self.rp_min)
else:
if self.rp_log:
rp = np.logspace(np.log10(self.rp_min), np.log10(self.rp_max), self.rp_num)
else:
rp = np.linspace(self.rp_min, self.rp_max, self.rp_num)
return rp
@cached_quantity
def rlim(self):
if self.proj_limit is None:
rlim = max(80.0, 5 * self.rp.max())
else:
rlim = self.proj_limit
return rlim
@cached_quantity
def r(self):
return np.logspace(np.log10(self.rp.min()), np.log10(self.rlim), self.rnum)
@cached_quantity
def projected_corr_gal(self):
"""
Projected correlation function w(r_p).
From Beutler 2011, eq 6.
To integrate perform a substitution y = x - r_p.
"""
return projected_corr_gal(self.r, self.corr_gg, self.rlim, self.rp)
def projected_corr_gal(r, xir, rlim, rp_out=None):
"""
Projected correlation function w(r_p).
From Beutler 2011, eq 6.
To integrate, we perform a substitution y = x - r_p.
Parameters
----------
r : float array
Array of scales, in [Mpc/h]
xir : float array
Array of xi(r), unitless
"""
if rp_out is None:
rp_out = r
lnr = np.log(r)
lnxi = np.log(xir)
p = np.zeros_like(rp_out)
fit = _spline(r, xir, k=3) # [self.corr_gal > 0] maybe?
f_peak = 0.01
a = 0
for i, rp in enumerate(rp_out):
if a != 1.3 and i < len(r) - 1:
# Get log slope at rp
ydiff = (lnxi[i + 1] - lnxi[i]) / (lnr[i + 1] - lnr[i])
# if the slope is flatter than 1.3, it will converge faster, but to make sure, we cut at 1.3
a = max(1.3, -ydiff)
theta = _get_theta(a)
min_y = theta * f_peak ** 2 * rp
# Get the upper limit for this rp
ylim = rlim - rp
# Set the y vector for this rp
y = np.logspace(np.log(min_y), np.log(ylim), 1000, base=np.e)
# Integrate
integ_corr = fit(y + rp)
integrand = (y + rp) * integ_corr / np.sqrt((y + 2 * rp) * y)
p[i] = simps(integrand, y) * 2
return p
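# Usage sketch (illustrative power-law input, not part of the original module):
# project a toy xi(r) = (r / 5.0) ** -1.8 sampled on a log grid out to
# rlim = 80 Mpc/h:
#     r = np.logspace(-2, 1.5, 100)
#     wp = projected_corr_gal(r, (r / 5.0) ** -1.8, 80.0)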
def _get_theta(a):
theta = 2 ** (1 + 2 * a) * (7 - 2 * a ** 3 + 3 * np.sqrt(5 - 8 * a + 4 * a ** 2) + a ** 2 * (9 + np.sqrt(5 - 8 * a + 4 * a ** 2)) -
a * (13 + 3 * np.sqrt(5 - 8 * a + 4 * a ** 2))) * ((1 + np.sqrt(5 - 8 * a + 4 * a ** 2)) / (a - 1)) ** (-2 * a)
theta /= (a - 1) ** 2 * (-1 + 2 * a + np.sqrt(5 - 8 * a + 4 * a ** 2))
return theta
def flat_z_dist(zmin,zmax):
def ret(z):
z = np.atleast_1d(z)
return np.where(np.logical_and(z>=zmin,z<=zmax),1./(zmax-zmin),0)
return ret
def dxdz(z,cosmo=csm().cosmo):
"Derivative of comoving distance with redshift [Mpc/h]"
dh = cosmo.hubble_distance * cosmo.h
return dh.value/cosmo.efunc(z)
class AngularCF(HaloModel):
"""
Framework extension to angular correlation functions.
Parameters
----------
p1 : callable, optional
        The redshift distribution of the sample. This need not
be normalised to 1, as this will occur internally. May be
either a function of radial distance [Mpc/h] or redshift.
If a function of radial distance, `p_of_z` must be set to
False. Default is a flat distribution in redshift.
p2 : callable, optional
See `p1`. This can optionally be a different function against
which to cross-correlate. By default is equivalent to `p1`.
theta_min, theta_max : float, optional
min,max angular separations [Rad]
theta_num : int, optional
Number of steps in angular separation
theta_log : bool, optional
Whether to use logspace for theta values
zmin, zmax : float, optional
The redshift limits of the sample distribution. Note that
        this is in redshift, regardless of the value of `p_of_z`.
znum : int, optional
Number of steps in redshift grid.
logu_min, logu_max : float, optional
min,max of the log10 of radial separation grid [Mpc/h]. Must be large
        enough to let the integral over the 3D correlation function converge.
unum : int, optional
Number of steps in the u grid.
check_p_norm : bool, optional
If False, cancels checking the normalisation of `p1` and `p2`.
p_of_z : bool, optional
Whether `p1` and `p2` are functions of redshift.
kwargs : unpacked-dict
Any keyword arguments passed down to :class:`halomod.HaloModel`.
"""
def __init__(self, p1=None, p2=None,
theta_min=1e-3 * np.pi/180.0, theta_max=np.pi/180.0, theta_num=30, theta_log=True,
zmin=0.2,zmax=0.4,znum=100,
logu_min=-4,logu_max=2.3,unum=100,check_p_norm=True, p_of_z=True,
**kwargs):
super(AngularCF, self).__init__(**kwargs)
if self.z < zmin or self.z>zmax:
warnings.warn("Your specified redshift (z=%s) is not within your selection function, z=(%s,%s)"%(self.z,zmin,zmax))
if p1 is None:
p1 = flat_z_dist(zmin,zmax)
self.p1 = p1
self.p2 = p2
self.zmin = zmin
self.zmax = zmax
self.znum = znum
self.logu_min = logu_min
self.logu_max = logu_max
self.unum = unum
self.check_p_norm = check_p_norm
self.p_of_z = p_of_z
self.theta_min = theta_min
self.theta_max = theta_max
self.theta_num = theta_num
self.theta_log = theta_log
@parameter("param")
def p1(self, val):
return val
@parameter("param")
def p2(self, val):
return val
@parameter("model")
def p_of_z(self,val):
return val
@parameter("res")
def theta_min(self, val):
if val < 0:
raise ValueError("theta_min must be > 0")
return val
@parameter("res")
def theta_max(self, val):
if val > 180.0:
raise ValueError("theta_max must be < 180.0")
return val
@parameter("res")
def theta_num(self, val):
return val
@parameter("res")
def theta_log(self, val):
return val
@parameter("param")
def zmin(self, val):
return val
@parameter("param")
def zmax(self, val):
return val
@parameter("res")
def znum(self, val):
return val
@parameter("res")
def logu_min(self, val):
return val
@parameter("res")
def logu_max(self, val):
return val
@parameter("res")
def unum(self, val):
return val
@parameter("option")
def check_p_norm(self, val):
return val
@cached_quantity
def zvec(self):
"""
Redshift distribution grid.
"""
return np.linspace(self.zmin,self.zmax,self.znum)
@cached_quantity
def uvec(self):
"Radial separation grid [Mpc/h]"
return np.logspace(self.logu_min,self.logu_max,self.unum)
@cached_quantity
def xvec(self):
"Radial distance grid (corresponds to zvec) [Mpc/h]"
return self.cosmo.comoving_distance(self.zvec).value
@cached_quantity
def theta(self):
"Angular separations, [Rad]"
if self.theta_min > self.theta_max:
raise ValueError("theta_min must be less than theta_max")
if self.theta_log:
return np.logspace(np.log10(self.theta_min), np.log10(self.theta_max), self.theta_num)
else:
return np.linspace(self.theta_min, self.theta_max, self.theta_num)
@cached_quantity
def r(self):
"Physical separation grid [Mpc/h]"
rmin = np.sqrt((10 ** self.logu_min) ** 2 + self.theta.min() ** 2 * self.xvec.min() ** 2)
rmax = np.sqrt((10 ** self.logu_max) ** 2 + self.theta.max() ** 2 * self.xvec.max() ** 2)
return np.logspace(np.log10(rmin), np.log10(rmax), self.rnum)
@cached_quantity
def angular_corr_gal(self):
"""
The angular correlation function w(theta).
From Blake+08, Eq. 33
"""
def xi(r):
s = _spline(self.r,self.corr_gg)
return s(r)
return angular_corr_gal(self.theta, xi, self.p1,
self.zmin, self.zmax, self.logu_min, self.logu_max,
znum=self.znum, unum=self.unum, p2=self.p2,
check_p_norm=self.check_p_norm, cosmo=self.cosmo,
p_of_z = self.p_of_z)
@cached_quantity
def angular_corr_matter(self):
"""
The angular correlation function w(theta).
From Blake+08, Eq. 33
"""
def xi(r):
s = _spline(self.r,self.corr_mm)
return s(r)
return angular_corr_gal(self.theta, xi, self.p1,
self.zmin, self.zmax, self.logu_min, self.logu_max,
znum=self.znum, unum=self.unum, p2=self.p2,
check_p_norm=self.check_p_norm, cosmo=self.cosmo,
p_of_z = self.p_of_z)
def _check_p(p,z):
if hasattr(p,"integral"):
integ = p.integral(z.min(),z.max())
else:
integ = simps(p(z),z)
if not np.isclose(integ,1.0,rtol=0.01):
print("WARNING: Filter function p(x) did not integrate to 1 (%s). Tentatively re-normalising."%integ)
return (lambda z: p(z)/integ)
else:
return p
def angular_corr_gal(theta, xi, p1, zmin, zmax, logu_min, logu_max,
znum=100, unum=100, p2=None, check_p_norm=True, cosmo=None,
p_of_z=True,
**xi_kw):
"""
Calculate the angular correlation function w(theta).
From Blake+08, Eq. 33. That is, this uses the Limber approximation.
    This does not hold for wide angles or for thin radial distributions.
Parameters
----------
theta : array_like
Angles at which to calculate the angular correlation. In radians.
xi : callable
A function of one variable: r [Mpc/h], which returns
the 3D correlation function at the scale r.
p1: callable
The redshift distribution of sources. Should integrate to 1 between
        `zmin` and `zmax`. A callable function of a single variable, z.
zmin, zmax : float
The redshift limits of the sample distribution. Note that
        this is in redshift, regardless of the value of `p_of_z`.
logu_min, logu_max : float
min,max of the log10 of radial separation grid [Mpc/h]. Must be large
        enough to let the integral over the 3D correlation function converge.
znum : int, optional
Number of steps in redshift grid.
unum : int, optional
Number of steps in the u grid.
p2 : callable, optional
The same as `p1`, but for a second, cross-correlating dataset. If not
provided, defaults to `p1` (i.e. auto-correlation).
check_p_norm : bool, optional
If False, cancels checking the normalisation of `p1` and `p2`.
p_of_z : bool, optional
Whether `p1` and `p2` are functions of redshift.
cosmo : `hmf.cosmo.Cosmology` instance, optional
A cosmology, used to generate comoving distance from redshift. Default
is the default cosmology of the `hmf` package.
xi_kw : unpacked-dict
Any arguments to `xi` other than r,z.
Returns
-------
wtheta : array_like
The angular correlation function corresponding to `theta`.
"""
if cosmo is None:
cosmo = csm().cosmo
# Arrays
u = np.logspace(logu_min, logu_max, unum)
dlnu = np.log(u[1]/u[0])
if p_of_z:
z = np.linspace(zmin,zmax,znum)
diff = z[1] - z[0]
x = (cosmo.comoving_distance(z)*cosmo.h).value
else:
xmin = (cosmo.comoving_distance(zmin)*cosmo.h).value
xmax = (cosmo.comoving_distance(zmax)*cosmo.h).value
x = np.linspace(xmin,xmax,znum)
diff = x[1] - x[0]
if check_p_norm:
p1 = _check_p(p1,z if p_of_z else x)
if p2 is None:
p2 = p1
elif check_p_norm:
p2 = _check_p(p2,z if p_of_z else x)
p_integ = p1(z)*p2(z) /dxdz(z,cosmo) if p_of_z else p1(x)*p2(x)
R = np.sqrt(np.add.outer(np.outer(theta**2 ,x**2),u**2)).flatten()
integrand = np.einsum("kij,i,j->kij", xi(R,**xi_kw).reshape((len(theta),len(x), len(u))), p_integ, u)
return 2*dblsimps(integrand,diff,dlnu)
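# Minimal usage sketch (illustrative only): the power-law xi and the flat redshift
# window below are assumptions, not values from the original module.
if __name__ == "__main__":
    def toy_xi(r):
        # Toy power-law 3D correlation function for demonstration.
        return (r / 5.0) ** -1.8

    theta = np.logspace(-2, 0, 10) * np.pi / 180.0  # 0.01 to 1 degree, in radians
    w = angular_corr_gal(theta, toy_xi, flat_z_dist(0.2, 0.4), 0.2, 0.4, -3.0, 2.0)
    print(w)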
|
import sys
import scipy
import numpy as np
import pyqtgraph.opengl as gl
from scipy import io
from skimage import measure
from PyQt4 import QtGui
# Create an PyQT4 application object.
app = QtGui.QApplication(sys.argv)
# Create a window object.
window = gl.GLViewWidget()
window.resize(500, 500)
window.setCameraPosition(distance=100)
window.setWindowTitle('pyqtgraph : GLIsosurface')
window.show()
# Read data from a mat file.
mat_file = io.loadmat('segment_myo4_frame30.mat')
myo3d = mat_file['myo3d']
# uniform_filter() is equivalent to smooth3() in matlab.
myo3d = scipy.ndimage.uniform_filter(myo3d, [5, 5, 20], mode='nearest')
# Using marching cubes algorithm to get a polygonal mesh of an isosurface
verts, faces = measure.marching_cubes(myo3d, 0.1)
meshdata = gl.MeshData(vertexes=verts, faces=faces)
mesh = gl.GLMeshItem(meshdata=meshdata, smooth=True, color=(1.0, 0.0, 0.0, 0.2), shader='balloon', glOptions='additive')
# Translation
[avgX, avgY, avgZ] = map(np.mean, zip(*verts))
mesh.translate(-avgX, -avgY, -avgZ)
window.addItem(mesh)
sys.exit(app.exec_())
|
import pandas as pd
passageiros = pd.read_csv('Passageiros.csv')
passageiros.head()
import seaborn as sns
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (10, 6)
mpl.rcParams['font.size'] = 22
sns.lineplot(x='tempo',y='passageiros', data=passageiros,label='dado_completo')
## Scaling the data
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(passageiros)
dado_escalado = sc.transform(passageiros)
x=dado_escalado[:,0] # Features - Time
y=dado_escalado[:,1] # Target - Number of passengers
import matplotlib.pyplot as plt
sns.lineplot(x=x,y=y,label='dado_escalado')
plt.ylabel('Passageiros')
plt.xlabel('Data')
## Splitting into train and test
tamanho_treino = int(len(passageiros)*0.9) # Taking 90% of the data for training
tamanho_teste = len(passageiros)-tamanho_treino # The rest is reserved for testing
xtreino = x[0:tamanho_treino]
ytreino = y[0:tamanho_treino]
xteste = x[tamanho_treino:len(passageiros)]
yteste = y[tamanho_treino:len(passageiros)]
sns.lineplot(x=xtreino,y=ytreino,label='treino')
sns.lineplot(x=xteste,y=yteste,label='teste')
# Lesson 2
## Linear Regression
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
regressor = Sequential()
regressor.add(Dense(1, input_dim=1, kernel_initializer='Ones',
activation='linear',use_bias=False))
regressor.compile(loss='mean_squared_error',optimizer='adam')
regressor.summary()
regressor.fit(xtreino,ytreino)
y_predict= regressor.predict(xtreino) # Predicting the training data (the fit)
sns.lineplot(x=xtreino,y=ytreino,label='treino')
sns.lineplot(x=xtreino,y=y_predict[:,0],label='ajuste_treino')
d = {'tempo': xtreino, 'passageiros': y_predict[:,0]}
resultados = pd.DataFrame(data=d)
resultados
resultado_transf = sc.inverse_transform(resultados)
resultado_transf = pd.DataFrame(resultado_transf)
resultado_transf.columns = ['tempo','passageiros']
sns.lineplot(x="tempo",y="passageiros",data=passageiros)
sns.lineplot(x="tempo",y="passageiros",data=resultado_transf)
y_predict_teste= regressor.predict(xteste) # Predicting the test data (the future)
d = {'tempo': xteste, 'passageiros': y_predict_teste[:,0]}
resultados_teste = pd.DataFrame(data=d)
resultado_transf_teste = sc.inverse_transform(resultados_teste)
resultado_transf_teste = pd.DataFrame(resultado_transf_teste)
resultado_transf_teste.columns = ['tempo','passageiros']
sns.lineplot(x="tempo",y="passageiros",data=passageiros,label='dado_completo')
sns.lineplot(x="tempo",y="passageiros",data=resultado_transf,label='ajuste_treino')
sns.lineplot(x="tempo",y="passageiros",data=resultado_transf_teste,label='previsão')
## Non-linear regression
regressor2 = Sequential()
regressor2.add(Dense(8, input_dim=1, kernel_initializer='random_uniform',
activation='sigmoid',use_bias=False))
regressor2.add(Dense(8, kernel_initializer='random_uniform',
activation='sigmoid',use_bias=False))
regressor2.add(Dense(1, kernel_initializer='random_uniform',
activation='linear',use_bias=False))
regressor2.compile(loss='mean_squared_error',optimizer='adam')
regressor2.summary()
regressor2.fit(xtreino,ytreino,epochs =500)
y_predict= regressor2.predict(xtreino) # Predicting the training data (the fit)
y_predict_teste= regressor2.predict(xteste) # Predicting the test data (the future)
sns.lineplot(x=xtreino,y=ytreino,label='treino')
sns.lineplot(x=xteste,y=yteste,label='teste')
sns.lineplot(x=xtreino,y=y_predict[:,0],label='ajuste_treino')
sns.lineplot(x=xteste,y=y_predict_teste[:,0],label='previsão')
# Lesson 3
## Changing how the data is fed to the model
# Now x and y hold different values: x contains the number of passengers at an earlier time and y contains the number of passengers at t+1, for example.
vetor = pd.DataFrame(ytreino)[0]
import numpy as np
def separa_dados(vetor,n_passos):
"""Entrada: vetor: número de passageiros
n_passos: número de passos no regressor
Saída:
X_novo: Array 2D
y_novo: Array 1D - Nosso alvo
"""
X_novo, y_novo = [], []
for i in range(n_passos,vetor.shape[0]):
X_novo.append(list(vetor.loc[i-n_passos:i-1]))
y_novo.append(vetor.loc[i])
X_novo, y_novo = np.array(X_novo), np.array(y_novo)
return X_novo, y_novo
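# Worked example (illustrative values): for a pd.Series [10, 20, 30, 40] and
# n_passos=2, separa_dados returns X_novo = [[10, 20], [20, 30]] and
# y_novo = [30, 40], i.e. each window of 2 past values predicts the next value.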
xtreino_novo, ytreino_novo = separa_dados(vetor,1)
print(xtreino_novo[0:5]) #X
print(ytreino_novo[0:5]) #y
## Now let's split off the test set
vetor2 = pd.DataFrame(yteste)[0]
xteste_novo, yteste_novo = separa_dados(vetor2,1)
## Back to the neural networks
regressor3 = Sequential()
regressor3.add(Dense(8, input_dim=1, kernel_initializer='ones', activation='linear',use_bias=False))
regressor3.add(Dense(64, kernel_initializer='random_uniform', activation='sigmoid',use_bias=False))
regressor3.add(Dense(1, kernel_initializer='random_uniform', activation='linear',use_bias=False))
regressor3.compile(loss='mean_squared_error',optimizer='adam')
regressor3.summary()
regressor3.fit(xtreino_novo,ytreino_novo,epochs =100)
y_predict_novo = regressor3.predict(xtreino_novo)
sns.lineplot(x='tempo',y=ytreino_novo,data=passageiros[1:129],label='treino')
sns.lineplot(x='tempo',y=pd.DataFrame(y_predict_novo)[0],data=passageiros[1:129],label='ajuste_treino')
y_predict_teste_novo = regressor3.predict(xteste_novo)
resultado = pd.DataFrame(y_predict_teste_novo)[0]
sns.lineplot(x='tempo',y=ytreino_novo,data=passageiros[1:129],label='treino')
sns.lineplot(x='tempo',y=pd.DataFrame(y_predict_novo)[0],data=passageiros[1:129],label='ajuste_treino')
sns.lineplot(x='tempo',y=yteste_novo,data=passageiros[130:144],label='teste')
sns.lineplot(x='tempo',y=resultado.values,data=passageiros[130:144],label='previsão')
## Windows
xtreino_novo, ytreino_novo = separa_dados(vetor,4)
xtreino_novo[0:5] #X
ytreino_novo[0:5] #y
xteste_novo, yteste_novo = separa_dados(vetor2,4)
regressor4 = Sequential()
regressor4.add(Dense(8, input_dim=4, kernel_initializer='random_uniform', activation='linear',use_bias=False)) #relu
regressor4.add(Dense(64, kernel_initializer='random_uniform', activation='sigmoid',use_bias=False)) #relu
regressor4.add(Dense(1, kernel_initializer='random_uniform', activation='linear',use_bias=False))
regressor4.compile(loss='mean_squared_error',optimizer='adam')
regressor4.summary()
regressor4.fit(xtreino_novo,ytreino_novo,epochs =300)
y_predict_novo = regressor4.predict(xtreino_novo) # refresh the training-set fit for the window-4 model
y_predict_teste_novo = regressor4.predict(xteste_novo)
resultado = pd.DataFrame(y_predict_teste_novo)[0]
sns.lineplot(x='tempo',y=ytreino_novo,data=passageiros[4:129],label='treino')
sns.lineplot(x='tempo',y=pd.DataFrame(y_predict_novo)[0],data=passageiros[4:129],label='ajuste_treino')
sns.lineplot(x='tempo',y=yteste_novo,data=passageiros[133:144],label='teste')
sns.lineplot(x='tempo',y=resultado.values,data=passageiros[133:144],label='previsão')
# New dataset
bike = pd.read_csv('bicicletas.csv')
bike.head()
bike['datas'] = pd.to_datetime(bike['datas'])
sns.lineplot(x='datas',y='contagem', data=bike)
plt.xticks(rotation=70)
## Scaling the data
sc2 = StandardScaler()
sc2.fit(bike['contagem'].values.reshape(-1,1))
y = sc2.transform(bike['contagem'].values.reshape(-1,1))
## Splitting into train and test
tamanho_treino = int(len(bike)*0.9) # Taking 90% of the data for training
tamanho_teste = len(bike)-tamanho_treino # The rest is reserved for testing
ytreino = y[0:tamanho_treino]
yteste = y[tamanho_treino:len(bike)]
sns.lineplot(x='datas',y=ytreino[:,0],data=bike[0:tamanho_treino],label='treino')
sns.lineplot(x='datas',y=yteste[:,0], data=bike[tamanho_treino:len(bike)],label='teste')
plt.xticks(rotation=70)
vetor = pd.DataFrame(ytreino)[0]
xtreino_novo, ytreino_novo = separa_dados(vetor,10)
print(xtreino_novo[0:5])
print(ytreino_novo[0:5])
vetor2 = pd.DataFrame(yteste)[0]
xteste_novo, yteste_novo = separa_dados(vetor2,10)
## What the LSTM expects
# The input to recurrent networks must have the shape (number of samples, number of time steps,
# and number of features per time step).
xtreino_novo = xtreino_novo.reshape((xtreino_novo.shape[0],xtreino_novo.shape[1],1))
print(xtreino_novo.shape)
xteste_novo = xteste_novo.reshape((xteste_novo.shape[0],xteste_novo.shape[1],1))
## Using the LSTM
from tensorflow.keras.layers import LSTM
recorrente = Sequential()
recorrente.add(LSTM(128, input_shape=(xtreino_novo.shape[1],xtreino_novo.shape[2])
))
recorrente.add(Dense(units=1))
recorrente.compile(loss='mean_squared_error',optimizer='RMSProp')
recorrente.summary()
resultado = recorrente.fit(xtreino_novo,ytreino_novo,validation_data=(xteste_novo,yteste_novo),epochs=100)
y_ajustado = recorrente.predict(xtreino_novo)
sns.lineplot(x='datas',y=ytreino[:,0],data=bike[0:tamanho_treino],label='treino')
sns.lineplot(x='datas',y=y_ajustado[:,0],data=bike[0:15662],label='ajuste_treino')
plt.xticks(rotation=70)
y_predito = recorrente.predict(xteste_novo)
sns.lineplot(x='datas',y=yteste[:,0], data=bike[tamanho_treino:len(bike)],label='teste')
sns.lineplot(x='datas',y=y_predito[:,0], data=bike[tamanho_treino+10:len(bike)],marker='.',label='previsão')
plt.xticks(rotation=70)
from tensorflow.keras.layers import GRU
recorrente_g = Sequential()
recorrente_g.add(GRU(128, input_shape=(xtreino_novo.shape[1],xtreino_novo.shape[2])
))
recorrente_g.add(Dense(units=1))
recorrente_g.compile(loss='mean_squared_error',optimizer='RMSProp')
recorrente_g.summary()
resultado2 = recorrente_g.fit(xtreino_novo,ytreino_novo,
validation_data=(xteste_novo,yteste_novo),epochs=100)
y_predito2 = recorrente_g.predict(xteste_novo)
sns.lineplot(x='datas',y=yteste[:,0], data=bike[tamanho_treino:len(bike)])
sns.lineplot(x='datas',y=y_predito2[:,0], data=bike[tamanho_treino+10:len(bike)],marker='.')
plt.legend(['conhecido','estimado'])
plt.xticks(rotation=70)
print(resultado2.history.keys())
plt.plot(resultado.history['loss'])
plt.plot(resultado.history['val_loss'])
plt.legend(['treino','teste'])
plt.plot(resultado2.history['loss'])
plt.plot(resultado2.history['val_loss'])
plt.legend(['treino','teste'])
|
from flask import abort, g, request
from ...models import Admin
def admin_auth() -> None:
"""管理员身份认证"""
if request.endpoint.split('.')[-1] in ['login']:
return
token = request.headers.get('Authorization')
if token:
admin = Admin.get_by_token(token)
if admin:
g.admin = admin # g.admin
return
abort(401)
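# Usage sketch (assumed wiring, not part of this module): the hook is meant to
# run before every admin request, e.g. registered on an admin blueprint:
#
#     from flask import Blueprint
#     admin_bp = Blueprint('admin', __name__)
#     admin_bp.before_request(admin_auth)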
|
#
# @lc app=leetcode.cn id=1209 lang=python3
#
# [1209] design-bounded-blocking-queue
#
None
# @lc code=end
|
# Generated by Django 3.1.7 on 2021-06-25 10:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0005_auto_20210625_0451'),
]
operations = [
migrations.RenameField(
model_name='category',
old_name='category',
new_name='name',
),
migrations.RenameField(
model_name='product',
old_name='product',
new_name='name',
),
]
|
"""
File to capture environment variables and handle them as a variable.
"""
import os
STAGE = os.getenv('STAGE', None)
DEBUG = os.getenv('DEBUG', 'False').lower() in ('1', 'true', 'yes') # env values are strings, so parse truthiness explicitly
IS_OFFLINE = os.environ.get('IS_OFFLINE')
DYNAMO_CLIENTS_TABLE = os.environ.get('DYNAMO_CLIENTS_TABLE')
AWS_REGION = os.environ.get('AWS_REGION')
DYNAMO_LOCAL_URL = os.environ.get('DYNAMO_LOCAL_URL')
|