content stringlengths 5 1.05M |
|---|
try:
import simplejson as json
print "Using simplejson for faster json parsing"
except ImportError:
import json
import telemetryutils
import jydoop
import sys
# Keep a reference to the builtin map() before it is shadowed by the
# jydoop mapper defined below.
python_map = map
def map(uid, line, context):
    """Mapper: emit (query, [count, time]) for each slow main-thread SQL
    statement touching content-prefs.sqlite, restricted to nightly-channel
    submissions with build dates up to 2013-03-30.

    Malformed submissions (missing info fields) are skipped silently.
    """
    payload = json.loads(line)
    try:
        info = payload['info']
        channel = info.get('appUpdateChannel', "too_old")
        # Presence check only: records without a reason are invalid.
        reason = info['reason']
        buildDate = info['appBuildID'][:8]
    except (KeyError, TypeError):
        # Narrowed from a bare `except:` — only the expected
        # missing-field/wrong-type failures should drop a record.
        return
    if (channel != 'nightly'
            or 'slowSQL' not in payload
            or buildDate > '20130330'
            or 'mainThread' not in payload['slowSQL']):
        return
    mainThreadSQL = payload['slowSQL']['mainThread']
    # .items() works on both Python 2 and 3 (iteritems() was Py2-only).
    for query, (count, time) in mainThreadSQL.items():
        if 'content-prefs.sqlite' not in query:
            continue
        context.write(query, [count, time])
def reduce(key, values, context):
    """Reducer: element-wise sum of the [count, time] lists for one query."""
    values = list(values)
    totals = values[0]
    for extra in values[1:]:
        for idx, item in enumerate(extra):
            totals[idx] += item
    context.write(key, totals)
# The combiner reuses the reducer: element-wise list addition is associative,
# so partial aggregation on the mapper side is safe.
combine = reduce
# Job setup is delegated to the shared telemetry helper module.
setupjob = telemetryutils.setupjob
def output(path, results):
    """Write tab-separated `time<TAB>count<TAB>query` rows to *path*.

    The original opened the file without ever closing it; `with` guarantees
    the handle is flushed and closed even on error.
    """
    with open(path, 'w') as f:
        for k, v in results:
            count, time = v
            f.write("%s\t%s\t%s\n" % (time, count, k))
|
import pytest
from stix2.hashes import Hash, check_hash, infer_hash_algorithm
@pytest.mark.parametrize(
    "hash_name, expected_alg", [
        ("md5", Hash.MD5),
        ("md6", Hash.MD6),
        ("ripemd160", Hash.RIPEMD160),
        ("sha1", Hash.SHA1),
        ("sha224", Hash.SHA224),
        ("sha256", Hash.SHA256),
        ("sha384", Hash.SHA384),
        ("sha512", Hash.SHA512),
        ("sha3224", Hash.SHA3224),
        ("sha3256", Hash.SHA3256),
        ("sha3384", Hash.SHA3384),
        ("sha3512", Hash.SHA3512),
        ("ssdeep", Hash.SSDEEP),
        ("whirlpool", Hash.WHIRLPOOL),
        ("tlsh", Hash.TLSH),
        ("xxxx", None),
    ],
)
def test_hash_inference(hash_name, expected_alg):
    """infer_hash_algorithm() ignores case and a leading dash."""
    variants = (
        hash_name,
        hash_name[0].upper() + hash_name[1:],
        "-" + hash_name,
    )
    for variant in variants:
        assert infer_hash_algorithm(variant) == expected_alg
@pytest.mark.parametrize(
    "hash_alg, hash_value", [
        (Hash.MD5, "f9e40b9aa5464f3dae711ca524fceb63"),
        (Hash.MD6, "f9e40b9aa5464f3dae711ca524fceb63"),
        (Hash.RIPEMD160, "8ae5d2e6b1f3a514257f2469b637454931844aeb"),
        (Hash.SHA1, "f2c7d4185880c0adcbb4a01d020a69498b16210e"),
        (Hash.SHA224, "6743ed70cc26e750ad0108b6b8ad7fc2780c550f7d78adefa04dda05"),
        (Hash.SHA256, "a2d1c2081aa932fe72307ab076b9739455bc7a21b3bed367bd9a86ae27af5a40"),
        (Hash.SHA384, "bc846457de707f97bce93cca23b5ea58c0326fd8b79ef7b523ba1d0a792f22868732e53a5dcf2f9e3b89eecca9c9b4e3"),
        (Hash.SHA512, "896e45c82f9d8ba917d4f95891c967b88304b0a67ccc59aac813ee7ab3bc700bf9ce559e283c35ddba619755f6b70bdff2a07dc9cd337576a143a2aa361d08b1"),
        (Hash.SHA3224, "37cb283bc9f6ecf0f94e92d5bd4c1e061ae00d7ed85804d18f981f53"),
        (Hash.SHA3256, "d5fc146e37d4fddaeaa57aa88390be5c9ca6bcb18ae1bf2346cbfc36d3310ea2"),
        (Hash.SHA3384, "ac97414589b2ef59a87dc5277d156b6cfc8f6b92b7c0e889d8f38a235dd9c1ba4030321beddd13f29519390ba914f70f"),
        (Hash.SHA3512, "8dc580ad3abc6305ce5ada7c5920c763720c7733c2a94d28dd5351ffbc162b6b6d21371d91d6559124159025172e19896e09889047aac4ef555cc55456e14b0a"),
        (Hash.SSDEEP, "3:AXGBicFlgVNhBGcL6wCrFQEv:AXGHsNhxLsr2C"),
        (Hash.WHIRLPOOL, "b752b6eeb497a8bebfc1be1649ca41d57fd1973bffc2261ca196b5474e0f353762f354c1d743581f61c51f4d86921360bc2e8ad35e830578b68b12e884a50894"),
        (Hash.TLSH, "6FF02BEF718027B0160B4391212923ED7F1A463D563B1549B86CF62973B197AD2731F8"),
        ("foo", "bar"),  # unrecognized hash type is accepted as-is
    ],
)
def test_hash_check(hash_alg, hash_value):
    """check_hash() accepts a valid value regardless of hex-digit case."""
    for candidate in (hash_value, hash_value.upper()):
        assert check_hash(hash_alg, candidate)
def test_hash_check_fail():
    """A 200-character junk value is rejected by every known algorithm."""
    bogus = "x" * 200
    assert all(not check_hash(alg, bogus) for alg in Hash)
|
'''
Joe Walter
difficulty: 5%
run time: 0:20
answer: 40730
***
034 Digit Factorials
145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.
Find the sum of all numbers which are equal to the sum of the factorial of their digits.
Note: as 1! = 1 and 2! = 2 are not sums they are not included.
***
Observations
Beyond 7 * 9! = 2,540,160 (the largest possible digit-factorial sum of a
7-digit number), n always exceeds sum_fact_digits(n), so no larger solutions exist.
'''
# Upper search bound for the main loop (see module docstring).
# NOTE(review): this shadows the builtin `max`.
max = 19_999_999
# Factorial of each decimal digit, keyed by its character.
# A dict lookup here is faster than indexing a list by int(digit).
f = {'0': 1, '1': 1, '2': 2, '3': 6, '4': 24, '5': 120,
     '6': 720, '7': 5040, '8': 40320, '9': 362880}
def sum_fact_digits(n):
    """Return the sum of the factorials of the decimal digits of *n*."""
    total = 0
    for digit in str(n):
        total += f[digit]
    return total
def solve():
    """Return the sum of all numbers (>= 10) that equal the factorial-sum
    of their digits (Project Euler 34; per the header the answer is 40730)."""
    # 7 * 9! = 2,540,160 bounds any digit-factorial sum, so scanning to the
    # module-level `max` (~20 million) can never find further solutions —
    # the tighter bound cuts the work ~8x without changing the result.
    upper = 2_540_160
    ans = 0
    for n in range(10, upper):
        if n == sum_fact_digits(n):
            ans += n
    return ans
print(solve())
|
from functools import lru_cache
class Solution(object):
    def climbStairs(self, n):
        """
        :type n: int
        :rtype: int

        Number of distinct ways to climb n stairs taking 1 or 2 steps:
        the Fibonacci recurrence ways(n) = ways(n-1) + ways(n-2), computed
        iteratively in O(n) time and O(1) space.
        """
        ways_curr, ways_next = 1, 2
        for _ in range(n - 1):
            ways_curr, ways_next = ways_next, ways_curr + ways_next
        return ways_curr
|
from torch.optim import Optimizer
from vel.api import TrainingInfo, EpochInfo, Learner, Model, Source
class TrainPhase:
    """ A single phase of training.

    Defines the lifecycle hooks a phase must/can implement: set-up, optional
    mid-phase restore, per-epoch info/execution, tear-down and state saving.
    Subclasses must override number_of_epochs, epoch_info and execute_epoch.
    """
    @property
    def number_of_epochs(self) -> int:
        """ How many epochs does this phase take """
        raise NotImplementedError

    def set_up_phase(self, training_info: TrainingInfo, model: Model, source: Source) -> Optimizer:
        """ Prepare the phase for learning, returns phase optimizer """
        # Default does nothing and returns None despite the Optimizer
        # annotation; phases that train override this.
        pass

    def restore(self, training_info: TrainingInfo, local_batch_idx: int, model: Model, hidden_state: dict):
        """
        Restore learning from intermediate state.

        No-op by default; phases that checkpoint mid-epoch override it.
        """
        pass

    def epoch_info(self, training_info: TrainingInfo, global_idx: int, local_idx: int) -> EpochInfo:
        """ Create Epoch info """
        raise NotImplementedError

    def execute_epoch(self, epoch_info: EpochInfo, learner: Learner):
        """
        Execute epoch training.
        """
        raise NotImplementedError

    def tear_down_phase(self, training_info: TrainingInfo, model: Model):
        """ Clean up after phase is done """
        pass

    def state_dict(self):
        """
        State to save down.

        Stateless by default, hence the empty dict.
        """
        return {}

    def banner(self) -> str:
        """ Return banner for the phase """
        return f"|------> PHASE: {self.__class__.__name__} Length: {self.number_of_epochs}"
class EmptyTrainPhase(TrainPhase):
    """ A train phase that is a simple call, without any training.

    Reports zero epochs and zero batches so the training loop passes
    straight through it.
    """
    @property
    def number_of_epochs(self) -> int:
        """ How many epochs does this phase take """
        return 0

    def execute_epoch(self, epoch_info, learner):
        """ Prepare the phase for learning """
        # Intentionally a no-op: an empty phase has nothing to train.
        pass

    def epoch_info(self, training_info: TrainingInfo, global_idx: int, local_idx: int) -> EpochInfo:
        """ Create Epoch info """
        return EpochInfo(training_info, global_epoch_idx=global_idx, local_epoch_idx=local_idx, batches_per_epoch=0)
|
import os
# Absolute path of the directory containing this config module.
basedir = os.path.abspath(os.path.dirname(__file__))

class Config(object):
    """Flask application configuration."""
    MAX_LENGTH = 34            # maximum allowed input length
    UPLOAD_FOLDER = 'tmp/'     # staging area for uploads
    # Prefer a secret injected through the environment; the literal is only
    # a development fallback and must never ship to production.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
    # SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'app.db')
    # SQLALCHEMY_TRACK_MODIFICATIONS = False
import re
from django import template
register = template.Library()
@register.filter(name="embedurl")
def get_embed_url_with_parameters(url):
    """Template filter: convert a YouTube watch/share URL into an embed URL.

    Returns `https://www.youtube.com/embed/<id>?rel=0`, or None when *url*
    is not a YouTube link. (Removed the leftover debug print() calls.)
    """
    if "youtube.com" in url or "youtu.be" in url:
        regex = r"(?:https:\/\/)?(?:www\.)?(?:youtube\.com|youtu\.be)\/(?:watch\?v=)?(.+)"  # Get video id from URL
        embed_url = re.sub(
            regex, r"https://www.youtube.com/embed/\1", url
        )  # Append video id to desired URL
        # rel=0 suppresses unrelated recommended videos after playback.
        return embed_url + "?rel=0"
    return None
def register_data(app):
    """Attach every data-server API blueprint under its /api prefix."""
    from app.api.data.file import blueprint as data_file
    from app.api.data.friend import blueprint as data_friend
    from app.api.data.mail import blueprint as data_mail
    from app.api.data.post import blueprint as data_post
    from app.api.data.server import blueprint as data_server
    from app.api.data.user import blueprint as data_user
    mounts = (
        (data_user, '/api/user'),
        (data_post, '/api/post'),
        (data_friend, '/api/friend'),
        (data_file, '/api/file'),
        (data_mail, '/api/mail'),
        (data_server, '/api/server'),
    )
    for blueprint, prefix in mounts:
        app.register_blueprint(blueprint, url_prefix=prefix)
def register_central(app):
    """Attach the central-server API blueprints under their /api prefixes."""
    from app.api.central.server import blueprint as central_server
    from app.api.central.user import blueprint as central_user
    for blueprint, prefix in (
            (central_user, '/api/user'),
            (central_server, '/api/server'),
    ):
        app.register_blueprint(blueprint, url_prefix=prefix)
def init_authentication(app):
    """Register JWT failure handlers on *app*.

    Every JWT failure mode (expired, missing, stale, revoked, invalid)
    renders the same template with error='authentication'. Data servers
    use their own error page; everything else gets the login page.
    """
    # Using the expired_token_loader decorator, we will now call
    # this function whenever an expired but otherwise valid access
    # token attempts to access an endpoint.
    from flask import render_template
    from flask_jwt_extended import JWTManager, jwt_required
    from app.type import get_server_type, ServerType
    jwt = JWTManager(app)
    # The callbacks below close over `template`, chosen per server type.
    template = 'login.html'
    if get_server_type() == ServerType.DATA:
        template = 'data/error.html'

    @jwt.expired_token_loader
    def my_expired_token_callback(expired_token):
        return render_template(template, error='authentication')

    @jwt.unauthorized_loader
    def my_unauthorized_token_callback(expired_token):
        return render_template(template, error='authentication')

    @jwt.needs_fresh_token_loader
    def my_needs_fresh_token_loader_callback(expired_token):
        return render_template(template, error='authentication')

    @jwt.revoked_token_loader
    def my_revoked_token_loader_callback(expired_token):
        return render_template(template, error='authentication')

    @jwt.invalid_token_loader
    def my_invalid_token_loader_callback(expired_token):
        return render_template(template, error="authentication")
def auth_username():
    """Return the JWT identity of the caller, or None when unauthenticated."""
    from flask_jwt_extended import verify_jwt_in_request_optional, \
        get_jwt_identity
    try:
        verify_jwt_in_request_optional()
        return get_jwt_identity()
    except Exception:
        return None
def jwt_required_custom(fn):
    """
    A decorator to protect a Flask endpoint.
    If you decorate an endpoint with this, it will ensure that the requester
    has a valid and fresh access token before allowing the endpoint to be
    called.
    See also: :func:`~flask_jwt_extended.jwt_required`
    """
    import base64
    import json
    from functools import wraps
    from flask import request, current_app, render_template
    from flask_jwt_extended import verify_jwt_in_request
    import requests
    from app.type import get_server_type, ServerType
    from app.utils import get_central_ip

    # functools.wraps replaces the original manual `wrapper.__name__ = ...`
    # so docstring/module metadata survive too (Flask needs distinct
    # endpoint names when the same decorator wraps several views).
    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            # Pull the raw token: the central server serves browsers (cookie),
            # data servers receive it in the Authorization header.
            if get_server_type() == ServerType.CENTRAL:
                header = request.cookies['access_token_cookie']
            else:
                header = request.headers['authorization']
            # Decode the (not yet verified) JWT payload to learn the identity.
            # Over-padding with '=' is harmless to b64decode and avoids
            # computing the exact padding length.
            parts = header.split('.')
            decoded = base64.b64decode(parts[1] + '=============') \
                .decode('utf-8')
            username = json.loads(decoded)['identity']
            # Fetch the public key that can verify this user's token.
            if get_server_type() == ServerType.CENTRAL:
                # Central holds the key locally; imported here to avoid a
                # circular import at module load.
                from app.api.central.server import get_pub_key
                pub = get_pub_key(username)
            else:
                # Data servers ask the central server for the key.
                pub = requests.get(
                    get_central_ip() + '/api/server/pub_key',
                    params={
                        'username': username
                    }
                ).json()['data']
            current_app.config['JWT_PUBLIC_KEY'] = pub
        except BaseException:
            # Any failure to obtain/decode the token falls back to login.
            return render_template('login.html')
        # Let the JWT extended library check the token.
        try:
            verify_jwt_in_request()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed here.
            return render_template('logout.html')
        return fn(*args, **kwargs)
    return wrapper
|
import numpy as np
import torch
import gym
from PIL import Image
import imageio
from pygifsicle import optimize
import os, sys
root_dir = os.path.dirname(os.path.abspath(__file__))
external_dir = os.path.join(root_dir, 'externals')
sys.path.insert(0, root_dir)
sys.path.insert(1, os.path.join(external_dir, 'pytorch_a2c_ppo_acktr_gail'))
from utils.algo_utils import *
from ppo.envs import make_vec_envs
from ppo.utils import get_vec_normalize
import utils.mp_group as mp
def get_generations(load_dir, exp_name):
    """Return [0, 1, ..., k-1] where k is the number of `generation_<n>`
    entries under load_dir/exp_name.

    Entries that do not parse as `<prefix>_<int>` are ignored.
    NOTE(review): as in the original, the parsed generation numbers are
    discarded — the result assumes generations are contiguous from 0.
    """
    count = 0
    for entry in os.listdir(os.path.join(load_dir, exp_name)):
        try:
            int(entry.split("_")[1])
        except (IndexError, ValueError):
            continue  # not a generation_<n> entry
        count += 1
    return list(range(count))
def get_exp_gen_data(exp_name, load_dir, gen):
    """Read (robot_id, reward) pairs from a generation's output.txt.

    The file lives at load_dir/exp_name/generation_<gen>/output.txt with
    one `<id> <reward>` pair per line. Uses `with` so the handle is closed
    (the original leaked it).
    """
    robot_data = []
    gen_data_path = os.path.join(load_dir, exp_name, f"generation_{gen}", "output.txt")
    with open(gen_data_path, "r") as f:
        for line in f:
            fields = line.split()
            robot_data.append((int(fields[0]), float(fields[1])))
    return robot_data
def dummy_callback(_):
    """No-op callback for APIs that require a completion hook."""
    pass
def save_robot_gif(out_path, env_name, body_path, ctrl_path):
    """Roll out a trained controller once and save the episode as a gif.

    out_path   output path WITHOUT the '.gif' extension
    env_name   environment id for make_vec_envs
    body_path  .npz archive holding the robot structure arrays
    ctrl_path  torch checkpoint containing (actor_critic, obs_rms)
    Returns 0 on completion.
    """
    global GIF_RESOLUTION
    structure_data = np.load(body_path)
    structure = []
    for key, value in structure_data.items():
        structure.append(value)
    structure = tuple(structure)
    env = make_vec_envs(env_name, structure, 1000, 1, None, None, device='cpu', allow_early_resets=False)
    env.get_attr("default_viewer", indices=None)[0].set_resolution(GIF_RESOLUTION)
    actor_critic, obs_rms = torch.load(ctrl_path, map_location='cpu')
    # Replay with the normalization statistics frozen at trained values.
    vec_norm = get_vec_normalize(env)
    if vec_norm is not None:
        vec_norm.eval()
        vec_norm.obs_rms = obs_rms
    recurrent_hidden_states = torch.zeros(1, actor_critic.recurrent_hidden_state_size)
    masks = torch.zeros(1, 1)
    obs = env.reset()
    img = env.render(mode='img')
    reward = None
    done = False
    imgs = []
    while not done:
        with torch.no_grad():
            value, action, _, recurrent_hidden_states = actor_critic.act(
                obs, recurrent_hidden_states, masks, deterministic=True)
        obs, reward, done, _ = env.step(action)
        img = env.render(mode='img')
        imgs.append(img)
        masks.fill_(0.0 if (done) else 1.0)
        if done == True:
            env.reset()
    env.close()
    gif_path = f'{out_path}.gif'
    imageio.mimsave(gif_path, imgs, duration=(1/50.0))
    try:
        # BUG FIX: optimize() was previously called with `out_path` (no
        # extension), never the file actually written, and the failure was
        # silently swallowed below.
        optimize(gif_path)
    except Exception:
        # Best effort: most likely gifsicle is not installed.
        pass
    return 0
class Robot():
    """Record describing one saved robot and where its data files live."""
    def __init__(
        self,
        body_path=None,
        ctrl_path=None,
        reward=None,
        env_name=None,
        exp_name=None,
        gen=None):
        self.body_path = body_path
        self.ctrl_path = ctrl_path
        self.reward = reward
        self.env_name = env_name
        self.exp_name = exp_name
        self.gen = gen

    def __str__(self):
        """Join the available descriptors as '<exp>_gen<N>_(<reward>)'."""
        parts = []
        if self.exp_name is not None:
            parts.append(f'{self.exp_name}')
        if self.gen is not None:
            parts.append(f'gen{self.gen}')
        if self.reward is not None:
            parts.append(f'({round(self.reward, 3)})')
        return '_'.join(part for part in parts if len(part) != 0)
class Job():
    """One folder level of gif-generation work over saved experiment data.

    A Job either holds a concrete set of experiments/generations to render,
    or fans out into `sub_jobs` (one output folder per sub job).
    """
    def __init__(
        self,
        name,
        experiment_names,
        env_names,
        load_dir,
        generations=None,
        ranks=None,
        jobs=None,
        organize_by_jobs=True,
        organize_by_experiment=False,
        organize_by_generation=False):
        """
        name              folder name for this level (None/'' = no folder)
        experiment_names  experiments to render at this level
        env_names         env id per experiment (parallel to experiment_names)
        load_dir          root of the saved experiment data
        generations       generation numbers to render (None = discover all)
        ranks             output-name prefixes (None = 0..len(robots)-1)
        jobs              pre-built sub jobs to adopt
        """
        # set values
        self.name = name
        self.experiment_names = experiment_names
        self.env_names = env_names
        self.load_dir = load_dir
        self.generations = generations
        self.ranks = ranks
        # adopt pre-built sub jobs; dropping a sub job's name flattens it
        # into this level's folder
        self.sub_jobs = []
        if jobs:
            for job in jobs:
                self.sub_jobs.append(job)
                self.sub_jobs[-1].name = job.name if organize_by_jobs else None
        if organize_by_experiment:
            # one sub job (and folder) per experiment
            for exp_name, env_name in zip(self.experiment_names, self.env_names):
                self.sub_jobs.append(Job(
                    name = exp_name,
                    experiment_names = [exp_name],
                    # BUG FIX: was `[env_names]` (the whole list); each sub
                    # job renders a single experiment with its own env.
                    env_names = [env_name],
                    load_dir = self.load_dir,
                    generations = self.generations,
                    ranks = self.ranks,
                    organize_by_experiment=False,
                    organize_by_generation=organize_by_generation
                ))
            # the work now lives entirely in the sub jobs
            self.experiment_names = None
            self.env_names = None
            self.generations = None
            self.ranks = None
        elif organize_by_generation:
            assert len(self.experiment_names) == 1, (
                'Cannot create generation level folders for multiple experiments. Quick fix: set organize_by_experiment=True.'
            )
            if self.generations is None:
                exp_name = self.experiment_names[0]
                self.generations = get_generations(self.load_dir, exp_name)
            for gen in self.generations:
                self.sub_jobs.append(Job(
                    name = f'generation_{gen}',
                    experiment_names = self.experiment_names,
                    env_names = self.env_names,
                    load_dir = self.load_dir,
                    generations = [gen],
                    ranks = self.ranks,
                    organize_by_experiment=False,
                    organize_by_generation=False
                ))
            self.experiment_names = None
            self.env_names = None
            self.generations = None
            self.ranks = None

    def generate(self, load_dir, save_dir, depth=0):
        """Recursively create output folders and render this job tree's gifs."""
        if self.name is not None and len(self.name) != 0:
            save_dir = os.path.join(save_dir, self.name)
            tabs = ' '*depth
            print(f"{tabs}\\{self.name}")
        os.makedirs(save_dir, exist_ok=True)
        for sub_job in self.sub_jobs:
            sub_job.generate(load_dir, save_dir, depth+1)
        # collect robots; leaf work only exists when experiments were kept
        if self.experiment_names is None:
            return
        robots = []
        for exp_name, env_name in zip(self.experiment_names, self.env_names):
            # NOTE(review): generations are discovered under self.load_dir
            # but per-generation data is read from the `load_dir` argument;
            # preserved as-is — confirm the two are meant to coincide.
            exp_gens = self.generations if self.generations is not None else get_generations(self.load_dir, exp_name)
            for gen in exp_gens:
                for idx, reward in get_exp_gen_data(exp_name, load_dir, gen):
                    robots.append(Robot(
                        body_path = os.path.join(load_dir, exp_name, f"generation_{gen}", "structure", f"{idx}.npz"),
                        ctrl_path = os.path.join(load_dir, exp_name, f"generation_{gen}", "controller", f"robot_{idx}_controller.pt"),
                        reward = reward,
                        env_name = env_name,
                        exp_name = exp_name if len(self.experiment_names) != 1 else None,
                        gen = gen if len(exp_gens) != 1 else None,
                    ))
        # best first, so gif "0_..." is the highest-reward robot
        robots = sorted(robots, key=lambda x: x.reward, reverse=True)
        ranks = self.ranks if self.ranks is not None else [i for i in range(len(robots))]
        # make gifs
        for i, robot in zip(ranks, robots):
            save_robot_gif(
                os.path.join(save_dir, f'{i}_{robot}'),
                robot.env_name,
                robot.body_path,
                robot.ctrl_path
            )
        # multiprocessing is currently broken
        # group = mp.Group()
        # for i, robot in zip(ranks, robots):
        #     gif_args = (
        #         os.path.join(save_dir, f'{i}_{robot}'),
        #         robot.env_name,
        #         robot.body_path,
        #         robot.ctrl_path
        #     )
        #     group.add_job(save_robot_gif, gif_args, callback=dummy_callback)
        # group.run_jobs(NUM_PROC)
# Rendering resolution for the generated gifs (1/5 of 1280x720).
GIF_RESOLUTION = (1280/5, 720/5)
# NUM_PROC = 8

if __name__ == '__main__':
    # Render gifs for the top-3 robots of every generation of 'test_ga',
    # organized into one folder per generation under saved_data/all_media.
    exp_root = os.path.join('saved_data')
    save_dir = os.path.join(root_dir, 'saved_data', 'all_media')
    my_job = Job(
        name = 'test_ga',
        experiment_names= ['test_ga'],
        env_names = ['Walker-v0'],
        ranks = [i for i in range(3)],
        load_dir = exp_root,
        organize_by_experiment=False,
        organize_by_generation=True,
    )
    my_job.generate(load_dir=exp_root, save_dir=save_dir)
from prompt.prompt.context import Context as BaseContext
class Context(BaseContext):
    """Prompt context that snapshots the current nvim buffer and window state
    so everything can be restored after the prompt session ends."""
    __slots__ = [
        'nvim',
        'text',
        'caret_locus',
        'buffer_number',
        'buffer_content',
        'buffer_options',
        'window_options',
        'selected_line',
        'selected_indices',
        'viewinfo',
        'undofile',
    ]

    def __init__(self, nvim):
        """Capture buffer content/options, window options, the current view
        and the buffer's undo history."""
        super().__init__(nvim)
        buffer = nvim.current.buffer
        self.buffer_number = buffer.number
        self.buffer_content = buffer[:]
        self.buffer_options = {
            k: buffer.options[k] for k in [
                'syntax',
                'readonly',
                'modified',
                'modifiable',
            ]
        }
        window = nvim.current.window
        self.window_options = {
            k: window.options[k] for k in [
                'spell',
                'foldenable',
                'statusline',
                'colorcolumn',
                'cursorline',
                'cursorcolumn',
            ]
        }
        self.selected_line = 0
        self.selected_indices = range(len(self.buffer_content))
        self.viewinfo = nvim.call('winsaveview')
        # Persist undo history to a temp file so edits made during the
        # prompt session can be rolled back by restore().
        self.undofile = nvim.call('tempname')
        nvim.command('silent wundo! %s' % self.undofile)

    def restore(self, nvim):
        """Write the captured state back to the buffer and window.

        Raises Exception if the current buffer is no longer the one captured.
        """
        if self.buffer_number != nvim.current.buffer.number:
            raise Exception('Buffer number mismatched')
        buffer = nvim.current.buffer
        # Temporarily force the buffer writable so content can be replaced;
        # the captured options (incl. readonly/modifiable) are re-applied below.
        buffer.options['readonly'] = False
        buffer.options['modifiable'] = True
        buffer[:] = self.buffer_content
        for k, v in self.buffer_options.items():
            buffer.options[k] = v
        window = nvim.current.window
        for k, v in self.window_options.items():
            window.options[k] = v
        nvim.call('winrestview', self.viewinfo)
        nvim.command('silent! rundo %s' % self.undofile)
|
import plugin
import urwid # 1.3.1
class HelloWorld(plugin.BasePlugin):
    """A simple demonstration plugin.

    Displays a static "Hello World!" urwid text widget.
    """
    def __init__(self):
        super().__init__()
        # Built once and reused; the framework embeds this widget directly.
        self.main_widget = urwid.Text("Hello World!")

    def get_main_widget(self):
        """Return the widget the framework should display for this plugin."""
        return self.main_widget
def new_instance(framework):
    """Plugin entry point: return a fresh HelloWorld instance.

    *framework* is accepted for interface compatibility but unused here.
    """
    return HelloWorld()
|
# Copyright (c) 2021. Universidad de Pinar del Rio
# This file is part of SCEIBA (sceiba.cu).
# SCEIBA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
from __future__ import absolute_import, division, print_function
import click
from flask.cli import with_appcontext
from iroko.sources.fixtures import init_journals, init_repos
from iroko.sources.harvesters.issn import IssnHarvesterManager
from iroko.sources.harvesters.miar import MiarHarvesterManager
@click.group()
def sources():
    """Commands related to Iroko sources data."""

@sources.command()
@with_appcontext
def init_repos_data():
    """Initialize repository fixtures."""
    init_repos()

@sources.command()
@with_appcontext
def sync_old_journals_data():
    """Sync journal data from the old tocororo website."""
    init_journals()

# @sources.command()
# @with_appcontext
# def initjournalsrelations():
#     """Init journals relations with terms."""
#     init_term_sources()
#

@sources.command()
@with_appcontext
def issn_collect():
    """Get all Cuban ISSNs from issn.org and save them to a file."""
    IssnHarvesterManager.collect_issn()

@sources.command()
@with_appcontext
def issn_sync_db():
    """Save all collected issn.org data to SourceRawData, collecting missing data if any."""
    IssnHarvesterManager.sync_db()

@sources.command()
@with_appcontext
def issn_sync_records():
    """Parse SourceRawData and sync to SourceRecords."""
    IssnHarvesterManager.sync_records()

@sources.command()
@with_appcontext
def miar_collect_db():
    """Get all MIAR databases and save them to a file."""
    MiarHarvesterManager.collect_databases()

@sources.command()
@with_appcontext
def miar_sync_db():
    """Create a Vocabulary from the collected MIAR databases."""
    MiarHarvesterManager.sync_databases()

@sources.command()
@with_appcontext
def miar_collect_journals():
    """Collect MIAR info for every SourceRawData and save it to files."""
    MiarHarvesterManager.collect_journals()

@sources.command()
@with_appcontext
def miar_sync_journals():
    """Sync all collected MIAR info into SourceRawData."""
    MiarHarvesterManager.sync_journals()

@sources.command()
@with_appcontext
def miar_sync_records():
    """Parse SourceRawData and sync to SourceRecords."""
    MiarHarvesterManager.sync_journals_records()
|
# Sentinel "unbounded" length used by the op Iterator below.
Infinity = float('inf')
import op
import iz
class Iterator(object):
    """Iterates over a list of rich-text ops, allowing each op to be
    consumed in arbitrary-length pieces.

    Fixes: the Python-2-only `except IndexError, e:` syntax (a SyntaxError
    on Python 3) and the bare `except:` clauses, all narrowed to IndexError.
    """
    def __init__(self, ops):
        self.ops = ops
        self.index = 0   # index of the current op
        self.offset = 0  # how much of the current op is already consumed

    def hasNext(self):
        """True while there is a real (finite-length) op left."""
        return self.peekLength() < Infinity

    def next(self, length=Infinity):
        """Consume and return up to *length* of the current op."""
        try:
            nextOp = self.ops[self.index]
        except IndexError:
            nextOp = None
        if nextOp:
            offset = self.offset
            opLength = op.length(nextOp)
            if length >= opLength - offset:
                # Consuming the remainder of this op: advance to the next.
                length = opLength - offset
                self.index += 1
                self.offset = 0
            else:
                self.offset += length
            if iz.number(nextOp.get('delete')):
                return {'delete': length}
            else:
                retOp = {}
                if nextOp.get('attributes'):
                    retOp['attributes'] = nextOp['attributes']
                if iz.number(nextOp.get('retain')):
                    retOp['retain'] = length
                elif iz.string(nextOp.get('insert')):
                    retOp['insert'] = nextOp['insert'][offset:(offset + length)]
                else:
                    # offset should === 0, length should === 1
                    retOp['insert'] = nextOp['insert']
                return retOp
        return {'retain': Infinity}

    def peekLength(self):
        """Remaining length of the current op, or Infinity when exhausted."""
        try:
            # Should never return 0 if our index is being managed correctly
            return op.length(self.ops[self.index]) - self.offset
        except IndexError:
            return Infinity

    def peekType(self):
        """Type of the current op: 'delete', 'retain' or 'insert'.

        Defaults to 'retain' when the iterator is exhausted or the current
        op is falsy.
        """
        try:
            if self.ops[self.index]:
                if iz.number(self.ops[self.index].get('delete')):
                    return 'delete'
                elif iz.number(self.ops[self.index].get('retain')):
                    return 'retain'
                else:
                    return 'insert'
        except IndexError:
            pass
        return 'retain'
|
import processout
from processout.errors.notfounderror import NotFoundError
from processout.gatewayrequest import GatewayRequest
def main():
    """Smoke-test the ProcessOut sandbox API end to end.

    Exercises invoice create/fetch/capture, transaction expansion, and
    customer/subscription creation. Performs live network I/O against the
    sandbox using test credentials.
    """
    client = processout.ProcessOut("test-proj_gAO1Uu0ysZJvDuUpOGPkUBeE3pGalk3x",
        "key_sandbox_mah31RDFqcDxmaS7MvhDbJfDJvjtsFTB")
    # Create and fetch an invoice
    invoice = client.new_invoice({
        "name": "Test invoice",
        "amount": "9.99",
        "currency": "USD"
    }).create()
    assert invoice.id != "", "The invoice ID should not be empty"
    fetched = client.new_invoice().find(invoice.id)
    assert fetched.id != "", "The fetched invoice ID should not be empty"
    assert invoice.id == fetched.id, "The invoices ID should be equal"
    # Capture an invoice
    gr = GatewayRequest("sandbox", "POST", "https://processout.com?token=test-valid", {
        "Content-Type": "application/json"
    }, "")
    transaction = invoice.capture(gr.to_string())
    assert transaction.status == "completed", "The transaction status was not completed"
    # Expand the gateway configuration used on the transaction
    transaction = transaction.find(transaction.id, {
        "expand": ["gateway_configuration"]
    })
    assert transaction.gateway_configuration.id != "", "The transaction gateway configuration ID is empty"
    # Fetch the customers
    client.new_customer().all()
    # Create a subscription for a customer
    customer = client.new_customer().create()
    assert customer.id != "", "The created customer ID should not be empty"
    subscription = client.new_subscription({
        "customer_id": customer.id,
        "name": "Test subscription",
        "amount": "9.99",
        "currency": "USD",
        "interval": "1d"
    }).create()
    assert subscription.id != "", "The created subscription ID should not be empty"
    # Expand a customers' project and fetch gateways
    customer = client.new_customer().create({"expand": ["project"]})
    assert customer.project != None, "The customer project should be expanded"
    # Check error code
    # NOTE(review): the try body is empty, so this except can never fire;
    # presumably a failing customer lookup was meant here — confirm intent.
    try:
        pass
    except NotFoundError as err:
        assert err.code == "resource.customer.not-found", "The error code was invalid"

if __name__ == "__main__":
    main()
|
import os
from codecs import open
from setuptools import setup
# Read the long description; UTF-8 is specified explicitly so installation
# does not depend on the host locale (codecs.open supports `encoding=`).
with open(os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8") as f:
    long_description = f.read()

setup(
    name = 'electronicscalc',
    packages = ['electronicscalc'],
    version = '0.5',
    license='MIT',
    description = 'This is the package that houses various functions which can be used to calculate values for problems involving circuit design , value of the components which are needed to construct the circuit and so on',
    long_description = long_description,
    long_description_content_type = "text/markdown",
    # setuptools expects `author` to be a string; it was previously a list.
    author = 'Vishal Balaji Sivaraman, Vigneshwar K R',
    author_email = 'vb.sivaraman_official@yahoo.com',
    url = 'https://github.com/The-SocialLion/electronicscalc',
    download_url = 'https://github.com/The-SocialLion/electronicscalc/archive/master.zip',
    keywords = ['ELECTRONICS', 'CALCULATOR', 'ELECTRONIC-CALCULATIONS','CIRCUIT-DESIGN','PLOT'],
    install_requires=[
        'numpy',
        'pandas',
        'Matplotlib',
        'SciPy',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
)
|
from hashlib import sha1
from time import time
try:
    # Due: https://www.python.org/dev/peps/pep-0476
    # PEP 476 made certificate verification the default; this opt-out keeps
    # connections to servers with self-signed/invalid certificates working.
    import ssl
    try:
        _create_unverified_https_context = ssl._create_unverified_context
    except AttributeError:
        # Legacy Python that doesn't verify HTTPS certificates by default
        pass
    else:
        # Handle target environment that doesn't support HTTPS verification
        ssl._create_default_https_context = _create_unverified_https_context
except ImportError:
    pass
from erppeek_wst import ClientWST as Client
class Pool(object):
    """Factory for ERPPeek websocket clients."""
    def connect(self, server, db=None, user=None, password=None):
        """Return a new Client connected to *server* with the given credentials."""
        client = Client(server, db=db, user=user, password=password)
        return client
|
from collections import Counter
from utils import fileToDict
class Knn:
    """k-nearest-neighbours classifier over dict-shaped feature vectors."""
    def __init__(self, nn=2):
        self.xtrain = list()
        self.ytrain = list()
        self.nn = nn  # number of neighbours consulted per prediction

    def train(self, x: list, y: list):
        """Append training samples *x* (list of dicts) and their labels *y*."""
        self.xtrain += x
        self.ytrain += y

    def predict(self, xpred: dict):
        """Return (label, votes, Counter) for the nn nearest training points."""
        scored = []
        for features, label in zip(self.xtrain, self.ytrain):
            # Squared Euclidean distance over the training sample's keys.
            gap = sum((features[key] - xpred[key]) ** 2 for key in features)
            scored.append((gap, label))
        scored.sort()
        tally = Counter(label for _, label in scored[:self.nn])
        value, count = tally.most_common()[0]
        return value, count, tally
# Load three feature files per person ('namkang-1'..'namkang-3', etc.).
namkangs = [fileToDict(filename) for filename in list(
    map(lambda y: 'namkang-{y}'.format(y=y), range(1, 4)))]
velodys = [fileToDict(filename) for filename in list(
    map(lambda y: 'velody-{y}'.format(y=y), range(1, 4)))]
model = Knn()
# Train on the first two samples of each person; the label list must stay
# in sync with the order namkangs[:2] + velodys[:2].
model.train(namkangs[:2]+velodys[:2],
            ['namkang', 'namkang', 'velody', 'velody'])
# Classify the held-out third sample of each person.
print(model.predict(namkangs[2]))
print(model.predict(velodys[2]))
|
from rest_framework import serializers
from .models import DataDogConfiguration
class DataDogConfigurationSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a DataDog configuration's id, base URL and API key."""
    class Meta:
        model = DataDogConfiguration
        fields = ("id", "base_url", "api_key")
|
'''https://projecteuler.net/problem=6'''
'''Please see the README document for details'''
import math
def run(limit):
    """Print the difference between the square of the sum and the sum of
    squares of 1..limit, computed two ways (Project Euler 6).

    Uses single-argument print() calls so the module runs under both
    Python 2 and Python 3 (the original print statements were Py2-only).
    """
    result = 0
    print("Way #1")
    # Sum of all cross terms 2*i*j (i < j), which equals (sum)^2 - sum(i^2).
    for outer_counter in range(1, limit+1, 1):
        for inner_counter in range(outer_counter+1, limit+1, 1):
            result = result + 2*outer_counter*inner_counter
    print("Difference between (1^2+2^2+ ... +"+str(limit)+"^2) and (1+2+3 ... +"+str(limit)+")^2 is "+str(result))
    print("Way #2")
    sum_square = 0
    square_sum = 0
    for counter in range(1, limit+1, 1):
        sum_square = sum_square + math.pow((counter), 2)
        square_sum = square_sum + counter
    result = math.pow(square_sum, 2) - sum_square
    print(str(math.pow(square_sum, 2)) +"-"+ str(sum_square) +" is "+str(result))
    print("Difference between (1^2+2^2+ ... +"+str(limit)+"^2) and (1+2+3 ... +"+str(limit)+")^2 is "+str(result))

if __name__ == "__main__":
    # NOTE(review): run() is never invoked here — presumably run(100) was
    # intended; confirm before relying on this script's output.
    print("https://projecteuler.net/problem=6")
|
#!/usr/bin/env python
import sys
import cv2
import numpy as np
import matplotlib.pyplot as plt
import copy
import sift
class Blob_Detect:
def __init__(self, image):
self.image = image
self.minThreshold = 1
self.maxThreshold = 2000
# Filter by Area.
self.filterByArea = True
self.minArea = 1
# Filter by Circularity
self.filterByCircularity = True
self.minCircularity = 1
# Filter by Convexity
self.filterByConvexity = True
self.minConvexity = 0.5
# Filter by Inertia
self.filterByInertia = True
self.minInertiaRatio = 0.01
def minThresh_update(self, level):
self.minThreshold = level
self.refined_blob_detection(self.image.copy())
def maxThresh_update(self, level):
self.maxThreshold = level
self.refined_blob_detection(self.image.copy())
def minCircularity_update(self, level):
self.minCircularity = level / 100
self.refined_blob_detection(self.image.copy())
def minConvexity_update(self, level):
self.minConvexity = level / 100
self.refined_blob_detection(self.image.copy())
def minInertia_update(self, level):
self.minInertiaRatio = level / 100
self.refined_blob_detection(self.image.copy())
def minArea_update(self, level):
self.minArea = level
self.refined_blob_detection(self.image.copy())
def main(self, img):
# sift.feat_detection(img)
self.refined_blob_detection(img.copy())
cv2.namedWindow('image', cv2.WINDOW_AUTOSIZE)
cv2.createTrackbar( "Min Threshold", "image", 1, 1000, self.minThresh_update)
cv2.createTrackbar( "Max Threshold", "image", 1000, 2000, self.maxThresh_update)
cv2.createTrackbar( "Min Circularity", "image", 1, 100, self.minCircularity_update)
cv2.createTrackbar( "Min Convexity", "image", 1, 100, self.minConvexity_update)
cv2.createTrackbar( "Min Inertia", "image", 1, 100, self.minInertia_update)
cv2.createTrackbar( "Min Area", "image", 1, 100, self.minArea_update)
cv2.imshow('image', img)
cv2.waitKey()
0xFF & cv2.waitKey()
cv2.destroyAllWindows()
cv2.waitKey(1)
cv2.waitKey(1)
cv2.waitKey(1)
def refined_blob_detection(self, img):
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = self.minThreshold;
params.maxThreshold = self.maxThreshold;
# Filter by Area.
params.filterByArea = self.filterByArea
params.minArea = self.minArea
# Filter by Circularity
params.filterByCircularity = self.filterByCircularity
params.minCircularity = self.minCircularity
# Filter by Convexity
params.filterByConvexity = self.filterByConvexity
params.minConvexity = self.minConvexity
# Filter by Inertia
params.filterByInertia = self.filterByInertia
params.minInertiaRatio = self.minInertiaRatio
# Create a detector with the parameters
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3 :
detector = cv2.SimpleBlobDetector(params)
else :
detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs.
keypoints = detector.detect(img)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(img, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# # Show keypoints
cv2.imshow("image", im_with_keypoints)
# # cv2.waitKey(1)
# plt.imshow(im_with_keypoints)
def calibrate(self, img):
    """Blur and 5x-upscale *img*, store it on self, then launch the UI."""
    smoothed = cv2.GaussianBlur(img, (9, 9), 0)
    enlarged = cv2.resize(smoothed, (0, 0), fx=5, fy=5)
    self.image = enlarged
    self.main(enlarged)
if __name__ == '__main__':
    # Load the sample image as grayscale (flag 0), smooth and enlarge it,
    # sharpen, then run the interactive blob-detection tuner.
    # img = cv2.imread("masked.jpg", 0)
    img = cv2.imread("images/trial.cup_ring.jpg", 0)
    # img = cv2.resize(img, (0,0), fx = 5, fy = 5)
    img = cv2.GaussianBlur(img,(9,9),0)
    img = cv2.resize(img, (0,0), fx = 5, fy = 5)
    # return sharp
    img = sift.sharpen(img)  # sift: project-local helper module — TODO confirm import
    bd = Blob_Detect(img)
    bd.main(img)
from time import sleep

# ANSI SGR escape sequences: black (30) text on coloured backgrounds (4x).
c = (
    '\033[m',         # 0: reset (no colour)
    '\033[0;30;41m',  # 1: red background
    '\033[0;30;42m',  # 2: green background
    '\033[0;30;43m',  # 3: yellow background
    '\033[0;30;44m',  # 4: blue background
    '\033[0;30;45m',  # 5: purple background
    '\033[0;30;47m',  # 6: white background (BUG FIX: was 42 = green)
)


def titulo(txt):
    """Print *txt* inside a coloured three-line banner."""
    x = len(txt)
    print(f'{c[1]}~' * (x + 4), f'{c[0]}')
    print(f'{c[2]} {txt} {c[0]}')
    print(f'{c[1]}~' * (x + 4), f'{c[0]}')


def ajuda(ajuda):
    """Interactive help loop: show help() for each requested name until the
    user types FIM (Portuguese for 'end')."""
    while True:
        if ajuda.upper() != 'FIM':
            # BUG FIX: the terminating 'm' of the escape sequence had been
            # fused into the banner text ('m=-'), leaving the colour code
            # unterminated and corrupting the banner.
            print('\33[2;32;47m' + '=-' * len(ajuda))
            print(f'ACESSANDO O HELP {ajuda} ')
            print('=-' * len(ajuda))
            print('\33[2;33;44m')
            help(ajuda)
            print('\33[m')
            ajuda = input('digite ')
        else:
            print(f'{c[5]}VOLTE SEMPRE{c[0]}')
            break


if __name__ == '__main__':
    # Guarding the interactive part keeps the module importable/testable.
    titulo('HELP DO TUTU')
    # BUG FIX: was `ajuda()` — the function requires one argument.
    ajuda(input('digite '))
##
# Copyright © 2020, The Gust Framework Authors. All rights reserved.
#
# The Gust/Elide framework and tools, and all associated source or object computer code, except where otherwise noted,
# are licensed under the Zero Prosperity license, which is enclosed in this repository, in the file LICENSE.txt. Use of
# this code in object or source form requires and implies consent and agreement to that license in principle and
# practice. Source or object code not listing this header, or unless specified otherwise, remain the property of
# Elide LLC and its suppliers, if any. The intellectual and technical concepts contained herein are proprietary to
# Elide LLC and its suppliers and may be covered by U.S. and Foreign Patents, or patents in process, and are protected
# by trade secret and copyright law. Dissemination of this information, or reproduction of this material, in any form,
# is strictly forbidden except in adherence with assigned license requirements.
##
load(
"@rules_pkg//:pkg.bzl",
_pkg_tar = "pkg_tar",
_pkg_zip = "pkg_zip",
)
load(
"@io_bazel_rules_k8s//k8s:object.bzl",
_k8s_object = "k8s_object",
)
load(
"@io_bazel_rules_k8s//k8s:objects.bzl",
_k8s_objects = "k8s_objects",
)
def _k8s_config(name,
                kind = None,
                template = None,
                deps = None,
                **kwargs):
    """ Generate targets for a generic Kubernetes config file.

    Operates in one of two mutually-exclusive modes:
      - template mode: wraps a single Kubernetes YAML/JSON `template` of the
        given `kind` in a `k8s_object` target named `name`;
      - grouping mode: bundles the k8s_config targets listed in `deps` into a
        `k8s_objects` target plus `<name>-tar` / `<name>-zip` archives.

    In both modes a `<name>-files` filegroup collects the involved files.
    """
    if deps != None and template != None:
        fail("Cannot specify both `deps` and `template` for k8s_config. Please use `deps=` for groupings of " +
             "Kubernetes objects.")
    native.filegroup(
        name = "%s-files" % name,
        # Template mode contributes the single template; grouping mode the deps.
        srcs = (template and [template] or []) + (deps or []),
    )
    if deps != None:
        # Each dependency is expected to expose its own -tar/-zip targets,
        # which are nested into this group's archives.
        _pkg_tar(
            name = "%s-tar" % name,
            srcs = [":%s-files" % name],
            deps = [
                ("%s-tar" % n) for n in (deps or [])
            ],
        )
        # NOTE(review): `_pkg_zip` is loaded but never used — the "-zip"
        # target below is actually a tar (possibly because pkg_zip has no
        # `deps` attribute). Confirm intent before renaming or switching.
        _pkg_tar(
            name = "%s-zip" % name,
            srcs = [":%s-files" % name],
            deps = [
                ("%s-zip" % n) for n in (deps or [])
            ],
        )
        _k8s_objects(
            name = name,
            objects = deps,
            **kwargs
        )
    else:
        _k8s_object(
            name = name,
            kind = kind,
            template = template,
            **kwargs
        )

# Public alias for the private macro.
k8s_config = _k8s_config
|
import logging
logging.basicConfig(format="[%(asctime)s] %(message)s", datefmt="%m-%d %H:%M:%S")
import os
import time
import numpy as np
import tensorflow as tf
import core.data.cifar_data as cifar
import core.data.mnist_data as mnist
from network import Network
from statistic import Statistic
import utils as util

flags = tf.app.flags

# network
flags.DEFINE_integer("batch_size", 100, "size of a batch")
flags.DEFINE_integer("gated_conv_num_layers", 7, "the number of gated conv layers")
flags.DEFINE_integer("gated_conv_num_feature_maps", 16, "the number of input / output feature maps in gated conv layers")
flags.DEFINE_integer("output_conv_num_feature_maps", 32, "the number of output feature maps in output conv layers")
flags.DEFINE_integer("q_levels", 4, "the number of quantization levels in the output")

# training
flags.DEFINE_float("max_epoch", 100000, "maximum # of epochs")
flags.DEFINE_float("learning_rate", 1e-3, "learning rate")
flags.DEFINE_float("grad_clip", 1, "value of gradient to be used for clipping")

# data
flags.DEFINE_string("data", "mnist", "name of dataset [mnist, color-mnist, cifar]")
flags.DEFINE_string("runtime_base_dir", "./", "path of base directory for checkpoints, data_dir, logs and sample_dir")
flags.DEFINE_string("data_dir", "data", "name of data directory")
flags.DEFINE_string("sample_dir", "samples", "name of sample directory")

# generation
# BUG FIX: these two were declared with DEFINE_string but carry integer
# defaults and are consumed as an array index / image count downstream.
flags.DEFINE_integer("occlude_start_row", 18, "image row to start occlusion")
flags.DEFINE_integer("num_generated_images", 9, "number of images to generate")

# Debug
flags.DEFINE_boolean("is_train", True, "training or testing")
flags.DEFINE_string("log_level", "INFO", "log level [DEBUG, INFO, WARNING, ERROR, CRITICAL]")
flags.DEFINE_integer("random_seed", 123, "random seed for python")

conf = flags.FLAGS

# logging: root logger level is driven by the flag (accepts level names).
logger = logging.getLogger()
logger.setLevel(conf.log_level)

# random seed: seed both TF graph-level and NumPy RNGs for reproducibility.
tf.set_random_seed(conf.random_seed)
np.random.seed(conf.random_seed)
def validate_parameters(conf):
    """Raise ValueError unless conf.data names a supported dataset."""
    supported = ("mnist", "color-mnist", "cifar")
    if conf.data in supported:
        return
    raise ValueError("Configuration parameter 'data' is '{}'. Must be one of [mnist, color-mnist, cifar]"
                     .format(conf.data))
def preprocess(q_levels):
    """Build a (images, labels) -> (images, target_pixels) function that
    quantizes pixel values into q_levels integer bins used as targets."""
    def preprocess_fcn(images, labels):
        # Scale into bin indices, truncate, and clamp to the valid range.
        quantized = (images * q_levels).astype('int64')
        target_pixels = np.clip(quantized, 0, q_levels - 1)  # [N,H,W,C]
        return (images, target_pixels)
    return preprocess_fcn
def get_dataset(data_dir, q_levels):
    """Load the dataset selected by conf.data, wired to the quantizing
    preprocessor (validate_parameters guards against unknown names)."""
    prep = preprocess(q_levels)
    if conf.data == "mnist":
        dataset = mnist.get_dataset(data_dir, prep, reshape=False)
    elif conf.data == "color-mnist":
        dataset = mnist.get_colorized_dataset(data_dir, prep, reshape=False)
    elif conf.data == "cifar":
        dataset = cifar.get_dataset(data_dir, prep)
    return dataset
def generate_from_occluded(network, images):
    """Sample completions for images occluded from conf.occlude_start_row
    downwards. Returns (samples, occluded_inputs); the occluded copies get
    a white line marking where the occlusion starts."""
    start_row = conf.occlude_start_row
    n_images = conf.num_generated_images
    samples = network.generate_from_occluded(images, n_images, start_row)
    occluded = np.copy(images[0:n_images, :, :, :])
    occluded[:, start_row, :, :] = 255  # white marker line
    return samples, occluded
def train(dataset, network, stat, sample_dir):
    """Epoch loop: one pass over train and test data per epoch, followed by
    statistics tracking and sample generation.

    NOTE(review): xrange and un-floored `/` below mean this file targets
    Python 2 (the step counts must be ints for xrange).
    """
    initial_step = stat.get_t()  # resume epoch counter from checkpointed state
    logger.info("Training starts on epoch {}".format(initial_step))
    train_step_per_epoch = dataset.train.num_examples / conf.batch_size
    test_step_per_epoch = dataset.test.num_examples / conf.batch_size
    for epoch in range(initial_step, conf.max_epoch):
        start_time = time.time()
        # 1. train (with_update=True applies gradient updates)
        total_train_costs = []
        for _ in xrange(train_step_per_epoch):
            images = dataset.train.next_batch(conf.batch_size)
            cost = network.test(images, with_update=True)
            total_train_costs.append(cost)
        # 2. test (evaluation only, no updates)
        total_test_costs = []
        for _ in xrange(test_step_per_epoch):
            images = dataset.test.next_batch(conf.batch_size)
            cost = network.test(images, with_update=False)
            total_test_costs.append(cost)
        avg_train_cost, avg_test_cost = np.mean(total_train_costs), np.mean(total_test_costs)
        stat.on_step(avg_train_cost, avg_test_cost)
        # 3. generate samples: occluded inputs and their completions are
        # saved side by side, hence width * 2.
        # NOTE(review): next_batch is unpacked into two values here but used
        # as a single value above — verify the dataset API.
        images, _ = dataset.test.next_batch(conf.batch_size)
        samples, occluded = generate_from_occluded(network, images)
        util.save_images(np.concatenate((occluded, samples), axis=2),
                         dataset.height, dataset.width * 2, conf.num_generated_images, 1,
                         directory=sample_dir, prefix="epoch_%s" % epoch)
        logger.info("Epoch {}: {:.2f} seconds, avg train cost: {:.3f}, avg test cost: {:.3f}"
                    .format(epoch, (time.time() - start_time), avg_train_cost, avg_test_cost))
def generate(network, height, width, sample_dir):
    """Unconditionally sample a 10x10 grid of images and save it."""
    logger.info("Image generation starts")
    grid = network.generate()
    util.save_images(grid, height, width, 10, 10, directory=sample_dir)
def main(_):
    """Entry point: resolve directories, build the network, then train or
    sample depending on conf.is_train."""
    # The model directory name is derived from the config, excluding the
    # listed non-hyperparameter keys.
    model_dir = util.get_model_dir(conf,
        ['data_dir', 'sample_dir', 'max_epoch', 'test_step', 'save_step',
         'is_train', 'random_seed', 'log_level', 'display', 'runtime_base_dir',
         'occlude_start_row', 'num_generated_images'])
    util.preprocess_conf(conf)
    validate_parameters(conf)
    # color-mnist shares the raw mnist download directory.
    data = 'mnist' if conf.data == 'color-mnist' else conf.data
    DATA_DIR = os.path.join(conf.runtime_base_dir, conf.data_dir, data)
    SAMPLE_DIR = os.path.join(conf.runtime_base_dir, conf.sample_dir, conf.data, model_dir)
    util.check_and_create_dir(DATA_DIR)
    util.check_and_create_dir(SAMPLE_DIR)
    dataset = get_dataset(DATA_DIR, conf.q_levels)
    with tf.Session() as sess:
        network = Network(sess, conf, dataset.height, dataset.width, dataset.channels)
        # Statistic tracks training state; load_model restores checkpoints.
        stat = Statistic(sess, conf.data, conf.runtime_base_dir, model_dir, tf.trainable_variables())
        stat.load_model()
        if conf.is_train:
            train(dataset, network, stat, SAMPLE_DIR)
        else:
            generate(network, dataset.height, dataset.width, SAMPLE_DIR)

if __name__ == "__main__":
    tf.app.run()
|
import tensorflow as tf

# Build a minimal graph (placeholder, bias variable, residual-sum objective
# and an SGD minimizer with lr=0.4) and serialize its GraphDef to a
# human-readable text protobuf, 'simple.pbtxt', in the current directory.
graph = tf.Graph()
with graph.as_default():
    tf_ys = tf.placeholder("float", shape=[ None, 1 ])
    b = tf.Variable(tf.zeros([ 1 ]))
    # NOTE(review): sum(y - b) is an unbounded linear objective — presumably
    # this script only demonstrates graph serialization, not training.
    tf_res = tf.reduce_sum(tf_ys - b)
    ops = tf.train.GradientDescentOptimizer(0.4).minimize(tf_res)
tf.train.write_graph(graph.as_graph_def(), '.', 'simple.pbtxt', as_text=True)
|
#!/usr/bin/env python
# coding: utf-8
"""
Copyright (c) 2021 Tatsuya Matsuura
Released under the MIT License.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Simplified DES (S-DES): 8-bit block cipher with a 10-bit key.
# Requires Python 3.9+ (str.removeprefix).
plaintext = '10101001'  # plaintext, 8 bits
key = '0110011010'  # key, 10 bits
ciphertext = '10110010'  # ciphertext, 8 bits
mode = 0  # 0 = encrypt, 1 = decrypt
# Decryption runs the same circuit with the subkeys in reverse order.
if mode == 0:
    input = plaintext  # NOTE(review): shadows the builtin input()
elif mode == 1:
    input = ciphertext
# Key schedule: P10 permutation, one circular left shift per 5-bit half
# (LS-1) -> P8 gives K1; two further shifts (LS-2) -> P8 gives K2.
p10 = [key[2], key[4], key[1], key[6], key[3], key[9], key[0], key[8], key[7], key[5]]
ls1 = [p10[1], p10[2], p10[3], p10[4], p10[0], p10[6], p10[7], p10[8], p10[9], p10[5]]
k1 = "".join([ls1[5], ls1[2], ls1[6], ls1[3], ls1[7], ls1[4], ls1[9], ls1[8]])
ls2 = [ls1[2], ls1[3], ls1[4], ls1[0], ls1[1], ls1[7], ls1[8], ls1[9], ls1[5], ls1[6]]
k2 = "".join([ls2[5], ls2[2], ls2[6], ls2[3], ls2[7], ls2[4], ls2[9], ls2[8]])
#print(k1)
#print(k2)
# Initial permutation (IP) of the input block, then expansion/permutation
# (E/P) of the right half (ip[4..7]) to 8 bits.
ip = [input[1], input[5], input[2], input[0], input[3], input[7], input[4], input[6]]
ep = "".join([ip[7], ip[4], ip[5], ip[6], ip[5], ip[6], ip[7], ip[4]])
# Round 1: XOR the expanded half with K1 (encrypt) or K2 (decrypt).
if mode == 0:
    epkxor = bin(int(ep, 2) ^ int(k1, 2)).removeprefix('0b').zfill(8)
elif mode == 1:
    epkxor = bin(int(ep, 2) ^ int(k2, 2)).removeprefix('0b').zfill(8)
# S-box addressing: bits (0,3) select the row, bits (1,2) the column.
epl = [int("".join([epkxor[0], epkxor[3]]), 2), int("".join([epkxor[1], epkxor[2]]), 2)]
epr = [int("".join([epkxor[4], epkxor[7]]), 2), int("".join([epkxor[5], epkxor[6]]), 2)]
# S-boxes S0 and S1 (each maps 4 bits to a 2-bit output).
s0 = [[1, 0, 3, 2],
      [3, 2, 1, 0],
      [0, 2, 1, 3],
      [3, 1, 3, 2]]
s1 = [[0, 1, 2, 3],
      [2, 0, 1, 3],
      [3, 0, 1, 0],
      [2, 1, 0, 3]]
s0s1 = "".join([bin(s0[epl[0]][epl[1]]).removeprefix('0b').zfill(2), bin(s1[epr[0]][epr[1]]).removeprefix('0b').zfill(2)])
# P4 permutation of the S-box output, XOR into the left half, then swap
# the halves (SW) for round 2.
p4 = "".join([s0s1[1], s0s1[3], s0s1[2], s0s1[0]])
p4xor = bin(int(p4, 2) ^ int("".join([ip[0], ip[1], ip[2], ip[3]]), 2)).removeprefix('0b').zfill(4)
sw = "".join([ip[4], ip[5], ip[6], ip[7]]) + p4xor
#print(ip)
#print(ep)
#print(epkxor)
#print(s0s1)
#print(p4)
#print(p4xor)
#print(sw)
# Round 2: same fk function on the swapped halves with the other subkey.
ep = "".join([sw[7], sw[4], sw[5], sw[6], sw[5], sw[6], sw[7], sw[4]])
if mode == 0:
    epkxor = bin(int(ep, 2) ^ int(k2, 2)).removeprefix('0b').zfill(8)
elif mode == 1:
    epkxor = bin(int(ep, 2) ^ int(k1, 2)).removeprefix('0b').zfill(8)
epl = [int("".join([epkxor[0], epkxor[3]]), 2), int("".join([epkxor[1], epkxor[2]]), 2)]
epr = [int("".join([epkxor[4], epkxor[7]]), 2), int("".join([epkxor[5], epkxor[6]]), 2)]
s0s1 = "".join([bin(s0[epl[0]][epl[1]]).removeprefix('0b').zfill(2), bin(s1[epr[0]][epr[1]]).removeprefix('0b').zfill(2)])
p4 = "".join([s0s1[1], s0s1[3], s0s1[2], s0s1[0]])
p4xor = bin(int(p4, 2) ^ int("".join([sw[0], sw[1], sw[2], sw[3]]), 2)).removeprefix('0b').zfill(4)
result = p4xor + "".join([sw[4], sw[5], sw[6], sw[7]])
# Inverse initial permutation (IP^-1) produces the final 8-bit output.
ipm1 = "".join([result[3], result[0], result[2], result[4], result[6], result[1], result[7], result[5]])
#print(ep)
#print(epkxor)
#print(s0s1)
#print(p4)
#print(p4xor)
#print(result)
print(ipm1)
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import hashlib
import os

app = Flask(__name__)
# The database URL comes from the environment; raises KeyError if DATABASE
# is unset (fail-fast at import time).
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE']
# Recycle pooled connections after 30 s — presumably to avoid stale
# connections being dropped by the database server; confirm against the DB.
app.config['SQLALCHEMY_POOL_RECYCLE'] = 30;
db = SQLAlchemy(app)
class User(db.Model):
    """Account record keyed by a unique username."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True, nullable=False)
    # Hex-encoded SHA-256 digest of the password (64 chars).
    # NOTE(review): unsalted SHA-256 is not a suitable password hash —
    # consider PBKDF2/bcrypt/scrypt; changing it requires migrating stored
    # values and any verification code elsewhere.
    password = db.Column(db.String(64), nullable=False)
    def __init__(self, username, password):
        """Store the username and the SHA-256 digest of *password*."""
        self.username = username
        self.password = hashlib.sha256(password.encode()).hexdigest()
    def __repr__(self):
        return '<User %r>' % self.username
class Posts(db.Model):
    """A timestamped 'tsurami' reading posted by a user, with an optional
    short annotation."""
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): plain integer, not a ForeignKey to User.id — confirm
    # whether referential integrity is intended.
    user_id = db.Column(db.Integer,nullable=False)
    # Fixed-point value with 9 decimal places (NUMERIC(10, 9)).
    tsurami = db.Column(db.Numeric(10,9), nullable=False)
    timestamp = db.Column(db.DateTime, nullable=False)
    annotation = db.Column(db.String(140))  # optional, tweet-length
    def __init__(self, user_id, tsurami, timestamp):
        # annotation is left NULL; set it separately if needed.
        self.user_id = user_id
        self.tsurami = tsurami
        self.timestamp = timestamp
    def __repr__(self):
        return '<ID:{0} User:{1} Tsurami:{2}>'.format(self.id, self.user_id, self.tsurami)
class Connection(db.Model):
    """Directed follow/link relation between two users, stored as a
    composite primary key (user_id, target_id)."""
    user_id = db.Column(db.Integer,nullable=False, primary_key=True, autoincrement=False)
    target_id = db.Column(db.Integer,nullable=False, primary_key=True, autoincrement=False)
    def __init__(self, user_id, target_id):
        self.user_id = user_id
        self.target_id = target_id
|
import os
from symbolic import Unreal4Crash
def test_unreal_crash_files(res_path):
    """The fixture crash bundle unpacks into the expected four files."""
    path = os.path.join(res_path, 'unreal', 'unreal_crash')
    with open(path, mode='rb') as crash_file:
        unreal_crash = Unreal4Crash.from_bytes(crash_file.read())
    files = list(unreal_crash.files())
    expected = [
        ("CrashContext.runtime-xml", "context", 6545),
        ("CrashReportClient.ini", "config", 204),
        ("MyProject.log", "log", 21143),
        ("UE4Minidump.dmp", "minidump", 410700),
    ]
    assert len(files) == len(expected)
    for entry, (name, file_type, size) in zip(files, expected):
        assert entry.name == name
        assert entry.type == file_type
        assert len(entry.open_stream().read()) == size
    # The minidump stream also reports its size up front.
    stream = files[3].open_stream()
    assert stream.size == 410700
def test_unreal_crash_context(res_path):
    """The parsed crash context exposes the expected crash GUID."""
    path = os.path.join(res_path, 'unreal', 'unreal_crash')
    with open(path, mode='rb') as crash_file:
        unreal_crash = Unreal4Crash.from_bytes(crash_file.read())
    context = unreal_crash.get_context()
    expected_guid = "UE4CC-Windows-379993BB42BD8FBED67986857D8844B5_0000"
    assert context['runtime_properties']['crash_guid'] == expected_guid
def test_unreal_crash_logs(res_path):
    """Log extraction yields 100 entries; spot-check the first and last."""
    path = os.path.join(res_path, 'unreal', 'unreal_crash')
    with open(path, mode='rb') as crash_file:
        unreal_crash = Unreal4Crash.from_bytes(crash_file.read())
    logs = unreal_crash.get_logs()
    assert len(logs) == 100
    first, last = logs[0], logs[99]
    assert first['timestamp'] == "2018-10-29T16:56:37Z"
    assert first['component'] == "LogD3D11RHI"
    assert first['message'] == "Chosen D3D11 Adapter: 0"
    assert last['timestamp'] == "2018-10-29T16:56:38Z"
    assert last['component'] == "LogWindows"
    assert last['message'] == "Windows GetLastError: The operation completed successfully. (0)"
|
# Generated by Django 4.0.3 on 2022-03-20 20:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the myneighborhood app, auto-generated by Django
    4.0.3 on 2022-03-20.

    Creates NeighbourHood, Profile, Post and Business, then adds the
    NeighbourHood.admin FK afterwards because it references Profile, which
    must exist first. Generated migrations are normally left untouched —
    schema changes belong in new migrations.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='NeighbourHood',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('location', models.CharField(max_length=60)),
                ('hood_logo', models.ImageField(upload_to='images/')),
                ('description', models.TextField()),
                ('health_tell', models.IntegerField(blank=True, null=True)),
                ('police_number', models.IntegerField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=80)),
                ('bio', models.TextField(blank=True, max_length=254)),
                ('profile_picture', models.ImageField(default='default.png', upload_to='images/')),
                ('location', models.CharField(blank=True, max_length=50, null=True)),
                ('neighbourhood', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='members', to='myneighborhood.neighbourhood')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=120, null=True)),
                ('post', models.TextField()),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('hood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hood_post', to='myneighborhood.neighbourhood')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_owner', to='myneighborhood.profile')),
            ],
        ),
        migrations.AddField(
            model_name='neighbourhood',
            name='admin',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hood', to='myneighborhood.profile'),
        ),
        migrations.CreateModel(
            name='Business',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120)),
                ('email', models.EmailField(max_length=254)),
                ('description', models.TextField(blank=True)),
                ('neighbourhood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='business', to='myneighborhood.neighbourhood')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='owner', to='myneighborhood.profile')),
            ],
        ),
    ]
|
'''
Created on 06 May 2010
@author: EWillemse
'''
import routes_neighbours_IF_all as RN
import time
import transformSolution
import reduceNumberTrips_IF as RED
import refitIFs
import testSolutions
import gen_neighbours_beta1
from copy import deepcopy
#===============================================================================
#
#===============================================================================
class ImplementLocalSearchIFs(object):
    """Local-search support for solutions with intermediate facilities (IFs).

    Converts between the nested solution dictionary (keyed 1..n per route,
    plus a 'Total cost' entry) and flat per-subtrip lists, and applies a
    single neighbourhood move with capacity/trip-length feasibility checks.
    """

    def __init__(self, info):
        """Cache problem data from *info* (capacities, costs, depot arc)."""
        self.info = info
        self.Capacity = info.capacity
        self.serviceCostD = info.serveCostD
        self.SP = info.spDistanceD
        self.depot = self.info.depotArc
        self.maxTrip = self.info.maxTrip
        self.captureStats = True  # record per-neighbourhood statistics

    def makeRouteList(self, solution):
        """Flatten the solution dict into a list of subtrips.

        Returns (routes, routeIndex) where routeIndex[k] is
        (route, subtrip, isLastSubtrip). The trailing depot arc is stripped
        from each route's final subtrip and re-appended when rebuilding.
        """
        routes = solution.keys()
        nRoutes = len(routes) - 1  # one key is 'Total cost'
        routes = []
        k = -1
        routeIndex = {}
        for routeIkey in range(nRoutes):
            routesI = solution[routeIkey + 1]['Solution']
            for subRouteIkey in range(len(routesI)):
                k += 1
                if subRouteIkey == len(routesI) - 1:
                    # Final subtrip ends at the depot: drop that arc here.
                    subRouteI = routesI[subRouteIkey][:-1]
                    routeIndex[k] = (routeIkey, subRouteIkey, True)
                else:
                    subRouteI = routesI[subRouteIkey]
                    routeIndex[k] = (routeIkey, subRouteIkey, False)
                routes.append(subRouteI)
        return (routes, routeIndex)

    def makeLoadList(self, solution):
        """Return the flat list of subtrip loads, in subtrip order."""
        routes = solution.keys()
        nRoutes = len(routes) - 1
        loads = []
        k = -1
        routeLoadIndex = {}
        for routeIkey in range(nRoutes):
            loadI = solution[routeIkey + 1]['Load']
            for subRouteIkey in range(len(loadI)):
                k += 1
                subLoadI = loadI[subRouteIkey]
                routeLoadIndex[k] = (routeIkey, subRouteIkey)
                loads.append(subLoadI)
        return (loads)

    def makeCostList(self, solution):
        """Return (costs, tripCosts): flat subtrip costs plus one total
        cost per route."""
        routes = solution.keys()
        nRoutes = len(routes) - 1
        routeCostIndex = {}
        costs = []
        tripCosts = []
        k = -1
        for routeIkey in range(nRoutes):
            costI = solution[routeIkey + 1]['Subtrip cost']
            for subRouteIkey in range(len(costI)):
                k += 1
                subCostI = costI[subRouteIkey]
                routeCostIndex[k] = (routeIkey, subRouteIkey)
                costs.append(subCostI)
            tripCosts.append(solution[routeIkey + 1]['Cost'])
        return (costs, tripCosts)

    def makeServiceCostList(self, solution):
        """Return each subtrip's total service cost (sum of the service
        costs of its arcs)."""
        nRoutes = len(solution.keys()) - 1
        routeServiceCostIndex = {}
        serviceCosts = []
        k = -1
        for routeIkey in range(nRoutes):
            j = -1
            for subRouteI in solution[routeIkey + 1]['Solution']:
                j += 1
                k += 1
                serviceCostTemp = 0
                for arc in subRouteI:
                    serviceCostTemp += self.serviceCostD[arc]
                serviceCosts.append(serviceCostTemp)
                routeServiceCostIndex[k] = (routeIkey, j)
        return (serviceCosts)

    def makeSolutionList(self, solution):
        """Flatten routes, loads, costs and service costs in one call."""
        (routes, routeIndex) = self.makeRouteList(solution)
        (loads) = self.makeLoadList(solution)
        (costs, tripCosts) = self.makeCostList(solution)
        (serviceCosts) = self.makeServiceCostList(solution)
        return ((routes, loads, costs, serviceCosts, routeIndex))

    def makeSolutionDictionaryIFs(self, solution, routes, loads, costs, routeIndex, tripCost):
        """Write the flat lists back into the nested solution dictionary,
        re-appending the depot arc to each route's final subtrip."""
        nRoutes = len(routes)
        for i in range(nRoutes):
            routeI = routeIndex[i][0]
            subRouteI = routeIndex[i][1]
            if routeIndex[i][2]:
                solution[routeI + 1]['Solution'][subRouteI] = deepcopy(routes[i]) + [self.depot]
            else:
                solution[routeI + 1]['Solution'][subRouteI] = deepcopy(routes[i])
            solution[routeI + 1]['Load'][subRouteI] = loads[i]
            solution[routeI + 1]['Subtrip cost'][subRouteI] = costs[i]
        for i in range(len(tripCost)):
            solution[i + 1]['Cost'] = tripCost[i]
        solution['Total cost'] = sum(costs)
        return (solution)

    def changeSingleSolution(self, solutionOld, neighbour, routeIndex):
        """Apply one neighbourhood move to a deep copy of *solutionOld*.

        neighbour = (saving, modifications, ..., moveType, scope) where
        scope is 'oneRoute' or 'twoRoutes'. Returns (solution, legalMove);
        legalMove is False when a capacity or max-trip constraint is broken.
        """
        solution = deepcopy(solutionOld)
        legalMove = True
        if self.captureStats:
            self.neighbourhoodSearchStats2[neighbour[-2]]['Executions'] += 1
            self.neighbourhoodSearchStats2[neighbour[-2]]['Total saving'] += neighbour[0]
        if neighbour[-1] == 'oneRoute':
            modifications = neighbour[1]
            routeItemp = modifications['routes']
            routeI = routeIndex[routeItemp][0]
            subRouteI = routeIndex[routeItemp][1]
            routeI += 1  # solution dict is 1-indexed
            if routeIndex[routeItemp][2]:
                solution[routeI]['Solution'][subRouteI] = modifications['modifiedRoutes'] + [self.depot]
            else:
                solution[routeI]['Solution'][subRouteI] = modifications['modifiedRoutes']
            costIdelta = modifications['costDelta']
            solution[routeI]['Subtrip cost'][subRouteI] += costIdelta
            solution[routeI]['Cost'] += costIdelta
            solution['Total cost'] += neighbour[0]
        elif neighbour[-1] == 'twoRoutes':
            bestNeighbour = neighbour
            modifications = bestNeighbour[1]
            (routeItemp, routeJtemp) = modifications['routes']
            routeI = routeIndex[routeItemp][0]
            subRouteI = routeIndex[routeItemp][1]
            routeJ = routeIndex[routeJtemp][0]
            subRouteJ = routeIndex[routeJtemp][1]
            routeI += 1
            routeJ += 1
            if routeIndex[routeItemp][2]:
                solution[routeI]['Solution'][subRouteI] = modifications['modifiedRoutes'][0] + [self.depot]
            else:
                solution[routeI]['Solution'][subRouteI] = modifications['modifiedRoutes'][0]
            if routeIndex[routeJtemp][2]:
                solution[routeJ]['Solution'][subRouteJ] = modifications['modifiedRoutes'][1] + [self.depot]
            else:
                solution[routeJ]['Solution'][subRouteJ] = modifications['modifiedRoutes'][1]
            (costIdelta, costJdelta) = modifications['costDelta']
            solution[routeI]['Cost'] += costIdelta
            solution[routeJ]['Cost'] += costJdelta
            solution[routeI]['Subtrip cost'][subRouteI] += costIdelta
            solution[routeJ]['Subtrip cost'][subRouteJ] += costJdelta
            (loadIdelta, loadJdelta) = modifications['loadDelta']
            solution[routeI]['Load'][subRouteI] += loadIdelta
            solution[routeJ]['Load'][subRouteJ] += loadJdelta
            # Capacity feasibility.
            if (solution[routeI]['Load'][subRouteI] > self.Capacity) | (solution[routeJ]['Load'][subRouteJ] > self.Capacity):
                legalMove = False
            # Trip-duration feasibility.
            # BUG FIX: the second clause used to read
            # ((solution[routeJ]['Cost'] > self.maxTrip) > self.maxTrip),
            # comparing a boolean to maxTrip, so route J's duration was
            # effectively never checked.
            if (solution[routeI]['Cost'] > self.maxTrip) | (solution[routeJ]['Cost'] > self.maxTrip):
                legalMove = False
            solution['Total cost'] += bestNeighbour[0]
        return (solution, legalMove)
#===============================================================================
#
#===============================================================================
class checkAllMoves(object):
    """Screens a sorted list of candidate neighbourhood moves and accepts a
    compatible, feasible subset that can be applied together in one pass.

    Accepted moves must not touch tabu (already-modified) positions, must
    keep subtrip loads within vehicle capacity, and must keep route
    durations within the maximum trip length.

    NOTE(review): methods here call self.makeSolutionList, which this class
    does not define — presumably it is combined (inheritance/mixin) with a
    class such as ImplementLocalSearchIFs; confirm before stand-alone use.
    Bitwise &/| on booleans (no short-circuit) is used throughout.
    """
    def __init__(self, info):
        self.info = info
        self.Capacity = self.info.capacity
        self.maxTrip = self.info.maxTrip
        self.captureStats = True   # record per-neighbourhood statistics
        self.printOutput = True
        self.outputLines = []

    def captureNeighbourStats(self, deltaChange, statType):
        # Statistics for moves that were actually accepted.
        if self.captureStats:
            self.neighbourhoodSearchStats[statType]['Executions'] += 1
            self.neighbourhoodSearchStats[statType]['Total saving'] += deltaChange

    def captureNeighbourStats2(self, deltaChange, statType):
        # Statistics for all candidate moves, accepted or not.
        if self.captureStats:
            self.neighbourhoodSearchStats2[statType]['Executions'] += 1
            self.neighbourhoodSearchStats2[statType]['Total saving'] += deltaChange

    def checkSingleRouteMoves(self, move, tabuMovePositions):
        """Return True when a one-route move touches no tabu position.

        Double-arc variants also guard the position following pI (and pJ for
        exchanges), because those moves span two consecutive arcs.
        """
        makeSingleRouteMoveCheck = False
        moveInfo = move[1]
        routeI = moveInfo['routes']
        if (move[-2] == 'RemoveInsertAllArcs') | (move[-2] == 'ExchangeAllArcs'):
            (pI, pJ) = moveInfo['pos']
            if (pI not in tabuMovePositions[routeI]) & (pJ not in tabuMovePositions[routeI]):
                makeSingleRouteMoveCheck = True
        elif (move[-2] == 'RemoveInsertAllDoubleArcs'):
            (pI, pJ) = moveInfo['pos']
            if (pI not in tabuMovePositions[routeI]) & ((pI + 1) not in tabuMovePositions[routeI]) & (pJ not in tabuMovePositions[routeI]):
                makeSingleRouteMoveCheck = True
        elif (move[-2] == 'ExchangeAllDoubleArcs'):
            (pI, pJ) = moveInfo['pos']
            if (pI not in tabuMovePositions[routeI]) & ((pI + 1) not in tabuMovePositions[routeI]) & (pJ not in tabuMovePositions[routeI]) & ((pJ + 1) not in tabuMovePositions[routeI]):
                makeSingleRouteMoveCheck = True
        return (makeSingleRouteMoveCheck)

    def checkTwoRouteMoves(self, move, tabuMovePositions):
        """Return True when a two-route move touches no tabu position in
        either of its routes (double-arc variants guard the follow-up
        positions as in checkSingleRouteMoves)."""
        makeTwoRouteMoveCheck = False
        moveInfo = move[1]
        (routeI, routeJ) = moveInfo['routes']
        if (move[-2] == 'RemoveInsertAllArcsAllRoutes') | (move[-2] == 'ExchangeAllArcsAllRoutes'):
            (pI, pJ) = moveInfo['pos']
            if (pI not in tabuMovePositions[routeI]) & (pJ not in tabuMovePositions[routeJ]):
                makeTwoRouteMoveCheck = True
        elif (move[-2] == 'RemoveInsertAllDoubleArcsAllRoutes'):
            (pI, pJ) = moveInfo['pos']
            if (pI not in tabuMovePositions[routeI]) & ((pI+1) not in tabuMovePositions[routeI]) & (pJ not in tabuMovePositions[routeJ]):
                makeTwoRouteMoveCheck = True
        elif (move[-2] == 'ExchangeAllDoubleArcsAllRoutes'):
            (pI, pJ) = moveInfo['pos']
            if (pI not in tabuMovePositions[routeI]) & ((pI+1) not in tabuMovePositions[routeI]) & (pJ not in tabuMovePositions[routeJ]) & ((pJ+1) not in tabuMovePositions[routeJ]):
                makeTwoRouteMoveCheck = True
        return (makeTwoRouteMoveCheck)

    def updateMoveInfo(self, input):
        """Apply an accepted two-route move's deltas to the running load,
        cost, trip-cost and service-cost tallies, and mark its positions
        tabu. NOTE(review): parameter name shadows the builtin input()."""
        (moveInfo, routeIndex, tempLoads, tempCosts, tempTripCosts, tempServiceCosts, tabuMovePositions, totalCostChange) = input
        moveInfo = moveInfo
        (routeI, routeJ) = moveInfo['routes']
        # Map flat subtrip indices back to their owning routes.
        actualI = routeIndex[routeI][0]
        actualJ = routeIndex[routeJ][0]
        (deltaLoadI, deltaLoadJ) = moveInfo['loadDelta']
        (costDeltaI, costDeltaJ) = moveInfo['costDelta']
        (serviceCostDeltaI, serviceCostDeltaJ) = moveInfo['serviceDelta']
        tempLoads[routeI] += deltaLoadI
        tempLoads[routeJ] += deltaLoadJ
        tempCosts[routeI] += costDeltaI
        tempCosts[routeJ] += costDeltaJ
        tempTripCosts[actualI] += costDeltaI
        tempTripCosts[actualJ] += costDeltaJ
        tempServiceCosts[routeI] += serviceCostDeltaI
        tempServiceCosts[routeJ] += serviceCostDeltaJ
        tabuMovePositions[routeI] += moveInfo['tabus']['I']
        tabuMovePositions[routeJ] += moveInfo['tabus']['J']
        totalCostChange += costDeltaI + costDeltaJ
        return (tempLoads, tempCosts, tempTripCosts, tempServiceCosts, tabuMovePositions, totalCostChange)

    def chechMoves(self, neighbourhoods, solution, routeIndex, tripCosts):
        """Select the subset of candidate moves that can be applied together.

        NOTE(review): name 'chechMoves' is a typo for 'checkMoves', kept for
        interface compatibility with existing callers.

        Returns (movesToMake, tempLoads, tempCosts, tempServiceCosts,
        totalCostChange, tempTripCosts) reflecting all accepted moves.
        """
        (routes, loads, costs, serviceCosts, routeIndex) = self.makeSolutionList(solution)
        nRoutes = len(loads)
        # print('{250}',neighbourhoods)
        # print('{251}',len(neighbourhoods))
        neighbourhoods.sort()  # best (most negative saving) moves first
        tabuMovePositions = {}.fromkeys(range(nRoutes))
        for i in range(nRoutes):
            tabuMovePositions[i] = []
        movesToMake = []
        # Work on copies so rejected moves leave the tallies untouched.
        tempTripCosts = deepcopy(tripCosts)
        tempCosts = deepcopy(costs)
        tempLoads = deepcopy(loads)
        tempServiceCosts = deepcopy(serviceCosts)
        totalCostChange = 0
        for move in neighbourhoods:
            self.captureNeighbourStats2(move[0], move[-2])
            if move[-1] == 'oneRoute':
                moveInfo = move[1]
                routeI = moveInfo['routes']
                actualI = routeIndex[routeI][0]
                costDeltaI = moveInfo['costDelta']
                if self.checkSingleRouteMoves(move, tabuMovePositions):
                    movesToMake.append(move)
                    tabuMovePositions[routeI] += moveInfo['tabus']
                    tempCosts[routeI] += costDeltaI
                    totalCostChange += costDeltaI
                    tempTripCosts[actualI] += costDeltaI
                    self.captureNeighbourStats(move[0], move[-2])
            elif move[-1] == 'twoRoutes':
                moveInfo = move[1]
                (routeI, routeJ) = moveInfo['routes']
                actualI = routeIndex[routeI][0]
                actualJ = routeIndex[routeJ][0]
                (deltaLoadI, deltaLoadJ) = moveInfo['loadDelta']
                (costDeltaI, costDeltaJ) = moveInfo['costDelta']
                # Same-route case: only the capacity check is applied.
                # NOTE(review): the max-trip check is skipped here —
                # presumably because both cost deltas land on one route;
                # confirm this is intentional.
                if (actualI == actualJ) & (tempLoads[routeI] + deltaLoadI <= self.Capacity) & (tempLoads[routeJ] + deltaLoadJ <= self.Capacity):
                    if self.checkTwoRouteMoves(move, tabuMovePositions):
                        movesToMake.append(move)
                        input = (moveInfo, routeIndex, tempLoads, tempCosts, tempTripCosts, tempServiceCosts, tabuMovePositions, totalCostChange)
                        output = self.updateMoveInfo(input)
                        (tempLoads, tempCosts, tempTripCosts, tempServiceCosts, tabuMovePositions, totalCostChange) = output
                        self.captureNeighbourStats(move[0], move[-2])
                # Cross-route case: capacity AND max-trip checks on both routes.
                elif (tempLoads[routeI] + deltaLoadI <= self.Capacity) & (tempLoads[routeJ] + deltaLoadJ <= self.Capacity) & (tempTripCosts[actualI] + costDeltaI <= self.maxTrip) & (tempTripCosts[actualJ] + costDeltaJ <= self.maxTrip):
                    if self.checkTwoRouteMoves(move, tabuMovePositions):
                        movesToMake.append(move)
                        input = (moveInfo, routeIndex, tempLoads, tempCosts, tempTripCosts, tempServiceCosts, tabuMovePositions, totalCostChange)
                        output = self.updateMoveInfo(input)
                        (tempLoads, tempCosts, tempTripCosts, tempServiceCosts, tabuMovePositions, totalCostChange) = output
                        self.captureNeighbourStats(move[0], move[-2])
        return (movesToMake,tempLoads,tempCosts,tempServiceCosts,totalCostChange, tempTripCosts)
#===============================================================================
#
#===============================================================================
class makeAllMoves(object):
def __init__(self, info):
    # Problem data plus output-capture settings used by the move methods.
    self.info = info
    self.printOutput = True
    self.outputLines = []
def singleRouteMove(self, move, routeI, iPosActual, jPosActual, positionList):
    """Apply a one-route move to *routeI* and update *positionList*.

    positionList maps original arc positions to current positions: after a
    remove-insert, positions between the two endpoints shift by one (two for
    double-arc moves) so later moves can still be addressed by their
    original positions. Exchanges do not shift positions.

    NOTE(review): self.RNsingleRoute is not set in this class's __init__ —
    presumably assigned elsewhere before use; confirm.
    """
    iPos = positionList[iPosActual]
    jPos = positionList[jPosActual]
    if move[-2] == 'RemoveInsertAllArcs':
        routeI = self.RNsingleRoute.removeInsertGivenArcs(routeI, iPos, jPos)
        if iPos < jPos:
            # Arc moved forward: intermediate positions shift back by one.
            for i in range(iPosActual+1,jPosActual):
                positionList[i] += -1
        elif iPos > jPos:
            # Arc moved backward: intermediate positions shift forward.
            for i in range(jPosActual, iPosActual-1):
                positionList[i] += 1
    elif move[-2] == 'ExchangeAllArcs':
        routeI = self.RNsingleRoute.exchangeGivenArcs(routeI, iPos, jPos)
    elif move[-2] == 'RemoveInsertAllDoubleArcs':
        routeI = self.RNsingleRoute.removeInsertGivenDoubleArcs(routeI, iPos, jPos)
        if iPos < jPos:
            # Two consecutive arcs moved: shifts are of magnitude two.
            for i in range(iPosActual+1,jPosActual+1):
                positionList[i] += -2
        elif iPos > jPos:
            for i in range(jPosActual, iPosActual-1):
                positionList[i] += 2
    elif move[-2] == 'ExchangeAllDoubleArcs':
        routeI = self.RNsingleRoute.exchangeGivenDoubleArcs(routeI, iPos, jPos)
    return(routeI, positionList)
def doubleRouteMove(self, move, routeI, routeJ, iPosActual, jPosActual, positionListI, positionListJ):
posI, posJ = positionListI[iPosActual], positionListJ[jPosActual]
if (move[-2] == 'RemoveInsertAllArcsAllRoutes'):
(routeI, routeJ) = self.RNmultiRoute.removeInsertGivenArcsTwoRoutes(routeI, routeJ, posI, posJ)
for i in range(iPosActual, len(positionListI)):
positionListI[i] += -1
for i in range(jPosActual, len(positionListJ)):
positionListJ[i] += 1
elif (move[-2] == 'ExchangeAllArcsAllRoutes'):
(routeI, routeJ) = self.RNmultiRoute.exchangeGivenArcsTwoRoutes(routeI, routeJ, posI, posJ)
elif (move[-2] == 'RemoveInsertAllDoubleArcsAllRoutes'):
(routeI, routeJ) = self.RNmultiRoute.removeInsertGivenDoubleArcsTwoRoutes(routeI, routeJ, posI, posJ)
for i in range(iPosActual+1, len(positionListI)):
positionListI[i] += -2
for i in range(jPosActual, len(positionListJ)):
positionListJ[i] += 2
elif (move[-2] == 'ExchangeAllDoubleArcsAllRoutes'):
(routeI, routeJ) = self.RNmultiRoute.exchangeGivenDoubleArcsTwoRoutes(routeI, routeJ, posI, posJ)
return(routeI, routeJ, positionListI, positionListJ)
def makeMoves(self, solution, movesToMake):
format = '%s%*d%*s%*d%*s%s'
(routes, loads, costs, serviceCosts, routeIndex) = self.makeSolutionList(solution)
self.temp = (loads, costs, serviceCosts)
nRoutes = len(routes)
positionDict = {}.fromkeys(range(nRoutes))
for i in range(nRoutes):
positionDict[i] = range(len(routes[i]))
itteration = 0
for move in movesToMake:
moveInfo = move[1]
itteration += 1
if move[-1] == 'oneRoute':
routeI = moveInfo['routes']
(iPos, jPos) = moveInfo['pos']
positionList = positionDict[routeI]
(routes[routeI], positionDict[routeI]) = self.singleRouteMove(move, routes[routeI], iPos, jPos, positionList)
elif move[-1] == 'twoRoutes':
(routeI, routeJ) = moveInfo['routes']
(iPos, jPos) = moveInfo['pos']
positionListI, positionListJ = positionDict[routeI], positionDict[routeJ]
(routes[routeI], routes[routeJ], positionDict[routeI], positionDict[routeJ]) = self.doubleRouteMove(move, routes[routeI], routes[routeJ], iPos, jPos, positionListI, positionListJ)
if self.printOutput:
line = format %(' ',3,itteration,10,'Delta : ',5,move[0],5,' ',move[-2])
self.outputLines.append(line)
print(line)
return(routes)
#===============================================================================
#
#===============================================================================
class LocalSearchIFs(ImplementLocalSearchIFs, makeAllMoves, checkAllMoves):
    """Local search for arc-routing solutions with intermediate facilities (IFs).

    Combines the move-checking (checkAllMoves), move-execution (makeAllMoves)
    and solution-manipulation (ImplementLocalSearchIFs) mixins with the
    neighbourhood generators, optional route-count reduction and optional IF
    re-placement between improvement passes.
    """
    def __init__(self, info, timeIt = False):
        # NOTE(review): timeIt is accepted but never used in this class.
        self.info = info
        ImplementLocalSearchIFs.__init__(self, self.info)
        makeAllMoves.__init__(self, self.info)
        checkAllMoves.__init__(self, self.info)
        # Neighbourhood generation and the remove/insert move procedures.
        self.gen_neighbours = gen_neighbours_beta1.genAllNeighbourhoods(self.info)
        self.RNsingleRoute = gen_neighbours_beta1.SingleRouteRemoveInsertProcedure(info)
        self.RNmultiRoute = gen_neighbours_beta1.MultipleRouteRemoveInsertProcedure(info)
        # One of: 'MultiLocalSearch', 'FastLocalSearch', 'FastLocalSearch2',
        # 'FullLocalSearch' (dispatched in localSearch).
        self.neighbourSearchStrategy = 'MultiLocalSearch'
        #--------------------
        # Try to reduce the number of vehicle routes between search passes.
        self.reduceRoutes = True
        if self.reduceRoutes:
            self.reduce = RED.ReduceNumberOfVehicleRoutes(self.info)
            self.reduce.printOutput = False
        #--------------------
        # Re-optimise the placement of intermediate facilities between passes.
        self.redetermineIFs = True
        if self.redetermineIFs:
            self.retFit = refitIFs.refitIFs(self.info)
        #--------------------
        # Per-neighbourhood counters: executions and accumulated cost change.
        # NOTE(review): 'Total saving' accumulates new-minus-old cost, so
        # improvements are recorded as negative numbers.
        self.captureStats = True
        self.neighbourhoodSearchStats = {'RemoveInsertAllArcs'                :{'Executions':0, 'Total saving':0},
                                         'ExchangeAllArcs'                    :{'Executions':0, 'Total saving':0},
                                         'RemoveInsertAllDoubleArcs'          :{'Executions':0, 'Total saving':0},
                                         'ExchangeAllDoubleArcs'              :{'Executions':0, 'Total saving':0},
                                         'RemoveInsertAllArcsAllRoutes'       :{'Executions':0, 'Total saving':0},
                                         'ExchangeAllArcsAllRoutes'           :{'Executions':0, 'Total saving':0},
                                         'RemoveInsertAllDoubleArcsAllRoutes' :{'Executions':0, 'Total saving':0},
                                         'ExchangeAllDoubleArcsAllRoutes'     :{'Executions':0, 'Total saving':0},
                                         'ReduceRoutes'                       :{'Executions':0, 'Total saving':0},
                                         'DeterminIFs'                        :{'Executions':0, 'Total saving':0}}
        # NOTE(review): second copy of the same counters; never updated in the
        # code visible here — confirm it is used elsewhere.
        self.neighbourhoodSearchStats2 = {'RemoveInsertAllArcs'                :{'Executions':0, 'Total saving':0},
                                          'ExchangeAllArcs'                    :{'Executions':0, 'Total saving':0},
                                          'RemoveInsertAllDoubleArcs'          :{'Executions':0, 'Total saving':0},
                                          'ExchangeAllDoubleArcs'              :{'Executions':0, 'Total saving':0},
                                          'RemoveInsertAllArcsAllRoutes'       :{'Executions':0, 'Total saving':0},
                                          'ExchangeAllArcsAllRoutes'           :{'Executions':0, 'Total saving':0},
                                          'RemoveInsertAllDoubleArcsAllRoutes' :{'Executions':0, 'Total saving':0},
                                          'ExchangeAllDoubleArcsAllRoutes'     :{'Executions':0, 'Total saving':0},
                                          'ReduceRoutes'                       :{'Executions':0, 'Total saving':0},
                                          'DeterminIFs'                        :{'Executions':0, 'Total saving':0}}
        #--------------------
        self.printOutput = True
        self.outputLines = []
    def captureStatsDef(self, oldSolution, newSolution, statType):
        # Record an execution of statType only when the new cost improves on
        # the old one (costs compared directly; lower is better).
        if self.captureStats:
            if newSolution < oldSolution:
                self.neighbourhoodSearchStats[statType]['Executions'] += 1
                self.neighbourhoodSearchStats[statType]['Total saving'] += newSolution - oldSolution
    def intialiseLocalSearch(self, solution):
        # Pre/post pass: optionally reduce the number of routes, then re-place
        # the IFs, capturing the cost change of each step.
        solutionOldCost = solution['Total cost']
        if self.reduceRoutes:
            (solution, reduced, nTrips) = self.reduce.variableReduction(solution)
            self.captureStatsDef(solutionOldCost, solution['Total cost'], 'ReduceRoutes')
            solutionOldCost = solution['Total cost']
            if self.printOutput:
                self.outputLines += self.reduce.outputLines
                self.reduce.outputLines = []
        if self.redetermineIFs:
            solution = self.retFit.IFplacementLocalSearch(solution)
            self.captureStatsDef(solutionOldCost, solution['Total cost'], 'DeterminIFs')
            if self.printOutput:
                self.outputLines += self.retFit.outputLines
                self.retFit.outputLines = []
        return(solution)
    def performFullLocalSearch(self, solution):
        # Evaluate the full neighbourhood and apply only the single best move.
        format = '%s%*d%*s%*d%*s%s'  # NOTE: shadows the builtin 'format'
        (routes, routeIndex) = self.makeRouteList(solution)
        (loads) = self.makeLoadList(solution)
        (costs, tripCosts) = self.makeCostList(solution)
        neighbours = self.gen_neighbours.genBestNeighbour(routes, loads, routeIndex, tripCosts)
        movesToMake = False
        if neighbours:
            solutionOldCost = solution['Total cost']
            (solution, legalMove) = self.changeSingleSolution(solution, neighbours[0], routeIndex)
            solutionNewCost = solution['Total cost']
            self.captureStatsDef(solutionOldCost, solutionNewCost, neighbours[0][-2])
            if self.printOutput:
                line = format %(' ',3,self.iteration,10,'Delta : ',5,neighbours[0][0],5,' ',neighbours[0][-2])
                self.outputLines.append(line)
                print(line)
            movesToMake = True
        return(movesToMake, solution)
    def performFastLocalSearch(self, solution):
        # Same as performFullLocalSearch, but applies the first improving move
        # found in an ordered scan (genFirstNeighbourOrdered).
        format = '%s%*d%*s%*d%*s%s'  # NOTE: shadows the builtin 'format'
        (routes, routeIndex) = self.makeRouteList(solution)
        (loads) = self.makeLoadList(solution)
        (costs, tripCosts) = self.makeCostList(solution)
        neighbours = self.gen_neighbours.genFirstNeighbourOrdered(routes, loads, routeIndex, tripCosts)
        movesToMake = False
        if neighbours:
            solutionOldCost = solution['Total cost']
            (solution, legalMove) = self.changeSingleSolution(solution, neighbours[0], routeIndex)
            solutionNewCost = solution['Total cost']
            self.captureStatsDef(solutionOldCost, solutionNewCost, neighbours[0][-2])
            if self.printOutput:
                line = format %(' ',3,self.iteration,10,'Delta : ',5,neighbours[0][0],5,' ',neighbours[0][-2])
                self.outputLines.append(line)
                print(line)
            movesToMake = True
        return(movesToMake, solution)
    def performFastLocalSearch2(self, solution):
        # Variant that uses the unordered first-improving scan
        # (genFirstNeighbour) instead of the ordered one.
        format = '%s%*d%*s%*d%*s%s'  # NOTE: shadows the builtin 'format'
        (routes, routeIndex) = self.makeRouteList(solution)
        (loads) = self.makeLoadList(solution)
        (costs, tripCosts) = self.makeCostList(solution)
        neighbours = self.gen_neighbours.genFirstNeighbour(routes, loads, routeIndex, tripCosts)
        movesToMake = False
        if neighbours:
            solutionOldCost = solution['Total cost']
            (solution, legalMove) = self.changeSingleSolution(solution, neighbours[0], routeIndex)
            solutionNewCost = solution['Total cost']
            self.captureStatsDef(solutionOldCost, solutionNewCost, neighbours[0][-2])
            if self.printOutput:
                line = format %(' ',3,self.iteration,10,'Delta : ',5,neighbours[0][0],5,' ',neighbours[0][-2])
                self.outputLines.append(line)
                print(line)
            movesToMake = True
        return(movesToMake, solution)
    def performMulitLocalSearch(self, solution):
        # Generate many candidate moves, filter them for feasibility and
        # mutual interaction (chechMoves), then apply all survivors at once.
        format = '%*s%*d%*s%*d%*s%*d%*s%*d'  # NOTE: shadows the builtin 'format'
        (routes, routeIndex) = self.makeRouteList(solution)
        (loads) = self.makeLoadList(solution)
        (costs, tripCosts) = self.makeCostList(solution)
        self.nRoutes = len(routes)
        neighbours = self.gen_neighbours.genMulitNeighbour(routes,loads)
        (movesToMake, tempLoads, tempCosts, tempServiceCosts, totalCostChange, tripCostsTemp) = self.chechMoves(neighbours, solution, routeIndex, tripCosts)
        self.temp = tempServiceCosts
        if movesToMake:
            if self.printOutput:
                line = format %(5,'-',5,self.iteration,18,' # of neighbours : ',3,len(neighbours),18,'# of moves : ',3,len(movesToMake),25,'Total delta change : ',5,totalCostChange)
                self.outputLines.append(line)
                print(line)
            routes = self.makeMoves(solution, movesToMake)
            solution = self.makeSolutionDictionaryIFs(solution, routes, tempLoads, tempCosts, routeIndex, tripCostsTemp)
        if self.printOutput:
            line = ''
            self.outputLines.append(line)
            print(line)
        return(movesToMake, solution)
    def localSearch(self, solutionOld):
        # Main loop: apply the selected strategy until no improving moves
        # remain; re-run route reduction / IF re-placement after each pass.
        format = '%*s%*d%*s%*d%*s%*d%*s%*d'  # NOTE(review): unused here
        self.iteration = 0
        solution = self.intialiseLocalSearch(solutionOld)
        while True:
            self.iteration += 1
            if self.neighbourSearchStrategy == 'MultiLocalSearch':
                (movesToMake, solution) = self.performMulitLocalSearch(solution)
            elif self.neighbourSearchStrategy == 'FastLocalSearch':
                (movesToMake, solution) = self.performFastLocalSearch(solution)
            elif self.neighbourSearchStrategy == 'FastLocalSearch2':
                # NOTE(review): calls performFastLocalSearch rather than
                # performFastLocalSearch2 — looks like a copy/paste slip; confirm.
                (movesToMake, solution) = self.performFastLocalSearch(solution)
            elif self.neighbourSearchStrategy == 'FullLocalSearch':
                (movesToMake, solution) = self.performFullLocalSearch(solution)
            if not movesToMake:break
            solution = self.intialiseLocalSearch(solution)
        # Presumably: one dict key per trip plus one summary entry — confirm.
        nTrips = len(solution.keys())-1
        return(solution, nTrips)
#===============================================================================
#
#===============================================================================
def unitTest(info, solution):
    # Smoke test: check feasibility and report the given solution, run the
    # local search on it, then check and report it again.
    import testSolutions
    FS = LocalSearchIFs(info)
#    FS.neighbourSearchStrategy = 'MultiLocalSearch'
    FS.neighbourSearchStrategy = 'MultiLocalSearch'
    #FS.neighbourSearchStrategy = 'FullLocalSearch'
    # Old-format solutions (no 'Subtrip cost' key) get converted first.
    if not solution[1].get('Subtrip cost'):
        solution = transformSolution.transformSolution(info).newRoute(solution)
    er = testSolutions.testSolution(info, solution)
    er.checkFeasibilityIFs()
    er.testReportSolutionIFs()
    print('')
    t1 = time.clock()  # NOTE: time.clock() is Python 2 only (removed in 3.8)
    (solution, nTrips) = FS.localSearch(solution)
    e1 = time.clock() - t1
    print(e1)  # elapsed local-search time
    print('')
    er = testSolutions.testSolution(info, solution)
    er.checkFeasibilityIFs()
    er.testReportSolutionIFs()
if __name__ == "__main__":
    import cPickle
    import LancommeARPconversions3 as LARP
#    import psyco
#    psyco.full()
    # Load a pickled problem instance and its stored initial solution, then
    # run the local-search unit test on them.
    fileName1 = 'cen_IF_ProblemInfo/Centurion_a_pickled.dat'
    info1 = LARP.ReadProblemDataIFs(fileName1)
    s1 = open('cen_IF_Results/Centurion_a_Init_sol.txt')  # NOTE(review): handle never closed
    solution1 = cPickle.load(s1)
    unitTest(info1, solution1)
|
# -*- coding: utf-8 -*-
import platform
import sys
import pytest
# Platform / interpreter detection constants used by the test suite.
LINUX = sys.platform.startswith("linux")
MACOS = sys.platform.startswith("darwin")
WIN = sys.platform.startswith(("win32", "cygwin"))
_implementation = platform.python_implementation()
CPYTHON = _implementation == "CPython"
PYPY = _implementation == "PyPy"
PY = sys.version_info
PY2 = PY.major == 2
def deprecated_call():
    """
    Compatibility shim for pytest.deprecated_call(), which is broken in
    pytest<3.9.x; concretely, it doesn't work on CPython 3.8.0 with
    pytest==3.3.2 on Ubuntu 18.04 (#2922). Narrowed reimplementation of:
    https://github.com/pytest-dev/pytest/pull/4104
    """
    # TODO: Remove this when testing requires pytest>=3.9.
    version_pieces = pytest.__version__.split(".")
    major, minor = int(version_pieces[0]), int(version_pieces[1])
    if (major, minor) >= (3, 9):
        return pytest.deprecated_call()
    return pytest.warns((DeprecationWarning, PendingDeprecationWarning))
|
import argparse
import logging
from typing import Any, Dict
__all__ = ["add_options", "apply_options", "update_config"]
logger = logging.getLogger(__name__)
def add_options(parser: argparse.ArgumentParser) -> None:
    """
    Attach the catch-all ``opts`` positional argument to *parser*.

    Everything left on the command line is collected into ``args.opts`` and
    later merged into the config by :func:`apply_options`.
    """
    opts_kwargs = {
        "help": "modify config options using the command-line",
        "default": None,
        "nargs": argparse.REMAINDER,
    }
    parser.add_argument("opts", **opts_kwargs)
def apply_options(config: Dict[str, Any], args: argparse.Namespace) -> None:
    """
    Merge command-line ``opts`` overrides into *config* in place.

    Requires yacs; raises ImportError (after logging) if it is missing.
    """
    try:
        from yacs.config import CfgNode as CN
    except ImportError as e:
        logger.error(
            "`apply_options` requiring yacs(version==0.1.8), "
            "but not installed."
        )
        raise e
    overrides = args.opts
    merged = CN(config)
    if overrides is not None and len(overrides) > 0:
        merged.merge_from_list(overrides)
    config.update(merged)
def update_config(config: Dict[str, Any], vars_dict: Dict[str, Any]) -> None:
    """Recursively substitute placeholder strings in *config* using *vars_dict*.

    Handy for lazy-computed values: every occurrence of each key of
    *vars_dict* inside any string in *config* (including nested dicts and
    lists) is replaced by the corresponding value, in place.

    Args:
        config: input config dict (or nested list during recursion).
        vars_dict: placeholder -> replacement mapping.

    Examples:
        config = dict(
            a="python",
            b="FILENAME",
            c=["python", "FILENAME", "python FILENAME"],
        )
        vars_dict = {"FILENAME": "rust"}
        update_config(config, vars_dict)
        assert config["b"] == "rust"
        assert config["c"][1] == "rust"
        assert config["c"][2] == "python rust"
    """
    def _substitute(text):
        # Apply every placeholder replacement to one string.
        for placeholder, replacement in vars_dict.items():
            text = text.replace(placeholder, replacement)
        return text

    if isinstance(config, dict):
        for key in config:
            value = config[key]
            if isinstance(value, str):
                config[key] = _substitute(value)
            elif isinstance(value, (dict, list)):
                update_config(value, vars_dict)
    elif isinstance(config, list):
        for index, item in enumerate(config):
            if isinstance(item, str):
                config[index] = _substitute(item)
            elif isinstance(item, (dict, list)):
                update_config(item, vars_dict)
    elif isinstance(config, (int, float, bool)):
        pass
    else:
        raise ValueError(f"Unknown type: {type(config)}")
|
from model.contact import Contact
from model.group import Group
import random
from fixture.orm import ORMFixture
# Module-level ORM connection shared by the tests in this file.
# NOTE(review): hard-coded localhost and empty root password — dev-only config.
orm = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
def test_add_contact_to_group(app, db):
    """Link a random contact to a random group and verify the group page
    agrees with the ORM's view of the membership."""
    # Make sure at least one contact and one group exist to choose from.
    if not db.get_contact_list():
        app.contact.create(Contact(first_name="first_name1", last_name="last_name1", address="address1",
                      home_phone="home 111", mobile_phone="mobile 3333", work_phone="work_852", secondary_phone="secondary1",
                      email="email11", email2="email12", email3="email13"))
    if not db.get_group_list():
        app.group.create(Group(name="name1", header="header1", footer="footer1"))
    # Pick a random contact and group, then link them through the UI.
    chosen_contact = random.choice(db.get_contact_list())
    chosen_group = random.choice(db.get_group_list())
    app.contact.add_contact_to_group(chosen_contact.id, chosen_group.id)
    ui_members = app.contact.get_contact_list_from_group_page(chosen_group.id)
    orm_members = orm.get_contacts_in_group(chosen_group)
    assert ui_members == orm_members
# Generated by Django 2.2.1 on 2019-10-14 05:56
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates the bigwin_history table, which records
    # "big win" events displayed to users (game, platform, bet/award amounts,
    # banner image and display flags).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='History',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('updated', models.DateTimeField(auto_now=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('gameName', models.CharField(max_length=256, verbose_name='游戏名称')),
                ('gameID', models.CharField(blank=True, max_length=256, null=True, verbose_name='游戏ID')),
                ('platform', models.IntegerField(blank=True, choices=[(12, 'AG国际厅'), (27, 'AE厅'), (9, 'BBIN厅'), (30, 'CQ9厅'), (21, 'DT厅'), (23, 'PP厅'), (24, 'PS厅'), (15, 'PT厅'), (26, 'PT国际厅'), (35, 'PT亚洲厅'), (31, 'PNG厅'), (34, 'NT厅'), (17, 'MG国际厅'), (22, 'SW厅'), (18, 'TTG厅')], default=1, null=True, verbose_name='平台')),
                ('bet', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='投注金额')),
                ('awards', models.DecimalField(decimal_places=2, max_digits=19, verbose_name='爆奖金额')),
                ('awardTime', models.DateField(verbose_name='中奖时间')),
                ('banner', models.ImageField(blank=True, null=True, upload_to='bigWinImgs', verbose_name='图片')),
                ('isH5', models.BooleanField(default=False, verbose_name='是否H5')),
                ('isXiqi', models.BooleanField(default=False, verbose_name='是否红包')),
                ('name', models.CharField(default='', max_length=20, verbose_name='中奖用户名')),
            ],
            options={
                'db_table': 'bigwin_history',
            },
        ),
    ]
|
import os # path
import sys # path
import yaml # safe_load, YAMLError
import glob # glob
import importlib # import_module
import pytest # skip
import warnings # warn
# For type hints only:
from typing import Union
from types import ModuleType
from _pytest.config import Config
def getSingleFileFromName(name: str, rootdir: str) -> str:
    """Recursively find the single file called *name* under *rootdir*.

    Asserts that exactly one match exists and returns its path.
    """
    search_pattern = os.path.abspath(os.path.join(rootdir, "**", name))
    matches = glob.glob(search_pattern, recursive=True)
    assert len(matches) == 1, f"WRONG NUMBER OF FILES: Must have exactly one '{name}' file inside project. Found {len(matches)} instead.\nBase path used to find files: {search_pattern}."
    return matches[0]
## Open yml/yaml File:
# Returns the parsed contents, or None when something goes wrong
# (or raises instead, when required == True).
def loadYamlFile(path: str, required: bool=False) -> Union[list,dict,None]:
    path = os.path.abspath(path)

    def _report(error_msg):
        # Raise (via assert) when the file is required; warn otherwise.
        assert not required, error_msg
        warnings.warn(UserWarning(error_msg))

    if not os.path.isfile(path):
        _report(f"YAML ERROR: File not found: '{path}'.")
        return None
    with open(path, "r") as yaml_file:
        try:
            contents = yaml.safe_load(yaml_file)
        except yaml.YAMLError as e:
            _report(f"YAML ERROR: Couldn't read file: '{path}'. Error '{e}'.")
            return None
    if contents is None:
        _report(f"YAML ERROR: File is empty: '{path}'.")
    return contents
## Given "key1: {key2: val}", returns -> {key2: val, 'title': key1} to keep everything top-level
# Useful with "title: {test dict}" scenarios.
def seperateKeyVal(to_seperate: dict, file: str, dict_desc: str) -> dict:
    """Collapse ``{title: {info...}}`` into ``{info..., 'title': title}``.

    :param to_seperate: single-key dict whose key is the test title and whose
        value is the dict of test info.
    :param file: file name, used only in error messages.
    :param dict_desc: description of the dict kind, used only in error messages.
    :raises AssertionError: if there is more than one key, the value is not a
        dict, or the reserved 'title' key is already present.
    """
    dict_desc = dict_desc.upper()
    num_test_titles = len(to_seperate)
    assert num_test_titles == 1, f"MISFORMATTED {dict_desc}: {num_test_titles} keys found in a {dict_desc.lower()}. Only have 1, the title of the test. File: '{file}'."
    # Separate the key and value, to operate on each individually:
    title, test_info = next(iter( to_seperate.items() ))
    # The value must itself be a dict (not a list or anything else):
    assert isinstance(test_info, dict), f"MISFORMATTED {dict_desc}: Contents of {dict_desc.lower()} '{title}' is not a dict, can't collapse test correctly. File: '{file}'."
    # 'title' is reserved for internal use — make sure it isn't taken:
    assert "title" not in test_info, f"MISFORMATTED {dict_desc}: 'title' key found in {dict_desc.lower()} '{title}'. This key is reserved for internal use only. File: '{file}'."
    # Might seem redundant, but this keeps every test_info key at the top
    # level AND still records the test title:
    test_info["title"] = title
    return test_info
def getPytestManagerModule(pytest_managers_path: str) -> ModuleType:
    """Import and return the user's pytest-managers module from its path.

    The containing directory is added to sys.path only for the duration of
    the import. Previously the path entry leaked when the import failed; the
    try/finally now guarantees cleanup on both success and failure.

    :raises AssertionError: if the module cannot be imported.
    """
    module_dir = os.path.dirname(pytest_managers_path)
    sys.path.append(module_dir)
    try:
        # Actually import pytest-managers now:
        return importlib.import_module("pytest-managers")
    except ImportError as e:
        assert False, f"IMPORT ERROR: Problem importing '{pytest_managers_path}'. Error '{e}'."
    finally:
        # Done with the import (or it failed): clean up the path either way.
        sys.path.remove(module_dir)
def skipTestsOnlyRunFilter(config: Config, option: str, check_against: str, option_description: str):
    # Skip the current test unless check_against matches at least one value
    # passed to `option` (case-insensitive substring match). No-op when the
    # option was not supplied.
    filters = config.getoption(option)
    if filters is None:
        return
    haystack = check_against.lower()
    matched = any(only_run_filter.lower() in haystack for only_run_filter in filters)
    if not matched:
        pytest.skip(f"{option_description} did not contain {option} param (case insensitive)")
def skipTestsDontRunFilter(config: Config, option: str, check_against: str, option_description: str):
    # Skip the current test if check_against matches ANY value passed to
    # `option` (case-insensitive substring match). No-op when the option was
    # not supplied.
    filters = config.getoption(option)
    if filters is None:
        return
    haystack = check_against.lower()
    if any(dont_run_filter.lower() in haystack for dont_run_filter in filters):
        pytest.skip(f"{option_description} contained {option} param (case insensitive)")
def skipTestsIfNecessary(config: Config, test_name: str, file_name: str, test_type: str) -> None:
    # Global kill-switch first:
    if config.getoption("--skip-all"):
        pytest.skip("Skipping ALL tests. (--skip-all cli arg was found).")
    # Apply the only-run / dont-run filter pairs against the test title, the
    # file name and the test type, in that order:
    filter_specs = [
        ("--only-run-name", "--dont-run-name", test_name, "Title of test"),
        ("--only-run-file", "--dont-run-file", file_name, "Name of file"),
        ("--only-run-type", "--dont-run-type", test_type, "Test type"),
    ]
    for only_option, dont_option, value, description in filter_specs:
        skipTestsOnlyRunFilter(config, only_option, value, description)
        skipTestsDontRunFilter(config, dont_option, value, description)
## Validates both pytest-managers.py and pytest-config.py, then loads their methods
# and stores pointers in a dict, for tests to run from.
def loadTestTypes(pytest_managers_path: str, pytest_config_path: str) -> dict:
    """Load pytest-config.yml, import pytest-managers.py and resolve each
    declared test type's 'method' name to a callable ('method_pointer').

    Returns the (mutated in place) parsed config dict.
    """
    # Load the config and validate the 'test_types' list exists:
    pytest_config_info = loadYamlFile(pytest_config_path, required=True)
    assert "test_types" in pytest_config_info, "CONFIG ERROR: Required key 'test_types' not found in 'pytest-config.yml'."
    assert isinstance(pytest_config_info["test_types"], type([])), f"CONFIG ERROR: 'test_types' must be a list inside 'pytest-config.yml'. (Currently type: {type(pytest_config_info['test_types'])})."
    list_of_test_types = pytest_config_info["test_types"]
    pytest_managers_module = getPytestManagerModule(pytest_managers_path)
    # Normalise and validate each test type declared in the config:
    for ii, test_type_config in enumerate(list_of_test_types):
        # {title: {info}} -> {info..., 'title': title}:
        test_info = seperateKeyVal(test_type_config, pytest_config_path, "test type")
        # If "required_keys" or "required_files" field contain one item, turn into list of that one item:
        if "required_keys" in test_info and not isinstance(test_info["required_keys"], type([])):
            test_info["required_keys"] = [test_info["required_keys"]]
        if "required_files" in test_info and not isinstance(test_info["required_files"], type([])):
            test_info["required_files"] = [test_info["required_files"]]
        # A type with neither requirement matches everything, so any test
        # types declared after it would be unreachable — warn about that:
        if "required_keys" not in test_info and "required_files" not in test_info and ii < (len(list_of_test_types)-1):
            warnings.warn(UserWarning(f"Test type found without required_keys AND required_files used, but there are test types after this one. Tests can't pass '{test_info['title']}' and run on those."))
        # 'method' is mandatory — it names the runner inside pytest-managers:
        assert "method" in test_info, f"CONFIG ERROR: Required key 'method' not found in test '{test_info['title']}'. File: '{pytest_config_path}'."
        # Resolve the method name to the callable inside the module:
        try:
            # This makes it so you can write test_info["method_pointer"](args) to actually call the method:
            test_info["method_pointer"] = getattr(pytest_managers_module, test_info["method"])
        except AttributeError:
            assert False, f"IMPORT ERROR: '{test_info['method']}' not found in 'pytest-managers.py'. Tried loading from: '{pytest_managers_module.__file__}'."
        # Just a guarantee this field is declared, to pass into functions:
        if "variables" not in test_info:
            test_info["variables"] = None
        # Write the normalised dict back into the config list:
        list_of_test_types[ii] = test_info
    return pytest_config_info
from Statistics.Proportion import proportion
from Calculators.Subtraction import subtraction
from Calculators.Division import division
from Calculators.Multiplication import multiplication
def var_pop_proportion(data):
    # Intended: variance related to a population proportion (p and q = 1 - p).
    p = proportion(data)
    q = subtraction(1, p)
    # Flatten the nested data to count the total number of observations.
    data = [num for elem in data for num in elem]
    new_data = [float(x) for x in data]
    n = len(new_data)
    # NOTE(review): if division(a, b) computes a / b, this returns n / (p*q),
    # while the population-proportion variance is p*q / n — the argument order
    # looks swapped. Confirm against Calculators.Division before changing.
    return division(n, multiplication(p, q))
|
# -*- coding: utf-8 -*-
"""
******* 文档说明 ******
通过 SlideCutWord 脚本 提取的词出现次数统计数据 计算 n_gram 下各个新词的 凝固度、自由度
并通过 p_min(最小概率或频数)、co_min(凝固度)、h_max(自由度) 的设置阈值 找出训练样本中可能性最大的用户词典
Time: 2020/11/26 21:46
Author: ChenXin
Version: V 0.1
File: bulletDensity.py
Describe: Github link: https://github.com/Chen-X666
"""
import math
import os
import pickle
import re
import time
from math import log
import logging
from multiprocessing import Process, Queue, cpu_count
from NewWordDiscovery.tool.LOG import logger_set # 日志设置文件
logger = logging.getLogger('NLP')
# Parameter-container class used by the word-discovery workers.
class Arguments:
    # Parent directory of this file's directory.
    CWD = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..'))
    # System date/time captured once, when this module is imported
    # (fixed at import, not at instantiation).
    Call_Time = time.strftime('%Y%m%d%H%M%S', time.localtime())
    def __init__(self):
        # Wall-clock time at instantiation.
        self.start_time = time.time()
    # Render every public attribute currently stored on the instance.
    def __repr__(self):
        public_names = [name for name in dir(self) if not name.startswith('_')]
        arg_values = '\n'.join(['  {}: {}'.format(name, getattr(self, name)) for name in public_names])
        return 'Arguments Values: \n{}'.format(arg_values)
# Coverage rate
def get_den(args, keyword, corpus):
    """Coverage: number of corpus lines containing *keyword* (case-insensitive),
    divided by args.videoLength.

    A videoLength of 0 marks invalid data and yields 0.0.

    Note: the original looped over re.finditer behind an always-true
    truthiness check on the iterator; this counts matches directly.
    """
    video_length = args.videoLength
    # One match per line that contains the keyword (the trailing .* consumes
    # the rest of the line, so finditer yields at most one match per line).
    pattern = r'(?im)^.*?' + re.escape(keyword) + '.*'
    count = sum(1 for _ in re.finditer(pattern, corpus))
    if video_length == 0:
        # Invalid data: report zero coverage instead of dividing by zero.
        return 0.0
    return count / video_length
# Reuse rate
def get_rep(corpus, keyword):
    """Reuse: for every maximal run of consecutive *keyword* repetitions in
    *corpus* (case-insensitive), count the repetitions beyond the first;
    return the total.

    Note: the original wrapped both finditer iterators in always-true
    truthiness checks and counted with manual loops; this uses findall
    counts for the same result.
    """
    extra = 0
    # Each match is a maximal run of back-to-back keyword repetitions.
    for run in re.finditer(r'(?i)(' + re.escape(keyword) + ')+', corpus):
        # Occurrences of the keyword inside this run, minus the first one.
        occurrences = len(re.findall(r'(?i)' + re.escape(keyword), run.group()))
        extra += occurrences - 1
    return extra
# Freedom measured as Shannon entropy: sum(-p_i * log(p_i))
def get_freedom(word_count):
    """
    :param word_count: occurrence counts of neighbouring word groups, e.g. [1,2,4,6,2]
    :return: entropy of the count list, used as the word's freedom
             (0 for an empty list)
    """
    total = sum(word_count)
    entropy = 0
    for count in word_count:
        p = count / total
        entropy -= p * math.log(p)
    return entropy
# Search for candidate words of 3 or more characters
def search_n_word(process_i, queue_data, n_gram, args, p_min=0.00001, co_min=100, h_min=2):
    """
    Worker process: find candidate new words of length n_gram.

    :param process_i: worker index (used for logging and the result queue)
    :param queue_data: queue used to signal completion to the parent process
    :param n_gram: candidate word length in characters
    :param args: parameter object (paths, log levels, file_name, top_n, ...)
    :param p_min: minimum word probability if a float in [0,1]; if an integer,
        the minimum absolute frequency
    :param co_min: minimum cohesion; only words above it get their freedom
        checked and proceed to the next step
    :param h_min: minimum freedom; below it the word group is assumed to be an
        incomplete fragment of a larger word group
    :return: None (results are pickled to a temp file; queue receives 'OVER')
    """
    # Per-worker logger setup.
    logger_set(path=args.path_log, s_level=args.level_s, f_level=args.level_f,
               name="process%d_%d_ngram" % (process_i, n_gram))
    logger_i = logging.getLogger("process%d_%d_ngram" % (process_i, n_gram))
    # Temp folder shared by the multiprocessing workers.
    temp_path = os.path.join(args.CWD, 'Temp')
    with open(os.path.join(temp_path, 'WordCount_%s_001.tmp' % args.file_name), 'rb') as f_read_tmp:
        # Single-character counts (their sum gives the corpus length).
        word_1_count = pickle.load(f_read_tmp)
    with open(os.path.join(temp_path, 'WordCount_%s_%03d.tmp' % (args.file_name, n_gram-1)), 'rb') as f_read_tmp:
        # (n-1)-gram counts: cohesion denominators.
        word_1n_count = pickle.load(f_read_tmp)
    with open(os.path.join(temp_path, 'WordCount_%s_%03d.tmp' % (args.file_name, n_gram)), 'rb') as f_read_tmp:
        # n-gram counts: the candidate words themselves.
        word_n_count = pickle.load(f_read_tmp)
    with open(os.path.join(temp_path, 'WordCount_%s_%03d.tmp' % (args.file_name, n_gram+1)), 'rb') as f_read_tmp:
        # (n+1)-gram counts: used to compute left/right freedom.
        word_n1_count = pickle.load(f_read_tmp)
    # Load the raw corpus (needed for coverage/reuse statistics).
    with open(os.path.join(temp_path, 'Corpus_%s.tmp' % args.file_name), 'rb') as f:
        corpus = pickle.load(f)
    # Total number of characters across all text.
    word_count_sum = sum(word_1_count.values())
    #print(sum(word_1_count.values()))
    # A float p_min is a probability: convert it to an absolute count.
    if isinstance(p_min, float):
        p_min = p_min * word_count_sum
    values_list = sorted(list(word_n_count.values()), reverse=True)
    word_no = 0  # number of words meeting the frequency threshold
    for word_no, x in enumerate(values_list):
        if x < p_min:
            break
    # Candidate words and their counts that satisfy the frequency threshold.
    # NOTE(review): most_common(word_no + 1) includes the first entry BELOW
    # the threshold — confirm the off-by-one is intended.
    word_count_list = word_n_count.most_common(word_no + 1)
    logger_i.info('{:d} n_gram 满足最低概率词数量: {:d} '.format(n_gram, word_no + 1))
    # If more than half of top_n qualifies, top_n was probably set too low and
    # some low-frequency words may be missed.
    if word_no + 1 > args.top_n*0.5:
        logger_i.warning('参数 top_n {} 可能设置太小!'.format(args.top_n))
    # Accumulates [word, n_gram, count, cohesion, front/back freedom, den, rep].
    search_result = []
    # Check cohesion and freedom for every candidate word.
    for i, (word_i, word_i_count) in enumerate(word_count_list):
        print('\r%2d: %-8d ' % (process_i, i), end='')
        # Cohesion from the front and back splits (PMI-style); the +1 in the
        # denominators guards against zero counts.
        co_f = log(word_count_sum * word_i_count / ((word_1_count[word_i[0]]+1) * (word_1n_count[word_i[1:]]+1)))
        co_b = log(word_count_sum * word_i_count / ((word_1n_count[word_i[:-1]]+1) * (word_1_count[word_i[-1]]+1)))
        # Only words passing the cohesion threshold get freedom computed.
        co = min(co_f, co_b) # TODO min | max | avg ??????????
        if co > co_min:
            front_word_num = []
            back_word_num = []
            # Collect counts of every (n+1)-gram that extends word_i on the
            # left/right; the sparser the (n+1)-gram data, the cheaper this is.
            for word_n1_i in word_n1_count:
                if word_n1_i[1:] == word_i:
                    front_word_num.append(word_n1_count[word_n1_i])
                if word_n1_i[:-1] == word_i:
                    back_word_num.append(word_n1_count[word_n1_i])
            front_freedom = get_freedom(front_word_num)
            back_freedom = get_freedom(back_word_num)
            # Words passing the freedom threshold also get coverage and reuse.
            if min(front_freedom, back_freedom) > h_min:
                den = get_den(args=args,keyword=word_i,corpus=corpus)
                rep = get_rep(keyword=word_i, corpus=corpus)
                search_result.append([word_i, n_gram, word_i_count, co, front_freedom, back_freedom,den,rep])
                # NOTE(review): unlike search_2_word, den/rep are not logged here.
                logger_i.debug('{},{},{},{:.1f},{:.3f},{:.3f}'.format
                               (word_i, n_gram, word_i_count, co, front_freedom, back_freedom))
    # Persist the candidate list for the parent process to merge later.
    with open(os.path.join(temp_path, 'CandidateWordResult_%s_%d_ngram.tmp' % (args.file_name, n_gram)), 'wb') as f:
        pickle.dump(search_result, f)
    queue_data.put({process_i: 'OVER'})
    logger_i.info('Process_i {:d} Finish! '.format(process_i))
# Search for candidate words made of 2 characters
def search_2_word(process_i, queue_data, args, p_min=0.00001, co_min=100, h_min=2):
    """Search for candidate 2-character words and pickle the survivors.

    Loads the precomputed 1-, 2- and 3-gram frequency counters and the corpus
    from the Temp directory, keeps every bigram whose frequency, cohesion and
    left/right freedom pass the thresholds, and writes the candidate list to
    ``CandidateWordResult_<file>_2_ngram.tmp``. Signals completion by putting
    ``{process_i: 'OVER'}`` on ``queue_data``.

    :param process_i: worker index (used for logging and queue signalling)
    :param queue_data: multiprocessing queue used to report completion
    :param args: argument object (paths, file name, log levels, top_n, ...)
    :param p_min: minimum word occurrence; an int is an absolute frequency,
        a float is a probability of the total character count
    :param co_min: minimum cohesion; only bigrams above it are examined further
    :param h_min: minimum freedom; below it the bigram is treated as a
        fragment of a longer word
    :return: None (results are pickled to a temp file)
    """
    # Logger setup for this worker
    logger_set(path=args.path_log, s_level=args.level_s, f_level=args.level_f,
               name="process%d_%d_ngram" % (process_i, 2))
    logger_i = logging.getLogger("process%d_%d_ngram" % (process_i, 2))
    # Temp directory shared by the worker processes
    temp_path = os.path.join(args.CWD, 'Temp')
    with open(os.path.join(temp_path, 'WordCount_%s_001.tmp' % args.file_name), 'rb') as f_read_tmp:
        # Unigram frequency counter
        word_1_count = pickle.load(f_read_tmp)
    with open(os.path.join(temp_path, 'WordCount_%s_002.tmp' % args.file_name), 'rb') as f_read_tmp:
        # Bigram frequency counter
        word_2_count = pickle.load(f_read_tmp)
    with open(os.path.join(temp_path, 'WordCount_%s_003.tmp' % args.file_name), 'rb') as f_read_tmp:
        # Trigram frequency counter (used for the freedom computation below)
        word_n1_count = pickle.load(f_read_tmp)
    # Load the corpus
    with open(os.path.join(temp_path, 'Corpus_%s.tmp' % args.file_name), 'rb') as f:
        corpus = pickle.load(f)
    # Total number of characters across all text
    word_count_sum = sum(word_1_count.values())
    # If p_min was given as a probability, convert it to an absolute count
    if isinstance(p_min, float):  # a float in [0, 1] is treated as a minimum probability
        p_min = p_min * word_count_sum
    values_list = sorted(list(word_2_count.values()), reverse=True)
    word_no = 0  # number of words meeting the frequency threshold
    for word_no, x in enumerate(values_list):
        if x < p_min:
            break
    # Words meeting the frequency threshold, with their occurrence counts
    word_count_list = word_2_count.most_common(word_no + 1)
    logger_i.info('文本字符串总长度: {:d} '.format(word_count_sum))
    logger_i.info('{:d} n_gram 满足最低概率词数量: {:d} '.format(2, word_no + 1))
    # If qualifying words exceed half of top_n, top_n may be set too small and
    # some low-frequency words could be missed
    if word_no + 1 > args.top_n*0.5:
        logger_i.warning('参数 top_n {} 可能设置太小!'.format(args.top_n))
    # Accumulated search results
    search_result = []
    # Check cohesion and freedom for every word that meets the frequency threshold
    for i, (word_i, word_i_count) in enumerate(word_count_list):
        print('\r%2d: %-8d ' % (process_i, i), end='')
        # Cohesion score
        co = log(word_count_sum * word_i_count / (word_1_count[word_i[0]] * word_1_count[word_i[1]]))
        # For sufficiently cohesive bigrams, compute left/right freedom
        if co > co_min:
            front_word_num = []
            back_word_num = []
            # The sparser the trigram data, the more efficient this scan is
            for word_n1_i in word_n1_count:
                # trigram suffix equals the keyword -> a left-context occurrence
                # print(word_n1_i[1:])
                if word_n1_i[1:] == word_i:
                    front_word_num.append(word_n1_count[word_n1_i])
                if word_n1_i[:-1] == word_i:
                    back_word_num.append(word_n1_count[word_n1_i])
            front_freedom = get_freedom(front_word_num)
            back_freedom = get_freedom(back_word_num)
            # For words passing the freedom check, also compute coverage (den)
            # and repetition (rep)
            if min(front_freedom, back_freedom) > h_min:
                den = get_den(args=args,keyword=word_i, corpus=corpus)
                rep = get_rep(keyword=word_i, corpus=corpus)
                search_result.append([word_i, 2, word_i_count, co, front_freedom, back_freedom,den,rep])
                logger_i.debug('{},{},{},{:.1f},{:.3f},{:.3f}'.format
                               (word_i, 2, word_i_count, co, front_freedom, back_freedom,den,rep))
    # Pickle the search results to a temp file
    with open(os.path.join(temp_path, 'CandidateWordResult_%s_%d_ngram.tmp' % (args.file_name, 2)), 'wb') as f:
        pickle.dump(search_result, f)
    queue_data.put({process_i: 'OVER'})
    logger_i.info('Process_i {:d} Finish! '.format(process_i))
def search_word(n_gram, process_i, queue_data, args, parameter):
    """Dispatch a word search: bigrams use the specialised routine, longer
    n-grams the generic one. ``parameter`` is ``[p_min, co_min, h_min]``."""
    p_min, co_min, h_min = parameter[0], parameter[1], parameter[2]
    if n_gram == 2:
        search_2_word(process_i, queue_data, args, p_min, co_min, h_min)
    elif n_gram > 2:
        search_n_word(process_i, queue_data, n_gram, args, p_min, co_min, h_min)
# Drain the multiprocessing result queue into a plain dict
def read_queue_data(queue_data):
    """Read every ``{process_i: status}`` item currently queued and merge
    them into one dict keyed by process index."""
    collected = {}
    # Keep pulling until the queue reports empty
    while queue_data.qsize() > 0:
        item = queue_data.get(True)
        first_key = next(iter(item))
        collected[first_key] = item[first_key]
    return collected
# Main driver
def word_discover(args, parameter, process_no=None):
    """Run n-gram new-word discovery across multiple processes.

    One child process is spawned per n-gram size in ``parameter``; at most
    ``process_no`` of them run concurrently. Completion signals are collected
    from a shared queue and a warning is logged if any worker failed to report.

    :param args: argument object shared by the workers
    :param parameter: per-n-gram thresholds ``{n_gram: [p_min, co_min, h_min]}``
    :param process_no: maximum concurrent processes; defaults to the CPU count
    :return: None
    """
    if process_no is None:
        process_no = cpu_count()
    for p_i in parameter:
        logger.info('{} n_gram 词出现最小概率 p_min:{} 最小凝聚度 co_min:{} 最小自由度 h_min:{} '.
                    format(p_i, parameter[p_i][0], parameter[p_i][1], parameter[p_i][2]))
    logger.info('- ' * 30)
    logger.info(' {:d} 进程 n_gram 新词发现程序开始。。。。'.format(process_no))
    # The parent process creates the queue and hands it to every child
    queue_data = Queue(len(parameter))  # one queue slot per n-gram worker
    # Results reported back by the workers
    queue_data_out = {}
    # Worker process registry
    process_list = {}
    # Launch workers, keeping at most process_no of them alive at any time
    for process_i, n_gram_i in enumerate(parameter):
        logger.info('进程 {:d}/{:d} 进入处理池。。。'.format(process_i + 1, len(parameter)))
        # Create the worker
        process_list[process_i] = Process(target=search_word, args=(n_gram_i, process_i, queue_data, args,
                                                                    parameter[n_gram_i]))
        # Start the worker
        process_list[process_i].start()
        # Wait until a slot frees up before launching the next worker
        while True:
            # Count workers that are still alive
            process_alive_no = 0
            for process_x in process_list.keys():
                if process_list[process_x].is_alive():
                    process_alive_no = process_alive_no + 1
            # A slot is free: break out and launch the next worker
            if process_alive_no < process_no:
                break
            else:  # wait 1 second before re-checking
                time.sleep(1)
        # Drain the queue and merge into the output dict (prevents the queue
        # from filling up while workers are still running)
        queue_data_out.update(read_queue_data(queue_data))
    # Wait for all workers to finish
    for process_i in process_list.keys():
        # Worker still running
        if process_list[process_i].is_alive():
            logger.info('进程 {:d} 等待结束中。。。 '.format(process_i + 1))
            process_list[process_i].join(10000)  # join, waiting at most 10000 s
            # process_list[process_i].terminate()  ## force-kill the process
    time.sleep(2)  # small buffer delay before the final drain
    # Final drain of the queue into the output dict
    queue_data_out.update(read_queue_data(queue_data))
    # Verify that every worker reported back
    if (len(parameter)) != len(queue_data_out):
        logger.warning('进程信息: {} '.format(queue_data_out))
        logger.warning('警告 !!! {} 个进程尚未结束。。。。'.format(len(parameter) - len(queue_data_out)))
    logger.info('进程信息: {} '.format(queue_data_out))
    logger.info('- ' * 30)
if __name__ == '__main__':
    # Ad-hoc smoke test: exercise get_den() with a hand-built Arguments object.
    # The commented block below was previously used to inspect a pickled
    # bigram counter from the Temp directory.
    # temp_path = os.path.join(os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')), 'Temp')
    # file_name = 'BV1Ct411c7tQ.csv'
    # with open(os.path.join(temp_path, 'WordCount_%s_002.tmp' % file_name), 'rb') as f_read_tmp:
    #     # 读取 文本行数 及 各词组词频数
    #     print(f_read_tmp)
    #     word_1_count = pickle.load(f_read_tmp)
    #     print(word_1_count)
    args = Arguments()
    args.videoLength = 100
    args.file_name = ''
    get_den(args=args,keyword='的土')
import re
# Load the corpus text (context-manager style file handling).
with open('../data/blackbirds.txt', 'r') as handle:
    text = handle.read()
# Collapse tabs and newlines into spaces.
text = text.replace('\t', ' ').replace('\n', ' ')
# The raw text contains accents and other non-ASCII symbols we do not care
# about; round-trip through ASCII, silently dropping anything unrepresentable.
text = text.encode('ascii', 'ignore')  # encode into ASCII bytes
text = text.decode('ascii', 'ignore')  # decode back to a plain string
# TODO: extend this script so it captures the Kingdom, Phylum and Species name
# for each species and prints them out neatly.
# Hint: re.findall(my_reg, text) may help. There are multiple ways to skin this
# cat: several regular-expression calls (easier) or a single one (harder).
import datetime
import boto3
import json
import os
# Prefix given to training jobs, models, endpoint configs, and the endpoint name
MODEL_PREFIX = os.environ['MODEL_PREFIX']
# Parameter Store key whose value is the date of the last successful training job
LAST_TRAIN_PARAM = '/models/{}/train/latest'.format(MODEL_PREFIX)
# Lookback interval (in days) when collecting training data files
INTERVAL = int(os.environ['INTERVAL'])
# Bucket containing the training data; model artifacts are written back to it
BUCKET = os.environ['BUCKET']
# Key prefix of the training data files within the bucket
TRAIN_SET_PREFIX = os.path.join('data', MODEL_PREFIX, 'train')
# s3:// path to the training data files (trailing '' keeps the trailing slash)
TRAIN_SET_PATH = os.path.join('s3://', BUCKET, TRAIN_SET_PREFIX, '')
# Object key of the training manifest
TRAIN_MANIFEST_KEY = os.path.join(TRAIN_SET_PREFIX, 'manifest')
# Full s3:// URI of the training data manifest file
TRAIN_MANIFEST_URI = os.path.join('s3://', BUCKET, TRAIN_MANIFEST_KEY)
# s3:// path that model artifacts are written to
OUTPUT_PATH = os.path.join('s3://', BUCKET, 'models', MODEL_PREFIX, '')
# Module-level AWS clients, reused across Lambda invocations
s3 = boto3.client('s3')
ssm = boto3.client('ssm')
def lambda_handler(event, context):
    """Build and upload a training-data manifest for the lookback interval.

    Returns ``{'no_new_data': True}`` when nothing newer than the last
    successful training run exists; otherwise uploads the manifest and returns
    the parameters the downstream training step needs.
    """
    event_time = event['time']
    print('Generating manifest...')
    # One candidate CSV per day in the lookback interval.
    candidate_files = ['{}.csv'.format(d) for d in get_dates(INTERVAL)]
    # Keep only the files that actually exist in S3.
    available = check_objects_exist(candidate_files)
    newest_upload = get_latest_date(available)
    # Date of the last successful training job, tracked in Parameter Store.
    previous_train = ssm.get_parameter(Name=LAST_TRAIN_PARAM)['Parameter']['Value']
    if newest_upload is None or newest_upload <= previous_train:
        print('No new data uploaded since last training run.')
        print('Skipping training until next scheduled training run.')
        return {
            'no_new_data': True
        }
    print('Uploading manifest to S3...')
    put_manifest(make_manifest(available))
    return {
        'time': event_time,
        'train_manifest_uri': TRAIN_MANIFEST_URI,
        's3_output_path': OUTPUT_PATH,
        'last_train_param': LAST_TRAIN_PARAM,
        'latest_data_upload': newest_upload,
        'endpoint': MODEL_PREFIX,
        'no_new_data': False
    }
def get_dates(interval):
    """ Creates datetime year-month-date strings for the input time interval.
    Args:
        interval (int): Time interval in days
    Returns:
        (list)
        List of "%Y-%m-%d" strings, today first, going back one day at a time.
    """
    today = datetime.date.today()
    offsets = (datetime.timedelta(days=back) for back in range(interval))
    return [(today - delta).strftime("%Y-%m-%d") for delta in offsets]
def get_latest_date(keys):
    """ Munges datetimes from a list of object keys where each key contains a datetime substring and returns latest
    Args:
        keys (list): List of object key strings
    Returns:
        (string or None)
        Latest datetime munged from datetimes in keys
        None if the input list of keys is empty
    """
    # The basename before the extension is the date stamp, e.g. "2021-01-05.csv".
    stamps = [os.path.basename(key).split('.')[0] for key in keys]
    # ISO "%Y-%m-%d" strings sort chronologically, so max() is the latest date.
    return max(stamps) if stamps else None
def check_objects_exist(filenames):
    """ Checks to see if the input filenames exist as objects in S3
    Args:
        filenames (list): List of filename strings to check S3 for
    Returns:
        (list)
        Filtered list containing string paths of filenames that exist as objects in S3
    """
    # check_object_exists returns a strict True/False, so plain truthiness
    # filtering is equivalent to the explicit `is True` comparison.
    return [filename for filename in filenames if check_object_exists(filename)]
def check_object_exists(filename):
    """ Checks to see if the input filename exists as object in S3
    Args:
        filename (string): Filename to check S3 for
    Returns:
        (boolean)
        True if object corresponding to filename exists in S3
        False otherwise
    """
    key = os.path.join(TRAIN_SET_PREFIX, filename)
    try:
        # head_object raises if the object is missing or inaccessible; the
        # response metadata itself is not needed, so it is not bound.
        s3.head_object(
            Bucket=BUCKET,
            Key=key
        )
    # NOTE: broad on purpose to preserve best-effort behavior, but this also
    # treats auth/network errors as "missing object" -- the messages below
    # reflect that assumption.
    except Exception:
        print('Unable to find object "{}" in bucket "{}".'.format(key, BUCKET))
        print('The URI for this object will not be added to the manifest.')
        return False
    return True
def make_manifest(keys):
    """ Creates a SageMaker S3 object json manifest from the input object keys
    Args:
        keys (list): S3 object keys to add to manifest
    Returns:
        (bytes)
        UTF-8 encoded bytes object of stringified manifest json
    """
    # SageMaker manifest format: a prefix entry followed by relative keys.
    entries = [{'prefix': TRAIN_SET_PATH}]
    entries.extend(keys)
    return json.dumps(entries).encode()
def put_manifest(body):
    """ Upload manifest body to object in S3
    Args:
        body (bytes): UTF-8 encoded bytes object of stringified manifest json
    Returns:
        (None)
    Raises:
        Exception: re-raises whatever the S3 put_object call raised
    """
    try:
        s3.put_object(
            Body=body,
            Bucket=BUCKET,
            ContentType='text/plain',
            Key=TRAIN_MANIFEST_KEY
        )
    except Exception as e:
        print(e)
        print('Unable to put manifest to s3.')
        # Bare `raise` re-raises the active exception with its original
        # traceback intact (unlike `raise(e)`, which appends a new frame).
        raise
|
from sqlalchemy import Column, Integer, NVARCHAR, CHAR, DateTime, ForeignKey, NVARCHAR
from sqlalchemy.orm import relationship
from database import Base
class ShopMember(Base):
    """A registered shop user account."""
    __tablename__ = 'shop_member'
    id = Column(Integer, primary_key=True)
    name = Column(NVARCHAR(50))
    # Login identifier; enforced unique at the database level.
    email = Column(NVARCHAR(200), unique=True)
    # 32 chars suggests a hex digest (MD5?) rather than plaintext --
    # TODO confirm the hashing scheme in the auth code.
    password = Column(NVARCHAR(32))
    post_code = Column(NVARCHAR(5))
    address = Column(NVARCHAR(255))
    detail_address = Column(NVARCHAR(255))
    # Single-character flag; presumably 'Y'/'N' or '1'/'0' -- verify in callers.
    is_admin = Column(CHAR(1))
    create_date = Column(DateTime)
class Goods(Base):
    """A sellable product."""
    __tablename__ = 'goods'
    id = Column(Integer, primary_key=True)
    goods_name = Column(NVARCHAR(255))
    price = Column(Integer)
    # Path/URL of the product image -- confirm against the upload code.
    goods_photo = Column(NVARCHAR(255))
    # Stock on hand (also used as quantity in OrdersItem/Basket rows).
    goods_cnt = Column(Integer)
    goods_ranking = Column(Integer)
    # length=None requests an unbounded NVARCHAR where the dialect allows it;
    # verify the target database accepts this for long descriptions.
    goods_description = Column(NVARCHAR(None))
class Orders(Base):
    """An order header placed by a shop member."""
    __tablename__ = 'orders'
    id = Column(Integer, primary_key=True)
    # External / human-readable order identifier.
    order_str_id = Column(NVARCHAR(100))
    member = Column(Integer, ForeignKey('shop_member.id'))
    order_date = Column(DateTime)
class OrdersItem(Base):
    """A single line item of an order."""
    __tablename__ ='orders_item'
    id = Column(Integer, primary_key=True)
    goods = Column(Integer, ForeignKey('goods.id'))
    # Unit price snapshot -- presumably copied from Goods.price at purchase
    # time so later price changes don't rewrite history; TODO confirm.
    goods_price = Column(Integer)
    goods_cnt = Column(Integer)
    # NOTE(review): no ForeignKey back to orders.id -- line items cannot be
    # joined to their order as declared; confirm whether this is a bug.
class GoodsTracking(Base):
    """Delivery/tracking information for an order."""
    __tablename__ = 'goods_tracking'
    id = Column(Integer, primary_key=True)
    order_id = Column(Integer, ForeignKey('orders.id'))
    delivery_start_date = Column(DateTime)
    delivery_end_date = Column(DateTime)
    # Carrier tracking number.
    tracking_number = Column(NVARCHAR(50))
    tracking_status = Column(NVARCHAR(30))
class Basket(Base):
    """A shopping-cart entry: one member, one product, and a quantity."""
    __tablename__ = 'basket'
    id = Column(Integer, primary_key=True)
    member = Column(Integer, ForeignKey('shop_member.id'))
    goods = Column(Integer, ForeignKey('goods.id'))
    # Convenience ORM relationship to the referenced Goods row.
    goods_item = relationship("Goods")
    goods_cnt = Column(Integer)
|
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Datetime,
Double,
Integer,
)
from rayml.pipelines import TimeSeriesFeaturizer
# Names of TimeSeriesFeaturizer private methods patched out in the tests below,
# so delay features and rolling transforms can be verified in isolation.
ROLLING_TRANSFORM_METHOD_NAME = "_compute_rolling_transforms"
DELAYED_FEATURES_METHOD_NAME = "_compute_delays"
@pytest.fixture
def delayed_features_data():
    """31 daily rows of a single ramp feature with a matching integer target."""
    dates = pd.date_range("2021-01-01", periods=31)
    X = pd.DataFrame({"feature": range(1, 32), "date": dates})
    y = pd.Series(range(1, 32))
    return X, y
def test_delayed_features_transformer_init():
    """Constructor arguments (plus defaults) are exposed via `parameters`."""
    featurizer = TimeSeriesFeaturizer(
        max_delay=4,
        delay_features=True,
        delay_target=False,
        time_index="date",
        random_seed=1,
    )
    expected = {
        "max_delay": 4,
        "delay_features": True,
        "delay_target": False,
        "gap": 0,
        "forecast_horizon": 1,
        "time_index": "date",
        "conf_level": 0.05,
        "rolling_window_size": 0.25,
    }
    assert featurizer.parameters == expected
@pytest.mark.parametrize("conf_level", [-0.05, 0, 1.2])
def test_delayed_features_init_raises_if_conf_level_not_in_range(conf_level):
    """Out-of-range conf_level values are rejected at construction time."""
    expected_message = "Parameter conf_level must be in range \\(0, 1\\]"
    with pytest.raises(ValueError, match=expected_message):
        TimeSeriesFeaturizer(conf_level=conf_level)
def test_delayed_features_init_raises_if_conf_level_None():
    """A None conf_level must be rejected with a ValueError."""
    expected_message = "Parameter conf_level cannot be None"
    with pytest.raises(ValueError, match=expected_message):
        TimeSeriesFeaturizer(conf_level=None)
def test_delayed_features_raises_if_time_index_None(delayed_features_data):
    """Fitting with time_index=None raises a '... cannot be None' ValueError."""
    X, y = delayed_features_data
    with pytest.raises(ValueError, match=" cannot be None"):
        featurizer = TimeSeriesFeaturizer(time_index=None)
        featurizer.fit_transform(X, y)
def encode_y_as_string(y):
    """Encode the target as zero-padded category strings.

    Returns ``(encoded_y, numeric_answer)`` where the numeric answer is the
    zero-based integer encoding the tests compare against.
    """
    categorical = y.astype("category")
    numeric_answer = categorical.astype(int) - 1
    encoded = categorical.map(lambda value: str(value).zfill(2))
    return encoded, numeric_answer
def encode_X_as_string(X):
    """Categorical-string-encode X.feature in place.

    Returns ``(X, X_answer)`` where X_answer is a copy holding the zero-based
    integer encoding. Values are zero-padded so the encoder sees categories in
    ascending order, which keeps the expected answers easy to write down.
    """
    X_answer = X.copy()
    X_answer.feature = X.feature.astype(int) - 1
    padded = X.feature.map(lambda value: str(value).zfill(2))
    X.feature = pd.Categorical(padded)
    return X, X_answer
def encode_X_y_as_strings(X, y, encode_X_as_str, encode_y_as_str):
    """Optionally string-encode X and/or y, returning the numeric answers too.

    Returns ``(X, X_answer, y, y_answer)``; when a flag is False the
    corresponding answer is simply the unmodified input.
    """
    X_answer, y_answer = X, y
    if encode_y_as_str:
        y, y_answer = encode_y_as_string(y)
    if encode_X_as_str:
        X, X_answer = encode_X_as_string(X)
    return X, X_answer, y, y_answer
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
@patch(
    f"rayml.pipelines.components.transformers.TimeSeriesFeaturizer.{ROLLING_TRANSFORM_METHOD_NAME}",
    return_value=pd.DataFrame(),
)
def test_delayed_feature_extractor_maxdelay3_forecasthorizon1_gap0(
    mock_roll, encode_X_as_str, encode_y_as_str, delayed_features_data
):
    """max_delay=3, forecast_horizon=1, gap=0 yields delays 1..4 (horizon + 0..max_delay)
    for the feature and the target; with delay_features=False only target delays
    remain. Rolling transforms are patched out so only delay columns compare.
    """
    X, y = delayed_features_data
    X, X_answer, y, y_answer = encode_X_y_as_strings(
        X, y, encode_X_as_str, encode_y_as_str
    )
    answer = pd.DataFrame(
        {
            "date": X["date"],
            "feature_delay_1": X_answer.feature.shift(1),
            "feature_delay_2": X_answer.feature.shift(2),
            "feature_delay_3": X_answer.feature.shift(3),
            "feature_delay_4": X_answer.feature.shift(4),
            "target_delay_1": y_answer.shift(1),
            "target_delay_2": y_answer.shift(2),
            "target_delay_3": y_answer.shift(3),
            "target_delay_4": y_answer.shift(4),
        }
    )
    assert_frame_equal(
        answer,
        TimeSeriesFeaturizer(
            max_delay=3, gap=0, forecast_horizon=1, conf_level=1.0, time_index="date"
        ).fit_transform(X=X, y=y),
    )
    # delay_features=False drops the feature delays but keeps target delays.
    answer_only_y = pd.DataFrame(
        {
            "date": X["date"],
            "target_delay_1": y_answer.shift(1),
            "target_delay_2": y_answer.shift(2),
            "target_delay_3": y_answer.shift(3),
            "target_delay_4": y_answer.shift(4),
        }
    )
    assert_frame_equal(
        answer_only_y,
        TimeSeriesFeaturizer(
            max_delay=3,
            gap=0,
            forecast_horizon=1,
            conf_level=1.0,
            time_index="date",
            delay_features=False,
        ).fit_transform(X=X, y=y),
    )
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
@patch(
    f"rayml.pipelines.TimeSeriesFeaturizer.{ROLLING_TRANSFORM_METHOD_NAME}",
    return_value=pd.DataFrame(),
)
def test_delayed_feature_extractor_maxdelay5_forecasthorizon1_gap0(
    mock_roll, encode_X_as_str, encode_y_as_str, delayed_features_data
):
    """max_delay=5, forecast_horizon=1, gap=0 yields delays 1..6 for the feature
    and the target; with delay_features=False only the target delays remain.
    """
    X, y = delayed_features_data
    X, X_answer, y, y_answer = encode_X_y_as_strings(
        X, y, encode_X_as_str, encode_y_as_str
    )
    answer = pd.DataFrame(
        {
            "date": X["date"],
            "feature_delay_1": X_answer.feature.shift(1),
            "feature_delay_2": X_answer.feature.shift(2),
            "feature_delay_3": X_answer.feature.shift(3),
            "feature_delay_4": X_answer.feature.shift(4),
            "feature_delay_5": X_answer.feature.shift(5),
            "feature_delay_6": X_answer.feature.shift(6),
            "target_delay_1": y_answer.shift(1),
            "target_delay_2": y_answer.shift(2),
            "target_delay_3": y_answer.shift(3),
            "target_delay_4": y_answer.shift(4),
            "target_delay_5": y_answer.shift(5),
            "target_delay_6": y_answer.shift(6),
        }
    )
    assert_frame_equal(
        answer,
        TimeSeriesFeaturizer(
            max_delay=5,
            gap=0,
            forecast_horizon=1,
            conf_level=1.0,
            time_index="date",
        ).fit_transform(X, y),
    )
    answer_only_y = pd.DataFrame(
        {
            "date": X["date"],
            "target_delay_1": y_answer.shift(1),
            "target_delay_2": y_answer.shift(2),
            "target_delay_3": y_answer.shift(3),
            "target_delay_4": y_answer.shift(4),
            "target_delay_5": y_answer.shift(5),
            "target_delay_6": y_answer.shift(6),
        }
    )
    assert_frame_equal(
        answer_only_y,
        TimeSeriesFeaturizer(
            max_delay=5,
            gap=0,
            forecast_horizon=1,
            conf_level=1.0,
            time_index="date",
            delay_features=False,
        ).fit_transform(X=X, y=y),
    )
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
@patch(
    f"rayml.pipelines.TimeSeriesFeaturizer.{ROLLING_TRANSFORM_METHOD_NAME}",
    return_value=pd.DataFrame(),
)
def test_delayed_feature_extractor_maxdelay3_forecasthorizon7_gap1(
    mock_roll, encode_X_as_str, encode_y_as_str, delayed_features_data
):
    """forecast_horizon=7 and gap=1 shift the delay window to 8..11
    (horizon + gap + 0..max_delay) for both the feature and the target.
    """
    X, y = delayed_features_data
    X, X_answer, y, y_answer = encode_X_y_as_strings(
        X, y, encode_X_as_str, encode_y_as_str
    )
    answer = pd.DataFrame(
        {
            "date": X["date"],
            "feature_delay_8": X_answer.feature.shift(8),
            "feature_delay_9": X_answer.feature.shift(9),
            "feature_delay_10": X_answer.feature.shift(10),
            "feature_delay_11": X_answer.feature.shift(11),
            "target_delay_8": y_answer.shift(8),
            "target_delay_9": y_answer.shift(9),
            "target_delay_10": y_answer.shift(10),
            "target_delay_11": y_answer.shift(11),
        }
    )
    assert_frame_equal(
        answer,
        TimeSeriesFeaturizer(
            max_delay=3, forecast_horizon=7, gap=1, conf_level=1.0, time_index="date"
        ).fit_transform(X, y),
    )
    answer_only_y = pd.DataFrame(
        {
            "date": X["date"],
            "target_delay_8": y_answer.shift(8),
            "target_delay_9": y_answer.shift(9),
            "target_delay_10": y_answer.shift(10),
            "target_delay_11": y_answer.shift(11),
        }
    )
    assert_frame_equal(
        answer_only_y,
        TimeSeriesFeaturizer(
            max_delay=3,
            forecast_horizon=7,
            gap=1,
            conf_level=1.0,
            time_index="date",
            delay_features=False,
        ).fit_transform(X=X, y=y),
    )
@patch(
    f"rayml.pipelines.TimeSeriesFeaturizer.{ROLLING_TRANSFORM_METHOD_NAME}",
    return_value=pd.DataFrame(),
)
def test_delayed_feature_extractor_numpy(mock_roll, delayed_features_data):
    """Numpy-array inputs are accepted; the time index is addressed by integer
    column position.

    NOTE(review): the expected frames contain only target delays, and the
    second fit_transform is fed the original pandas X with the numpy y --
    confirm both are intentional.
    """
    X, y = delayed_features_data
    X, X_answer, y, y_answer = encode_X_y_as_strings(X, y, False, False)
    X_np = X.values
    y_np = y.values
    # With numpy input, column 1 (the date column) is the time index.
    answer = pd.DataFrame(
        {
            1: X["date"],
            "target_delay_8": y_answer.shift(8),
            "target_delay_9": y_answer.shift(9),
            "target_delay_10": y_answer.shift(10),
            "target_delay_11": y_answer.shift(11),
        }
    )
    assert_frame_equal(
        answer,
        TimeSeriesFeaturizer(
            max_delay=3, forecast_horizon=7, gap=1, conf_level=1.0, time_index=1
        ).fit_transform(X_np, y_np),
    )
    answer_only_y = pd.DataFrame(
        {
            "date": X["date"],
            "target_delay_8": y_answer.shift(8),
            "target_delay_9": y_answer.shift(9),
            "target_delay_10": y_answer.shift(10),
            "target_delay_11": y_answer.shift(11),
        }
    )
    assert_frame_equal(
        answer_only_y,
        TimeSeriesFeaturizer(
            max_delay=3,
            forecast_horizon=7,
            gap=1,
            conf_level=1.0,
            time_index="date",
            delay_features=False,
        ).fit_transform(X=X, y=y_np),
    )
@pytest.mark.parametrize(
    "delay_features,delay_target", [(False, True), (True, False), (False, False)]
)
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
@patch(
    f"rayml.pipelines.TimeSeriesFeaturizer.{ROLLING_TRANSFORM_METHOD_NAME}",
    return_value=pd.DataFrame(),
)
def test_lagged_feature_extractor_delay_features_delay_target(
    mock_roll,
    encode_y_as_str,
    encode_X_as_str,
    delay_features,
    delay_target,
    delayed_features_data,
):
    """The delay_features / delay_target flags independently drop the feature
    and target delay columns; the answer starts from the full delay frame and
    drops the disabled groups.
    """
    X, y = delayed_features_data
    X, X_answer, y, y_answer = encode_X_y_as_strings(
        X, y, encode_X_as_str, encode_y_as_str
    )
    all_delays = pd.DataFrame(
        {
            "date": X["date"],
            "feature_delay_1": X_answer.feature.shift(1),
            "feature_delay_2": X_answer.feature.shift(2),
            "feature_delay_3": X_answer.feature.shift(3),
            "feature_delay_4": X_answer.feature.shift(4),
            "target_delay_1": y_answer.shift(1),
            "target_delay_2": y_answer.shift(2),
            "target_delay_3": y_answer.shift(3),
            "target_delay_4": y_answer.shift(4),
        }
    )
    if not delay_features:
        all_delays = all_delays.drop(
            columns=[c for c in all_delays.columns if "feature_" in c]
        )
    if not delay_target:
        all_delays = all_delays.drop(
            columns=[c for c in all_delays.columns if "target" in c]
        )
    transformer = TimeSeriesFeaturizer(
        max_delay=3,
        forecast_horizon=1,
        delay_features=delay_features,
        delay_target=delay_target,
        conf_level=1.0,
        time_index="date",
    )
    assert_frame_equal(all_delays, transformer.fit_transform(X, y))
@pytest.mark.parametrize(
    "delay_features,delay_target", [(False, True), (True, False), (False, False)]
)
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
@patch(
    f"rayml.pipelines.TimeSeriesFeaturizer.{ROLLING_TRANSFORM_METHOD_NAME}",
    return_value=pd.DataFrame(),
)
def test_lagged_feature_extractor_delay_target(
    mock_roll,
    encode_y_as_str,
    encode_X_as_str,
    delay_features,
    delay_target,
    delayed_features_data,
):
    """Same flag combinations as above, but the answer is built up additively:
    start from the date column and concat whichever delay groups are enabled.
    """
    X, y = delayed_features_data
    X, X_answer, y, y_answer = encode_X_y_as_strings(
        X, y, encode_X_as_str, encode_y_as_str
    )
    answer = pd.DataFrame({"date": X["date"]})
    if delay_features:
        delayed_features = pd.DataFrame(
            {
                "feature_delay_1": X_answer.feature.shift(1),
                "feature_delay_2": X_answer.feature.shift(2),
                "feature_delay_3": X_answer.feature.shift(3),
                "feature_delay_4": X_answer.feature.shift(4),
            }
        )
        answer = pd.concat([answer, delayed_features], axis=1)
    if delay_target:
        delayed_target = pd.DataFrame(
            {
                "target_delay_1": y_answer.shift(1),
                "target_delay_2": y_answer.shift(2),
                "target_delay_3": y_answer.shift(3),
                "target_delay_4": y_answer.shift(4),
            }
        )
        answer = pd.concat([answer, delayed_target], axis=1)
    transformer = TimeSeriesFeaturizer(
        max_delay=3,
        forecast_horizon=1,
        delay_features=delay_features,
        delay_target=delay_target,
        conf_level=1.0,
        time_index="date",
    )
    assert_frame_equal(answer, transformer.fit_transform(X, y))
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
@pytest.mark.parametrize("data_type", ["ww", "pd"])
def test_delay_feature_transformer_supports_custom_index(
    encode_X_as_str, encode_y_as_str, data_type, make_data_type, delayed_features_data
):
    """A non-default RangeIndex(50, 81) is preserved through fit_transform.

    Rolling means are computed only for numeric columns, so the expected
    rolling columns depend on which of X / y is string-encoded.
    """
    X, y = delayed_features_data
    X, X_answer, y, y_answer = encode_X_y_as_strings(
        X, y, encode_X_as_str, encode_y_as_str
    )
    X.index = pd.RangeIndex(50, 81)
    X_answer.index = pd.RangeIndex(50, 81)
    y.index = pd.RangeIndex(50, 81)
    y_answer.index = pd.RangeIndex(50, 81)
    answer = pd.DataFrame(
        {
            "date": X["date"],
            "feature_delay_7": X_answer.feature.shift(7),
            "feature_delay_8": X_answer.feature.shift(8),
            "feature_delay_9": X_answer.feature.shift(9),
            "feature_delay_10": X_answer.feature.shift(10),
            "target_delay_7": y_answer.shift(7),
            "target_delay_8": y_answer.shift(8),
            "target_delay_9": y_answer.shift(9),
            "target_delay_10": y_answer.shift(10),
        },
        index=pd.RangeIndex(50, 81),
    )
    # Both numeric: rolling means for feature and target.
    if not encode_y_as_str and not encode_X_as_str:
        rolling_features = pd.DataFrame(
            {
                "feature_rolling_mean": X_answer.feature.shift(7).rolling(4, 4).mean(),
                "target_rolling_mean": y_answer.shift(7).rolling(4, 4).mean(),
            },
            index=pd.RangeIndex(50, 81),
        )
        rolling_features_target_only = rolling_features
    # Target encoded as strings: only the feature keeps a rolling mean.
    elif encode_y_as_str and not encode_X_as_str:
        rolling_features = pd.DataFrame(
            {
                "feature_rolling_mean": X_answer.feature.shift(7).rolling(4, 4).mean(),
            },
            index=pd.RangeIndex(50, 81),
        )
        rolling_features_target_only = rolling_features
    # Feature encoded as strings: only the target keeps a rolling mean.
    elif not encode_y_as_str and encode_X_as_str:
        rolling_features = pd.DataFrame(
            {
                "target_rolling_mean": y_answer.shift(7).rolling(4, 4).mean(),
            },
            index=pd.RangeIndex(50, 81),
        )
        rolling_features_target_only = pd.DataFrame(
            {
                "target_rolling_mean": y_answer.shift(7).rolling(4, 4).mean(),
            },
            index=pd.RangeIndex(50, 81),
        )
    # Both encoded: no rolling columns at all.
    else:
        rolling_features = pd.DataFrame()
        rolling_features_target_only = pd.DataFrame()
    X = make_data_type(data_type, X)
    y = make_data_type(data_type, y)
    answer = ww.concat_columns([answer, rolling_features])
    assert_frame_equal(
        answer,
        TimeSeriesFeaturizer(
            max_delay=3,
            forecast_horizon=7,
            conf_level=1.0,
            rolling_window_size=1.0,
            time_index="date",
        ).fit_transform(X, y),
    )
    answer_only_y = pd.DataFrame(
        {
            "date": X["date"],
            "target_delay_7": y_answer.shift(7),
            "target_delay_8": y_answer.shift(8),
            "target_delay_9": y_answer.shift(9),
            "target_delay_10": y_answer.shift(10),
        },
        index=pd.RangeIndex(50, 81),
    )
    answer_only_y = ww.concat_columns([answer_only_y, rolling_features_target_only])
    assert_frame_equal(
        answer_only_y,
        TimeSeriesFeaturizer(
            max_delay=3,
            forecast_horizon=7,
            conf_level=1.0,
            time_index="date",
            rolling_window_size=1.0,
            delay_features=False,
        ).fit_transform(X=X, y=y),
    )
def test_delay_feature_transformer_multiple_categorical_columns(delayed_features_data):
    """Each categorical column gets its own integer-encoded delay columns;
    delays are 11..12 (forecast_horizon 9 + gap 2 + 0..max_delay 1).
    """
    X, y = delayed_features_data
    X, X_answer, y, y_answer = encode_X_y_as_strings(X, y, True, True)
    # A second categorical column whose categories already sort ascending.
    X["feature_2"] = pd.Categorical(["a"] * 10 + ["aa"] * 10 + ["aaa"] * 10 + ["aaaa"])
    X_answer["feature_2"] = pd.Series([0] * 10 + [1] * 10 + [2] * 10 + [3])
    answer = pd.DataFrame(
        {
            "date": X["date"],
            "feature_delay_11": X_answer.feature.shift(11),
            "feature_delay_12": X_answer.feature.shift(12),
            "feature_2_delay_11": X_answer.feature_2.shift(11),
            "feature_2_delay_12": X_answer.feature_2.shift(12),
            "target_delay_11": y_answer.shift(11),
            "target_delay_12": y_answer.shift(12),
        }
    )
    assert_frame_equal(
        answer,
        TimeSeriesFeaturizer(
            max_delay=1,
            forecast_horizon=9,
            gap=2,
            conf_level=1.0,
            time_index="date",
        ).fit_transform(X, y),
    )
def test_delay_feature_transformer_y_is_none(delayed_features_data):
    """With y=None only feature delays and the feature rolling mean are
    produced -- no target columns appear.
    """
    X, _ = delayed_features_data
    answer = pd.DataFrame(
        {
            "date": X["date"],
            "feature_delay_11": X.feature.shift(11),
            "feature_delay_12": X.feature.shift(12),
            "feature_rolling_mean": X.feature.shift(11).rolling(2, 2).mean(),
        }
    )
    assert_frame_equal(
        answer,
        TimeSeriesFeaturizer(
            max_delay=1,
            forecast_horizon=11,
            conf_level=1.0,
            rolling_window_size=1.0,
            time_index="date",
        ).fit_transform(X, y=None),
    )
def test_delayed_feature_transformer_does_not_modify_input_data(delayed_features_data):
    """fit_transform must leave the caller's DataFrame untouched."""
    X, _ = delayed_features_data
    snapshot = X.copy()
    featurizer = TimeSeriesFeaturizer(
        max_delay=1,
        forecast_horizon=11,
        conf_level=1.0,
        time_index="date",
    )
    _ = featurizer.fit_transform(X, y=None)
    assert_frame_equal(X, snapshot)
@pytest.mark.parametrize("rolling_window_size", [0.1, 0.2, 0.5, 0.75, 1.0])
@pytest.mark.parametrize("gap", [0, 1, 2])
@pytest.mark.parametrize("forecast_horizon", [1, 5, 10])
@pytest.mark.parametrize("max_delay", [1, 3, 9])
@patch(
    f"rayml.pipelines.TimeSeriesFeaturizer.{DELAYED_FEATURES_METHOD_NAME}",
)
def test_time_series_featurizer_rolling_mean(
    mock_delay,
    max_delay,
    forecast_horizon,
    rolling_window_size,
    gap,
    delayed_features_data,
):
    """Rolling means are shifted by forecast_horizon + gap and use a window of
    int(rolling_window_size * max_delay) + 1 with an equal minimum count.
    Delay computation is patched to pass X through unchanged.
    """
    X, y = delayed_features_data
    mock_delay.return_value = X
    output = TimeSeriesFeaturizer(
        max_delay=max_delay,
        forecast_horizon=forecast_horizon,
        gap=gap,
        rolling_window_size=rolling_window_size,
        time_index="date",
    ).fit_transform(X, y)
    size = int(rolling_window_size * max_delay)
    rolling_means = (
        X.feature.shift(forecast_horizon + gap).rolling(size + 1, size + 1).mean()
    )
    rolling_means_target = (
        y.shift(forecast_horizon + gap).rolling(size + 1, size + 1).mean()
    )
    expected = pd.DataFrame(
        {
            "date": X["date"],
            "feature_rolling_mean": rolling_means,
            "target_rolling_mean": rolling_means_target,
        }
    )
    assert_frame_equal(output, expected)
@pytest.mark.parametrize("rolling_window_size", [0.1, 0.2, 0.5, 0.75, 1.0])
@pytest.mark.parametrize("gap", [0, 1, 2])
@pytest.mark.parametrize("forecast_horizon", [1, 5, 10])
@pytest.mark.parametrize("max_delay", [1, 3, 9])
def test_time_series_featurizer_does_not_need_to_delay_to_compute_means(
    max_delay, forecast_horizon, gap, rolling_window_size, delayed_features_data
):
    """Rolling means are still produced when both delay flags are disabled --
    same expected values as the patched test above, but with no mocks.
    """
    X, y = delayed_features_data
    output = TimeSeriesFeaturizer(
        max_delay=max_delay,
        forecast_horizon=forecast_horizon,
        gap=gap,
        delay_features=False,
        delay_target=False,
        time_index="date",
        rolling_window_size=rolling_window_size,
    ).fit_transform(X, y)
    size = int(rolling_window_size * max_delay)
    rolling_means = (
        X.feature.shift(forecast_horizon + gap).rolling(size + 1, size + 1).mean()
    )
    rolling_means_target = (
        y.shift(forecast_horizon + gap).rolling(size + 1, size + 1).mean()
    )
    expected = pd.DataFrame(
        {
            "date": X["date"],
            "feature_rolling_mean": rolling_means,
            "target_rolling_mean": rolling_means_target,
        }
    )
    assert_frame_equal(output, expected)
@pytest.mark.parametrize("peaks", [[14, 21, 28], [32, 45], [18, 29, 56], [5, 8, 12]])
@pytest.mark.parametrize(
    "significant_lags",
    [[1, 2, 8, 14, 21], [1, 19, 20, 32], [14, 21, 28, 56, 18], [13, 25, 8]],
)
@patch(
    "rayml.pipelines.components.transformers.preprocessing.time_series_featurizer.find_peaks"
)
@patch(
    "rayml.pipelines.components.transformers.preprocessing.time_series_featurizer.acf"
)
@patch(
    f"rayml.pipelines.TimeSeriesFeaturizer.{ROLLING_TRANSFORM_METHOD_NAME}",
    return_value=pd.DataFrame(),
)
def test_delayed_feature_transformer_conf_level(
    mock_roll, mock_acf, mock_peaks, peaks, significant_lags
):
    """With conf_level < 1, only statistically significant lags (the ACF peaks
    plus significant lags below 10, capped at max_delay) become delay columns.
    acf and find_peaks are mocked so the significant set is fully controlled.
    """
    X = pd.DataFrame(
        {
            "feature": np.arange(10000),
            "date": pd.date_range("2021-01-01", periods=10000),
        }
    )
    y = pd.Series(np.arange(10000))
    def create_acf_return_value(y, significant_lags, peaks):
        """Create ci intervals such that significant_lags and peaks are significant."""
        acf_series = np.arange(len(y))
        ci = np.ones((len(y), 2))
        ci[:, 0] = -1
        ci[significant_lags + peaks, 0] = 0.1
        ci[significant_lags + peaks, 1] = 0.3
        return acf_series, ci
    mock_acf.return_value = create_acf_return_value(y, significant_lags, peaks)
    mock_peaks.return_value = peaks, None
    MAX_DELAY = 50
    FORECAST_HORIZON = 10
    # Although the conf_level is hard-coded we mock the return values of
    # find_peaks and acf so that we simulate different significant lags.
    dft = TimeSeriesFeaturizer(
        max_delay=MAX_DELAY,
        forecast_horizon=FORECAST_HORIZON,
        conf_level=0.05,
        gap=0,
        time_index="date",
    )
    new_X = dft.fit_transform(X, y)
    # Expected lags: peaks plus any significant lag below 10, clipped to max_delay.
    first_significant_10 = [l for l in significant_lags if l < 10]
    expected_lags = set(peaks).union(first_significant_10)
    expected_lags = sorted(expected_lags.intersection(np.arange(MAX_DELAY + 1)))
    answer = pd.DataFrame({"date": X["date"]})
    answer = answer.assign(
        **{
            f"feature_delay_{t + FORECAST_HORIZON}": X["feature"].shift(
                t + FORECAST_HORIZON
            )
            for t in expected_lags
        }
    )
    answer = answer.assign(
        **{
            f"target_delay_{t + FORECAST_HORIZON}": y.shift(t + FORECAST_HORIZON)
            for t in expected_lags
        }
    )
    # Sort columns in alphabetical order
    answer = answer.sort_index(axis=1)
    assert_frame_equal(new_X, answer)
@patch(
    "rayml.pipelines.components.transformers.preprocessing.time_series_featurizer.find_peaks"
)
@patch(
    "rayml.pipelines.components.transformers.preprocessing.time_series_featurizer.acf"
)
@patch(
    f"rayml.pipelines.TimeSeriesFeaturizer.{ROLLING_TRANSFORM_METHOD_NAME}",
    return_value=pd.DataFrame(),
)
def test_delayed_feature_transformer_selects_first_lag_if_none_significant(
    mock_roll,
    mock_acf,
    mock_peaks,
):
    """When the mocked ACF reports no significant lags and no peaks, the
    featurizer falls back to delaying by lag 1 (plus the forecast horizon).
    """
    X = pd.DataFrame(
        {
            "feature": np.arange(10000),
            "date": pd.date_range("2021-01-01", periods=10000),
        }
    )
    y = pd.Series(np.arange(10000))
    # Confidence intervals that make every lag insignificant.
    acf_series = np.arange(len(y))
    ci = np.ones((len(y), 2))
    ci[:, 0] = -1
    mock_acf.return_value = acf_series, ci
    mock_peaks.return_value = [], None
    MAX_DELAY = 50
    FORECAST_HORIZON = 10
    dft = TimeSeriesFeaturizer(
        max_delay=MAX_DELAY,
        forecast_horizon=FORECAST_HORIZON,
        conf_level=0.1,
        gap=0,
        time_index="date",
    )
    new_X = dft.fit_transform(X, y)
    answer = pd.DataFrame(
        {
            "date": X["date"],
            f"feature_delay_{1 + FORECAST_HORIZON}": X["feature"].shift(
                1 + FORECAST_HORIZON
            ),
            f"target_delay_{1 + FORECAST_HORIZON}": y.shift(1 + FORECAST_HORIZON),
        }
    )
    assert_frame_equal(new_X, answer)
@pytest.mark.parametrize(
    "X_df",
    [
        pd.DataFrame(pd.Series([0, 0, 3, 1] * 5, dtype="int64")),
        pd.DataFrame(pd.Series([0, 0, 3.0, 2] * 5, dtype="float")),
        pd.DataFrame(pd.Series(["a", "b", "a"] * 5, dtype="category")),
    ],
)
@pytest.mark.parametrize("fit_transform", [True, False])
@patch(
    f"rayml.pipelines.TimeSeriesFeaturizer.{ROLLING_TRANSFORM_METHOD_NAME}",
    return_value=pd.DataFrame(),
)
def test_delay_feature_transformer_woodwork_custom_overrides_returned_by_components(
    mock_roll, X_df, fit_transform
):
    """Delayed features are typed Double regardless of the input logical type.

    Fix: the original if/elif/else asserted the exact same expected mapping in
    all three branches, so the branching was dead weight; a single assertion
    covers every override type.
    """
    y = pd.Series(range(X_df.shape[0]))
    override_types = [Integer, Double, Categorical, Boolean]
    for logical_type in override_types:
        try:
            X = X_df.copy()
            X["date"] = pd.date_range("2021-01-01", periods=X.shape[0])
            X.ww.init(logical_types={0: logical_type})
        except (ww.exceptions.TypeConversionError, ValueError):
            # This logical type cannot represent the column; skip it.
            continue
        if X.loc[:, 0].isna().all():
            # Casting the fourth and fifth dataframes to datetime will produce all NaNs
            continue
        dft = TimeSeriesFeaturizer(
            max_delay=1, forecast_horizon=1, conf_level=1.0, time_index="date"
        )
        if fit_transform:
            transformed = dft.fit_transform(X, y)
        else:
            dft.fit(X, y)
            transformed = dft.transform(X, y)
        assert isinstance(transformed, pd.DataFrame)
        transformed_logical_types = {
            k: type(v) for k, v in transformed.ww.logical_types.items()
        }
        assert transformed_logical_types == {
            "date": Datetime,
            "0_delay_1": Double,
            "0_delay_2": Double,
            "target_delay_1": Double,
            "target_delay_2": Double,
        }
|
# -*- coding: utf-8 -*-
"""
Module for functional test '22_delete' of gitcache.
Copyright:
2022 by Clemens Rabe <clemens.rabe@clemensrabe.de>
All rights reserved.
This file is part of gitcache (https://github.com/seeraven/gitcache)
and is released under the "BSD 3-Clause License". Please see the ``LICENSE`` file
that is included as part of this package.
"""
# -----------------------------------------------------------------------------
# Module Import
# -----------------------------------------------------------------------------
import logging
import os
from .helpers.test_base import TestBase
from .helpers.test_registry import functional_test
# -----------------------------------------------------------------------------
# Logger
# -----------------------------------------------------------------------------
LOG = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Functional Test
# -----------------------------------------------------------------------------
@functional_test("22_delete")
# pylint: disable=too-few-public-methods
class DeleteTest(TestBase):
    """Test the delete mirror support."""
    def test_delete(self):
        """Test the 'git delete-mirror' command.

        Covers deletion by repository URL, by local mirror path, via the
        'gitcache -d' form (both URL and path), failure on an unknown
        URL/path, and deleting several mirrors in one invocation. After each
        successful delete the mirror's database entry must be gone
        (assert_db_field(..., None)).
        """
        # Initial clone
        repo = "https://github.com/seeraven/gitcache.git"
        checkout = os.path.join(self._workspace.workspace_path, "gitcache")
        self.assert_gitcache_ok(["git", "clone", repo, checkout])
        self.assert_db_field('mirror-updates', repo, 0)
        # Delete giving the URL
        self.assert_gitcache_ok(["git", "delete-mirror", repo])
        self.assert_db_field('mirror-updates', repo, None)
        # Delete giving the path
        checkout = os.path.join(self._workspace.workspace_path, "gitcache2")
        self.assert_gitcache_ok(["git", "clone", repo, checkout])
        self.assert_db_field('mirror-updates', repo, 0)
        mirror_dir = os.path.join(self._workspace.gitcache_dir_path, "mirrors",
                                  "github.com", "seeraven", "gitcache")
        self.assert_gitcache_ok(["git", "delete-mirror", mirror_dir])
        self.assert_db_field('mirror-updates', repo, None)
        # Delete using 'gitcache -d' command
        checkout = os.path.join(self._workspace.workspace_path, "gitcache3")
        self.assert_gitcache_ok(["git", "clone", repo, checkout])
        self.assert_db_field('mirror-updates', repo, 0)
        self.assert_gitcache_ok(["-d", repo])
        self.assert_db_field('mirror-updates', repo, None)
        # Delete using 'gitcache -d' command giving the path
        checkout = os.path.join(self._workspace.workspace_path, "gitcache4")
        self.assert_gitcache_ok(["git", "clone", repo, checkout])
        self.assert_db_field('mirror-updates', repo, 0)
        self.assert_gitcache_ok(["-d", mirror_dir])
        self.assert_db_field('mirror-updates', repo, None)
        # Delete using invalid URL (typo in the repository name)
        self.assert_gitcache_fails(["-d", "https://github.com/seeraven/gatcache.git"])
        # Delete using invalid path
        mirror_dir = os.path.join(self._workspace.gitcache_dir_path, "mirrors",
                                  "github.com", "seeraven", "gatcache")
        self.assert_gitcache_fails(["-d", mirror_dir])
        # Delete more than one mirror at once
        checkout = os.path.join(self._workspace.workspace_path, "gitcache5")
        self.assert_gitcache_ok(["git", "clone", repo, checkout])
        self.assert_db_field('mirror-updates', repo, 0)
        repo2 = "https://github.com/seeraven/submodule-example"
        checkout2 = os.path.join(self._workspace.workspace_path, "submodule-example")
        self.assert_gitcache_ok(["git", "clone", repo2, checkout2])
        self.assert_db_field('mirror-updates', repo2, 0)
        self.assert_gitcache_ok(["-d", repo, "-d", repo2])
        self.assert_db_field('mirror-updates', repo, None)
        self.assert_db_field('mirror-updates', repo2, None)
# -----------------------------------------------------------------------------
# EOF
# -----------------------------------------------------------------------------
|
import sys
import urllib2
from PyQt4 import QtCore, QtGui
class DownloadThread(QtCore.QThread):
    """Worker thread that fetches a URL's headers off the GUI thread."""

    # Emitted with a "<url>\n<headers>" string once the fetch completes.
    data_downloaded = QtCore.pyqtSignal(object)

    def __init__(self, url):
        QtCore.QThread.__init__(self)
        self.url = url

    def run(self):
        response_info = urllib2.urlopen(self.url).info()
        self.data_downloaded.emit('%s\n%s' % (self.url, response_info))
class MainWindow(QtGui.QWidget):
    """Window with a Start button that downloads several URLs in parallel."""
    def __init__(self):
        super(MainWindow, self).__init__()
        self.list_widget = QtGui.QListWidget()
        self.button = QtGui.QPushButton("Start")
        self.button.clicked.connect(self.start_download)
        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.button)
        layout.addWidget(self.list_widget)
        self.setLayout(layout)
    def start_download(self):
        """Spawn one DownloadThread per URL and connect its result signal."""
        urls = ['http://google.com', 'http://twitter.com', 'http://yandex.ru',
                'http://stackoverflow.com/', 'http://www.youtube.com/']
        # Keep references on self so the QThread objects are not
        # garbage-collected while still running (presumably the intent of
        # storing them -- TODO confirm).
        self.threads = []
        for url in urls:
            downloader = DownloadThread(url)
            downloader.data_downloaded.connect(self.on_data_ready)
            self.threads.append(downloader)
            downloader.start()
    def on_data_ready(self, data):
        """Slot run on the GUI thread with the downloaded header text."""
        print data
        self.list_widget.addItem(unicode(data))
# Script entry point: build the window and hand control to the Qt event loop.
if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    window = MainWindow()
    window.resize(640, 480)
    window.show()
    sys.exit(app.exec_())
|
##
# Copyright (c) 2007-2013 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from caldavclientlibrary.admin.xmlaccounts.directory import XMLDirectory
from xml.etree.ElementTree import XML
from StringIO import StringIO
from caldavclientlibrary.protocol.utils.xmlhelpers import BetterElementTree
from caldavclientlibrary.admin.xmlaccounts import recordtypes
from getpass import getpass
import getopt
class Command(object):
    """Base class for XML-accounts admin commands (Python 2).

    Sub-classes implement doCommand() and may override allRecordsAllowed().
    Shared state: self.path (accounts.xml location from -f), self.cmdname,
    self.description, and self.recordType (set from the first CLI argument).
    """
    def __init__(self, cmdname, description):
        self.path = None          # path to accounts.xml, set via -f option
        self.cmdname = cmdname
        self.description = description
        self.recordType = None    # resolved record type, set by getTypeArgument
    def usage(self):
        """Print the command-line usage text for this command."""
        if self.allRecordsAllowed():
            print """USAGE: %s [TYPE] [OPTIONS]
TYPE: One of "all", "users", "groups", "locations" or "resources". Also,
"a", "u", "g", "l" or "r" as shortcuts. Invalid or missing type is
treated as "all".
Options:
-f file path to accounts.xml
""" % (self.cmdname,)
        else:
            print """USAGE: %s TYPE [OPTIONS]
TYPE: One of "users", "groups", "locations" or "resources". Also,
"u", "g", "l" or "r" as shortcuts.
Options:
-f file path to accounts.xml
""" % (self.cmdname,)
    def allRecordsAllowed(self):
        """
        Indicates whether a command is able to operate on all record types in addition to
        individual record types. Sub-classes should override this if they can handle all
        records in one go.
        """
        return False
    def execute(self, argv):
        """
        Execute the command specified by the command line arguments.
        @param argv: command line arguments.
        @type argv: C{list}
        @return: 1 for success, 0 for failure.
        @rtype: C{int}
        """
        # Check first argument for type
        argv = self.getTypeArgument(argv)
        if argv is None:
            return 0
        opts, args = getopt.getopt(argv, 'f:h', ["help", ])
        for name, value in opts:
            if name == "-f":
                self.path = value
            elif name in ("-h", "--help"):
                self.usage()
                return 1
            else:
                print "Unknown option: %s." % (name,)
                self.usage()
                return 0
        if not self.path:
            print "Must specify a path."
            self.usage()
            return 0
        if args:
            print "Arguments not allowed."
            self.usage()
            return 0
        if not self.loadAccounts():
            return 0
        return self.doCommand()
    def getTypeArgument(self, argv):
        """
        Extract the user specified record type argument from the command line arguments.
        @param argv: command line arguments.
        @type argv: C{list}
        @return: the modified arguments (if a record type is found the corresponding argument is
            removed from the argv passed in).
        @rtype: C{list}
        """
        # Check first argument for type
        if len(argv) == 0:
            print "Must specify a record type."
            self.usage()
            return None
        # NOTE(review): `type` shadows the builtin of the same name here.
        type = argv[0]
        type = self.mapType(type)
        if not type and not self.allRecordsAllowed():
            print "Invalid type '%s'." % (argv[0],)
            self.usage()
            return None
        # Fall back to "all" when the first argument is not a type keyword.
        self.recordType = type if type else recordtypes.recordType_all
        if type:
            return argv[1:]
        else:
            return argv
    def mapType(self, type):
        """
        Map the specified user record type input to the actual record type identifier.
        @param type: user input from the command line.
        @type type: C{str}
        @return: identifier matching the user input, or C{None} if no match.
        @rtype: L{admin.xmlaccounts.recordtypes}
        """
        return {
            "users"    : recordtypes.recordType_users,
            "u"        : recordtypes.recordType_users,
            "groups"   : recordtypes.recordType_groups,
            "g"        : recordtypes.recordType_groups,
            "locations": recordtypes.recordType_locations,
            "l"        : recordtypes.recordType_locations,
            "resources": recordtypes.recordType_resources,
            "r"        : recordtypes.recordType_resources,
            "all"      : recordtypes.recordType_all,
            "a"        : recordtypes.recordType_all,
        }.get(type, None)
    def loadAccounts(self):
        """
        Load the entire directory from the XML file.
        """
        f = open(self.path, "r")
        # NOTE(review): this check is dead code -- open() raises IOError on
        # failure rather than returning a falsy object.
        if not f:
            print "Could not open file: %s" % (self.path,)
            return 0
        xmldata = f.read()
        f.close()
        self.directory = XMLDirectory()
        self.directory.parseXML(XML(xmldata))
        return 1
    def writeAccounts(self):
        """
        Write the entire directory to the XML file.
        """
        node = self.directory.writeXML()
        # NOTE(review): local name `os` shadows the os module inside this method.
        os = StringIO()
        xmldoc = BetterElementTree(node)
        xmldoc.writeUTF8(os)
        f = open(self.path, "w")
        # NOTE(review): dead check -- open() raises IOError on failure.
        if not f:
            print "Could not open file: %s for writing" % (self.path,)
            return 0
        f.write(os.getvalue())
        f.close()
        return 1
    def doCommand(self):
        """
        Run the command. Sub-classes must implement this.
        """
        raise NotImplementedError
    def promptPassword(self):
        """
        Prompt the user for a password, asking twice until both entries match.
        """
        while True:
            password = getpass("Password: ")
            temp = getpass("Password (again): ")
            if temp != password:
                print "Passwords do not match. Try again."
            else:
                return password
    def getMemberList(self, prompt, title, type):
        """
        Prompt the user for a list of members.

        Returns a list of (recordType, uid) tuples; only uids already present
        in the directory are accepted. An empty entry stops the loop.
        """
        results = []
        print prompt
        while True:
            memberType = raw_input("%s type [u/g/l/r or leave empty to stop adding %s]: " % (title, type,))
            if memberType in ("u", "g", "l", "r",):
                memberUid = raw_input("%s uid [leave empty to stop adding %s]: " % (title, type,))
                if memberUid:
                    # Verify that member type exists
                    recordType = self.mapType(memberType)
                    if self.directory.containsRecord(recordType, memberUid):
                        results.append((recordType, memberUid,))
                    else:
                        # NOTE(review): misplaced quote in this message ("'%s 'of
                        # type") -- runtime string left untouched in this pass.
                        print "Record uid: '%s 'of type: '%s' does not exist in the directory." % (memberUid, recordType,)
                else:
                    break
            elif memberType:
                print "Member type must be one of 'u' (users), 'g' (groups), 'l' (locations) or 'r' (resources)."
            else:
                break
        return results
|
"""Render a student-attendance heat map grid to an HTML page via colorexlib."""
from colorexlib.colorex import ColorExGrid

# Grid configuration: CSV data source, titles, and theme.
options = {
    'source': ['data\\attendance.csv', 'csv'],
    'title': 'Student Attendance',
    'subtitle': 'Attendance of students per month in 2018',
    'theme': 'default',
}

# Build the grid and write it out using the HTML template.
colorex_grid = ColorExGrid(options)
colorex_grid.to_html('attendance.html', 'templates\\template.html')
|
"""Zendown flavored Markdown. It extends Markdown with macros."""
from __future__ import annotations
import logging
import re
from typing import TYPE_CHECKING, List, NamedTuple, Optional
from mistletoe import block_token, span_token
from mistletoe.block_token import BlockToken, Document, Heading, HTMLBlock, Quote
from mistletoe.html_renderer import HTMLRenderer
from mistletoe.span_token import HTMLSpan, Image, InlineCode, Link, RawText, SpanToken
from zendown.macro import Context, Kind, MacroError
from zendown.tokens import Token, link, raw_text, strip_comments
if TYPE_CHECKING:
# pylint: disable=cyclic-import
from zendown.article import Article
def set_zfm_tokens():
    """Reset the mistletoe tokens to the ZFM token set.

    Mistletoe is designed to use renderers as context managers, adding tokens
    on entry and resetting them on exit. This doesn't work well for Zendown
    because parsing and rendering are decoupled. We need to parse before
    rendering, but we also might parse in the middle of rendering (in which
    case we don't want to reset tokens immediately afterwards!). The hacky
    solution is just to call this function before tokenizing to ensure the
    right tokens are there.
    """
    block_token.reset_tokens()
    span_token.reset_tokens()
    block_token.add_token(HTMLBlock)
    block_token.add_token(ExtendedHeading)
    block_token.add_token(BlockMacro)
    span_token.add_token(HTMLSpan)
    span_token.add_token(InlineMacro)
def parse_document(raw: str) -> Document:
    """Tokenize a raw ZFM string into a mistletoe Document (comments stripped)."""
    # Make sure the ZFM token set is registered before tokenizing.
    set_zfm_tokens()
    document = Document(raw)
    strip_comments(document)
    return document
def smartify(text: str) -> str:
    """Augment text with smart typography.

    Replaces dumb quotes with curly quotes, "..." with an ellipsis, and "--"
    with an em dash.
    """
    # A quote preceded by a word character or punctuation closes a quotation;
    # any remaining quote opens one. Doubles first, then singles.
    text = re.sub(r"([a-zA-Z0-9.,?!;:\'\"])\"", r"\1”", text)
    text = text.replace('"', "“")
    text = re.sub(r"([a-zA-Z0-9.,?!;:\'\"])'", r"\1’", text)
    text = text.replace("'", "‘")
    for dumb, smart in (("...", "…"), ("--", "—")):
        text = text.replace(dumb, smart)
    return text
class ExtendedHeading(Heading):
    """Heading token extended with ``{#id}`` attribute support.

    Example:

        # Some heading {#some-id}

    This heading would have an id of "some-id".
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.identifier = None
        if not self.children:
            return
        tail = self.children[-1]
        if not isinstance(tail, RawText):
            return
        # A trailing " {#identifier}" sets the id and is removed from the text.
        match = re.search(r" {#([^ }]+)}(?:$|\n)", tail.content)
        if match is None:
            return
        tail.content = tail.content[: match.start()]
        self.identifier = match.group(1)
class InlineMacro(SpanToken):
    """Inline macro token: ``@name`` or ``@name{argument}`` within a sentence.

    A preceding backslash escapes the syntax (the lookbehind in the pattern).
    """
    # Group 1: macro name (lowercase, must start with a letter).
    # Group 2: optional brace-delimited argument (None when absent).
    pattern = re.compile(r"(?<!\\)@([a-z]+[a-z0-9]*)(?:{([^}]*)}|\b)")
    # The matched text is not re-parsed as spans; the macro gets it raw.
    parse_inner = False
    def __init__(self, match):
        super().__init__(match)
        self.name = match.group(1)  # macro name without the leading '@'
        self.arg = match.group(2)   # raw argument text, or None
class BlockMacro(BlockToken):
    """Block macro token.

    Examples::

        @macroname
        @macroname{argument}
        @macroname:
        > This will be passed as children.
        @macroname{argument}:
        > This will be passed as children.
        @macroname
        > This is just a regular blockquote.

    It is invalid to invoke the macro with a colon without providing children
    (the blockquote). The renderer must check for this.
    """
    # Groups: 1 = macro name, 2 = optional {argument}, 3 = optional trailing colon.
    pattern = re.compile(r"^@([a-z]+[a-z0-9]*)(?:{([^}]*)})?(:)?$")
    # NOTE(review): these defaults are overwritten on the *class* by start()
    # and read back in read(); this relies on mistletoe invoking start()/read()
    # for the same token back-to-back -- TODO confirm against the mistletoe
    # version in use.
    name = ""
    arg = ""
    colon = ""
    def __init__(self, result):
        # `result` is the tuple produced by read() below.
        self.name, self.arg, self.colon, lines = result
        super().__init__(lines, block_token.tokenize)
    @classmethod
    def start(cls, line):
        """Return True if *line* opens a block macro; stash the match groups."""
        match = cls.pattern.match(line)
        if match is None:
            return False
        cls.name = match.group(1)
        cls.arg = match.group(2)
        cls.colon = match.group(3)
        return True
    @classmethod
    def read(cls, lines):
        """Consume the macro line and, after a colon, the blockquote lines."""
        next(lines)
        line_buffer = []
        if cls.colon:
            for line in lines:
                if not Quote.start(line):
                    break
                line_buffer.append(line)
        return cls.name, cls.arg, cls.colon, line_buffer
class RenderOptions(NamedTuple):
    """Options for rendering ZFM."""
    # Amount added to every heading level (result clamped to 1..6 by the renderer).
    shift_headings_by: int = 0
    # When True, images render to the empty string.
    exclude_images: bool = False
class ZFMRenderer(HTMLRenderer):
    """Renderer from ZFM to HTML.

    Registers the ZFM-specific tokens with the underlying mistletoe
    HTMLRenderer and dispatches macro tokens to the project's macros.
    """
    def __init__(self, ctx: Context, options: RenderOptions):
        super().__init__(ExtendedHeading, BlockMacro, InlineMacro)
        # Back-reference so macros can reach the renderer through the context.
        ctx.renderer = self
        self.ctx = ctx
        self.options = options
        # Project-level configuration toggles (read from the project config).
        self.inline_code_macro = ctx.project.cfg["inline_code_macro"]
        self.smart_typography = ctx.project.cfg["smart_typography"]
        self.image_links = ctx.project.cfg["image_links"]
        self.image_title_from_alt = ctx.project.cfg["image_title_from_alt"]
    def error(self, message: str) -> str:
        """Log an error and render it."""
        logging.error("%s: %s", self.ctx.article.path, message)
        return self.render_error(message)
    @staticmethod
    def render_error(message: str) -> str:
        """Render *message* as a bold red inline error marker."""
        return f'<span style="color: red; font-weight: bold">[{message}]</span>'
    def render(self, token: Token) -> str:
        """Render *token*, short-circuiting tokens tagged with a zfm_error."""
        error = getattr(token, "zfm_error", None)
        if error is not None:
            return self.render_error(error)
        return super().render(token)
    def run_macro(
        self, name: str, arg: str, block: Optional[List[BlockToken]], kind: Kind
    ) -> str:
        """Look up and invoke the project macro *name*; render errors inline."""
        macro = self.ctx.project.get_macro(name)
        if not macro:
            return self.error(f"{name}: undefined macro")
        if macro.kind is not kind:
            actual = macro.kind.name.lower()
            called = kind.name.lower()
            return self.error(f"{name}: {actual} macro invoked as {called} macro")
        try:
            return macro(self.ctx, arg, block)
        except MacroError as ex:
            return self.error(f"{name}: {ex}")
    def render_inline_macro(self, token: InlineMacro) -> str:
        """Render an ``@name{arg}`` span by invoking the inline macro."""
        return self.run_macro(token.name, token.arg, None, Kind.INLINE)
    def render_inline_code(self, token: InlineCode) -> str:
        """Optionally route `code` spans through the configured inline macro."""
        if self.inline_code_macro:
            text = token.children[0].content
            return self.run_macro(self.inline_code_macro, text, None, Kind.INLINE)
        return super().render_inline_code(token)
    def render_block_macro(self, token: BlockMacro) -> str:
        """Render a block macro, passing its blockquote children if present."""
        block = None
        if token.children:
            assert isinstance(token.children[0], Quote)
            block = token.children[0].children
        elif token.colon:
            # A colon promised a blockquote that never followed.
            return self.error(f"{token.name}: missing blockquote after colon")
        if token.name == "include":
            # "include" is special-cased: the included document was attached
            # to the token earlier (zfm_include) and is rendered in place.
            if token.children:
                return self.error("include: macro does not take blockquote")
            doc = token.zfm_include.doc
            return self.render_inner(doc)
        return self.run_macro(token.name, token.arg, block, Kind.BLOCK)
    def render_extended_heading(self, token: Heading) -> str:
        """Render a heading with its optional {#id}, shifted per options."""
        # TODO: Make this driven by the builder.
        if self.ctx.builder.name == "latex":
            template = "<h{level} id={id}>{inner}</h{level}>"
            identifier = f"{self.ctx.article.node.ref}:{token.identifier}"
        else:
            template = (
                '<a id="{id}" data-hs-anchor="true"></a><h{level}>{inner}</h{level}>'
            )
            identifier = token.identifier
        # Clamp the shifted level to the valid HTML heading range 1..6.
        level = max(1, min(6, token.level + self.options.shift_headings_by))
        inner = self.render_inner(token)
        return template.format(level=level, id=identifier, inner=inner)
    def render_raw_text(self, token: RawText) -> str:
        """Render plain text, applying smart typography when enabled."""
        if self.smart_typography:
            token.content = smartify(token.content)
        return super().render_raw_text(token)
    def render_link(self, token: Link) -> str:
        """Render a link, resolving inter-article links (zfm_interlink)."""
        interlink = getattr(token, "zfm_interlink", None)
        if interlink:
            token.target = self.ctx.builder.resolve_link(self.ctx, interlink)
            if not token.children:
                # Default the link text to the target article's title.
                token.children = [raw_text(interlink.article.title)]
        # Need noopener for TOC links to work in HubSpot. Otherwise it scrolls
        # past a bit. Good idea in general to use noopener.
        # TODO: Make this driven by the builder.
        template = '<a href="{target}"{title} rel="noopener">{inner}</a>'
        target = self.escape_url(token.target)
        if token.title:
            title = ' title="{}"'.format(self.escape_html(token.title))
        else:
            title = ""
        inner = self.render_inner(token)
        return template.format(target=target, title=title, inner=inner)
    def render_image(self, token: Image) -> str:
        """Render an image, resolving assets, optional links, and sizing."""
        if self.options.exclude_images:
            return ""
        # The processed flag guards against re-entering this method when the
        # image is wrapped in a link below (render_link re-renders the token).
        if not getattr(token, "zfm_image_processed", False):
            token.zfm_image_processed = True
            asset = getattr(token, "zfm_asset", None)
            if asset:
                token.src = self.ctx.builder.resolve_asset(self.ctx, asset)
            if self.image_links and not getattr(token, "zfm_nolink", False):
                return super().render_link(link(token.src, [token]))
        # TODO: Make this driven by the builder.
        size = ""
        width_height = getattr(token, "zfm_size", None)
        if width_height:
            width, height = width_height
            size_style = ""
            if width:
                size += f' width="{width}"'
                size_style += f"width:{width}px;"
            if height:
                size += f' height="{height}"'
                size_style += f"height:{height}px;"
            if size_style:
                size += f' style="{size_style}"'
        template = (
            '<img src="{src}" alt="{alt}"{title} class="hs-image-align-none"{size} />'
        )
        alt = self.render_to_plain(token)
        if token.title:
            title = ' title="{}"'.format(self.escape_html(token.title))
        elif self.image_title_from_alt:
            title = ' title="{}"'.format(alt)
        else:
            title = ""
        return template.format(src=token.src, alt=alt, title=title, size=size)
|
# Practice 01 - Python basics
# The program ask the user to enter a number
# then tell us if the number is pair or impair
def exo01():
    """Prompt repeatedly for an integer and say whether it is even (Pair)
    or odd (Impair). Ctrl+C or end-of-input exits the loop.
    """
    while True:
        try:
            number = int(input("Enter a number : "))
        except ValueError:
            # Not an integer: report and prompt again.
            print("\n!! Error : please enter an integer. !! \n")
        except (EOFError, KeyboardInterrupt):
            # User quit with Ctrl+C / Ctrl+Z (EOF).
            print("\nBYE BYE \n")
            break
        else:
            # Even numbers leave no remainder when divided by 2.
            parity = "Pair" if number % 2 == 0 else "Impair"
            print("\n" + str(number) + " >>> " + parity + " \n")
# Run the exercise only when executed directly (not on import).
if __name__ == "__main__":
    exo01()
|
#
# Copyright 2010-2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Provides source and sink blocks to interface with the UHD library.
Used to send and receive data between the Ettus Research, LLC product
line.
'''
########################################################################
# Prepare uhd swig module to make it more pythonic
########################################################################
def _prepare_uhd_swig():
    """Monkey-patch the raw uhd_swig bindings to make them more pythonic.

    Adds type aliases, float/str-compatible wrapper classes, kwargs-friendly
    constructors, and __str__/__repr__ handlers on the swig-generated types.
    Python 2 code (iteritems, has_key, __nonzero__).
    """
    try:
        import uhd_swig
    except ImportError:
        # Fall back to the in-tree build location relative to this package.
        import os
        dirname, filename = os.path.split(os.path.abspath(__file__))
        __path__.append(os.path.join(dirname, "..", "..", "swig"))
        import uhd_swig
    #some useful typedefs for the user
    setattr(uhd_swig, 'freq_range_t', uhd_swig.meta_range_t)
    setattr(uhd_swig, 'gain_range_t', uhd_swig.meta_range_t)
    #Make the python tune request object inherit from float
    #so that it can be passed in GRC as a frequency parameter.
    #The type checking in GRC will accept the tune request.
    #Also use kwargs to construct individual struct elements.
    class tune_request_t(uhd_swig.tune_request_t, float):
        def __new__(self, *args, **kwargs): return float.__new__(self)
        def __float__(self): return self.target_freq
        def __init__(self, *args, **kwargs):
            super(tune_request_t, self).__init__(*args)
            for key, val in kwargs.iteritems(): setattr(self, key, val)
    setattr(uhd_swig, 'tune_request_t', tune_request_t)
    #Make the python tune request object inherit from string
    #so that it can be passed in GRC as a string parameter.
    #The type checking in GRC will accept the device address.
    #Define the set/get item special methods for dict access.
    class device_addr_t(uhd_swig.device_addr_t, str):
        def __new__(self, *args): return str.__new__(self)
        def __getitem__(self, key): return self.get(key)
        def __setitem__(self, key, val): self.set(key, val)
        def __init__(self, *args, **kwargs):
            super(device_addr_t, self).__init__(*args)
            # Copying from another device_addr_t: replicate its key/values.
            if args and isinstance(args[0], device_addr_t):
                for key in args[0].keys(): self[key] = args[0][key]
    setattr(uhd_swig, 'device_addr_t', device_addr_t)
    #make the streamer args take **kwargs on init
    class stream_args_t(uhd_swig.stream_args_t):
        def __init__(self, *args, **kwargs):
            super(stream_args_t, self).__init__(*args)
            for key, val in kwargs.iteritems():
                #for some reason, I can't assign a list in the constructor
                #but what I can do is append the elements individually
                if key == 'channels':
                    for v in val: self.channels.append(v)
                elif key == 'args':
                    self.args = device_addr_t(val)
                else: setattr(self, key, val)
    setattr(uhd_swig, 'stream_args_t', stream_args_t)
    #handle general things on all uhd_swig attributes
    #Install the __str__ and __repr__ handlers if applicable
    #Create aliases for uhd swig attributes to avoid the "_t"
    for attr in dir(uhd_swig):
        myobj = getattr(uhd_swig, attr)
        if hasattr(myobj, 'to_string'): myobj.__repr__ = lambda o: o.to_string().strip()
        if hasattr(myobj, 'to_pp_string'): myobj.__str__ = lambda o: o.to_pp_string().strip()
        if hasattr(myobj, 'to_bool'): myobj.__nonzero__ = lambda o: o.to_bool()
        if hasattr(myobj, 'to_int'): myobj.__int__ = lambda o: o.to_int()
        if hasattr(myobj, 'to_real'): myobj.__float__ = lambda o: o.to_real()
        if attr.endswith('_t'): setattr(uhd_swig, attr[:-2], myobj)
    #make a new find devices that casts everything with the pythonized device_addr_t which has __str__
    def find_devices(*args, **kwargs):
        def to_pythonized_dev_addr(dev_addr):
            new_dev_addr = uhd_swig.device_addr_t()
            for key in dev_addr.keys(): new_dev_addr[key] = dev_addr.get(key)
            return new_dev_addr
        # NOTE(review): subscripting __builtins__ assumes it is the builtins
        # *dict*, which CPython provides in imported modules but not in
        # __main__ -- works here, but is an implementation detail.
        return __builtins__['map'](to_pythonized_dev_addr, uhd_swig.find_devices_raw(*args, **kwargs))
    setattr(uhd_swig, 'find_devices', find_devices)
    #Cast constructor args (FIXME swig handle overloads?)
    for attr in ('usrp_source', 'usrp_sink', 'amsg_source'):
        def constructor_factory(old_constructor):
            def constructor_interceptor(*args, **kwargs):
                args = list(args)
                kwargs = dict(kwargs)
                for index, key, cast in (
                    (0, 'device_addr', device_addr),
                    (1, 'io_type', io_type),
                ):
                    try:
                        if len(args) > index: args[index] = cast(args[index])
                        if kwargs.has_key(key): kwargs[key] = cast(kwargs[key])
                    except: pass
                #don't pass kwargs, it confuses swig, map into args list:
                for key in ('device_addr', 'stream_args', 'io_type', 'num_channels', 'msgq'):
                    if kwargs.has_key(key): args.append(kwargs[key])
                return old_constructor(*args)
            return constructor_interceptor
        setattr(uhd_swig, attr, constructor_factory(getattr(uhd_swig, attr)))
    #Aliases for deprecated constructors
    setattr(uhd_swig, 'single_usrp_source', uhd_swig.usrp_source)
    setattr(uhd_swig, 'single_usrp_sink', uhd_swig.usrp_sink)
    setattr(uhd_swig, 'multi_usrp_source', uhd_swig.usrp_source)
    setattr(uhd_swig, 'multi_usrp_sink', uhd_swig.usrp_sink)
########################################################################
# Initialize this module with the contents of uhd swig
########################################################################
# Pythonize the swig bindings, then re-export their contents at package level.
_prepare_uhd_swig()
from uhd_swig import *
|
#67 Faça um programa que leia um número inteiro e forneça a tabuada, utilizando WHILE.
def Main067():
    """Read integers and print each one's multiplication table (0..10).

    Loops until the user enters a value <= 0. Fixes: removed the dead
    ``a = b = 0`` initialization and the pointless ``b += 1`` inside the
    ``for`` loop (the loop rebinds ``b`` each iteration anyway).
    """
    while True:
        a = int(input('Quer ver a tabuada de que valor? '))
        if a <= 0:
            # Non-positive input ends the program.
            break
        for b in range(0, 11):
            print(f'{a} x {b} = {a*b:.0f}')
Main067() |
from .config import YamlConfig
def get_yaml_config() -> YamlConfig:
    """Load the shared YAML fixture used by all tests in this module."""
    config = YamlConfig()
    config.load('./test_resources/yaml_config.yaml')
    return config
def test_yaml_config_get_tasks_hello_description():
    """A scalar leaf is returned as-is."""
    config = get_yaml_config()
    description = config.get(['tasks', 'hello', 'description'])
    assert description == 'say hello world'
def test_yaml_config_get_tasks_hello_description_text():
    """Indexing into a scalar leaf must raise with a helpful message.

    Fix: the original put ``assert False`` inside the ``try``, so when no
    exception was raised the AssertionError itself was caught by the broad
    ``except Exception`` and misreported as a wrong-message failure. The
    "no exception" check now lives in the ``else:`` branch.
    """
    yaml_config = get_yaml_config()
    try:
        yaml_config.get(['tasks', 'hello', 'description', 'text'])
    except Exception as e:
        assert str(e) == "`say hello world` is neither list or dictionary"
    else:
        assert False, "Error Expected"
def test_yaml_config_get_tasks_hello_start():
    """The 'start' list in the fixture has exactly three elements."""
    config = get_yaml_config()
    start = config.get(['tasks', 'hello', 'start'])
    assert len(start) == 3
def test_yaml_config_get_tasks_hello_check():
    """Requesting a missing key must raise with a helpful message.

    Fix: moved the ``assert False`` out of the ``try`` into ``else:`` so a
    missing exception is reported as such instead of being swallowed by the
    broad ``except Exception`` and misreported as a wrong message.
    """
    yaml_config = get_yaml_config()
    try:
        yaml_config.get(['tasks', 'hello', 'check'])
    except Exception as e:
        assert str(e) == "`ordereddict([('description', 'say hello world'), ('start', ['bash', '-c', 'echo hello world'])])` has no key `check`"
    else:
        assert False, "Error Expected"
def test_yaml_config_get_tasks_hello_start_0():
    """Integer path components index into lists."""
    config = get_yaml_config()
    first_element = config.get(['tasks', 'hello', 'start', 0])
    assert first_element == 'bash'
def test_yaml_config_get_tasks_hello_start_3():
    """An out-of-range list index must raise with a helpful message.

    Fix: moved the ``assert False`` out of the ``try`` into ``else:`` so a
    missing exception is reported as such instead of being swallowed by the
    broad ``except Exception`` and misreported as a wrong message.
    """
    yaml_config = get_yaml_config()
    try:
        yaml_config.get(['tasks', 'hello', 'start', 3])
    except Exception as e:
        assert str(e) == "`['bash', '-c', 'echo hello world']` has no index `3`"
    else:
        assert False, "Error Expected"
import os
from collections import OrderedDict
from onegov.foundation import BaseTheme
from onegov.core.utils import module_path
# SCSS font-stack values; the trailing `!default;` lets themes override them.
HELVETICA = '"Helvetica Neue", Helvetica, Roboto, Arial, sans-serif !default;'
ARIAL = 'Arial, sans-serif !default;'
VERDANA = 'Verdana, Geneva, sans-serif !default;'
COURIER_NEW = '"Courier New", Courier, monospace !default;'  # monospace
# options editable by the user
user_options = {
    'primary-color': '#006fba',
    'font-family-sans-serif': HELVETICA
}
# Display label -> SCSS font stack for the built-in fonts.
default_font_families = {
    'Helvetica': HELVETICA,
    'Arial': ARIAL,
    'Verdana': VERDANA,
    'Courier New': COURIER_NEW,
}
class OrgTheme(BaseTheme):
    """Foundation-based theme for onegov.org with user-adjustable options."""
    name = 'onegov.org.foundation'
    _force_compile = False
    @property
    def default_options(self):
        """Return the default SCSS options; user options override defaults."""
        # OrderedDict: SCSS variable definition order matters for overrides.
        options = OrderedDict((
            # tile images
            ('tile-image-1', '"../static/homepage-images/tile-1-small.jpg"'),
            ('tile-image-2', '"../static/homepage-images/tile-2-small.jpg"'),
            ('tile-image-3', '"../static/homepage-images/tile-3-small.jpg"'),
            ('tile-image-4', '"../static/homepage-images/tile-4-small.jpg"'),
            ('tile-image-5', '"../static/homepage-images/tile-5-small.jpg"'),
            ('tile-image-6', '"../static/homepage-images/tile-6-small.jpg"'),
        ))
        options.update(user_options)
        return options
    @property
    def foundation_components(self):
        """Foundation components compiled into the theme stylesheet."""
        return (
            'grid',
            'accordion',
            'alert-boxes',
            'block-grid',
            'breadcrumbs',
            'button-groups',
            'buttons',
            'dropdown',
            'dropdown-buttons',
            'forms',
            'inline-lists',
            'labels',
            'orbit',
            'pagination',
            'panels',
            'progress-bars',
            'reveal',
            'side-nav',
            'switches',
            'split-buttons',
            'sub-nav',
            'tables',
            'thumbs',
            'tooltips',
            'top-bar',
            'type',
            'visibility',
        )
    @property
    def pre_imports(self):
        """SCSS files imported before Foundation (mods plus custom fonts)."""
        imports = [
            'foundation-mods',
        ]
        for font_family in self.additional_font_families:
            imports.append(font_family)
        return imports
    @property
    def post_imports(self):
        """SCSS files imported after Foundation."""
        return [
            'org',
            'chosen'
        ]
    @property
    def extra_search_paths(self):
        """Additional SCSS search paths (theme styles and font files)."""
        return [
            module_path('onegov.org.theme', 'styles'),
            self.font_search_path
        ]
    @property
    def font_search_path(self):
        """ Load fonts of the current theme folder and ignore fonts from
        parent applications if OrgTheme is inherited. """
        module = self.name.replace('foundation', 'theme')
        return module_path(module, 'fonts')
    @property
    def font_families(self):
        """Built-in font families merged with those found on disk."""
        families = default_font_families.copy()
        families.update(self.additional_font_families)
        return families
    @property
    def additional_font_families(self):
        """ Returns the filenames as they are to use as label in the settings
        as well as to construct the font-family string.
        Only sans-serif fonts are supported by now.
        """
        if not os.path.exists(self.font_search_path):
            return {}
        def fn(n):
            # NOTE(review): splits on *every* dot, so a filename with extra
            # dots (e.g. "My.Font.css") yields the wrong "extension" below;
            # os.path.splitext would be safer -- TODO confirm and fix.
            return n.split('.')
        return {
            fn(n)[0]: f'"{fn(n)[0]}", {HELVETICA}' for n in os.listdir(
                self.font_search_path) if fn(n)[1] in ('css', 'scss')
        }
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .get_cluster_node_pool import *
from .get_kubernetes_cluster import *
from .get_kubernetes_service_versions import *
from .get_registry import *
from .get_registry_scope_map import *
from .get_registry_token import *
from .group import *
from .kubernetes_cluster import *
from .kubernetes_cluster_node_pool import *
from .registry import *
from .registry_scope_map import *
from .registry_token import *
from .registry_webhook import *
from .registry_webook import *
from ._inputs import *
from . import outputs
|
'''
Created on Mar 28, 2019
@author: XBBNTNI
'''
#!/usr/bin/env python
import sys
import wx
import wx.stc as stc
# import wx.lib.scrolledpanel as scrolled
from wx.lib.wordwrap import wordwrap
from diffutil import FancySequenceMatcher
# Monospace face used for all three font roles and the line-number gutters.
FACE1 = FACE2 = FACE3 = "Dejavu Sans Mono"
FACE_SIZE = 10
# Non-zero drops into pdb when the differ hits an unexpected opcode.
DEBUG = 1
class DiffSTC(stc.StyledTextCtrl):
    """StyledTextCtrl with a plain GetValue/SetValue API.

    Also flushes the clipboard on destruction so copied text survives
    application exit.
    """
    def __init__(self, parent, ID):
        stc.StyledTextCtrl.__init__(self, parent, ID)
        self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)
    def OnDestroy(self, evt):
        # This is how the clipboard contents can be preserved after
        # the app has exited.
        wx.TheClipboard.Flush()
        evt.Skip()
    def GetValue(self):
        """TextCtrl-compatible accessor for the buffer contents."""
        return self.GetText()
    def SetValue(self, value):
        """TextCtrl-compatible setter for the buffer contents."""
        self.SetText(value)
class PyDiff(wx.Frame):
    """Two-pane diff viewer/editor.

    The left pane shows `fromfile` (read-only, "theirs"); the right pane
    shows `tofile` (editable, "mine") and can be saved back to disk.
    Differences are colour-coded via Scintilla styles and are navigable
    with toolbar buttons.
    """
    # Scintilla style ids used for diff colouring.
    REPLACE_STYLE = 5
    INSERT_STYLE = 6
    DELETE_STYLE = 6  # NOTE(review): identical to INSERT_STYLE -- confirm this is intentional
    BLANK_STYLE = 8
    INTRA_STYLE = 21
    def __init__(self, parent, title, fromfile, tofile, fromtext, totext):
        """Build the frame; fromtext/totext override file contents when given."""
        wx.Frame.__init__(self, parent, title=title, size=(500, 500),style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE)
        #initialize settings
        self.modify = False        # True once the right pane was edited
        self.activeLine = None     # currently selected diff line (int) or None
        self.initDiff(fromfile, tofile, fromtext, totext)
        #create GUI
        self.createMenu()
        self.createSplitters()
        self.createToolbar()
        self.sb = self.CreateStatusBar()
        self.sb.SetStatusText("Press F1 for help")
        #bind some application events
        self.Bind(wx.EVT_CLOSE, self.QuitApplication)
        #self.Bind(wx.EVT_SIZE, self.OnResize)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        #display
        self.Center()
        self.Show(True)
        self.Maximize()
        self.rightSWindow.Scroll(0,0)
        self.leftSWindow.Scroll(0,0)
    def initDiff(self, fromfile, tofile, fromtext=None, totext=None):
        """Load both sides into self.diffTexts as (fromlines, tolines)."""
        self.leftFileName = fromfile
        self.rightFileName = tofile
        if fromtext is None:
            # 'U' (universal newlines) mode is removed in Python 3.11+ -- TODO confirm target runtime
            fromlines = open(fromfile, 'U').readlines()
        else:
            fromlines = fromtext.splitlines(1)
        if totext is None:
            tolines = open(tofile, 'U').readlines()
        else:
            tolines = totext.splitlines(1)
        self.diffTexts = fromlines,tolines
    def OnWheel(self, event):
        """Scroll both panes together on mouse wheel."""
        pos = self.rightSWindow.GetScrollPos(0)
        if event.GetWheelRotation() > 0:
            self.rightSWindow.Scroll(0,pos-1)
            self.leftSWindow.Scroll(0,pos-1)
        else:
            self.rightSWindow.Scroll(0,pos+1)
            self.leftSWindow.Scroll(0,pos+1)
    def createSplitters(self):
        """Create the vertical splitter holding the two text panels."""
        # Create the splitter window.
        splitter = wx.SplitterWindow(self)
        splitter.SetMinimumPaneSize(1)
        font = wx.Font(16, wx.SWISS, wx.NORMAL, wx.NORMAL)
        if sys.platform == "darwin":
            fontLines = wx.Font(FACE_SIZE-1, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False, FACE3)
        else:
            fontLines = wx.Font(FACE_SIZE, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False, FACE3)
        # Nested helper: builds one scrollable panel (file label + line-number
        # gutter + styled text view).  Note it takes `self` explicitly and is
        # called as createTextPanel(self, ...) below.
        def createTextPanel(self, splitter, scrollCallback, filename):
            swindow = wx.ScrolledWindow(splitter)
            swindow.SetScrollbars(20,20,55,40)
            swindow.Scroll(0,0)
            swindow.Bind(wx.EVT_SCROLLWIN, scrollCallback)
            vbox = wx.BoxSizer(wx.VERTICAL)
            hbox = wx.BoxSizer(wx.HORIZONTAL)
            swindow.SetSizer(vbox)
            lbl = wx.StaticText(swindow, -1, filename, (-1, -1))
            linesLbl = wx.StaticText(swindow, -1, "1", (-1, -1), style=wx.ALIGN_RIGHT)
            linesLbl.SetFont(fontLines)
            lbl.SetFont(font)
            vbox.Add(lbl, 0, flag=wx.CENTER)
            view = DiffSTC(swindow, -1)
            vbox.Add(hbox, 1, flag=wx.EXPAND)
            hbox.Add(linesLbl, 0)
            hbox.Add(view, 1, flag=wx.EXPAND)
            return lbl, linesLbl, view, swindow
        self.rightLbl, self.rightLinesLbl, self.rightView, self.rightSWindow = createTextPanel(self, splitter, self.OnScrollRight, self.rightFileName)
        self.leftLbl, self.leftLinesLbl, self.leftView, self.leftSWindow = createTextPanel(self, splitter, self.OnScrollLeft, self.leftFileName)
        #create text
        self.populateText()
        self.rightViewOrig = self.rightView.GetValue()
        splitter.SplitVertically(self.leftSWindow, self.rightSWindow)
        splitter.SetSashPosition(250, True)
        self.splitter = splitter
        self.leftView.Bind(wx.EVT_SCROLLWIN, self.OnScrollLeft)
        self.last_left_pos = self.leftView.GetScrollPos(1)
        self.last_right_pos = self.rightView.GetScrollPos(1)
        # Hide per-view scrollbars; the outer ScrolledWindows drive scrolling.
        self.leftView.SetUseVerticalScrollBar(False)
        self.rightView.SetUseVerticalScrollBar(False)
        self.leftView.Bind(wx.EVT_MOUSEWHEEL, self.OnWheel)
        self.rightView.Bind(wx.EVT_MOUSEWHEEL, self.OnWheel)
        self.leftView.SetReadOnly(True)
        self.leftView.Bind(wx.EVT_LEFT_UP, self.OnMouseLeft)
        self.rightView.Bind(wx.EVT_LEFT_UP, self.OnMouseRight)
        self.rightView.Bind(wx.EVT_KEY_UP, self.OnKey)
        self.rightView.Bind(wx.EVT_KEY_DOWN, self.OnKey)
    def OnMouseLeft(self, event):
        """Select the clicked left-pane line as the active diff line."""
        curpos = self.leftView.GetCurrentPos()
        self.setActiveLine(self.leftView.LineFromPosition(curpos), noMove = True)
        event.Skip()
    def OnMouseRight(self, event):
        """Select the clicked right-pane line as the active diff line."""
        curpos = self.rightView.GetCurrentPos()
        self.setActiveLine(self.rightView.LineFromPosition(curpos), noMove = True)
        event.Skip()
    def OnKey(self, event):
        """Mark the buffer dirty once the right pane diverges from the original."""
        if self.rightView.GetValue() != self.rightViewOrig:
            self.modify = True
            self.rightLbl.SetLabel("* " + self.rightFileName)
        event.Skip()
    def OnScrollLeft(self, event):
        """Mirror left-pane scrolling onto the right pane."""
        pos = event.GetPosition()
        #pos = self.leftSWindow.GetScrollPos(1)
        self.rightSWindow.Scroll(0,pos)
        event.Skip()
    def OnScrollRight(self, event):
        """Mirror right-pane scrolling onto the left pane."""
        pos = event.GetPosition()
        self.leftSWindow.Scroll(0,pos)
        event.Skip()
    def createMenu(self):
        """Build the menu bar and bind File/Help actions."""
        # Set up menu bar for the program.
        self.mainmenu = wx.MenuBar() # Create menu bar.
        menuNames = "File Edit Navigate View Help".split()
        menus = {}
        for menuName in menuNames:
            menu = wx.Menu()
            self.mainmenu.Append(menu, menuName) # Add the project menu to the menu bar.
            menus[menuName] = menu
        menu = menus["File"]
        # NOTE(review): several items below reuse wx.ID_NEW / wx.ID_EXIT for
        # unrelated actions (Save, Save As, Reload, Quit); duplicate ids can
        # confuse event routing -- consider one wx.NewId() per item.
        item = menu.Append(wx.ID_OPEN, '&Open\tCtrl+O', '') # Append a new menu
        item = menu.Append(wx.ID_NEW, '&Save\tCtrl+S', '')
        self.Bind(wx.EVT_MENU, self.OnSave, item) # Create and assign a menu event.
        item = menu.Append(wx.ID_EXIT, 'Save As\tCtrl+Shift+S', '')
        menu.AppendSeparator()
        item = menu.Append(wx.ID_EXIT, 'Reload', '')
        menu.AppendSeparator()
        item = menu.Append(wx.ID_EXIT, '&Quit\tCtrl+Q', '')
        self.Bind(wx.EVT_MENU, self.QuitApplication, item) # Create and assign a menu event.
        menu = menus["Help"]
        item = menu.Append(-1, 'About wxPyDiff', '')
        self.Bind(wx.EVT_MENU, self.OnInfo, item) # Create and assign a menu event.
        # Attach the menu bar to the window.
        self.SetMenuBar(self.mainmenu)
    def OnInfo(self, event):
        """Show the About dialog."""
        info = wx.AboutDialogInfo()
        info.Name = "wxPyDiff"
        info.Version = "0.1a"
        info.Copyright = "(C) 2009 Fred Lionetti"
        info.Description = wordwrap(
            "A simple cross-platform diff utility made from wxPython and difflib.",
            350, wx.ClientDC(self))
        info.WebSite = ("http://code.google.com/p/wxpydiff/", "wxPyDiff home page")
        info.Developers = [ "Fred Lionetti" ]
        info.License = wordwrap("LGPL", 500, wx.ClientDC(self))
        # Then we call wx.AboutBox giving it that info object
        wx.AboutBox(info)
    def createToolbar(self):
        """Build the toolbar and bind its commands."""
        TBFLAGS = ( wx.TB_HORIZONTAL| wx.NO_BORDER| wx.TB_FLAT
                    #| wx.TB_TEXT
                    #| wx.TB_HORZ_LAYOUT
                    )
        tb = self.CreateToolBar( TBFLAGS )
        tsize = (16,16)
        bmp = wx.ArtProvider.GetBitmap
        tb.SetToolBitmapSize(tsize)
        # [label, stock art id, long help, handler]; several buttons still
        # point at the OnOpen placeholder.
        buttons = [
            ["Open", wx.ART_FILE_OPEN, "Open file", self.OnOpen],
            ["Save", wx.ART_FILE_SAVE, "Save file", self.OnSave],
            ["Reload", wx.ART_EXECUTABLE_FILE, "Reload files", self.OnOpen],
            ["Undo", wx.ART_UNDO, "Undo last change", self.OnOpen],
            ["Previous Difference", wx.ART_GO_UP, "Go to previous difference", self.OnUp],
            ["Next Difference", wx.ART_GO_DOWN, "Go to next difference", self.OnDown],
            ["Use theirs", wx.ART_GO_FORWARD, "Use theirs for current text block", self.OnUseTheirs],
            #["Use mine", wx.ART_GO_BACK, "Use mine for current text block", self.OnOpen],
            ["Help", wx.ART_HELP, "Display help", self.OnOpen],
        ]
        for btn in buttons:
            name, art, help, cmd = btn
            id = wx.NewId()
            tb.AddLabelTool(id, name, bmp(art, wx.ART_TOOLBAR, tsize), shortHelp=name, longHelp=help)
            self.Bind(wx.EVT_TOOL, cmd, id=id)
        tb.Realize()
    def OnUseTheirs(self, event):
        """Replace the active line in the right pane with the left pane's line."""
        if self.activeLine == None:
            return
        self.leftView.GotoLine(self.activeLine)
        lineText = self.leftView.GetCurLine()[0]
        self.rightView.GotoLine(self.activeLine)
        self.rightView.LineDelete()
        self.rightView.InsertText(self.rightView.GetCurrentPos(), lineText)
    def OnUp(self, event):
        """Jump to the previous diff line (scans specialLines in reverse)."""
        if self.activeLine == None:
            self.activeLine = self.specialLines[0]
        # Reverse in place to scan backwards, restoring order before return.
        self.specialLines.reverse()
        for specialLine in self.specialLines:
            if specialLine < self.activeLine:
                self.setActiveLine(specialLine)
                self.specialLines.reverse()
                return
        self.specialLines.reverse()
    def OnDown(self, event):
        """Jump to the next diff line."""
        if self.activeLine == None:
            self.activeLine = self.specialLines[0]
        for specialLine in self.specialLines:
            if specialLine > self.activeLine:
                self.setActiveLine(specialLine)
                return
    def OnSave(self, event):
        """Write the right pane back to disk, skipping filler (blank-style) lines."""
        print("do save...")
        f = open(self.rightFileName, 'w')
        lastPos = self.rightView.GetLineEndPosition(self.rightView.GetLineCount())
        for i in range(lastPos):
            if self.rightView.GetStyleAt(i) != self.BLANK_STYLE:
                f.write(chr(self.rightView.GetCharAt(i)))
        f.close()
        print("do update...")
        self.doUpdate()
    def doUpdate(self):
        """Re-run the diff and refresh both panes after a save."""
        print("init diff...")
        # NOTE(review): initDiff() requires fromfile/tofile arguments; this
        # bare call raises TypeError -- likely a latent bug.
        self.initDiff()
        print("pop text...")
        self.populateText()
        print("done!")
    def populateText(self):
        """Run the diff and fill both panes with aligned, styled text.

        Both panes are padded with styled blank lines so they stay the
        same length line-for-line.
        """
        # set default windows end-of-line mode (\r\n)
        self.leftView.SetEOLMode(wx.stc.STC_EOL_CRLF)
        self.rightView.SetEOLMode(wx.stc.STC_EOL_CRLF)
        self.leftView.StyleSetSpec(stc.STC_STYLE_DEFAULT, "size:%d,face:%s" % (FACE_SIZE, FACE3))
        self.rightView.StyleSetSpec(stc.STC_STYLE_DEFAULT, "size:%d,face:%s" % (FACE_SIZE, FACE3))
        self.leftView.StyleClearAll()
        self.rightView.StyleClearAll()
        leftText = ""
        rightText = ""
        # NOTE(review): several of the accumulators below are never used
        # (pluses, minuses, blank_left, blank_right, lsublines, rsublines,
        # leftBlank, rightBlank, lastCode, subtractions, additions,
        # modifications, lineNum, modificationPos, lastSub, lastAdd, n).
        pluses = []
        minuses = []
        blank_left = []
        blank_right = []
        lsublines = []
        rsublines = []
        leftBlank = 0
        rightBlank = 0
        lastCode = ""
        subtractions = []
        additions = []
        modifications = []
        self.leftView.StyleSetSpec(self.REPLACE_STYLE, "face:%s,fore:#000000,back:#FFFF00,size:%d" % (FACE3, FACE_SIZE))
        self.rightView.StyleSetSpec(self.REPLACE_STYLE, "face:%s,fore:#000000,back:#FFFF00,size:%d" % (FACE3, FACE_SIZE))
        self.leftView.StyleSetSpec(self.DELETE_STYLE, "face:%s,fore:#000000,back:#FF0000,size:%d" % (FACE3, FACE_SIZE))
        self.rightView.StyleSetSpec(self.INSERT_STYLE, "face:%s,fore:#000000,back:#00FF00,size:%d" % (FACE3, FACE_SIZE))
        self.leftView.StyleSetSpec(self.BLANK_STYLE, "face:%s,italic,fore:#000000,back:#BBBBBB,size:%d" % (FACE3, FACE_SIZE))
        self.rightView.StyleSetSpec(self.BLANK_STYLE, "face:%s,italic,fore:#000000,back:#BBBBBB,size:%d" % (FACE3, FACE_SIZE))
        self.leftView.StyleSetSpec(self.INTRA_STYLE, "face:%s,fore:#000000,back:#FDD017,size:%d" % (FACE3, FACE_SIZE))
        self.rightView.StyleSetSpec(self.INTRA_STYLE, "face:%s,fore:#000000,back:#FDD017,size:%d" % (FACE3, FACE_SIZE))
        lineNum = 0
        additionPos = []
        subtractionPos = []
        modificationPos = []
        intraAdds = []
        intraSubs = []
        lastSub = None
        lastAdd = None
        blankLeft = []
        blankRight = []
        n = 1
        a, b = self.diffTexts
        seq = FancySequenceMatcher(None,a, b)
        groups = seq.get_opcodes()
        # Walk the opcodes; record [start, length, modified?] spans for each
        # style category as the aligned texts are built up.
        for tag, alo, ahi, blo, bhi in groups:
            if tag == "equal":
                for line in b[blo:bhi]:
                    leftText += line
                    rightText += line
            elif tag == "insert":
                for line in b[blo:bhi]:
                    start = len(leftText)
                    leftText += " \n"
                    end = len(leftText)
                    blankLeft.append([start,end-start])
                    start = len(rightText)
                    rightText += line
                    end = len(rightText)
                    additionPos.append([start,end-start, None])
            elif tag == "delete":
                for line in a[alo:ahi]:
                    start = len(leftText)
                    leftText += line
                    end = len(leftText)
                    subtractionPos.append([start,end-start, None])
                    start = len(rightText)
                    rightText += "\n"
                    end = len(rightText)
                    blankRight.append([start,end-start])
            elif tag == "replace":
                if len(a[alo:ahi]) != len(b[blo:bhi]):
                    if DEBUG: import pdb; pdb.set_trace()
                    raise RuntimeError("Replace blocks doesn't have equal line quantities")
                for linea, lineb in zip(a[alo:ahi], b[blo:bhi]):
                    starta = len(leftText)
                    leftText += linea
                    end = len(leftText)
                    subtractionPos.append([starta,end-starta, True])
                    startb = len(rightText)
                    rightText += lineb
                    end = len(rightText)
                    additionPos.append([startb,end-startb, True])
                    # Also highlight intra-line (sub-line) changes.
                    for ai, bj in seq._intraline_diffs(linea, lineb):
                        intraSubs.append([starta + ai[0], ai[1] - ai[0], True])
                        intraAdds.append([startb + bj[0], bj[1] - bj[0], True])
            else:
                if DEBUG: import pdb; pdb.set_trace()
                raise RuntimeError("Diff operation unknown: %s" % tag)
        def updateLinesNumbers(ed, lbl, greyStyle):
            # Rebuild the line-number gutter, leaving padding lines unnumbered.
            lines = ""
            i = 0
            for line in range(ed.GetLineCount()):
                if ed.GetStyleAt(ed.PositionFromLine(line)) != self.BLANK_STYLE:
                    i += 1
                    lines += "%d\n"%i
                    # TODO: use MARGINSETTEXT
                else:
                    lines += "\n"
            lbl.SetLabel(lines)
        # Nested helper: applies base/diff/blank/intra styles and margin
        # markers to one editor; takes `self` explicitly like createTextPanel.
        def setupStyle(self, ed, marker, markerColor, linesLbl, blankLines, diffList, intraDiffs):
            ed.StartStyling(0, 0xff)
            styleid = 20
            ed.StyleSetSpec(styleid, "face:%s,fore:#000000,back:#FFFFFF,size:%d" % (FACE3, FACE_SIZE))
            ed.SetStyling(ed.GetLength(), styleid)
            markerStyleId = 2
            markerStyleIdMod = 4
            ed.MarkerDefine(markerStyleId, marker, markerColor, markerColor)
            ed.MarkerDefine(markerStyleIdMod, wx.stc.STC_MARK_CHARACTER+ord("!"), "dark yellow", "light gray")
            #add diffs and red minus signs
            for pos in diffList:
                start, delta, modified = pos
                ed.StartStyling(start, 0xff)
                if not modified:
                    ed.SetStyling(delta-1, self.INSERT_STYLE)
                    ed.MarkerAdd(ed.LineFromPosition(start), markerStyleId)
                else:
                    ed.SetStyling(delta-1, self.REPLACE_STYLE)
                    ed.MarkerAdd(ed.LineFromPosition(start), markerStyleIdMod)
            #add grey blank lines
            for pos in blankLines:
                start, delta = pos
                ed.StartStyling(start, 0xff)
                ed.SetStyling(delta, self.BLANK_STYLE)
                # TODO: use AnnotationSetText(1, "ann\n")
            # add in-line
            for diffline in intraDiffs:
                start, delta, changed = diffline
                ed.StartStyling(start, 0xff)
                ed.SetStyling(delta, self.INTRA_STYLE)
            updateLinesNumbers(ed, linesLbl, self.BLANK_STYLE)
        self.leftView.SetValue(leftText)
        self.rightView.SetValue(rightText)
        setupStyle(self, self.leftView, stc.STC_MARK_MINUS, "red", self.leftLinesLbl, blankLeft, subtractionPos, intraSubs)
        setupStyle(self, self.rightView, stc.STC_MARK_PLUS, "blue", self.rightLinesLbl, blankRight, additionPos, intraAdds)
        self.calculateSpecialLines(subtractionPos, additionPos)
        self.leftView.EmptyUndoBuffer()
        self.rightView.EmptyUndoBuffer()
        self.arrowMarker = 3
        self.leftView.MarkerDefine(self.arrowMarker, stc.STC_MARK_ARROW, "black", "black")
        self.rightView.MarkerDefine(self.arrowMarker, stc.STC_MARK_ARROW, "black", "black")
        #self.setActiveLine(3)
    def calculateSpecialLines(self, subtractionPos, additionPos):
        """Collect the sorted, de-duplicated line numbers containing diffs."""
        specialLines = []
        specialPositions = [pos[0] for pos in subtractionPos]
        for line in specialPositions:
            specialLines.append(self.leftView.LineFromPosition(line))
        print("\n")
        specialPositions = [pos[0] for pos in additionPos]
        for line in specialPositions:
            specialLines.append(self.rightView.LineFromPosition(line))
        self.specialLines = list(set(specialLines))
        self.specialLines.sort()
    def setActiveLine(self, lineNum, noMove = False):
        """Move the arrow marker to lineNum; optionally scroll both panes to it."""
        if self.activeLine != None:
            self.leftView.MarkerDelete(self.activeLine, self.arrowMarker)
            self.rightView.MarkerDelete(self.activeLine, self.arrowMarker)
        self.leftView.MarkerAdd(lineNum, self.arrowMarker)
        self.rightView.MarkerAdd(lineNum, self.arrowMarker)
        self.activeLine = lineNum
        if not noMove:
            # Convert line number to scroll units via the page-size ratio.
            ratio = float(self.rightSWindow.GetScrollPageSize(1))/self.rightView.GetLineCount()
            self.rightSWindow.Scroll(0,self.activeLine*ratio)
            self.leftSWindow.Scroll(0,self.activeLine*ratio)
    def OnSize(self, event):
        """Keep the splitter sash centred when the frame is resized."""
        xsize, ysize = event.GetSize()
        self.splitter.SetSashPosition(int(xsize/2), True)
        event.Skip()
    def OnOpen(self, event):
        """Placeholder handler for Open/Reload/Undo/Help toolbar actions."""
        print("not yet implemented!")
    def QuitApplication(self, event):
        """Prompt to save unsaved changes, then close the frame."""
        if self.modify:
            dlg = wx.MessageDialog(self, 'Save before Exit?', '', wx.YES_NO | wx.YES_DEFAULT |
                                   wx.CANCEL | wx.ICON_QUESTION)
            val = dlg.ShowModal()
            if val == wx.ID_YES:
                # NOTE(review): no OnSaveFile method exists on this class;
                # this raises AttributeError -- OnSave was probably meant.
                self.OnSaveFile(event)
                if not self.modify:
                    wx.Exit()
            elif val == wx.ID_CANCEL:
                dlg.Destroy()
            else:
                self.Destroy()
        else:
            self.Destroy()
if __name__ == '__main__':
    # if len(sys.argv) > 2:
    #     fromfile = sys.argv[-2]
    #     tofile = sys.argv[-1]
    # else:
    # NOTE(review): hard-coded Windows paths; the commented argv handling
    # above appears to be the intended behaviour.
    fromfile = r"C:\1\sql_editor\src\view\other\diff\test1.txt"
    tofile = r"C:\1\sql_editor\src\view\other\diff\test2.txt"
    app = wx.App(0)
    frame = PyDiff(None, 'wxPyDiff', fromfile, tofile, None, None)
    app.MainLoop()
|
#!/usr/bin/python3
import glob
import os
import socket
import time
import urllib.request
from datetime import datetime

import RPi.GPIO as GPIO
from Adafruit_CharLCD import Adafruit_CharLCD

from config import api_key
# Initialize LCD (must specify pinout and dimensions)
lcd = Adafruit_CharLCD(rs=26, en=19, d4=13, d5=6, d6=5, d7=11, cols=16, lines=2)
# Load the 1-Wire kernel modules used by the DS18B20 temperature sensor.
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
# NOTE(review): glob(...)[0] raises IndexError when no sensor is attached.
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
def read_temp():
    """Read the DS18B20 sensor and return the temperature in Celsius.

    Returns:
        str: temperature formatted to two decimals, or None when the
        sensor output contains no 't=' field.
    """
    def _read_lines():
        # Each read returns the (typically two-line) w1_slave payload.
        with open(device_file, 'r') as f:
            return f.readlines()

    lines = _read_lines()
    # The first line ends in 'YES' when the CRC check passed.  Re-read
    # after a short pause until it does -- the original only re-checked
    # the stale buffer and could loop forever on a single bad read.
    while lines[0].strip()[-3:] != 'YES':
        time.sleep(0.2)
        lines = _read_lines()
    equals_pos = lines[1].find('t=')
    if equals_pos != -1:
        temp_string = lines[1][equals_pos+2:]
        # Raw value is in milli-degrees Celsius.
        return "{0:.2f}".format(float(temp_string)/1000)
# Main polling loop: read the sensor once per second, push the reading to
# ThingSpeak and echo it on the LCD.  The original placed try/finally
# INSIDE the loop, so GPIO.cleanup()/lcd.clear() ran on every iteration;
# cleanup now runs exactly once, when the loop exits.
try:
    while True:
        lcd.clear()
        temp = read_temp()
        # Upload the reading (urllib.request is imported at the top).
        thingspeak = urllib.request.urlopen("https://api.thingspeak.com/update?api_key={}&field1={}".format(api_key, temp))
        reading = 'Temp: {}{}C'.format(temp, chr(186))
        print(reading)
        lcd.message(reading)
        time.sleep(1)
except KeyboardInterrupt:
    print('CTRL-C pressed. Program exiting...')
finally:
    # Leave the display blank and release the GPIO pins on exit.
    lcd.clear()
    GPIO.cleanup()
|
# Canvas dimensions.
wdth=500
hght=500
# Three fixed reference points -- not used by draw(); presumably left over
# from an earlier version of the sketch.
x1=100
y1=100
x2=250
y2=350
x3=450
y3=100
# Outer/inner radii of the star polygon drawn in draw().
outsideRadius = 150
insideRadius = 100
# Random translucent fill colour chosen once at module load.
# NOTE(review): color()/random() are Processing builtins; this sketch only
# runs inside the Processing Python-mode runtime.
c=color(int(random(0,255)),int(random(0,255)),int(random(0,255)),50)
def setup():
    """Processing setup hook: size the canvas and paint the background.

    The original also computed centre coordinates into locals `x`/`y`
    and discarded them (there was no `global` statement); those dead
    assignments were removed.
    """
    size(wdth, hght)
    background(205)
def draw() :
    """Processing draw hook: render a star-shaped TRIANGLE_STRIP.

    The number of star points follows the mouse's x position (6..60).
    """
    # map() here is Processing's range-remap builtin, not Python's map().
    numPoints = int(map(mouseX, 0, width, 6, 60))
    angle = 109
    angleStep = 360.0 / numPoints
    beginShape(TRIANGLE_STRIP)
    for i in range(numPoints):
        x = width / 2
        y = height / 2
        # Alternate outer/inner vertices around the centre.
        px = x + cos(radians(angle)) * outsideRadius
        py = y + sin(radians(angle)) * outsideRadius
        angle += angleStep
        vertex(px, py)
        px = x + cos(radians(angle)) * insideRadius
        py = y + sin(radians(angle)) * insideRadius
        vertex(px, py)
        # NOTE(review): multiplying the angle by angleStep*100 makes the
        # step explode each iteration; `angle += angleStep` looks intended --
        # confirm whether the resulting visual effect is deliberate.
        angle *= angleStep*100
    endShape()
def mousePressed():
    """Processing mouse hook: request a redraw on click."""
    redraw()
|
'''
Just the setup.
'''
from setuptools import setup, find_packages
def readme():
    """Return the contents of README.rst for use as the long description."""
    with open('README.rst') as handle:
        return handle.read()
# Package details
setup(
    name='machine-learning-fortune',
    version='0.0.1',
    author='Will Ballard',
    author_email='wballard@mailframe.net',
    url='https://github.com/wballard/machine-learning-fortune',
    description='Make a `fortune` with machine learning!',
    long_description=readme(),
    license='BSD 3-Clause License',
    packages=find_packages(),
    scripts=['bin/machine-learning-fortune'],
    # Minimum versions pinned to the releases the package was developed on.
    install_requires=[
        'keras>=2.1.2',
        'tensorflow>=1.4.1',
        'numpy>=1.13.1',
        'tqdm',
        'docopt',
        'scikit-learn>=0.19.1'
    ],
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Scientific/Engineering :: Artificial Intelligence'
    ]
)
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserCreationForm, UserChangeForm, UsernameField
from django.utils.translation import ugettext_lazy as _
from .models import User
class AdminUserCreationForm(UserCreationForm):
    """
    Custom Create Form for user creation
    """
    # Only the email is collected on the admin "add user" page; the two
    # password fields come from UserCreationForm itself.
    class Meta(UserCreationForm.Meta):
        model = User
        fields = ("email",)
class AdminUserChangeForm(UserChangeForm):
    """Admin form for editing an existing User.

    Fix: Meta now extends UserChangeForm.Meta -- the original subclassed
    UserCreationForm.Meta, which silently dropped UserChangeForm's Meta
    attributes (e.g. its `field_classes` mapping such as UsernameField).
    """
    class Meta(UserChangeForm.Meta):
        model = User
        fields = '__all__'
        # field_classes = {'username': UsernameField}
@admin.register(User)
class CustomUserAdmin(UserAdmin):
    """Admin configuration for the custom email-based User model."""
    # Field layout for the change page.
    fieldsets = (
        (_("Basic Info"), {
            'classes': ('collapse', ),
            'fields': ('email', 'password','first_name','last_name','inviter')}),
        (
            _('Permissions'),
            {
                'fields': (
                    'is_active', 'is_staff','activated',
                    'is_superuser', 'groups',
                    'user_permissions')
            }
        ),
        (_('Important dates'), {'fields': ('created_by','created_date', 'updated_date','updated_by', 'last_login')}),
    )
    # Audit fields are system-managed and must not be editable.
    readonly_fields = ('created_date', 'updated_date', 'last_login','updated_by','created_by')
    # Field layout for the add page (email plus the two password fields).
    add_fieldsets = (
        (_("Basics"), {
            'classes': ('wide', ),
            'fields': ('email', 'password1', 'password2', ),
        }),
    )
    form = AdminUserChangeForm
    add_form = AdminUserCreationForm
    list_display = ('email', 'is_staff', 'is_active','first_name', 'last_name')
    ordering = ('email',)
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import os
import subprocess
import sys
# Ideally this path would be /var/lib/heat-cfntools/cfn-init-data
# but this is where all boot metadata is stored
# Where the last-applied metadata document is cached between runs.
LAST_METADATA_DIR = os.environ.get('HEAT_CFN_INIT_LAST_METADATA_DIR',
                                   '/var/cache/heat-cfntools')
# The cfn-init executable to invoke; overridable via env for testing.
CFN_INIT_CMD = os.environ.get('HEAT_CFN_INIT_CMD',
                              'cfn-init')
def main(argv=sys.argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
    """Read a heat-config JSON document from stdin, persist it as
    cfn-init metadata, run cfn-init, and write a deploy-result JSON
    document to stdout.
    """
    log = logging.getLogger('heat-config')
    handler = logging.StreamHandler(stderr)
    handler.setFormatter(logging.Formatter(
        '[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s'))
    log.addHandler(handler)
    log.setLevel('DEBUG')

    # The incoming document may carry the config inline as a dict or as a
    # JSON-encoded string; normalise to a dict either way.
    document = json.load(stdin)
    config = document.get('config', {})
    if not isinstance(config, dict):
        config = json.loads(config)
    metadata = {'AWS::CloudFormation::Init': config}

    if not os.path.isdir(LAST_METADATA_DIR):
        os.makedirs(LAST_METADATA_DIR, 0o700)
    metadata_path = os.path.join(LAST_METADATA_DIR, 'last_metadata')
    # Create with restrictive permissions -- the metadata may hold secrets.
    fd = os.open(metadata_path, os.O_CREAT | os.O_WRONLY | os.O_TRUNC, 0o700)
    with os.fdopen(fd, 'w') as metadata_file:
        json.dump(metadata, metadata_file)

    log.debug('Running %s' % CFN_INIT_CMD)
    proc = subprocess.Popen([CFN_INIT_CMD],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out_bytes, err_bytes = proc.communicate()
    if out_bytes:
        log.info(out_bytes)
    if err_bytes:
        log.info(err_bytes)
    if proc.returncode:
        log.error("Error running %s. [%s]\n" % (CFN_INIT_CMD, proc.returncode))
    else:
        log.info('Completed %s' % CFN_INIT_CMD)

    json.dump({
        'deploy_stdout': out_bytes.decode('utf-8', 'replace'),
        'deploy_stderr': err_bytes.decode('utf-8', 'replace'),
        'deploy_status_code': proc.returncode,
    }, stdout)
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
|
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib.lines import Line2D
import os
import itertools
import pf_dynamic_cart as pfc
import pf_dynamic_sph as pfs
import Grid
from scipy import interpolate
if __name__ == "__main__":
    # Analysis script: characterises the impurity momentum distribution
    # n(|P_I|) at long times across several impurity/boson mass ratios.
    # # Initialization
    # matplotlib.rcParams.update({'font.size': 12, 'text.usetex': True})
    # ---- INITIALIZE GRIDS ----
    (Lx, Ly, Lz) = (21, 21, 21)
    (dx, dy, dz) = (0.375, 0.375, 0.375)
    NGridPoints = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz)
    # NGridPoints_cart = 1.37e5
    # Impurity/boson mass ratios analysed below.
    massRat_Vals = [1, 2, 5, 10]
    toggleDict = {'Location': 'work'}
    # Map each mass ratio to the directory holding its simulation output.
    datapathDict = {}
    for mR in massRat_Vals:
        if toggleDict['Location'] == 'home':
            datapathDict[mR] = '/home/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}/imdyn_cart'.format(NGridPoints, mR)
        elif toggleDict['Location'] == 'work':
            datapathDict[mR] = '/media/kis/Storage/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}/imdyn_cart'.format(NGridPoints, mR)
    # # # Concatenate Individual Datasets (everything)
    # mR = 10
    # innerdatapath = datapathDict[mR]
    # ds_list = []; P_list = []; aIBi_list = []; mI_list = []
    # for ind, filename in enumerate(os.listdir(innerdatapath)):
    #     print(ind)
    #     if filename == 'quench_Dataset.nc':
    #         continue
    #     print(filename)
    #     ds = xr.open_dataset(innerdatapath + '/' + filename)
    #     ds_list.append(ds)
    #     P_list.append(ds.attrs['P'])
    #     aIBi_list.append(ds.attrs['aIBi'])
    #     mI_list.append(ds.attrs['mI'])
    # s = sorted(zip(aIBi_list, P_list, ds_list))
    # g = itertools.groupby(s, key=lambda x: x[0])
    # aIBi_keys = []; aIBi_groups = []; aIBi_ds_list = []
    # for key, group in g:
    #     aIBi_keys.append(key)
    #     aIBi_groups.append(list(group))
    # for ind, group in enumerate(aIBi_groups):
    #     aIBi = aIBi_keys[ind]
    #     _, P_list_temp, ds_list_temp = zip(*group)
    #     ds_temp = xr.concat(ds_list_temp, pd.Index(P_list_temp, name='P'))
    #     aIBi_ds_list.append(ds_temp)
    # ds_tot = xr.concat(aIBi_ds_list, pd.Index(aIBi_keys, name='aIBi'))
    # del(ds_tot.attrs['P']); del(ds_tot.attrs['aIBi']); del(ds_tot.attrs['nu']); del(ds_tot.attrs['gIB'])
    # ds_tot.to_netcdf(innerdatapath + '/quench_Dataset.nc')
    # # # Concatenate Individual Datasets (aIBi specific - note that for some reason there is a chunk of memory from the initial for loop through filenames that is not being freed up)
    # mR = 1
    # innerdatapath = datapathDict[mR]
    # aIBi_List = [-10, -5, -2]
    # # aIBi_List = [-2]
    # for aIBi in aIBi_List:
    #     ds_list = []; P_list = []; mI_list = []
    #     for ind, filename in enumerate(os.listdir(innerdatapath)):
    #         if filename[0:14] == 'quench_Dataset':
    #             continue
    #         ds = xr.open_dataset(innerdatapath + '/' + filename)
    #         aIBi_temp = ds.attrs['aIBi']
    #         if aIBi_temp != aIBi:
    #             continue
    #         print(filename)
    #         ds_list.append(ds)
    #         P_list.append(ds.attrs['P'])
    #         mI_list.append(ds.attrs['mI'])
    #     s = sorted(zip(P_list, ds_list))
    #     g = itertools.groupby(s, key=lambda x: x[0])
    #     P_keys = []; P_ds_list = []; aIBi_ds_list = []
    #     for key, group in g:
    #         P_temp_list, ds_temp_list = zip(*list(group))
    #         P_keys.append(key)  # note that key = P_temp_list[0]
    #         P_ds_list.append(ds_temp_list[0])
    #     with xr.concat(P_ds_list, pd.Index(P_keys, name='P')) as ds_tot:
    #         # ds_tot = xr.concat(P_ds_list, pd.Index(P_keys, name='P'))
    #         del(ds_tot.attrs['P']); del(ds_tot.attrs['nu']); del(ds_tot.attrs['gIB'])
    #         ds_tot.to_netcdf(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
    # # Analysis of Total Dataset
    aIBi = -2
    # Open the per-mass-ratio concatenated datasets for this interaction.
    qdsDict = {}
    for mR in massRat_Vals:
        # qdsDict[mR] = xr.open_dataset(datapathDict[mR] + '/quench_Dataset.nc').sel(aIBi=aIBi)
        qdsDict[mR] = xr.open_dataset(datapathDict[mR] + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
    # Shared coordinates/attributes -- assumes they match across mass
    # ratios, so they are taken from the mR=1 dataset. TODO confirm.
    PVals = qdsDict[1]['P'].values
    tVals = qdsDict[1]['t'].values
    n0 = qdsDict[1].attrs['n0']
    gBB = qdsDict[1].attrs['gBB']
    nu = pfc.nu(gBB)
    mI = qdsDict[1].attrs['mI']
    mB = qdsDict[1].attrs['mB']
    # IMPURITY DISTRIBUTION CHARACTERIZATION (CARTESIAN)
    nPIm_FWHM_Dict = {}
    nPIm_distPeak_Dict = {}
    nPIm_deltaPeak_Dict = {}
    # NOTE(review): nPIm_Tot_Dict is declared but never filled below.
    nPIm_Tot_Dict = {}
    for mind, mR in enumerate(massRat_Vals):
        nPIm_FWHM_Vals = np.zeros(PVals.size)
        nPIm_distPeak_Vals = np.zeros(PVals.size)
        nPIm_deltaPeak_Vals = np.zeros(PVals.size)
        nPIm_Tot_Vals = np.zeros(PVals.size)
        # NOTE(review): np.object is deprecated in newer NumPy versions, and
        # these two arrays are allocated but never written or read.
        nPIm_Vec = np.empty(PVals.size, dtype=np.object)
        PIm_Vec = np.empty(PVals.size, dtype=np.object)
        for ind, P in enumerate(PVals):
            # Final-time (t=-1) momentum-magnitude distribution at this P.
            qds_nPIm_inf = qdsDict[mR]['nPI_mag'].sel(P=P).isel(t=-1).dropna('PI_mag')
            PIm_Vals = qds_nPIm_inf.coords['PI_mag'].values
            dPIm = PIm_Vals[1] - PIm_Vals[0]
            # # Calculate nPIm(t=inf) normalization
            nPIm_Tot_Vals[ind] = np.sum(qds_nPIm_inf.values * dPIm) + qdsDict[mR].sel(P=P).isel(t=-1)['mom_deltapeak'].values
            # Calculate FWHM, distribution peak, and delta peak
            nPIm_FWHM_Vals[ind] = pfc.FWHM(PIm_Vals, qds_nPIm_inf.values)
            nPIm_distPeak_Vals[ind] = np.max(qds_nPIm_inf.values)
            nPIm_deltaPeak_Vals[ind] = qdsDict[mR].sel(P=P).isel(t=-1)['mom_deltapeak'].values
        # Plot characterization of nPIm(t=inf)
        nPIm_FWHM_Dict[mR] = nPIm_FWHM_Vals
        nPIm_distPeak_Dict[mR] = nPIm_distPeak_Vals
        nPIm_deltaPeak_Dict[mR] = nPIm_deltaPeak_Vals
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
    legend_elements = []
    fig, ax = plt.subplots()
    for mind, mR in enumerate(massRat_Vals):
        mIc = mR * mB * nu
        mininds = np.argpartition(nPIm_FWHM_Dict[mR], 2)[:2]
        Pcrit = np.average(PVals[mininds])  # estimate of critical momentum based on two minimum values of FWHM
        if mR == 10:
            Pcrit = mIc
        # Pcrit = mIc
        ax.plot(PVals / (Pcrit), nPIm_FWHM_Dict[mR], color=colors[mind], linestyle='-')
        ax.plot(PVals / (Pcrit), nPIm_distPeak_Dict[mR], color=colors[mind], linestyle='--')
        ax.plot(PVals / (Pcrit), nPIm_deltaPeak_Dict[mR], color=colors[mind], linestyle=':')
        legend_elements.append(Line2D([0], [0], color=colors[mind], lw=2, label=r'$\frac{m_{I}}{m_{B}}=$' + '{:.1f}'.format(mR)))
    legend_elements.append(Line2D([0], [0], color='k', linestyle='-', lw=1, label='Incoherent Dist FWHM'))
    legend_elements.append(Line2D([0], [0], color='k', linestyle='--', lw=1, label='Incoherent Dist Peak'))
    legend_elements.append(Line2D([0], [0], color='k', linestyle=':', lw=1, label='Delta Peak (Z-factor)'))
    ax.legend(handles=legend_elements)
    # NOTE(review): the second set_xlabel overwrites the first.
    ax.set_xlabel(r'$\frac{P}{P_{crit}}$')
    ax.set_xlabel(r'$\frac{P}{m_{I}c_{BEC}}$')
    ax.set_title(r'$n_{|P_{I}|}$' + ' Characterization (' + r'$aIB^{-1}=$' + '{0})'.format(aIBi))
    plt.show()
    # fig2, ax2, = plt.subplots()
    # Pinit = 6
    # for mind, mR in enumerate(massRat_Vals):
    #     qds_nPIm_inf = qdsDict[mR]['nPI_mag'].sel(P=Pinit, method='nearest').isel(t=-1).dropna('PI_mag')
    #     Pinit = 1 * qds_nPIm_inf['P'].values
    #     PIm_Vals = qds_nPIm_inf.coords['PI_mag'].values
    #     ax2.plot(PIm_Vals, qds_nPIm_inf.values, color=colors[mind], linestyle='-', label=r'$\frac{m_{I}}{m_{B}}=$' + '{:.1f}'.format(mR))
    # ax2.set_xlabel(r'$|P_{I}|$')
    # ax2.set_title(r'$n_{|P_{I}|}$' + ' (' + r'$aIB^{-1}=$' + '{0}, '.format(aIBi) + r'$P=$' + '{:.2f})'.format(Pinit))
    # ax2.legend()
    # plt.show()
    # fig2, ax2 = plt.subplots()
    # ax2.plot(mI * nu * np.ones(PIm_Vals.size), np.linspace(0, 1, PIm_Vals.size), 'k--', label=r'$m_{I}c$')
    # curve = ax2.plot(PIm_Vec[0], nPIm_Vec[0], color='k', lw=2, label='')[0]
    # line = ax2.plot(PVals[0] * np.ones(PIm_Vals.size), np.linspace(0, nPIm_deltaPeak_Vals[0], PIm_Vals.size), 'go', label='')[0]
    # P_text = ax2.text(0.85, 0.85, 'P: {:.2f}'.format(PVals[0]), transform=ax2.transAxes, color='r')
    # norm_text = ax2.text(0.7, 0.8, r'$\int n_{|\vec{P_{I}}|} d|\vec{P_{I}}| = $' + '{:.3f}'.format(nPIm_Tot_Vals[0]), transform=ax.transAxes, color='b')
    # ax2.legend()
    # ax2.set_xlim([-0.01, np.max(PIm_Vec[0])])
    # ax2.set_ylim([0, 1.2])
    # ax2.set_title('Impurity Momentum Magnitude Distribution (' + r'$aIB^{-1}=$' + '{0})'.format(aIBi))
    # ax2.set_ylabel(r'$n_{|\vec{P_{I}}|}$')
    # ax2.set_xlabel(r'$|\vec{P_{I}}|$')
    # def animate2(i):
    #     curve.set_xdata(PIm_Vec[i])
    #     curve.set_ydata(nPIm_Vec[i])
    #     line.set_xdata(PVals[i])
    #     line.set_ydata(np.linspace(0, nPIm_deltaPeak_Vals[i], PIm_Vals.size))
    #     P_text.set_text('P: {:.2f}'.format(PVals[i]))
    #     norm_text.set_text(r'$\int n_{|\vec{P_{I}}|} d|\vec{P_{I}}| = $' + '{:.3f}'.format(nPIm_Tot_Vals[i]))
    # anim2 = FuncAnimation(fig2, animate2, interval=1000, frames=range(PVals.size))
    # anim2.save(animpath + '/aIBi_{0}'.format(aIBi) + '_ImpDist.gif', writer='imagemagick')
    # plt.show()
|
__author__ = 'surya'
from Mysql_queries import MySqlConnection
def getAlldata_fromDatabase(Query, cnx, query_val='Null', append=False):
    """Run *Query* and map the second result column (name) to the first (id).

    Args:
        Query: SQL statement selecting two columns: (id, name).
        cnx: an open MySQL connection; re-established via MySqlConnection
            when it is no longer alive.
        query_val: parameter tuple for the query, or the sentinel string
            'Null' when the query takes no parameters.
        append: when True collect every id per name in a list; when False
            keep only the first id seen for each name.

    Returns:
        dict mapping name -> id (or name -> [ids] when *append* is True).
    """
    if not cnx.is_connected():
        cnx = MySqlConnection.connectSql()
    result = {}
    cursor = cnx.cursor()
    if query_val != "Null":
        cursor.execute(Query, query_val)
    else:
        cursor.execute(Query)
    for row_id, row_name in cursor:
        if row_name in result:
            # Duplicate name: accumulate only in append mode; otherwise the
            # first id seen wins and later rows are ignored.
            if append:
                result[row_name].append(row_id)
        elif append:
            result[row_name] = [row_id]
        else:
            result[row_name] = row_id
    cursor.close()
    return result
def get2colmergedata_fromDatabase(Query, cnx, query_val='Null', append=False):
    """Run *Query* and map the merged "name_other" key to the id column.

    Args:
        Query: SQL statement selecting three columns: (id, name, other).
        cnx: an open MySQL connection; re-established via MySqlConnection
            when it is no longer alive.
        query_val: parameter tuple for the query, or the sentinel string
            'Null' when the query takes no parameters.
        append: when True collect every id per merged key in a list; when
            False keep only the first id seen for each merged key.

    Returns:
        dict mapping "name_other" -> id (or -> [ids] when *append* is True).
    """
    if not cnx.is_connected():
        cnx = MySqlConnection.connectSql()
    dic = {}
    cursor3 = cnx.cursor()
    if query_val != "Null":
        cursor3.execute(Query, query_val)
    else:
        cursor3.execute(Query)
    for (id, name, other) in cursor3:
        key = name + "_" + other
        # BUG FIX: the membership test previously checked `name` alone while
        # entries are stored under the merged "name_other" key, so repeated
        # rows reset the list instead of appending to it.
        if key not in dic:
            if append:
                dic[key] = [id]
            else:
                dic[key] = id
        elif append:
            dic[key].append(id)
    cursor3.close()
    return dic
def getdatafromDB(Query, cnx, four=False, two=False):
    """Run *Query* and build a lookup dict whose shape depends on the flags.

    Args:
        Query: SQL statement; must select 4, 2 or 3 columns to match the
            chosen branch (four=True, two=True, or the 3-column default).
        cnx: an open MySQL connection; re-established via MySqlConnection
            when it is no longer alive.
        four: rows are (id, date, name, other) -> key "name_date_other".
        two: rows are (name, id) -> key name.

    Returns:
        dict mapping the composed key to the id; for duplicate keys the
        first row seen wins.
    """
    if not cnx.is_connected():
        cnx = MySqlConnection.connectSql()
    name_date2id = {}
    cursor3 = cnx.cursor()
    cursor3.execute(Query)
    if four:
        for (first, second, third, fourth) in cursor3:
            key = third + "_" + str(second) + "_" + fourth
            # BUG FIX: the duplicate check previously used the key order
            # third_fourth_second while values were stored under
            # third_second_fourth, so it never matched and later rows
            # silently overwrote earlier ones.
            if key not in name_date2id:
                name_date2id[key] = first
    elif two:
        for (name, id) in cursor3:
            if name not in name_date2id:
                name_date2id[name] = id
    else:
        for (first, second, third) in cursor3:
            if second + "_" + str(third) not in name_date2id:
                name_date2id[second + "_" + str(third)] = first
    cursor3.close()
    return name_date2id
import base64
import csv
import io
from utility import Utility
class Report(object):
    """
    We use this class to generate text reports.
    Class based on awilson@cloudpassage.com's Report.py class and modified
    for this purpose
    """
    @classmethod
    def create_csv_report(cls, vulnerable_image_check_data):
        """
        Expect a dictionary object, produce text in CSV format.
        Args:
            - cls - reference to the class
            - vulnerable_image_check_data (dict) - dictionary of vulnerability
              data
        Return:
            - result (str) - base64 encoded vulnerability report
        """
        # initialize the list as we will have a list of dicts
        rows = []
        # let's build the output for all sets in the dataset
        # dataset is vulnerability info for all images in request
        for set in vulnerable_image_check_data["image_issues"]:
            # format the data for a csv report
            row = cls.format_vulnerable_image_data_csv(set)
            # append each returned data set to the whole
            rows.append(row)
        # the fieldnames for the csv - DictWriter will order by these
        fieldnames = \
            ["registry", "repository", "tag",
             "package", "version", "image_digest"]
        # get a stream io object
        # NOTE(review): csv.DictWriter writing into io.BytesIO only works on
        # Python 2, where csv emits byte strings; Python 3 needs io.StringIO
        # and an explicit encode before b64encode — confirm target runtime.
        ephemeral_obj = io.BytesIO()
        # write the csv data
        csv_writer = csv.DictWriter(ephemeral_obj, fieldnames=fieldnames)
        csv_writer.writeheader()
        csv_writer.writerows(rows)
        # encode to base64
        result = base64.b64encode(ephemeral_obj.getvalue())
        # clean up
        ephemeral_obj.close()
        # return report data
        return result

    @classmethod
    def create_stdout_report(cls, vulnerable_image_check_data):
        """
        Expect a dictionary object, produce text appropriate for stdout.
        Args:
            - cls - reference to the class
            - vulnerable_image_check_data (dict) - dictionary of vulnerability
              data
        Return:
            - result (str) - base64 encoded vulnerability report
        Format of encoded data:
        Registry: DPR
        Repository: bkumar89/centos
        Tag: 7.1.1503
        Vulnerabilities:
        Package: binutils Package Version: 2.23.52.0.1-30.el7 | CVE List: cve-2014-8484 cve-2014-8485 # NOQA
        """
        result = ""
        # for each data set in all the data
        for set in vulnerable_image_check_data["image_issues"]:
            # format data as noted above
            pieces = cls.format_vulnerable_image_data(set)
            # split on '\n' and immediately rejoin: a no-op normalization
            pieces = pieces.split('\n')
            pieces = "\n".join(pieces)
            # build full dataset
            result += pieces
        # encode data
        # NOTE(review): base64.b64encode(str) is Python-2-only; Python 3
        # requires bytes input — confirm target runtime.
        result = base64.b64encode(result)
        # return report data
        return result

    @classmethod
    def create_slack_reports(cls, channel_reference, default_channel,
                             routing_rules, instances):
        """Create a plaintext report for Slack.
        Args:
            channel_reference(dict): Keys are channel names, values are channel
                IDs.
            default_channel(str): Name of default Slack channel.
            routing_rules(dict): Rules for routing messages to different Slack
                channels. Formatted like
                {"metadata_field_name":
                    {"metadata_field_value_to_match": "slack_channel_name"}}
            instances(dict): Instance metadata.
        Returns:
            dict: {"channel": "report"} where "channel" is the Slack channel
                ID and "report" is the text of the report.
        """
        organized = {}
        # Group by target Slack channel.
        for instance in instances:
            channel = Utility.get_channel_for_message(channel_reference,
                                                      instance, routing_rules,
                                                      default_channel)
            if channel not in organized:
                organized[channel] = []
            organized[channel].append(instance)
        # Build report per channel, each sorted by instance ID.
        report = {}
        for target, content in organized.items():
            # NOTE(review): dict.keys()[0] / dict.values()[0] only works on
            # Python 2 — dict views are not indexable on Python 3; confirm
            # target runtime.
            x_content = {c.keys()[0]: c.values()[0] for c in content}
            report[target] = cls.create_stdout_report(x_content)
        return report

    @classmethod
    def format_vulnerable_image_data(cls, vic_data):
        """Format vulnerability data for reporting.
        Args:
            - cls - reference to the class
            - vic_data (dict): Formatted like this:
            Registry: DPR
            Repository: bkumar89/centos
            Tag: 7.1.1503
            Vulnerabilities:
            Package: binutils Package Version: 2.23.52.0.1-30.el7 | CVE List: cve-2014-8484 cve-2014-8485 # NOQA
        """
        registry = \
            "\n\nRegistry: {registry}" \
            "".format(registry=vic_data["image"]["registry"]["name"])
        repository = \
            " Repository: {repository}" \
            "".format(repository=vic_data["image"]["repository"]["name"])
        # concatenate all tags into one space-separated string
        tags = ""
        for tag in vic_data["image"]["tags"]:
            tags += tag
            tags += " "
        tag_list = \
            " Tag(s): {tag_list}".format(tag_list=tags)
        vulnerabilities = " Vulnerabilities:" # NOQA
        package = " Package: {package}".format(package=vic_data["name"])
        # build package, package version and cve's into one line
        package_version = \
            " Package Version: {package_version}" \
            "".format(package_version=vic_data["version"])
        package += package_version
        # concatenate all CVE names into one space-separated string
        cves = ""
        for cve in vic_data["cves"]:
            cves += cve["name"]
            cves += " "
        cve_list = " | CVE List: {cve_list}".format(cve_list=cves)
        package += cve_list
        # order the fields and separate them by a newline
        ordered_fields = [registry, repository, tag_list,
                          vulnerabilities, package]
        # return formatted report data
        return "\n".join(ordered_fields)

    @classmethod
    def format_vulnerable_image_data_csv(cls, vic_data):
        """
        Format vulnerability data for reporting in CSV format.
        Args:
            vic_data (dict) - vulnerability data
        Returns:
            result - (dict) - vulnerability report data
        """
        # concatenate every tag into one space-separated string
        number_tags = len(vic_data["image"]["tags"])
        counter = 0
        tags = ""
        increment = 1
        while counter < number_tags:
            tags += vic_data["image"]["tags"][counter]
            tags += " "
            counter = counter + increment
        # one CSV row per vulnerable package; keys match the DictWriter
        # fieldnames used in create_csv_report
        result = {"registry": vic_data["image"]["registry"]["name"],
                  "repository": vic_data["image"]["repository"]["name"],
                  "tag": tags,
                  "package": vic_data["name"],
                  "image_digest": vic_data["image"]["image_sha"],
                  "version": vic_data["version"]}
        return result
|
"""
Xlink utility methods.
Copyright (C) 2019-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
import logging
from typing import List
from ctypes import *
from ..constants import XLINK_LIB_PATH, SW_DEVICE_ID_PCIE_INTERFACE, SW_DEVICE_ID_INTERFACE_SHIFT, \
SW_DEVICE_ID_INTERFACE_MASK, MAXIMUM_DEVICE_NAME_SIZE
from .ixlink_wrapper import X_LINK_SUCCESS, xlink_handle, HOST_DEVICE
logger = logging.getLogger(__name__)
def get_all_xlink_pcie_device_ids(num_devices: int) -> List[int]:
    """ Call xlink API to get all xlink PCIe devices.

    @param num_devices: maximum number of devices to query for
    @return: list that contains xlink PCIe device ids
    """
    xlink_library = CDLL(XLINK_LIB_PATH)
    xlink_library.xlink_initialize()
    dev_id_list = (c_int * 64)()
    xlink_pcie_dev_ids = []
    num_dev = c_int(num_devices)
    logger.debug('Call xlink get device list...')
    status = xlink_library.xlink_get_device_list(
        byref(dev_id_list), byref(num_dev))
    # BUG FIX: `is not` compares object identity, which is unreliable for
    # integer return codes; compare by value instead.
    if status != X_LINK_SUCCESS:
        logger.error('xlink_get_device_list failed - %s', str(status))
    logger.debug(f"number of dev = {num_dev.value}, device list: ")
    # Keep only the entries whose interface field marks them as PCIe.
    for num in range(len(dev_id_list)):
        logger.debug("dev_id_list[{}]: {}".format(num, dev_id_list[num]))
        if _get_interface_from_sw_device_id(dev_id_list[num]) == SW_DEVICE_ID_PCIE_INTERFACE:
            xlink_pcie_dev_ids.append(dev_id_list[num])
            logger.debug("dev {} with dev id {} is added".format(num, dev_id_list[num]))
    return xlink_pcie_dev_ids
def _get_interface_from_sw_device_id(sw_device_id: int) -> int:
    """ Extract the interface-type field encoded in an xlink sw device id.

    @param sw_device_id: xlink sw device id to be checked
    @return: number representing xlink device type
    """
    shifted = sw_device_id >> SW_DEVICE_ID_INTERFACE_SHIFT
    return shifted & SW_DEVICE_ID_INTERFACE_MASK
def filter_first_slice_from_list(xlink_pcie_dev_list: List[int]) -> List[int]:
    """ Filter the device list. Only retain root PCIe devices.
    The PCIe id is obtained using the xlink_get_device_name API.
    In TBH, only the root xlink PCIe device from TBH will be connected.
    E.g. List[04:00.0, 04:00.2, 04:00.4] -> List[04:00.0]
    @param xlink_pcie_dev_list: list of xlink sw device ids (mutated in place)
    @return: the same list, with non-root PCIe devices removed
    """
    library = CDLL(XLINK_LIB_PATH)
    library.xlink_initialize()
    # Iterate over a snapshot so removals do not disturb the traversal.
    for candidate in list(xlink_pcie_dev_list):
        pcie_id = _get_device_name(candidate, library)
        # Keep only devices whose PCIe function number is 0 (the root).
        if pcie_id.split('.', 1)[1] != "0":
            xlink_pcie_dev_list.remove(candidate)
    return xlink_pcie_dev_list
def _get_device_name(sw_device_id: int, xlink_library: CDLL) -> str:
    """ Call xlink API to get the device's name based on sw device id.

    @param sw_device_id: xlink sw device id to be checked
    @param xlink_library: xlink shared library CDLL object
    @return: string representing pcie id. Example: 04:00.0
    """
    xlink_handler = xlink_handle(dev_type=HOST_DEVICE)
    xlink_handler.sw_device_id = sw_device_id
    dev_name_p = create_string_buffer(MAXIMUM_DEVICE_NAME_SIZE)
    size = c_uint(MAXIMUM_DEVICE_NAME_SIZE)
    status = xlink_library.xlink_get_device_name(byref(xlink_handler), byref(dev_name_p), size)
    # BUG FIX: `is not` compares object identity, which is unreliable for
    # integer return codes; compare by value.  Also report the failure
    # through the module logger (with a real format argument) instead of
    # print, for consistency with the rest of this module.
    if status != X_LINK_SUCCESS:
        logger.error('xlink_get_device_name failed - %s', str(status))
    dev_name = ''
    # Decode byte-by-byte; stop accumulating quietly on a decode error.
    try:
        for i in range(size.value):
            dev_name = dev_name + dev_name_p[i].decode('utf-8')  # type: ignore
    except UnicodeDecodeError:
        pass
    # The buffer is NUL-padded; return only the part before the first NUL.
    return dev_name.split('\x00')[0]
|
import logging
from typing import Optional, Tuple
from snuba.consumers.consumer_builder import ConsumerBuilder
from snuba.stateful_consumer import ConsumerStateData, ConsumerStateCompletionEvent
from snuba.utils.state_machine import State
logger = logging.getLogger('snuba.snapshot-catchup')
class CatchingUpState(State[ConsumerStateCompletionEvent, Optional[ConsumerStateData]]):
    """
    State in which the consumer consumes the main topic while discarding
    the transactions that were already present in the snapshot
    (xid < xmax and not in xip_list).
    Once this phase is done the consumer goes back to normal consumption.
    """

    def __init__(
        self,
        consumer_builder: ConsumerBuilder
    ) -> None:
        super().__init__()
        self.__consumer_builder = consumer_builder
        # Built lazily in handle(); None until then.
        self.__consumer = None

    def signal_shutdown(self) -> None:
        # Forward the shutdown request to the consumer, if one exists yet.
        consumer = self.__consumer
        if consumer:
            consumer.signal_shutdown()

    def handle(self, state_data: Optional[ConsumerStateData]) -> Tuple[ConsumerStateCompletionEvent, Optional[ConsumerStateData]]:
        # This state cannot run without snapshot metadata.
        assert state_data is not None
        self.__consumer = self.__consumer_builder.build_snapshot_aware_consumer(
            snapshot_id=state_data.snapshot_id,
            transaction_data=state_data.transaction_data,
        )
        # Blocks until consumption of this phase finishes or is shut down.
        self.__consumer.run()
        return (ConsumerStateCompletionEvent.CONSUMPTION_COMPLETED, None)
|
import re
import rethinkdb as r
from repool import ConnectionPool
from rethinkdb.errors import RqlRuntimeError, RqlDriverError, ReqlCursorEmpty
from app.incident import Incident, LIST_FIELDS, CRITICAL_FIELDS
from templates.responses import (
CREATE_INCIDENT_FAILED, SET, GET, GET_LIST, NAG)
class CommanderBase:
    """
    Incident commander main class.

    Owns the RethinkDB connection pool and the message-handling entry
    points; subclasses implement parse_commands() with the actual command
    handlers.
    """
    def __init__(self, config):
        self.config = config
        print(self.config)
        self.name = self.config['name']
        self.id = self.config['id']
        self.db_name = self.config['db_name']
        # One-off bootstrap connection used only to create the db/table.
        self.rdb = r.connect(
            host=self.config['db_host'],
            port=self.config['db_port']
        )
        try:
            r.db_create(self.db_name).run(self.rdb)
            r.db(self.db_name)\
                .table_create('incidents', primary_key='slack_channel')\
                .run(self.rdb)
            print('Database setup completed.')
        except RqlRuntimeError:
            # Creating an existing db raises at runtime: already set up.
            print('App database already exists.')
        self.rdb.close()
        # Pooled connections are used for all per-message work.
        self.pool = ConnectionPool(
            host=self.config['db_host'],
            port=self.config['db_port'],
            db=self.db_name
        )

    def pre_message(self):
        """Acquire a pooled DB connection before handling a message."""
        try:
            self.rdb = self.pool.acquire()
        except RqlDriverError:
            print("Could not connect to db")

    def post_message(self):
        """Release the pooled DB connection after handling a message."""
        self.pool.release(self.rdb)

    def process_message(self, message):
        """Handle one incoming message inside an acquire/release pair."""
        self.pre_message()
        return_val = self.parse_message(message)
        self.post_message()
        return return_val

    def parse_message(self, message):
        """Extract the command text from a Slack message and dispatch it.

        Commands are accepted either as a direct mention ("@bot command")
        in any channel, or as the bare message text in a direct-message
        channel (channel IDs starting with 'D').  Returns the handler's
        response, "" for messages we ignore, or None when nothing matched.
        """
        if not self.valid_message(message):
            return ""
        stripped_message = message.get('text')
        if stripped_message is None:
            return ""
        stripped_message = stripped_message.strip()
        name_match = re.match(r'<@?{}>:?\s*(.*)'.format(self.id),
                              stripped_message,
                              flags=re.IGNORECASE)
        if name_match:
            commands = name_match.groups()[0]
            return self.parse_commands(commands, channel=message['channel'], user=message['user'])
        if message['channel'].startswith('D'):
            return self.parse_commands(stripped_message,
                                       channel=message['channel'], user=message.get('user'))

    def valid_message(self, message):
        """A message is valid unless the bot itself sent it."""
        return message.get('user') != self.id

    def parse_commands(self, commands, channel, user=None):
        """Subclass hook: interpret *commands* issued in *channel* by *user*.

        BUG FIX: this previously did `return NotImplementedError` (handing
        the exception class back as a value) and its signature did not
        accept the `user` keyword that parse_message() passes; it now
        raises and matches the call sites.
        """
        raise NotImplementedError
class Commander(CommanderBase):
    """Concrete commander: maps chat commands onto incident operations."""
    def __init__(self, *args, **kwargs):
        super(Commander, self).__init__(*args, **kwargs)

    def parse_commands(self, commands, channel, user):
        """Dispatch a command string to the matching handler.

        Tries each known command pattern in order and returns the first
        matching handler's response; falls through to a "no match" reply.
        """
        # Run down a big old list of short-circuiting ifs to determine
        # which command was called
        create_incident = re.match(r'create[ -]incident\s*(.*)',
                                   commands,
                                   flags=re.I)
        if create_incident:
            # begin workflow for creating incident
            return self.create_incident(create_incident.groups()[0])
        summary_match = re.match(
            r'^\s*summary|summarize', commands, flags=re.I)
        if summary_match:
            return self.summarize(channel)
        resolve_match = re.match(r'resolve\s*(.*)', commands, flags=re.I)
        if resolve_match:
            incident = Incident.get_incident_by_channel(self.rdb, channel)
            return incident.resolve(channel, self.rdb)
        set_match = re.match(
            r'set[ -]([A-Za-z_]+)\s*(.*)', commands, flags=re.I)
        if set_match:
            return self.set_field(channel, user, set_match.groups()[0], set_match.groups()[1])
        get_match = re.match(
            r'get[ -]([A-Za-z_]+)\s*(.*)', commands, flags=re.I)
        if get_match:
            return self.get_field(channel, get_match.groups()[0])
        add_match = re.match(
            r'add[ -]([A-Za-z_]+)\s*(.*)', commands, flags=re.I)
        if add_match:
            return self.add_field(channel, user, add_match.groups()[0], add_match.groups()[1])
        remove_match = re.match(
            r'remove[ -]([A-Za-z_]+)\s+([1-9]\d*)', commands, flags=re.I)
        if remove_match:
            return self.remove_field(channel, *remove_match.groups())
        return 'no match for this command'

    def create_incident(self, app_name):
        """Create a new incident (and its Slack channel) for *app_name*."""
        # catches "for app-name" or "app-name"
        current_app_name = re.match(r'(?:for\s+)?(.*)', app_name)
        if not current_app_name:
            return CREATE_INCIDENT_FAILED.render()
        incident = Incident.create_new_incident(
            current_app_name.groups()[0], self.config)
        incident.create_channel()
        incident.save(self.rdb)
        return 'Created incident!: <#{}|{}>'.format(incident.slack_channel, incident.name)

    def set_field(self, channel, user, field, value):
        """Set a scalar incident field; list fields delegate to add_field."""
        if field in LIST_FIELDS:
            return self.add_field(channel, user, field, value)
        incident = Incident.get_incident_by_channel(self.rdb, channel)
        try:
            setattr(incident, field, value)
            incident.save(self.rdb)
        except KeyError:
            return "{} is not a field that exists on an incident".format(field)
        return SET.render(field=field, value=value)

    def get_field(self, channel, field):
        """Render one incident field for display."""
        incident = Incident.get_incident_by_channel(self.rdb, channel)
        val = getattr(incident, field)
        # Use the list template if value is a list, else just return regularly
        if isinstance(val, list):
            return GET_LIST.render(field=field, value=val)
        return GET.render(field=field, value=val)

    def add_field(self, channel, user, field, value):
        """Append a timestamped entry to one of the incident's list fields."""
        if field not in LIST_FIELDS:
            return '`add` commands can only be used with one of the following: {}'.format(', '.join(
                LIST_FIELDS))
        d = r.table('incidents').filter(
            {'slack_channel': channel}).run(self.rdb)
        try:
            d = d.next()
        except ReqlCursorEmpty:
            return "Cant Find Incident"
        r.table('incidents').filter({'slack_channel': channel}).update({
            field: r.row[field].default([]).append({
                'ts': r.now(),
                'user': user,
                'text': value,
                'removed': False
            })
        }, return_changes=True).run(self.rdb)
        return self.get_field(channel, field)

    def remove_field(self, channel, field, display_index):
        """Soft-delete (mark removed) the display_index-th list entry."""
        if field not in LIST_FIELDS:
            return '`remove` commands can only be used with one of the following: {}'.format(', '.join(
                LIST_FIELDS))
        # lists are numbered starting from 1, not 0, so subtract 1 for the real
        # index
        index = int(display_index)
        if index > 0:
            index = index - 1
        else:
            return 'Items number must be 1 or greater'
        r.table('incidents').filter({'slack_channel': channel}).update({
            field: r.row[field].change_at(index,
                r.row[field][index].merge({
                    'removed': True
                })
            )
        }).run(self.rdb)
        return self.get_field(channel, field)

    # Periodic update functions
    def nag(self):
        """Build (channel, message) nags for incidents missing critical fields."""
        self.pre_message()
        response = []
        incidents = r.table('incidents').run(self.rdb)
        for incident in incidents:
            channel = incident.get('slack_channel')
            message = ""
            for key in CRITICAL_FIELDS:
                if incident.get(key) is None:
                    message = "{}\n{}".format(message, NAG.render(key=key))
            response.append([channel, message])
        self.post_message()
        return response

    def update(self):
        """Build (channel, message) periodic updates for every incident."""
        self.pre_message()
        response = []
        incidents = r.table('incidents').run(self.rdb)
        for incident in incidents:
            # This will just return to the incident channel, thoughts?
            channel = incident.get('slack_channel')
            message = ""  # This should be the summary!
            response.append([channel, message])
        self.post_message()
        return response

    def summarize(self, channel):
        """Post the incident summary for *channel*.

        NOTE(review): when invoked via parse_commands (inside
        process_message) this acquires a second pooled connection on top
        of the one already held — confirm the pool tolerates the nested
        acquire/release.
        """
        self.pre_message()
        incident = Incident.get_incident_by_channel(self.rdb, channel)
        incident.post_summary(self.config)
        self.post_message()
|
import math
import numpy as np
import os
from PIL import Image
import random
import torch
import torch.utils.data as data
import pickle
# import pickle
# with open('x.pkl', 'wb') as file:
# pickle.dump(object, file)
# Default locations of the pickled CloudCast splits, keyed by dataset size.
# NOTE(review): cluster-specific absolute paths — adjust per deployment.
default_data_path = {
    "200MB": "/miniscratch/tyz/datasets/CloudCast/200MB/pkls/",
    "8GB": "/miniscratch/tyz/datasets/CloudCast/8GB/pkls/",
}
def load_cs_small(root=default_data_path):
    """Load the small (200MB) CloudCast train/test splits from pickle files.

    Args:
        root: mapping with a "200MB" key pointing at the pickle directory.

    Returns:
        [train_data, test_data] as unpickled objects.
    """
    base = root["200MB"]
    datasets = []
    for split in ("train.pkl", "test.pkl"):
        with open(os.path.join(base, split), "rb") as handle:
            datasets.append(pickle.load(handle))
    return datasets
class CloudCast(data.Dataset):
    """PyTorch dataset over the pickled CloudCast cloud-cover videos.

    Each item is a temporal slice of the (H, W, T) source array, split into
    `n_frames_input` conditioning frames and `n_frames_output` target
    frames, normalised to [0, 1] by `max_pxl_value`.
    """
    def __init__(
        self,
        root,
        is_train,
        n_frames_input,
        n_frames_output,
        is_large=False,
        max_pxl_value=15,
        transform=None,
        batchsize=16,
    ):
        """
        param num_objects: a list of number of possible objects.
        """
        super(CloudCast, self).__init__()
        # NOTE(review): `root` is accepted but the loader always uses the
        # module-level default paths — confirm whether it should be passed
        # through to load_cs_small().
        self.dataset_all = load_cs_small()
        if is_train:
            self.dataset = self.dataset_all[0]
        else:
            self.dataset = self.dataset_all[1]
        # number of frames along the time axis (last dimension)
        self.length = self.dataset.shape[-1]
        # BUG FIX: the constructor previously hard-coded
        # `self.is_large = False`, silently discarding the caller's
        # `is_large` argument (and the 728-pixel image size below).
        self.is_large = is_large
        self.is_train = is_train
        self.n_frames_input = n_frames_input
        self.n_frames_output = n_frames_output
        self.n_frames_total = self.n_frames_input + self.n_frames_output
        self.transform = transform
        self.max_pxl_value = max_pxl_value
        self.batchsize = batchsize
        # For generating data
        if self.is_large:
            self.image_size_ = 728
        else:
            self.image_size_ = 128
        self.step_length_ = 0.1

    def getslice(self, cloudcast, idx):
        """Return slice *idx* of the video as (n_frames_total, 1, H, W).

        `cloudcast` is an ndarray with shape (H, W, T).  Near the end of
        the array the window is shifted back so a full-length slice is
        always returned.
        """
        H, W, T = cloudcast.shape
        num_normal_batch = int((self.length - 1) / self.batchsize)
        num_normal_data = num_normal_batch * self.batchsize
        if idx <= num_normal_data:
            slice = cloudcast[
                :, :, idx : idx + self.n_frames_total
            ]  # get a complete slice from the beginning
        else:  # avoid getting errors when the rest of the data is not enough for a batch
            diff = self.length - idx
            slice = cloudcast[:, :, -diff - self.n_frames_total : -diff]
        # (H, W, T) -> (T, 1, H, W): time first, single channel axis
        slice = np.moveaxis(slice, -1, 0)[:, np.newaxis, :, :]
        return slice

    def __getitem__(self, idx):
        """Return [idx, output_frames, input_frames, last_input_frame, zeros(1)]."""
        images = self.getslice(self.dataset, idx)
        input = images[: self.n_frames_input]
        if self.n_frames_output > 0:
            output = images[
                -self.n_frames_output :
            ]  # avoid error when the rest of the data is not enough for an output with len of n_frames_output
        else:
            # NOTE(review): with n_frames_output == 0 this empty list would
            # break torch.from_numpy below — confirm that configuration is
            # never used.
            output = []
        frozen = input[-1]
        # normalise pixel values to [0, 1] before handing to the model
        output = torch.from_numpy(output / self.max_pxl_value).contiguous().float()
        input = torch.from_numpy(input / self.max_pxl_value).contiguous().float()
        out = [idx, output, input, frozen, np.zeros(1)]
        return out

    def __len__(self):
        return self.length
|
# -*- coding: utf-8 -*-
# encoding: utf-8
'''
Created on 2015年3月1日
@author: kane
'''
import six
from billiards.pay import Pay
import alipay
from billiards.views.transaction import getIdFromTradeNum,\
transactionSuccessNotification, TRANSACTION_TIME_FORMAT
from xml.etree import ElementTree
if six.PY3:
from urllib.parse import unquote
else:
from urlparse import unquote
from billiards.models import Transaction
from datetime import datetime
import pytz
from billiards.settings import TIME_ZONE
from django.utils.timezone import utc
from django.shortcuts import redirect
from django.http.response import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import logging
logger = logging.getLogger("transaction")
def alipay_wapreturn(request):
    """Synchronous browser-return page for alipay WAP payments.

    Verifies the signed GET parameters and, when the transaction has not
    been finalised yet (state 2 = paid, 5 = failed), marks it as paid and
    fires the success notification before showing the result page.
    """
    paymethod = Pay.getPayMethod()
    account = paymethod.getAccount(True)
    pay = alipay.WapAlipay(pid=account.pid, key=account.key, seller_email=account.email)
    # NOTE(review): iteritems() is Python-2-only, like the rest of this module.
    parameters = {k: unquote(v) for k, v in request.GET.iteritems()}
    if pay.verify_notify(**parameters):
        tradenum = request.GET.get('out_trade_no')
        try:
            transaction = Transaction.objects.get(id=getIdFromTradeNum(tradenum))
        except Transaction.DoesNotExist:
            # BUG FIX: the original swallowed this exception and then
            # dereferenced the unbound `transaction` below, raising a
            # NameError; answer with the error page instead.
            # TODO handle error case more gracefully
            return HttpResponse("Error.")
        if transaction.state != 2 and transaction.state != 5:
            transaction.paytradeNum = request.GET.get('trade_no')
            transaction.tradeStatus = 'TRADE_SUCCESS'
            transaction.paidDate = datetime.now().replace(tzinfo=utc).astimezone(pytz.timezone(TIME_ZONE))
            transaction.state = 2
            transaction.save()
            transactionSuccessNotification(transaction)
        # add a page here
        if transaction.goods.type == 2:
            return redirect('user_assistant_order')
        return HttpResponse("Payment completed.")
    return HttpResponse("Error.")
@csrf_exempt
def alipay_wapnotify(request):
    """Asynchronous (server-to-server) alipay WAP payment notification.

    Verifies the signed POST parameters, parses the URL-encoded XML in
    `notify_data`, and records the payment result on the matching
    Transaction.  Alipay treats any response other than the literal string
    "success" as a delivery failure and keeps retrying the notification.
    """
    try:
        paymethod = Pay.getPayMethod()
        account = paymethod.getAccount(True)
        pay = alipay.WapAlipay(pid=account.pid, key=account.key, seller_email=account.email)
        # NOTE(review): iteritems() is Python-2-only, like the rest of this module.
        parameters = {k: v for k, v in request.POST.iteritems()}
        logger.info("Received alipay wap notify at %s with parameters '%s'." %(datetime.now(),
            '&'.join(['%s=%s' % (key, v) for key,v in request.POST.iteritems()])))
        if pay.verify_notify(**parameters):
            # notify_data is URL-encoded XML; map tag -> text for easy access
            tree = ElementTree.ElementTree(ElementTree.fromstring(unquote(parameters['notify_data']).encode("utf-8")))
            notifydata = {node.tag: node.text for node in tree.iter()}
            tradenum = notifydata['out_trade_no']
            try:
                transaction = Transaction.objects.get(id=getIdFromTradeNum(tradenum))
                if transaction.tradeStatus == 'TRADE_FINISHED' or transaction.tradeStatus == 'TRADE_CLOSED':
                    # already completed transaction
                    return HttpResponse("success")
                transaction.paytradeNum = notifydata['trade_no']
                transaction.tradeStatus = notifydata['trade_status']
                transaction.notifyid = notifydata['notify_id']
                transaction.buyeid = notifydata['buyer_id']
                # prefer alipay's gmt_payment timestamp; fall back to "now"
                transaction.paidDate = datetime.now().replace(tzinfo=utc).astimezone(pytz.timezone(TIME_ZONE)) if 'gmt_payment' not in notifydata else \
                    datetime.strptime(notifydata['gmt_payment'], TRANSACTION_TIME_FORMAT).replace(tzinfo=pytz.timezone(TIME_ZONE))
                transaction.state = 2 if notifydata['trade_status'] == 'TRADE_SUCCESS' else 5
                transaction.save()
                transactionSuccessNotification(transaction)
                # BUG FIX: was "success." — alipay requires the exact string
                # "success" to acknowledge the notification (as the early
                # return above already does), otherwise it keeps re-sending.
                return HttpResponse("success")
            except Transaction.DoesNotExist:
                #TODO handle error case
                pass
        else:
            logger.warn("alipay wap notify is invalid.")
    except Exception:
        logger.exception("exception occurred when processing alipay wap notification.")
    return HttpResponse("Error.")
def alipay_return(request):
    """Synchronous browser-return page for alipay web (PC) payments.

    Verifies the signed GET parameters and, when alipay reports success
    and the transaction has not been finalised yet (state 2 = paid,
    5 = failed), records the payment and fires the success notification.
    """
    paymethod = Pay.getPayMethod()
    account = paymethod.getAccount(True)
    pay = alipay.Alipay(pid=account.pid, key=account.key, seller_email=account.email)
    # NOTE(review): iteritems() is Python-2-only, like the rest of this module.
    parameters = {k: v for k, v in request.GET.iteritems()}
    if pay.verify_notify(**parameters):
        if request.GET.get('is_success') == 'T':
            tradenum = request.GET.get('out_trade_no')
            try:
                transaction = Transaction.objects.get(id=getIdFromTradeNum(tradenum))
            except Transaction.DoesNotExist:
                # BUG FIX: the original swallowed this exception and then
                # dereferenced the unbound `transaction` below, raising a
                # NameError; answer with the error page instead.
                # TODO handle error case more gracefully
                return HttpResponse("Error.")
            if transaction.state != 2 and transaction.state != 5:
                transaction.paytradeNum = request.GET.get('trade_no')
                transaction.tradeStatus = request.GET.get('trade_status')
                transaction.notifyid = request.GET.get('notify_id')
                transaction.buyerEmail = request.GET.get('buyer_email')
                transaction.buyeid = request.GET.get('buyer_id')
                if transaction.tradeStatus == 'TRADE_FINISHED' or transaction.tradeStatus == 'TRADE_SUCCESS':
                    transaction.paidDate = datetime.strptime(request.GET.get('notify_time'), TRANSACTION_TIME_FORMAT).replace(tzinfo=pytz.timezone(TIME_ZONE))
                    transaction.state = 2
                    transaction.save()
                    transactionSuccessNotification(transaction, False)
            # TODO add a page for it
            if transaction.goods.type == 2:
                return redirect('user_assistant_order')
            return HttpResponse("Payment completed.")
    return HttpResponse("Error.")
@csrf_exempt
def alipay_notify(request):
    """Asynchronous (server-to-server) alipay web payment notification.

    Verifies the signed POST parameters and records the payment result
    (paid or closed) on the matching Transaction, answering "success" so
    alipay stops retrying.
    """
    try:
        paymethod = Pay.getPayMethod()
        account = paymethod.getAccount(True)
        pay = alipay.Alipay(pid=account.pid, key=account.key, seller_email=account.email)
        parameters = {k: v for k, v in request.POST.iteritems()}
        logger.info("Received alipay notify at %s with parameters '%s'." %(datetime.now(),
            '&'.join(['%s=%s' % (key, v) for key,v in request.POST.iteritems()])))
        if pay.verify_notify(**parameters):
            # NOTE(review): the verified parameters came from request.POST,
            # yet every field below is read from request.GET — for a
            # server-to-server notify these are presumably meant to come
            # from request.POST (or `parameters`); confirm against the
            # alipay notification docs before changing.
            tradenum = request.GET.get('out_trade_no')
            try:
                transaction = Transaction.objects.get(id=getIdFromTradeNum(tradenum))
                if transaction.tradeStatus == 'TRADE_FINISHED' or transaction.tradeStatus == 'TRADE_CLOSED':
                    # already completed transaction
                    return HttpResponse("success")
                if transaction.paytradeNum is None:
                    transaction.paytradeNum = request.GET.get('trade_no')
                transaction.tradeStatus = request.GET.get('trade_status')
                transaction.notifyid = request.GET.get('notify_id')
                transaction.buyerEmail = request.GET.get('buyer_email')
                transaction.buyeid = request.GET.get('buyer_id')
                if transaction.tradeStatus == 'TRADE_FINISHED' or transaction.tradeStatus == 'TRADE_SUCCESS':
                    transaction.paidDate = datetime.strptime(request.GET.get('gmt_payment'), TRANSACTION_TIME_FORMAT).replace(tzinfo=pytz.timezone(TIME_ZONE))
                    transaction.state = 2
                    transactionSuccessNotification(transaction, False)
                elif transaction.tradeStatus == 'TRADE_CLOSED':
                    transaction.closedDate = datetime.strptime(request.GET.get('gmt_close'), TRANSACTION_TIME_FORMAT).replace(tzinfo=pytz.timezone(TIME_ZONE))
                    transaction.state = 4
                transaction.save()
                return HttpResponse("success")
            except Transaction.DoesNotExist:
                #TODO handle error case
                pass
        else:
            logger.warn("alipay notify is invalid.")
    except Exception:
        logger.exception("exception occurred when processing alipay notification.")
    return HttpResponse("Error.")
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Pose estimation of the camera
#
# External dependencies
import pickle
import cv2
import numpy as np
# Calibration pattern size
pattern_size = ( 9, 6 )
# Chessboard pattern
pattern_points = np.zeros( ( np.prod( pattern_size ), 3 ), np.float32 )
pattern_points[ :, :2 ] = np.indices( pattern_size ).T.reshape( -1, 2 )
# Chessboard square size
# (scales the unit grid so the object points use the same units as the squares)
pattern_points *= 34.15
# 3D points
object_points = []
# 2D points
image_points = []
# Load calibration file
# NOTE(review): expects 'calibration.pkl' with 'camera_matrix' and
# 'dist_coefs' keys, written by the matching calibration script — confirm.
with open( 'calibration.pkl', 'rb' ) as calibration_file :
    calibration = pickle.load( calibration_file )
#
# Estimate the camera pose for each test image
for image_file in [ 'pose1.png', 'pose2.png', 'pose3.png', 'pose4.png', 'pose5.png' ] :
    # Read the image
    image = cv2.imread( image_file )
    # Get image size
    # image_size = image.shape[ :2 ][ ::-1 ]
    # Compute new optimal camera matrix
    # new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix( calibration['camera_matrix'], calibration['dist_coefs'], image_size, 1, image_size )
    # Remove lens distortion
    # rectified_image = cv2.undistort( image, calibration['camera_matrix'], calibration['dist_coefs'], None, new_camera_matrix )
    # print calibration['camera_matrix']
    # print new_camera_matrix
    # print roi
    # Print ROI
    #cv2.rectangle( rectified_image, roi[:2], roi[2:], (0,0,255), 2 )
    #cv2.imshow( 'rectified', rectified_image )
    #cv2.waitKey()
    # Convert the image in grayscale
    # rectified_image = cv2.cvtColor( rectified_image, cv2.COLOR_BGR2GRAY )
    image = cv2.cvtColor( image, cv2.COLOR_BGR2GRAY )
    # Chessboard detection flags
    flags = 0
    flags |= cv2.CALIB_CB_ADAPTIVE_THRESH
    flags |= cv2.CALIB_CB_NORMALIZE_IMAGE
    # Find the chessboard corners on the image
    found, corners = cv2.findChessboardCorners( image, pattern_size, flags = flags )
    # Pattern not found
    # NOTE(review): execution continues even when the pattern is missing, so
    # `corners` is None and cornerSubPix below will raise — consider `continue`.
    if not found : print( 'Chessboard not found...' )
    # Termination criteria for the corner detection
    criteria = ( cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 30, 1e-5 )
    # Refine the corner positions
    cv2.cornerSubPix( image, corners, ( 11, 11 ), ( -1, -1 ), criteria )
    # Solve the pose
    _, rotation_vector, translation_vector = cv2.solvePnP( pattern_points, corners.reshape( -1, 2 ), calibration['camera_matrix'], calibration['dist_coefs'] )
    # _, rotation_vector, translation_vector = cv2.solvePnP( pattern_points, corners.reshape( -1, 2 ), calibration['camera_matrix'], calibration['dist_coefs'] )
    # rotation_vector, translation_vector, _ = cv2.solvePnPRansac( pattern_points, corners.reshape( -1, 2 ), calibration['camera_matrix'], calibration['dist_coefs'] )
    # print rotation_vector
    # Python-2 print statement: this script targets Python 2
    print translation_vector.T
    # rotation_matrix, _ = cv2.Rodrigues( rotation_vector )
    # print rotation_matrix
|
# Contains code under test for pytest notebook
def add_func(a, b):
    """Return the sum of ``a`` and ``b``."""
    total = a + b
    return total
def prime(a, b):
    """Return ``a`` raised to the power ``b`` (despite the name)."""
    return pow(a, b)
def euclid(p, q):
    """Return the Euclidean distance between vectors *p* and *q*.

    Args:
        p (list): p vector
        q (list): q vector

    Returns:
        euclidean distance
    """
    squared = sum((q_i - p_i) ** 2 for p_i, q_i in zip(p, q))
    return squared ** 0.5
|
import pytest
from .DirectorRequestBuilder import DirectorRequestBuilder
from .BuilderPointImageryRequest import BuilderPointImageryRequest
from .pointImageryRequest import PointImageryRequest
@pytest.fixture
def testPointRequest():
    """Build a PointImageryRequest via the director/builder pair."""
    builder = BuilderPointImageryRequest()
    DirectorRequestBuilder().construct(builder)
    return builder.request
class Tests:
    def test_fixture(self, testPointRequest):
        """The fixture must yield a PointImageryRequest instance.

        BUG FIX: the fixture function was previously invoked directly
        (``testPointRequest()``), which pytest explicitly forbids and
        turns into an error; fixtures must be requested as test-function
        arguments instead.
        """
        assert isinstance(testPointRequest, PointImageryRequest)
|
import matplotlib.pyplot as plt
import numpy as np
from pymoo.visualization.fitness_landscape import FitnessLandscape
from pymoo.visualization.video.callback_video import AnimationCallback
class TwoVariablesOneObjectiveVisualization(AnimationCallback):
    """Animation callback that plots each generation of a 2-variable,
    single-objective problem on top of its fitness landscape."""
    def __init__(self,
                 n_samples_for_surface=10000,
                 **kwargs):
        super().__init__(**kwargs)
        # population drawn in the previous call; used to mark new individuals
        self.last_pop = None
        # NOTE(review): stored but never used in do() — presumably intended
        # for the FitnessLandscape sampling resolution; confirm.
        self.n_samples_for_surface = n_samples_for_surface

    def do(self, problem, algorithm):
        # check whether the visualization can be done or not - throw exception or simply do nothing
        if problem.n_var != 2 or problem.n_obj != 1:
            raise Exception("This visualization can only be used for problems with two variables and one objective!")
        # draw the problem surface
        FitnessLandscape(problem, _type="contour", kwargs_contour=dict(alpha=0.5)).do()
        # get the population
        pop = algorithm.pop
        X, F, CV = pop.get("X", "F", "CV")
        # all current individuals in blue
        plt.scatter(X[:, 0], X[:, 1], color="blue", marker="o", s=70)
        # mark individuals that were not present in the previous generation
        is_new = np.full(len(pop), True)
        if self.last_pop is not None:
            for k, ind in enumerate(pop):
                # NOTE(review): membership relies on the individual type's
                # __eq__/__hash__ against the stored set — confirm semantics.
                if ind in self.last_pop:
                    is_new[k] = False
        # plot the new population
        if is_new.sum() > 0:
            X, F, CV = pop[is_new].get("X", "F", "CV")
            plt.scatter(X[:, 0], X[:, 1], color="red", marker="*", s=70)
        # overlay the offspring, when the algorithm exposes them
        if hasattr(algorithm, "off") and algorithm.off is not None:
            X, F, CV = algorithm.off.get("X", "F", "CV")
            plt.scatter(X[:, 0], X[:, 1], color="purple", marker="*", s=40)
        # clamp the axes to the problem's variable bounds
        xl, xu = problem.bounds()
        plt.xlim(xl[0], xu[0])
        plt.ylim(xl[1], xu[1])
        plt.title(f"Generation: {algorithm.n_gen}")
        # NOTE(review): none of the artists above carry a label, so legend()
        # will be empty (and may warn) — confirm whether labels were intended.
        plt.legend()
        # store the current population as the last
        self.last_pop = set(pop)
|
#! usr/bin/env python3
# -*- coding: utf-8 -*-
'''
This features.py is used to extract audio features based on openSIMLE.
Require: openSMILE-2.2rc1
OpenSMILE only support audios in WAV format,
so before using this script you could
transform MP3s into WAVs by transformat.sh.
'''
__author__ = 'huizhang'
import csv
import os
import shutil
import subprocess
from math import floor
import numpy as np
def extract_all_wav_feature(wavdir, distfile, opensmiledir):
    '''Extract 6373-dimension static features into one dist file.

    Args:
        wavdir: Path to audios in WAV format.
        distfile: Path of distfile.
        opensmiledir: Path to opensimle project root.

    Returns:
        Distfile containing 6373-dimension static features of all the WAVs.
    '''
    smile_bin = os.path.join(opensmiledir, "SMILExtract")
    conf = os.path.join(opensmiledir, "config", "IS13_ComParE.conf")
    # start from a clean output file: SMILExtract appends to an existing one
    if os.path.exists(distfile):
        os.remove(distfile)
    for name in os.listdir(wavdir):
        if name[-4:] != ".wav":
            continue
        wav_path = os.path.join(wavdir, name)
        subprocess.check_call([smile_bin, "-C", conf, "-I", wav_path, "-O", distfile, "-instname", name])
def extract_frame_feature(wavdir, distdir, opensmiledir):
    '''Extract lld features in frame size: 60ms, step size: 10ms.

    Args:
        wavdir: Path to audios in WAV format.
        distdir: Path of distdir.
        opensmiledir: Path to opensimle project root.

    Returns:
        Distfiles containing lld features for each WAV.
    '''
    smile_bin = os.path.join(opensmiledir, "SMILExtract")
    conf = os.path.join(opensmiledir, "config", "IS13_ComParE_lld.conf")
    # recreate the output directory from scratch
    if os.path.exists(distdir):
        shutil.rmtree(distdir)
    os.mkdir(distdir)
    for name in os.listdir(wavdir):
        if name[-4:] != ".wav":
            continue
        wav_path = os.path.join(wavdir, name)
        dist_path = os.path.join(distdir, name[:-4] + ".csv")
        subprocess.check_call([smile_bin, "-C", conf, "-I", wav_path, "-O", dist_path])
def process_dynamic_feature(llddir, distdir, all_songs_distfile, delimiter=";"):
'''Obtain dynamic features in window size: 1s, shift size: 0.5s.
Args:
llddir: Path to lld feature files.
distdir: Path of distdir.
all_songs_distfile: Path of distfile.
delimiter: csv delimiter in lld feature files, default=';'.
Returns:
Distfiles containing 260-dimension dynamic features all WAVs.
'''
if os.path.exists(distdir):
shutil.rmtree(distdir)
os.mkdir(distdir)
# names of features
headers = ['musicId', 'frameTime', 'F0final_sma_mean', 'voicingFinalUnclipped_sma_mean', 'jitterLocal_sma_mean', 'jitterDDP_sma_mean', 'shimmerLocal_sma_mean', 'logHNR_sma_mean', 'audspec_lengthL1norm_sma_mean', 'audspecRasta_lengthL1norm_sma_mean', 'pcm_RMSenergy_sma_mean', 'pcm_zcr_sma_mean', 'audSpec_Rfilt_sma[0]_mean', 'audSpec_Rfilt_sma[1]_mean', 'audSpec_Rfilt_sma[2]_mean', 'audSpec_Rfilt_sma[3]_mean', 'audSpec_Rfilt_sma[4]_mean', 'audSpec_Rfilt_sma[5]_mean', 'audSpec_Rfilt_sma[6]_mean', 'audSpec_Rfilt_sma[7]_mean', 'audSpec_Rfilt_sma[8]_mean', 'audSpec_Rfilt_sma[9]_mean', 'audSpec_Rfilt_sma[10]_mean', 'audSpec_Rfilt_sma[11]_mean', 'audSpec_Rfilt_sma[12]_mean', 'audSpec_Rfilt_sma[13]_mean', 'audSpec_Rfilt_sma[14]_mean', 'audSpec_Rfilt_sma[15]_mean', 'audSpec_Rfilt_sma[16]_mean', 'audSpec_Rfilt_sma[17]_mean', 'audSpec_Rfilt_sma[18]_mean', 'audSpec_Rfilt_sma[19]_mean', 'audSpec_Rfilt_sma[20]_mean', 'audSpec_Rfilt_sma[21]_mean', 'audSpec_Rfilt_sma[22]_mean', 'audSpec_Rfilt_sma[23]_mean', 'audSpec_Rfilt_sma[24]_mean', 'audSpec_Rfilt_sma[25]_mean', 'pcm_fftMag_fband250-650_sma_mean', 'pcm_fftMag_fband1000-4000_sma_mean', 'pcm_fftMag_spectralRollOff25.0_sma_mean', 'pcm_fftMag_spectralRollOff50.0_sma_mean', 'pcm_fftMag_spectralRollOff75.0_sma_mean', 'pcm_fftMag_spectralRollOff90.0_sma_mean', 'pcm_fftMag_spectralFlux_sma_mean', 'pcm_fftMag_spectralCentroid_sma_mean', 'pcm_fftMag_spectralEntropy_sma_mean', 'pcm_fftMag_spectralVariance_sma_mean', 'pcm_fftMag_spectralSkewness_sma_mean', 'pcm_fftMag_spectralKurtosis_sma_mean', 'pcm_fftMag_spectralSlope_sma_mean', 'pcm_fftMag_psySharpness_sma_mean', 'pcm_fftMag_spectralHarmonicity_sma_mean', 'pcm_fftMag_mfcc_sma[1]_mean', 'pcm_fftMag_mfcc_sma[2]_mean', 'pcm_fftMag_mfcc_sma[3]_mean', 'pcm_fftMag_mfcc_sma[4]_mean', 'pcm_fftMag_mfcc_sma[5]_mean', 'pcm_fftMag_mfcc_sma[6]_mean', 'pcm_fftMag_mfcc_sma[7]_mean', 'pcm_fftMag_mfcc_sma[8]_mean', 'pcm_fftMag_mfcc_sma[9]_mean', 'pcm_fftMag_mfcc_sma[10]_mean', 
'pcm_fftMag_mfcc_sma[11]_mean', 'pcm_fftMag_mfcc_sma[12]_mean', 'pcm_fftMag_mfcc_sma[13]_mean', 'pcm_fftMag_mfcc_sma[14]_mean', 'F0final_sma_de_mean', 'voicingFinalUnclipped_sma_de_mean', 'jitterLocal_sma_de_mean', 'jitterDDP_sma_de_mean', 'shimmerLocal_sma_de_mean', 'logHNR_sma_de_mean', 'audspec_lengthL1norm_sma_de_mean', 'audspecRasta_lengthL1norm_sma_de_mean', 'pcm_RMSenergy_sma_de_mean', 'pcm_zcr_sma_de_mean', 'audSpec_Rfilt_sma_de[0]_mean', 'audSpec_Rfilt_sma_de[1]_mean', 'audSpec_Rfilt_sma_de[2]_mean', 'audSpec_Rfilt_sma_de[3]_mean', 'audSpec_Rfilt_sma_de[4]_mean', 'audSpec_Rfilt_sma_de[5]_mean', 'audSpec_Rfilt_sma_de[6]_mean', 'audSpec_Rfilt_sma_de[7]_mean', 'audSpec_Rfilt_sma_de[8]_mean', 'audSpec_Rfilt_sma_de[9]_mean', 'audSpec_Rfilt_sma_de[10]_mean', 'audSpec_Rfilt_sma_de[11]_mean', 'audSpec_Rfilt_sma_de[12]_mean', 'audSpec_Rfilt_sma_de[13]_mean', 'audSpec_Rfilt_sma_de[14]_mean', 'audSpec_Rfilt_sma_de[15]_mean', 'audSpec_Rfilt_sma_de[16]_mean', 'audSpec_Rfilt_sma_de[17]_mean', 'audSpec_Rfilt_sma_de[18]_mean', 'audSpec_Rfilt_sma_de[19]_mean', 'audSpec_Rfilt_sma_de[20]_mean', 'audSpec_Rfilt_sma_de[21]_mean', 'audSpec_Rfilt_sma_de[22]_mean', 'audSpec_Rfilt_sma_de[23]_mean', 'audSpec_Rfilt_sma_de[24]_mean', 'audSpec_Rfilt_sma_de[25]_mean', 'pcm_fftMag_fband250-650_sma_de_mean', 'pcm_fftMag_fband1000-4000_sma_de_mean', 'pcm_fftMag_spectralRollOff25.0_sma_de_mean', 'pcm_fftMag_spectralRollOff50.0_sma_de_mean', 'pcm_fftMag_spectralRollOff75.0_sma_de_mean', 'pcm_fftMag_spectralRollOff90.0_sma_de_mean', 'pcm_fftMag_spectralFlux_sma_de_mean', 'pcm_fftMag_spectralCentroid_sma_de_mean', 'pcm_fftMag_spectralEntropy_sma_de_mean', 'pcm_fftMag_spectralVariance_sma_de_mean', 'pcm_fftMag_spectralSkewness_sma_de_mean', 'pcm_fftMag_spectralKurtosis_sma_de_mean', 'pcm_fftMag_spectralSlope_sma_de_mean', 'pcm_fftMag_psySharpness_sma_de_mean', 'pcm_fftMag_spectralHarmonicity_sma_de_mean', 'pcm_fftMag_mfcc_sma_de[1]_mean', 'pcm_fftMag_mfcc_sma_de[2]_mean', 
'pcm_fftMag_mfcc_sma_de[3]_mean', 'pcm_fftMag_mfcc_sma_de[4]_mean', 'pcm_fftMag_mfcc_sma_de[5]_mean', 'pcm_fftMag_mfcc_sma_de[6]_mean', 'pcm_fftMag_mfcc_sma_de[7]_mean', 'pcm_fftMag_mfcc_sma_de[8]_mean', 'pcm_fftMag_mfcc_sma_de[9]_mean', 'pcm_fftMag_mfcc_sma_de[10]_mean', 'pcm_fftMag_mfcc_sma_de[11]_mean', 'pcm_fftMag_mfcc_sma_de[12]_mean', 'pcm_fftMag_mfcc_sma_de[13]_mean', 'pcm_fftMag_mfcc_sma_de[14]_mean', 'F0final_sma_std', 'voicingFinalUnclipped_sma_std', 'jitterLocal_sma_std', 'jitterDDP_sma_std', 'shimmerLocal_sma_std', 'logHNR_sma_std', 'audspec_lengthL1norm_sma_std', 'audspecRasta_lengthL1norm_sma_std', 'pcm_RMSenergy_sma_std', 'pcm_zcr_sma_std', 'audSpec_Rfilt_sma[0]_std', 'audSpec_Rfilt_sma[1]_std', 'audSpec_Rfilt_sma[2]_std', 'audSpec_Rfilt_sma[3]_std', 'audSpec_Rfilt_sma[4]_std', 'audSpec_Rfilt_sma[5]_std', 'audSpec_Rfilt_sma[6]_std', 'audSpec_Rfilt_sma[7]_std', 'audSpec_Rfilt_sma[8]_std', 'audSpec_Rfilt_sma[9]_std', 'audSpec_Rfilt_sma[10]_std', 'audSpec_Rfilt_sma[11]_std', 'audSpec_Rfilt_sma[12]_std', 'audSpec_Rfilt_sma[13]_std', 'audSpec_Rfilt_sma[14]_std', 'audSpec_Rfilt_sma[15]_std', 'audSpec_Rfilt_sma[16]_std', 'audSpec_Rfilt_sma[17]_std', 'audSpec_Rfilt_sma[18]_std', 'audSpec_Rfilt_sma[19]_std', 'audSpec_Rfilt_sma[20]_std', 'audSpec_Rfilt_sma[21]_std', 'audSpec_Rfilt_sma[22]_std', 'audSpec_Rfilt_sma[23]_std', 'audSpec_Rfilt_sma[24]_std', 'audSpec_Rfilt_sma[25]_std', 'pcm_fftMag_fband250-650_sma_std', 'pcm_fftMag_fband1000-4000_sma_std', 'pcm_fftMag_spectralRollOff25.0_sma_std', 'pcm_fftMag_spectralRollOff50.0_sma_std', 'pcm_fftMag_spectralRollOff75.0_sma_std', 'pcm_fftMag_spectralRollOff90.0_sma_std', 'pcm_fftMag_spectralFlux_sma_std', 'pcm_fftMag_spectralCentroid_sma_std', 'pcm_fftMag_spectralEntropy_sma_std', 'pcm_fftMag_spectralVariance_sma_std', 'pcm_fftMag_spectralSkewness_sma_std', 'pcm_fftMag_spectralKurtosis_sma_std', 'pcm_fftMag_spectralSlope_sma_std', 'pcm_fftMag_psySharpness_sma_std', 'pcm_fftMag_spectralHarmonicity_sma_std', 
'pcm_fftMag_mfcc_sma[1]_std', 'pcm_fftMag_mfcc_sma[2]_std', 'pcm_fftMag_mfcc_sma[3]_std', 'pcm_fftMag_mfcc_sma[4]_std', 'pcm_fftMag_mfcc_sma[5]_std', 'pcm_fftMag_mfcc_sma[6]_std', 'pcm_fftMag_mfcc_sma[7]_std', 'pcm_fftMag_mfcc_sma[8]_std', 'pcm_fftMag_mfcc_sma[9]_std', 'pcm_fftMag_mfcc_sma[10]_std', 'pcm_fftMag_mfcc_sma[11]_std', 'pcm_fftMag_mfcc_sma[12]_std', 'pcm_fftMag_mfcc_sma[13]_std', 'pcm_fftMag_mfcc_sma[14]_std', 'F0final_sma_de_std', 'voicingFinalUnclipped_sma_de_std', 'jitterLocal_sma_de_std', 'jitterDDP_sma_de_std', 'shimmerLocal_sma_de_std', 'logHNR_sma_de_std', 'audspec_lengthL1norm_sma_de_std', 'audspecRasta_lengthL1norm_sma_de_std', 'pcm_RMSenergy_sma_de_std', 'pcm_zcr_sma_de_std', 'audSpec_Rfilt_sma_de[0]_std', 'audSpec_Rfilt_sma_de[1]_std', 'audSpec_Rfilt_sma_de[2]_std', 'audSpec_Rfilt_sma_de[3]_std', 'audSpec_Rfilt_sma_de[4]_std', 'audSpec_Rfilt_sma_de[5]_std', 'audSpec_Rfilt_sma_de[6]_std', 'audSpec_Rfilt_sma_de[7]_std', 'audSpec_Rfilt_sma_de[8]_std', 'audSpec_Rfilt_sma_de[9]_std', 'audSpec_Rfilt_sma_de[10]_std', 'audSpec_Rfilt_sma_de[11]_std', 'audSpec_Rfilt_sma_de[12]_std', 'audSpec_Rfilt_sma_de[13]_std', 'audSpec_Rfilt_sma_de[14]_std', 'audSpec_Rfilt_sma_de[15]_std', 'audSpec_Rfilt_sma_de[16]_std', 'audSpec_Rfilt_sma_de[17]_std', 'audSpec_Rfilt_sma_de[18]_std', 'audSpec_Rfilt_sma_de[19]_std', 'audSpec_Rfilt_sma_de[20]_std', 'audSpec_Rfilt_sma_de[21]_std', 'audSpec_Rfilt_sma_de[22]_std', 'audSpec_Rfilt_sma_de[23]_std', 'audSpec_Rfilt_sma_de[24]_std', 'audSpec_Rfilt_sma_de[25]_std', 'pcm_fftMag_fband250-650_sma_de_std', 'pcm_fftMag_fband1000-4000_sma_de_std', 'pcm_fftMag_spectralRollOff25.0_sma_de_std', 'pcm_fftMag_spectralRollOff50.0_sma_de_std', 'pcm_fftMag_spectralRollOff75.0_sma_de_std', 'pcm_fftMag_spectralRollOff90.0_sma_de_std', 'pcm_fftMag_spectralFlux_sma_de_std', 'pcm_fftMag_spectralCentroid_sma_de_std', 'pcm_fftMag_spectralEntropy_sma_de_std', 'pcm_fftMag_spectralVariance_sma_de_std', 'pcm_fftMag_spectralSkewness_sma_de_std', 
'pcm_fftMag_spectralKurtosis_sma_de_std', 'pcm_fftMag_spectralSlope_sma_de_std', 'pcm_fftMag_psySharpness_sma_de_std', 'pcm_fftMag_spectralHarmonicity_sma_de_std', 'pcm_fftMag_mfcc_sma_de[1]_std', 'pcm_fftMag_mfcc_sma_de[2]_std', 'pcm_fftMag_mfcc_sma_de[3]_std', 'pcm_fftMag_mfcc_sma_de[4]_std', 'pcm_fftMag_mfcc_sma_de[5]_std', 'pcm_fftMag_mfcc_sma_de[6]_std', 'pcm_fftMag_mfcc_sma_de[7]_std', 'pcm_fftMag_mfcc_sma_de[8]_std', 'pcm_fftMag_mfcc_sma_de[9]_std', 'pcm_fftMag_mfcc_sma_de[10]_std', 'pcm_fftMag_mfcc_sma_de[11]_std', 'pcm_fftMag_mfcc_sma_de[12]_std', 'pcm_fftMag_mfcc_sma_de[13]_std', 'pcm_fftMag_mfcc_sma_de[14]_std']
window = 1
overlap = 0.5
llds = [f for f in os.listdir(llddir) if f[-4:] == ".csv"]
all_dynamic_features = []
all_musicId = []
for lld in llds:
musicId = []
lldpath = os.path.join(llddir,lld)
single_song_distfile = os.path.join(distdir,lld)
dynamic_features = _compute_feature_with_window_and_overlap(lldpath, window, overlap, delimiter)
for i in range(len(dynamic_features)):
musicId.append(lld[:-4])
_write_features_to_csv(headers, musicId, dynamic_features, single_song_distfile)
all_musicId += musicId
all_dynamic_features += dynamic_features
_write_features_to_csv(headers, all_musicId, all_dynamic_features, all_songs_distfile)
def _compute_feature_with_window_and_overlap(lldpath, window, overlap, delimiter):
'''Compute the mean and std for frame-wise features in window size: 1s, shift size: 0.5s.'''
fs = 0.01
num_in_new_frame = floor(overlap/fs)
num_in_window = floor(window/fs)
# load the features from disk
all_frame = []
with open(lldpath) as f:
reader = csv.reader(f,delimiter=delimiter)
next(reader)
for row in reader:
frame_feature = []
for i in range(len(row)-1): #旧的frametime不用记录
frame_feature.append(float(row[i+1]))
all_frame.append(frame_feature)
# compute new number of frames
new_num_of_frame = floor(len(all_frame)/num_in_new_frame)
all_new_frame = []
# compute mean and std in each window as the feature corresponding to the frame.
for i in range(new_num_of_frame):
start_index = num_in_new_frame * i
new_frame_array = np.array(all_frame[start_index:start_index+num_in_window])
mean_llds = np.mean(new_frame_array,axis=0)
std_llds = np.std(new_frame_array,axis=0)
new_frametime = i * overlap
new_frame = [new_frametime] + mean_llds.tolist() + std_llds.tolist()
all_new_frame.append(new_frame)
return all_new_frame
def _write_features_to_csv(headers, musicIds, contents, distfile):
'''Write all the features into one file, and add the last column as the annotation value'''
with open(distfile,"w") as newfile:
writer = csv.writer(newfile)
writer.writerow(headers + ["class"])
for i in range(len(contents)):
writer.writerow([musicIds[i]] + contents[i] + ["?"])
if __name__ == "__main__":
    # I/O locations -- edit these paths before running the pipeline.
    wav_dir = "/Path/to/WAVs"
    opensmile_dir = "/Path/to/openSMILE-2.1.0"
    static_out = "static_features.arff"
    lld_dir = "IS13features_lld"
    dynamic_dir = "dynamic_features"
    dynamic_out = "dynamic_features.csv"
    sep = ";"
    # 1) static features, 2) frame-wise llds, 3) windowed dynamic features
    extract_all_wav_feature(wav_dir, static_out, opensmile_dir)
    extract_frame_feature(wav_dir, lld_dir, opensmile_dir)
    process_dynamic_feature(lld_dir, dynamic_dir, dynamic_out, sep)
|
from unittest.mock import patch
import miscutils.testing as under_test
class TestPatcher:
    # Its tempting to use the utility to test itself...
    @patch("miscutils.testing.patch")
    def test_creates_relative_patches(self, mock_patch):
        """relative_patch_maker should prefix the namespace onto the target."""
        mock_patch.return_value = "patch_context"

        patcher = under_test.relative_patch_maker("some_namespace")
        result = patcher("some_relative_thing_to_replace")

        mock_patch.assert_called_once()
        mock_patch.assert_called_with("some_namespace.some_relative_thing_to_replace")
        assert result == "patch_context"
|
import pymongo
import unittest
import pytest
from pymongo import IndexModel
from mongodantic.models import MongoModel
from mongodantic import connect
from mongodantic.exceptions import MongoIndexError
class TestIndexOperation:
    """Index management tests; requires a MongoDB reachable at 127.0.0.1:27017."""

    def setup(self, drop=False, basic_indexes=True):
        """Create the Ticket model and apply its declared indexes.

        Args:
            drop: drop the backing collection first when True.
            basic_indexes: declare both position and name indexes when True,
                only the position index otherwise.
        """
        connect("mongodb://127.0.0.1:27017", "test")

        class Ticket(MongoModel):
            name: str
            position: int
            config: dict

            class Config:
                if basic_indexes:
                    indexes = [IndexModel([('position', 1)]), IndexModel([('name', 1)])]
                else:
                    # fixed: was a redundant chained assignment
                    # ("indexes = indexes = [...]")
                    indexes = [IndexModel([('position', 1)])]

        if drop:
            Ticket.querybuilder.drop_collection(force=True)
        self.Ticket = Ticket
        self.Ticket.execute_indexes()

    def test_check_indexes(self):
        self.setup(False)
        result = self.Ticket.querybuilder.check_indexes()
        assert result == {
            '_id_': {'key': {'_id': 1}},
            'position_1': {'key': {'position': 1}},
            'name_1': {'key': {'name': 1}},
        }

    def test_check_indexes_if_remove(self):
        self.setup(False, False)
        result = self.Ticket.querybuilder.check_indexes()
        assert result == {
            '_id_': {'key': {'_id': 1}},
            'position_1': {'key': {'position': 1}},
        }

    def test_drop_index(self):
        self.setup(False)
        # dropping an unknown index must raise; dropping a real one reports it
        with pytest.raises(MongoIndexError):
            result = self.Ticket.querybuilder.drop_index('position1111')
        result = self.Ticket.querybuilder.drop_index('position_1')
        assert result == 'position_1 dropped.'
        self.setup(True, False)
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import tempfile
import nibabel as nib
import numpy as np
import torch
from monai.data.synthetic import create_test_image_2d
# Environment variable which, when set to "true", disables slow tests.
quick_test_var = "QUICKTEST"

def skip_if_quick(obj):
    """Skip decorator honouring the QUICKTEST environment variable."""
    quick_mode = os.environ.get(quick_test_var, "").lower() == "true"
    return unittest.skipIf(quick_mode, "Skipping slow tests")(obj)
def make_nifti_image(array, affine=None):
    """
    Create a temporary nifti image on the disk and return the image name.
    User is responsible for deleting the temporary file when done with it.
    """
    # default to an identity affine when none is supplied
    nifti = nib.Nifti1Image(array, np.eye(4) if affine is None else affine)
    temp_fd, image_name = tempfile.mkstemp(suffix=".nii.gz")
    nib.save(nifti, image_name)
    os.close(temp_fd)
    return image_name
class NumpyImageTestCase2D(unittest.TestCase):
    """Provides a synthetic 2D image/segmentation pair as numpy fixtures."""
    im_shape = (128, 128)
    input_channels = 1
    output_channels = 4
    num_classes = 3

    def setUp(self):
        image, mask = create_test_image_2d(self.im_shape[0], self.im_shape[1], 4, 20, 0, self.num_classes)
        # add batch and channel axes: shape becomes (1, 1, H, W)
        self.imt = image[None, None]
        self.seg1 = (mask[None, None] > 0).astype(np.float32)  # binary foreground mask
        self.segn = mask[None, None]                           # labelled segmentation
class TorchImageTestCase2D(NumpyImageTestCase2D):
    """Same fixtures as NumpyImageTestCase2D, converted to torch tensors."""

    def setUp(self):
        NumpyImageTestCase2D.setUp(self)
        # wrap the numpy fixtures as tensors for torch-based tests
        for attr in ("imt", "seg1", "segn"):
            setattr(self, attr, torch.tensor(getattr(self, attr)))
def expect_failure_if_no_gpu(test):
    """Mark *test* as an expected failure when no CUDA device is available."""
    if torch.cuda.is_available():
        return test
    return unittest.expectedFailure(test)
|
#Utility routines for HyperDrive and HyperCheck
import numpy as np
import copy
import importlib
from scipy import optimize
# Numerical guard values used throughout the module.
big = 1.0e10    # effectively-infinite penalty value
small = 1.0e-6  # effectively-zero floor value

def mac(x):
    """Macaulay bracket: x for positive x, otherwise 0."""
    return x if x > 0.0 else 0.0

def S(x):
    """Sign function returning +/-1.0 (zero maps to -1.0)."""
    return 1.0 if x > 0.0 else -1.0

def floor(x):
    """Clamp x from below by `small`."""
    return small if x < small else x

def Ineg(x):
    """Indicator penalty: 0 for non-positive x, `big` otherwise."""
    return 0.0 if x <= 0.0 else big

def Nneg(x):
    """Linear penalty: 0 for non-positive x, `big`*x otherwise."""
    return 0.0 if x <= 0.0 else big * x
def princon(hm):
    """Print the constants of model *hm* followed by its named derived values."""
    print("Constants for model:", hm.file)
    print(hm.const)
    print("Derived values:")
    # print each constant against its name (indexing by const keeps the
    # original behaviour if the two lists differ in length)
    for i in range(len(hm.const)):
        print(hm.name_const[i] + " =", hm.const[i])
# Rate potentials and their derivatives with respect to y; mu and r are
# model constants supplied by the calling model files.
def w_rate_lin(y, mu):
    """Quadratic (linear-viscous) rate potential."""
    return (mac(y) ** 2) / (2.0 * mu)

def w_rate_lind(y, mu):
    """Derivative of w_rate_lin with respect to y."""
    return mac(y) / mu

def w_rate_rpt(y, mu, r):
    """cosh-based rate potential."""
    return mu * (r ** 2) * (np.cosh(mac(y) / (mu * r)) - 1.0)

def w_rate_rptd(y, mu, r):
    """Derivative of w_rate_rpt with respect to y."""
    return r * np.sinh(mac(y) / (mu * r))
def calcsum(vec):
    """Sum-of-squares misfit between the stress history produced with the
    trial constants *vec* and the recorded base-curve stresses ``sigrec``.

    Serves as the objective function for ``optimize.minimize`` in ``optim``;
    state is exchanged with the driver entirely through module globals.
    """
    global eps, sig, chi, alp, nstep, nsub, deps, sigrec, const, hm, n_int, var
    sumerr = 0.0
    print(vec)
    # install the trial constants into the model (slots 2, 4, 6, ...) and
    # refresh the model's derived values
    hm.const = copy.deepcopy(const)
    for i in range(n_int):
        hm.const[2+2*i] = vec[i]
    hm.deriv()
    # reset the state variables for a fresh strain-driven run
    eps = 0.0
    sig = 0.0
    alp = np.zeros(n_int)
    chi = np.zeros(n_int)
    # HARM-type variants carry one extra internal variable
    if "_h" in var:
        alp = np.zeros(n_int+1)
        chi = np.zeros(n_int+1)
    if "_cbh" in var:
        alp = np.zeros(n_int+1)
        chi = np.zeros(n_int+1)
    # re-trace the strain history and accumulate the squared stress error
    # at the end of each recorded step
    for step in range(nstep):
        for i in range(nsub):
            strain_inc_f(deps)
        error = sig - sigrec[step]
        sumerr += error**2
    return sumerr
def solve_L(yo, Lmatp, Lrhsp):
    """Solve for the plastic multipliers L.

    Rows of the system start as the elastic identity solution (L = 0) and
    are overwritten with the plastic rows (*Lmatp*, *Lrhsp*) for every
    yield surface whose value *yo* indicates yielding.
    """
    global Lmat, Lrhs, L
    Lmat = np.eye(hm.n_y) #initialise matrix and RHS for elastic solution
    Lrhs = np.zeros(hm.n_y)
    for N in range(hm.n_y): #loop over yield surfaces
        if yo[N] > -0.00001: #if this surface yielding ...
            Lmat[N] = Lmatp[N] #over-write line in matrix with plastic solution
            Lrhs[N] = Lrhsp[N]
    L = np.linalg.solve(Lmat, Lrhs) #solve for plastic multipliers
    L = np.array([max(Lv, 0.0) for Lv in L]) #make plastic multipliers non-negative
    return L
def strain_inc_f(deps):
    """Advance the material state by one strain increment *deps*.

    Updates the module-global state (eps, sig, alp, chi) using the current
    hyperplasticity model module ``hm``.
    """
    global eps, sig, alp, chi
    acc = 0.5  # acceleration factor applied to the current yield values
    yo = hm.y_f(chi,eps,alp)
    # derivatives of y corrected for the dependence of chi on (eps, alp),
    # since chi = -df/dalpha is recomputed at the end of the step
    dyda_minus = hm.dyda_f(chi,eps,alp) - np.einsum("Nm,mn->Nn", hm.dydc_f(chi,eps,alp), hm.d2fdada(eps,alp))
    dyde_minus = hm.dyde_f(chi,eps,alp) - np.einsum("Nm,m->N", hm.dydc_f(chi,eps,alp), hm.d2fdade(eps,alp))
    # assemble the plastic rows of the multiplier system and solve
    Lmatp = -np.einsum("Nn,Mn->NM", dyda_minus, hm.dydc_f(chi,eps,alp))
    Lrhsp = acc*yo + np.einsum("N,->N", dyde_minus, deps)
    L = solve_L(yo, Lmatp, Lrhsp)
    # internal-variable increments from the flow rule
    dalp = np.einsum("N,Nm->m",L,hm.dydc_f(chi,eps,alp))
    eps = eps + deps
    alp = alp + dalp
    # stress and generalised stress from the free-energy derivatives
    sig = hm.dfde(eps,alp)
    chi = -hm.dfda(eps,alp)
def optim(base, variant):
    """Fit the constants of model *variant* so that its stress response
    reproduces the base curve generated by model *base*.

    The loading programme (nstep, nsub, deps) and the constants are taken
    from, and written back to, module globals. Returns the updated ``const``.
    """
    global sigrec, hm, nstep, nsub, n_int, const, deps, var
    global eps, sig, alp, chi
    var = variant
    sigrec = np.zeros(nstep)
    print("calculate base curve from", base)
    hm = importlib.import_module(base)
    hm.setvals()
    hm.const = copy.deepcopy(const[:2+2*n_int])
    hm.deriv()
    # reset the state and record the base model's stress history
    eps = 0.0
    sig = 0.0
    alp = np.zeros(n_int)
    chi = np.zeros(n_int)
    for step in range(nstep):
        for i in range(nsub): strain_inc_f(deps)
        sigrec[step] = sig
    print("optimise", variant)
    hm = importlib.import_module(variant)
    hm.setvals()
    # starting point: the constants at slots 2, 4, 6, ...
    vec = np.zeros(n_int)
    for i in range(n_int): vec[i] = const[2*i+2]
    print(vec,calcsum(vec))
    #optimize.Bounds(0.0,np.inf)
    # keep the fitted constants strictly positive
    bnds = optimize.Bounds(0.0001,np.inf)
    resultop = optimize.minimize(calcsum, vec, method='L-BFGS-B', bounds=bnds)
    vec = resultop.x
    print(vec,calcsum(vec))
    # write the fitted values back into the global constants list
    for i in range(n_int): const[2+2*i] = resultop.x[i]
    return const
def derive_from_points(modeltype, epsin, sigin, Einf=0.0, epsmax=0.0, HARM_R=0.0):
    """Derive multi-surface model constants from a piecewise-linear
    stress-strain curve given as points.

    Args:
        modeltype: model name containing "ser", "par" or "nest" to select
            the series / parallel / nested form, optionally with "_b", "_h"
            or "_cbh" markers that trigger an extra optimisation pass.
        epsin: strain coordinates of the curve points.
        sigin: stress coordinates of the curve points.
        Einf: final (post-last-point) stiffness.
        epsmax: maximum strain for the optimisation pass (0.0 -> derived
            as 1.5x the last strain point).
        HARM_R: HARM model constant appended for "_h"/"_cbh" variants.

    Returns:
        The global ``const`` list of derived constants.
    """
    global nstep, nsub, n_int, const, deps
    eps = np.array(epsin)
    sig = np.array(sigin)
    le = len(eps)
    ls = len(sig)
    if ls != le:
        print("Unequal numbers of values")
        le = min(le, ls)
    n_int = le
    # secant stiffness between successive points; E[n_int] is the final slope
    E = np.zeros(n_int+1)
    E[0] = sig[0] / eps[0]
    for i in range(1,n_int):
        E[i] = (sig[i]-sig[i-1]) / (eps[i]-eps[i-1])
    E[n_int] = Einf
    print("eps =",eps)
    print("sig =",sig)
    print("E =",E)
    k = np.zeros(n_int)  # yield-strength constants
    H = np.zeros(n_int)  # hardening moduli
    if "ser" in modeltype:
        print("Series parameters")
        E0 = E[0]
        for i in range(n_int):
            k[i] = sig[i]
            H[i] = E[i+1]*E[i]/(E[i] - E[i+1])
        const = [E0, n_int]
        for i in range(n_int):
            const.append(round(k[i],6))
            const.append(round(H[i],6))
        base = "h1epmk_ser"
    elif "par" in modeltype:
        print("Parallel parameters")
        for i in range(n_int):
            H[i] = E[i] - E[i+1]
            k[i] = eps[i]*H[i]
        const = [Einf, n_int]
        for i in range(n_int):
            const.append(round(k[i],6))
            const.append(round(H[i],6))
        base = "h1epmk_par"
    elif "nest" in modeltype:
        print("Nested parameters")
        E0 = E[0]
        # NOTE: at i == 0 the loop reads sig[-1] (the last point); the
        # value is overwritten by k[0] = sig[0] immediately afterwards.
        for i in range(n_int):
            k[i] = sig[i] - sig[i-1]
            H[i] = E[i+1]*E[i]/(E[i] - E[i+1])
        k[0] = sig[0]
        const = [E0, n_int]
        for i in range(n_int):
            const.append(round(k[i],6))
            const.append(round(H[i],6))
        base = "h1epmk_nest"
    if "_b" in modeltype: #now optimise for bounding surface model
        print("Optimise parameters for _b option")
        nstep = 100
        nsub = 10
        if epsmax == 0.0:
            epsmax = 1.5*eps[n_int-1]
            print("setting epsmax =",epsmax)
        deps = epsmax / float(nstep*nsub)
        const = optim(base, modeltype)
        for i in range(2,2+2*n_int):
            const[i] = round(const[i],6)
    if "_h" in modeltype: #now optimise for HARM model
        print("Optimise parameters for _h option")
        nstep = 100
        nsub = 10
        if epsmax == 0.0:
            epsmax = 1.5*eps[n_int-1]
            print("setting epsmax =",epsmax)
        deps = epsmax / float(nstep*nsub)
        # HARM variants carry the extra R constant at the end of const
        const.append(HARM_R)
        const = optim(base, modeltype)
        for i in range(2,2+2*n_int):
            const[i] = round(const[i],6)
    if "_cbh" in modeltype: #now optimise for bounding HARM model
        print("Optimise parameters for _cbh option")
        nstep = 100
        nsub = 10
        if epsmax == 0.0:
            epsmax = 1.5*eps[n_int-1]
            print("setting epsmax =",epsmax)
        deps = epsmax / float(nstep*nsub)
        const.append(HARM_R)
        const = optim(base, modeltype)
        for i in range(2,2+2*n_int):
            const[i] = round(const[i],6)
    return const
def numdiff_1(mode, ndim, fun, var, alp, vari):
    """Central-difference estimate of d(fun)/d(var).

    mode 0 treats *var* as a scalar and returns a scalar; any other mode
    treats it as a vector of length *ndim* and returns the gradient vector.
    """
    result = 0.0 if mode == 0 else np.zeros([ndim])
    for i in range(ndim):
        lo = copy.deepcopy(var)
        hi = copy.deepcopy(var)
        if mode == 0:
            lo = var - vari
            hi = var + vari
        else:
            lo[i] = var[i] - vari
            hi[i] = var[i] + vari
        f_lo = fun(lo, alp)
        f_hi = fun(hi, alp)
        slope = (f_hi - f_lo) / (2.0 * vari)
        if mode == 0:
            result = slope
        else:
            result[i] = slope
    return result
def numdiff_2(mode, ndim, n_int, fun, var, alp, alpi):
    """Central-difference estimate of d(fun)/d(alp).

    mode 0: each alp[k] is a scalar -> returns shape [n_int]; otherwise
    each alp[k] is a vector of length ndim -> returns shape [n_int, ndim].
    """
    result = np.zeros([n_int]) if mode == 0 else np.zeros([n_int, ndim])
    for k in range(n_int):
        for i in range(ndim):
            lo = copy.deepcopy(alp)
            hi = copy.deepcopy(alp)
            if mode == 0:
                lo[k] = alp[k] - alpi
                hi[k] = alp[k] + alpi
            else:
                lo[k, i] = alp[k, i] - alpi
                hi[k, i] = alp[k, i] + alpi
            f_lo = fun(var, lo)
            f_hi = fun(var, hi)
            slope = (f_hi - f_lo) / (2.0 * alpi)
            if mode == 0:
                result[k] = slope
            else:
                result[k, i] = slope
    return result
def numdiff_3(mode, ndim, n_int, n_y, fun, chi, var, alp, chii):
    """Central-difference of the n_y-vector function *fun* w.r.t. chi.

    Returns shape [n_y, n_int] for mode 0, else [n_y, n_int, ndim].
    """
    result = np.zeros([n_y, n_int]) if mode == 0 else np.zeros([n_y, n_int, ndim])
    for k in range(n_int):
        for i in range(ndim):
            lo = copy.deepcopy(chi)
            hi = copy.deepcopy(chi)
            if mode == 0:
                lo[k] = chi[k] - chii
                hi[k] = chi[k] + chii
            else:
                lo[k, i] = chi[k, i] - chii
                hi[k, i] = chi[k, i] + chii
            f_lo = fun(lo, var, alp)
            f_hi = fun(hi, var, alp)
            for j in range(n_y):
                slope = (f_hi[j] - f_lo[j]) / (2.0 * chii)
                if mode == 0:
                    result[j, k] = slope
                else:
                    result[j, k, i] = slope
    return result
def numdiff_4(mode, ndim, n_int, n_y, fun, chi, var, alp, vari):
    """Central-difference of the n_y-vector function *fun* w.r.t. var.

    Returns shape [n_y] for mode 0 (scalar var), else [n_y, ndim].
    """
    result = np.zeros([n_y]) if mode == 0 else np.zeros([n_y, ndim])
    for i in range(ndim):
        lo = copy.deepcopy(var)
        hi = copy.deepcopy(var)
        if mode == 0:
            lo = var - vari
            hi = var + vari
        else:
            lo[i] = var[i] - vari
            hi[i] = var[i] + vari
        f_lo = fun(chi, lo, alp)
        f_hi = fun(chi, hi, alp)
        for j in range(n_y):
            slope = (f_hi[j] - f_lo[j]) / (2.0 * vari)
            if mode == 0:
                result[j] = slope
            else:
                result[j, i] = slope
    return result
def numdiff_5(mode, ndim, n_int, n_y, fun, chi, var, alp, alpi):
    """Central-difference of the n_y-vector function *fun* w.r.t. alp.

    Returns shape [n_y, n_int] for mode 0, else [n_y, n_int, ndim].
    """
    result = np.zeros([n_y, n_int]) if mode == 0 else np.zeros([n_y, n_int, ndim])
    for k in range(n_int):
        for i in range(ndim):
            lo = copy.deepcopy(alp)
            hi = copy.deepcopy(alp)
            if mode == 0:
                lo[k] = alp[k] - alpi
                hi[k] = alp[k] + alpi
            else:
                lo[k, i] = alp[k, i] - alpi
                hi[k, i] = alp[k, i] + alpi
            f_lo = fun(chi, var, lo)
            f_hi = fun(chi, var, hi)
            for j in range(n_y):
                slope = (f_hi[j] - f_lo[j]) / (2.0 * alpi)
                if mode == 0:
                    result[j, k] = slope
                else:
                    result[j, k, i] = slope
    return result
def numdiff_6(mode, ndim, n_int, fun, chi, var, alp, chii):
    """Central-difference of the scalar function *fun* w.r.t. chi.

    Returns shape [n_int] for mode 0, else [n_int, ndim].
    """
    result = np.zeros([n_int]) if mode == 0 else np.zeros([n_int, ndim])
    for k in range(n_int):
        for i in range(ndim):
            lo = copy.deepcopy(chi)
            hi = copy.deepcopy(chi)
            if mode == 0:
                lo[k] = chi[k] - chii
                hi[k] = chi[k] + chii
            else:
                lo[k, i] = chi[k, i] - chii
                hi[k, i] = chi[k, i] + chii
            f_lo = fun(lo, var, alp)
            f_hi = fun(hi, var, alp)
            slope = (f_hi - f_lo) / (2.0 * chii)
            if mode == 0:
                result[k] = slope
            else:
                result[k, i] = slope
    return result
def numdiff_6a(mode,ndim,n_int,fun,chi,var,alp,chii):
    """One-sided difference of the scalar function *fun* w.r.t. chi,
    choosing between a forward and a backward difference per component.

    Returns shape [n_int] for mode 0, else [n_int, ndim].
    """
    if mode == 0: num = np.zeros([n_int])
    else: num = np.zeros([n_int,ndim])
    for k in range(n_int):
        for i in range(ndim):
            chi1 = copy.deepcopy(chi)
            chi2 = copy.deepcopy(chi)
            if mode == 0:
                chi1[k] = chi[k] - chii
                chi2[k] = chi[k] + chii
            else:
                chi1[k,i] = chi[k,i] - chii
                chi2[k,i] = chi[k,i] + chii
            f1 = fun(chi1,var,alp)  # backward sample
            f0 = fun(chi,var,alp)   # centre sample
            f2 = fun(chi2,var,alp)  # forward sample
            # NOTE(review): this is a chained comparison, equivalent to
            # "abs(f2-f0) > abs(f1-f0) and abs(f1-f0) == 0.0", i.e. the
            # forward difference is used only when the backward difference
            # is exactly zero and the forward one is non-zero. If the
            # intent was simply "abs(f2-f0) > abs(f1-f0)" this is a bug --
            # confirm against the author's derivation.
            if abs(f2-f0) > abs(f1-f0) == 0.0:
                # forward difference
                if mode == 0: num[k] = (f2 - f0) / chii
                else: num[k,i] = (f2 - f0) / chii
            else:
                # backward difference
                if mode == 0: num[k] = (f0 - f1) / chii
                else: num[k,i] = (f0 - f1) / chii
    return num
def numdiff2_1(mode, ndim, fun, var, alp, vari):
    """Second derivative d2(fun)/d(var)2 by central differences.

    mode 0: scalar var -> scalar result; otherwise the full [ndim, ndim]
    Hessian of the scalar function *fun* with respect to the vector var.
    """
    result = 0.0 if mode == 0 else np.zeros([ndim, ndim])
    for i in range(ndim):
        for j in range(ndim):
            if i == j:
                # diagonal: three-point second difference
                lo = copy.deepcopy(var)
                hi = copy.deepcopy(var)
                if mode == 0:
                    lo = var - vari
                    hi = var + vari
                else:
                    lo[i] = var[i] - vari
                    hi[i] = var[i] + vari
                second = (fun(lo, alp) - 2.0 * fun(var, alp) + fun(hi, alp)) / (vari ** 2)
                if mode == 0:
                    result = second
                else:
                    result[i, i] = second
            else:
                # off-diagonal: four-point mixed difference
                mm = copy.deepcopy(var)
                mp = copy.deepcopy(var)
                pm = copy.deepcopy(var)
                pp = copy.deepcopy(var)
                mm[i] = var[i] - vari
                mm[j] = var[j] - vari
                mp[i] = var[i] - vari
                mp[j] = var[j] + vari
                pm[i] = var[i] + vari
                pm[j] = var[j] - vari
                pp[i] = var[i] + vari
                pp[j] = var[j] + vari
                result[i, j] = (fun(mm, alp) - fun(mp, alp) - fun(pm, alp) + fun(pp, alp)) / (4.0 * (vari ** 2))
    return result
def numdiff2_2(mode, ndim, n_int, fun, var, alp, vari, alpi):
    """Mixed second derivative d2(fun)/d(var)d(alp) by central differences.

    Returns shape [n_int] for mode 0 (scalar var, scalar alp[k]), else
    [n_int, ndim, ndim] with the var index before the alp index.
    """
    result = np.zeros(n_int) if mode == 0 else np.zeros([n_int, ndim, ndim])
    for k in range(n_int):
        for i in range(ndim):
            for j in range(ndim):
                v_lo = copy.deepcopy(var)
                v_hi = copy.deepcopy(var)
                a_lo = copy.deepcopy(alp)
                a_hi = copy.deepcopy(alp)
                if mode == 0:
                    v_lo = var - vari
                    v_hi = var + vari
                    a_lo[k] = alp[k] - alpi
                    a_hi[k] = alp[k] + alpi
                else:
                    v_lo[i] = var[i] - vari
                    v_hi[i] = var[i] + vari
                    a_lo[k, j] = alp[k, j] - alpi
                    a_hi[k, j] = alp[k, j] + alpi
                mixed = (fun(v_lo, a_lo) - fun(v_hi, a_lo) - fun(v_lo, a_hi) + fun(v_hi, a_hi)) / (4.0 * vari * alpi)
                if mode == 0:
                    result[k] = mixed
                else:
                    result[k, i, j] = mixed
    return result
def numdiff2_3(mode,ndim,n_int,fun,var,alp,vari,alpi):
    """Mixed second derivative d2(fun)/d(var)d(alp) by central differences.

    Identical to ``numdiff2_2`` except that in vector mode the result is
    written as num[k,j,i] rather than num[k,i,j] -- presumably the
    transposed index ordering wanted for the d(alp)d(var) arrangement;
    confirm against the callers before unifying the two functions.
    """
    if mode == 0: num = np.zeros(n_int)
    else: num = np.zeros([n_int,ndim,ndim])
    for k in range(n_int):
        for i in range(ndim):
            for j in range(ndim):
                var1 = copy.deepcopy(var)
                var2 = copy.deepcopy(var)
                alp1 = copy.deepcopy(alp)
                alp2 = copy.deepcopy(alp)
                if mode == 0:
                    # scalar var, scalar alp[k]
                    var1 = var - vari
                    var2 = var + vari
                    alp1[k] = alp[k] - alpi
                    alp2[k] = alp[k] + alpi
                else:
                    # perturb var along i and alp[k] along j
                    var1[i] = var[i] - vari
                    var2[i] = var[i] + vari
                    alp1[k,j] = alp[k,j] - alpi
                    alp2[k,j] = alp[k,j] + alpi
                # four-point mixed central difference
                f1 = fun(var1,alp1)
                f2 = fun(var2,alp1)
                f3 = fun(var1,alp2)
                f4 = fun(var2,alp2)
                if mode == 0: num[k] = (f1 - f2 - f3 + f4) / (4.0*vari*alpi)
                else: num[k,j,i] = (f1 - f2 - f3 + f4) / (4.0*vari*alpi)
    return num
def numdiff2_4(mode,ndim,n_int,fun,var,alp,alpi):
    """Second derivative d2(fun)/d(alp)2 by central differences.

    mode 0: alp is a vector of n_int scalars -> returns [n_int, n_int];
    otherwise alp is an [n_int, ndim] array -> returns
    [n_int, n_int, ndim, ndim].
    """
    if mode == 0: num = np.zeros([n_int,n_int])
    else: num = np.zeros([n_int,n_int,ndim,ndim])
    for k in range(n_int):
        for l in range(n_int):
            for i in range(ndim):
                for j in range(ndim):
                    if k==l and i==j:
                        # diagonal entry: three-point second difference
                        alp1 = copy.deepcopy(alp)
                        alp3 = copy.deepcopy(alp)
                        if mode == 0:
                            alp1[k] = alp[k] - alpi
                            alp3[k] = alp[k] + alpi
                        else:
                            alp1[k,i] = alp[k,i] - alpi
                            alp3[k,i] = alp[k,i] + alpi
                        f1 = fun(var,alp1)
                        f2 = fun(var,alp)
                        f3 = fun(var,alp3)
                        if mode == 0: num[k,k] = (f1 - 2.0*f2 + f3) / (alpi**2)
                        else: num[k,k,i,i] = (f1 - 2.0*f2 + f3) / (alpi**2)
                    else:
                        # off-diagonal entry: four-point mixed difference
                        alp1 = copy.deepcopy(alp)
                        alp2 = copy.deepcopy(alp)
                        alp3 = copy.deepcopy(alp)
                        alp4 = copy.deepcopy(alp)
                        if mode == 0:
                            alp1[k] = alp[k] - alpi
                            alp1[l] = alp[l] - alpi
                            alp2[k] = alp[k] - alpi
                            alp2[l] = alp[l] + alpi
                            alp3[k] = alp[k] + alpi
                            alp3[l] = alp[l] - alpi
                            alp4[k] = alp[k] + alpi
                            alp4[l] = alp[l] + alpi
                        else:
                            alp1[k,i] = alp[k,i] - alpi
                            alp1[l,j] = alp[l,j] - alpi
                            alp2[k,i] = alp[k,i] - alpi
                            alp2[l,j] = alp[l,j] + alpi
                            alp3[k,i] = alp[k,i] + alpi
                            alp3[l,j] = alp[l,j] - alpi
                            alp4[k,i] = alp[k,i] + alpi
                            alp4[l,j] = alp[l,j] + alpi
                        f1 = fun(var,alp1)
                        f2 = fun(var,alp2)
                        f3 = fun(var,alp3)
                        f4 = fun(var,alp4)
                        if mode == 0: num[k,l] = (f1 - f2 - f3 + f4) / (4.0*(alpi**2))
                        else: num[k,l,i,j] = (f1 - f2 - f3 + f4) / (4.0*(alpi**2))
    return num
#!/usr/bin/env python3
##########################################################
## Jose F. Sanchez ##
## Copyright (C) 2019 Lauro Sumoy Lab, IGTP, Spain ##
##########################################################
"""
Generates sample identification using KMA software and MLSTar.
Looks for similar entries on GenBank and retrieves them.
"""
## useful imports
import concurrent.futures
import io
import os
import re
import sys
import time

from termcolor import colored
import pandas as pd

## import my modules
from BacterialTyper.scripts import species_identification_KMA
from BacterialTyper.scripts import database_generator
from BacterialTyper.scripts import MLSTar
from BacterialTyper.scripts import edirect_caller
from BacterialTyper.modules import help_info
from BacterialTyper.config import set_config
from BacterialTyper import __version__ as pipeline_version

import HCGB
from HCGB import sampleParser
import HCGB.functions.aesthetics_functions as HCGB_aes
import HCGB.functions.time_functions as HCGB_time
import HCGB.functions.main_functions as HCGB_main
import HCGB.functions.files_functions as HCGB_files
import HCGB.functions.info_functions as HCGB_info  ## required by run_ident (dump_info_run)
####################################
def run_ident(options):
    """
    Main function acting as an entry point to the module *ident*.

    Runs species identification for every sample retrieved: KMA k-mer
    identification against the selected databases, optional NCBI (edirect)
    information retrieval, MLST typing, and per-sample plus global excel/csv
    summaries. Unless ``options.fast`` is set, the user genome database is
    updated with the strains identified.

    :param options: parsed command-line options for this module
        (input/output folders, database selection, threads, help flags, ...).

    .. seealso:: Additional information to PubMLST available datasets.

     - :doc:`PubMLST datasets<../../../data/PubMLST_datasets>`
    """

    ##################################
    ### show help messages if desired
    ##################################
    if (options.help_format):
        ## help_format option
        sampleParser.help_format()
        exit()
    elif (options.help_project):
        ## information for project
        help_info.project_help()
        exit()
    elif (options.help_KMA):
        ## information for KMA Software
        species_identification_KMA.help_kma_database()
        exit()
    elif (options.help_MLSTar):
        ## information for MLSTar software
        MLSTar.help_MLSTar()
        exit()

    ## init time
    start_time_total = time.time()

    ## debugging messages: Debug is a module-level flag read by the helper functions
    global Debug
    if (options.debug):
        Debug = True
    else:
        Debug = False

    ### set as default paired_end mode
    if (options.single_end):
        options.pair = False
    else:
        options.pair = True

    ### species_identification_KMA -> most similar taxa
    HCGB_aes.pipeline_header("BacterialTyper", ver=pipeline_version)
    HCGB_aes.boxymcboxface("Species identification")
    print ("--------- Starting Process ---------")
    HCGB_time.print_time()

    ## absolute path for in & out
    input_dir = os.path.abspath(options.input)
    outdir=""

    ## Project mode as default; Project is a module-level flag read by get_outfile
    global Project
    if (options.detached):
        options.project = False
        project_mode=False  ## NOTE(review): local variable, never used afterwards
        outdir = os.path.abspath(options.output_folder)
        Project=False
    else:
        options.project = True
        outdir = input_dir
        Project=True

    ## get files: trimmed reads tagged '_trim'
    pd_samples_retrieved = sampleParser.files.get_files(options, input_dir, "trim", ['_trim'], options.debug)

    ## debug message
    if (Debug):
        print (colored("**DEBUG: pd_samples_retrieve **", 'yellow'))
        print (pd_samples_retrieved)

    ## generate output folder, if necessary
    print ("\n+ Create output folder(s):")
    if not options.project:
        HCGB_files.create_folder(outdir)

    ## one 'ident' output folder per sample
    outdir_dict = HCGB_files.outdir_project(outdir, options.project, pd_samples_retrieved, "ident", options.debug)

    ## let's start the process
    print ("+ Generate an species typification for each sample retrieved using:")
    print ("(1) Kmer alignment (KMA) software.")
    print ("(2) Pre-defined databases by KMA or user-defined databases.")

    ## get databases to check
    retrieve_databases = get_options_db(options)

    ## time stamp
    start_time_partial = HCGB_time.timestamp(start_time_total)

    ## debug message
    if (Debug):
        print (colored("**DEBUG: retrieve_database **", 'yellow'))
        pd.set_option('display.max_colwidth', None)
        pd.set_option('display.max_columns', None)
        print (retrieve_databases)

    ######## KMA identification
    dataFrame_kma = KMA_ident(options, pd_samples_retrieved, outdir_dict, retrieve_databases, start_time_partial)

    ## functions.timestamp
    start_time_partial = HCGB_time.timestamp(start_time_partial)

    ## debug message
    if (Debug):
        print (colored("**DEBUG: retrieve results to summarize **", 'yellow'))
        pd.set_option('display.max_colwidth', None)
        pd.set_option('display.max_columns', None)
        print ("dataframe_kma")
        print (dataFrame_kma)

    ## exit if viral search: edirect/MLST typing only makes sense for bacteria
    skip=False
    if (len(options.kma_dbs) == 1):
        for i in options.kma_dbs:
            if (i == 'viral'):
                print ()
                MLST_results = ''
                options.fast = True
                skip=True

    ## what if only plasmids?

    ## do edirect and MLST if bacteria
    if (not skip):
        dataFrame_edirect = pd.DataFrame()

        ######## EDirect identification
        ## NOTE(review): the edirect step is currently disabled, so
        ## dataFrame_edirect stays empty and the database-update step below
        ## has nothing to download — confirm whether this is intentional.
        #dataFrame_edirect = edirect_ident(dataFrame_kma, outdir_dict, Debug)

        ## functions.timestamp
        start_time_partial = HCGB_time.timestamp(start_time_partial)

        ## debug message
        if (Debug):
            print (colored("**DEBUG: retrieve results from NCBI **", 'yellow'))
            pd.set_option('display.max_colwidth', None)
            pd.set_option('display.max_columns', None)
            print ("dataFrame_edirect")
            print (dataFrame_edirect)

        ######## MLST identification
        MLST_results = MLST_ident(options, dataFrame_kma, outdir_dict, dataFrame_edirect, retrieve_databases)

        ## functions.timestamp
        start_time_partial = HCGB_time.timestamp(start_time_partial)

        ## debug message
        if (Debug):
            print (colored("**DEBUG: retrieve results to summarize **", 'yellow'))
            pd.set_option('display.max_colwidth', None)
            pd.set_option('display.max_columns', None)
            print ("MLST_results")
            print (MLST_results)

    ## generate summary for sample: all databases
    ## MLST, plasmids, genome, etc
    HCGB_aes.boxymcboxface("Results Summary")

    #####################################
    ## Summary identification results  ##
    #####################################

    ## parse results
    if options.project:
        final_dir = os.path.join(outdir, 'report', 'ident')
        HCGB_files.create_folder(final_dir)
    else:
        final_dir = outdir

    ###
    excel_folder = HCGB_files.create_subfolder("samples", final_dir)
    print ('+ Print summary results in folder: ', final_dir)
    print ('+ Print sample results in folder: ', excel_folder)

    # Group dataframe results summary by sample name
    sample_results_summary = dataFrame_kma.groupby(["Sample"])

    ## debug message
    if (Debug):
        print (colored("**DEBUG: sample_results_summary **", 'yellow'))
        print (sample_results_summary)

    ##
    results_summary_KMA = pd.DataFrame()
    MLST_all = pd.DataFrame()
    for name, grouped in sample_results_summary:

        ## create a excel and txt for sample
        name_sample_excel = excel_folder + '/' + name + '_ident.xlsx'
        name_sample_csv = outdir_dict[name] + '/ident_summary.csv' ## check in detached mode

        writer_sample = pd.ExcelWriter(name_sample_excel, engine='xlsxwriter') ## open excel handle

        ## subset dataframe & print result
        results_summary_toPrint_sample = grouped[['Sample','#Template',
                                                  'Query_Coverage','Template_Coverage',
                                                  'Depth', 'Database']]
        results_summary_toPrint_sample.to_excel(writer_sample, sheet_name="KMA") ## write excel handle
        results_summary_toPrint_sample.to_csv(name_sample_csv) ## write csv for sample

        ## read MLST: MLST_results maps sample name -> csv produced by MLSTar
        if MLST_results:
            if name in MLST_results:
                sample_MLST = pd.read_csv(MLST_results[name], header=0, sep=',')
                sample_MLST['genus'] = dataFrame_edirect.loc[dataFrame_edirect['sample'] == name, 'genus'].values[0]
                sample_MLST['species'] = dataFrame_edirect.loc[dataFrame_edirect['sample'] == name, 'species'].values[0]
                sample_MLST.to_excel(writer_sample, sheet_name="MLST") ## write excel handle

                ## Return information to excel
                MLST_all = pd.concat([MLST_all, sample_MLST])

        ## close excel handle
        ## NOTE(review): ExcelWriter.save() was removed in pandas 2.0 — confirm
        ## the pinned pandas version or switch to close().
        writer_sample.save()

    ##
    name_excel = final_dir + '/identification_summary.xlsx'
    print ('+ Summary information in excel file: ', name_excel)
    writer = pd.ExcelWriter(name_excel, engine='xlsxwriter') ## open excel handle

    ## KMA dataframe: print result for sources
    results_summary_KMA = dataFrame_kma[['Sample','#Template',
                                         'Query_Coverage','Template_Coverage',
                                         'Depth', 'Database']]

    ## Sum plasmid and chromosome statistics ##
    ## sum coverage
    total_coverage = results_summary_KMA.groupby('Sample')['Query_Coverage'].sum().reset_index()

    ## debug message
    if (Debug):
        print ("*** Sum: Query_coverage ***")
        print (total_coverage)

    ## TODO: FIX SUMMARY REPORT
    results_summary_KMA = results_summary_KMA.set_index('Sample')
    results_summary_KMA = results_summary_KMA.sort_values(by=['Sample', 'Database', 'Query_Coverage'],ascending=[True, True,True])
    results_summary_KMA.to_excel(writer, sheet_name='KMA') ## write excel handle

    ## write MLST
    if (MLST_results):
        MLST_all.to_excel(writer, sheet_name='MLST')

    ## write excel and close
    writer.save() ## close excel handle; see pandas 2.0 note above

    print ("\n+ Check summary of results in file generated" )

    ### timestamp
    start_time_partial = HCGB_time.timestamp(start_time_partial)

    ######################################
    ## update database for later usage
    ######################################
    if not options.fast:
        HCGB_aes.boxymcboxface("Update Sample Database")

        ## update db
        print ("+ Update database with samples identified")

        ## debug message
        if (Debug):
            print (colored("**DEBUG: dataFrame_edirect **", 'yellow'))
            pd.set_option('display.max_colwidth', None)
            pd.set_option('display.max_columns', None)
            print (dataFrame_edirect)

        ## dataFrame_edirect
        file_toprint = final_dir + '/edirect_info2download.csv'
        dataFrame_edirect.to_csv(file_toprint)

        ## update database with samples identified
        data2download = dataFrame_edirect.filter(['genus','species', 'strain', 'genome'])
        data2download = data2download.rename(columns={'genome': 'NCBI_assembly_ID', 'strain' : 'name'})
        NCBI_folder = os.path.abspath(options.database) + '/NCBI'
        database_generator.NCBI_DB(data2download, NCBI_folder, Debug)
    else:
        print ("+ No update of the database has been requested using option --fast")

    print ("\n*************** Finish *******************")
    start_time_partial = HCGB_time.timestamp(start_time_total)

    ## dump information and parameters
    info_dir = HCGB_files.create_subfolder("info", outdir)
    print("+ Dumping information and parameters")
    runInfo = { "module":"ident", "time":HCGB_time.timestamp(time.time()),
                "BacterialTyper version":pipeline_version }
    ## NOTE(review): HCGB_info must be imported at module level
    ## (HCGB.functions.info_functions) — confirm the import exists.
    HCGB_info.dump_info_run(info_dir, 'ident', options, runInfo, options.debug)

    print ("+ Exiting identification module.")
    return()
####################################
def KMA_ident(options, pd_samples_retrieved, outdir_dict, retrieve_databases, time_partial):
    """Kmer identification using software KMA_.

    :param options: options passed to the :func:`BacterialTyper.modules.ident.run_ident` main function (threads, KMA_cutoff, etc). See details in...
    :param pd_samples_retrieved: pandas dataframe for samples to process.
    :param outdir_dict: dictionary containing information for each sample of the output folder for this process.
    :param retrieve_databases: dataframe of databases to screen (uses columns 'source', 'db' and 'path').
    :param time_partial: timestamp of start time of the process.

    :type options: argparse namespace
    :type pd_samples_retrieved: pandas.DataFrame()
    :type outdir_dict: Dictionary
    :type retrieve_databases: pandas.DataFrame()
    :type time_partial: float

    :return: Information of the identification. See example below.
    :rtype: pandas.DataFrame()

    See example of returned dataframe in file :file:`/devel/results/KMA_ident_example.csv` here:

    .. include:: ../../devel/results/KMA_ident_example.csv
        :literal:

    .. seealso:: This function depends on other ``BacterialTyper`` functions called:

        - :func:`BacterialTyper.config.set_config.get_exe`

        - :func:`BacterialTyper.scripts.functions.boxymcboxface`

        - :func:`BacterialTyper.modules.ident.send_kma_job`

        - :func:`BacterialTyper.modules.ident.get_outfile`

        - :func:`BacterialTyper.scripts.species_identification_KMA.check_db_indexed`

        - :func:`BacterialTyper.scripts.species_identification_KMA.parse_kma_results`

    .. include:: ../../links.inc
    """
    ## NOTE(review): this early return disables the whole function — it always
    ## yields an empty DataFrame and everything below is unreachable dead code.
    ## Presumably a temporary stub; confirm before relying on KMA results.
    return(pd.DataFrame())

    ### print header
    HCGB_aes.boxymcboxface("KMA Identification")

    ## set defaults
    kma_bin = set_config.get_exe("kma")

    ## check status: keep only KMA-sourced databases that are correctly indexed
    databases2use = []
    for index, db2use in retrieve_databases.iterrows():
        ## index_name
        if (str(db2use['source']).startswith('KMA')):
            print ('+ Check database: ' + db2use['db'])
            fold_name = os.path.dirname(db2use['path'])

            index_status = species_identification_KMA.check_db_indexed(db2use['path'], fold_name )
            if (index_status == True):
                print (colored("\t+ Databases %s seems to be fine...\n\n" % db2use['db'], 'green'))
                databases2use.append(db2use['path'])
            else:
                #databases2use.remove(db2use)
                print (colored("\t**Databases %s is not correctly indexed. Not using it...\n" % db2use['db'], 'red'))

    ## debug message
    if (Debug):
        print (colored("**DEBUG: databases2use\n" + "\n".join(databases2use) + "\n**", 'yellow'))

    ## Start identification of samples
    print ("\n+ Send KMA identification jobs...")

    ## optimize threads: split total threads across parallel per-sample jobs
    name_list = set(pd_samples_retrieved["name"].tolist())
    threads_job = HCGB_main.optimize_threads(options.threads, len(name_list)) ## threads optimization
    max_workers_int = int(options.threads/threads_job)

    ## debug message
    if (Debug):
        print (colored("**DEBUG: options.threads " + str(options.threads) + " **", 'yellow'))
        print (colored("**DEBUG: max_workers " + str(max_workers_int) + " **", 'yellow'))
        print (colored("**DEBUG: cpu_here " + str(threads_job) + " **", 'yellow'))

    # Group dataframe by sample name
    sample_frame = pd_samples_retrieved.groupby(["name"])

    ## send for each sample: databases are processed serially, samples in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers_int) as executor:
        for db2use in databases2use:

            ## load database on memory (shared memory, reused by every sample job)
            print ("+ Loading database on memory for faster identification.")
            return_code_load = species_identification_KMA.load_db(kma_bin, db2use)

            ## send for each sample
            commandsSent = { executor.submit(send_kma_job,
                                             outdir_dict[name],
                                             sorted(cluster["sample"].tolist()),
                                             name, db2use, threads_job, Debug): name for name, cluster in sample_frame }

            for cmd2 in concurrent.futures.as_completed(commandsSent):
                details = commandsSent[cmd2]
                try:
                    data = cmd2.result()
                except Exception as exc:
                    print ('***ERROR:')
                    print (cmd2)
                    print('%r generated an exception: %s' % (details, exc))

            ## remove database from memory
            print ("+ Removing database from memory...")
            return_code_rm = species_identification_KMA.remove_db(kma_bin, db2use)

            if (return_code_rm == 'FAIL'):
                ## NOTE(review): cmd_rm_db is not defined anywhere — this line
                ## would raise NameError if ever reached.
                print (colored("***ERROR: Removing database from memory failed. Please do it manually! Execute command: %s" %cmd_rm_db,'red'))

    ## functions.timestamp
    time_partial = HCGB_time.timestamp(time_partial)

    ## parse results
    print ("+ KMA identification call finished for all samples...")
    print ("+ Parse results now")

    results_summary = pd.DataFrame()
    for db2use in databases2use:
        ### [TODO]: parse data according to database: bacteria, plasmids or user data or genbank data provided
        basename_db = os.path.basename(db2use)
        pd.set_option('display.max_colwidth', None)
        pd.set_option('display.max_columns', None)

        ###
        for name, cluster in sample_frame:

            ## get result
            ## outdir_KMA
            outdir_dict_kma = HCGB_files.create_subfolder("kma", outdir_dict[name])
            result = get_outfile(outdir_dict_kma, name, db2use)
            #print ('\t- File: ' + result + '.spa')

            ## get results using a cutoff value [Defaulta: 80]
            results = species_identification_KMA.parse_kma_results(result + '.spa', options.KMA_cutoff)
            results['Database'] = basename_db

            ### check if db2use is plasmids as it could be several.
            if (results.index.size > 1):
                if (basename_db == "plasmids.T" or basename_db == "viral.TG"):
                    ## let it be several entries
                    results['Sample'] = name
                    ## NOTE(review): DataFrame.append was removed in pandas 2.0
                    ## (pd.concat is the replacement) — confirm pinned version.
                    results_summary = results_summary.append(results, ignore_index=True)
                else:
                    print (colored("###########################################", 'yellow'))
                    print (colored("Sample %s contains multiple strains." %name, 'yellow'))
                    print (colored("###########################################", 'yellow'))
                    print (colored(results, 'yellow'))
                    print ('\n\n')

                    ## add both strains if detected
                    results['Sample'] = name
                    results_summary = results_summary.append(results, ignore_index=True)

                    ## TODO: add multi-isolate flag

            elif (results.index.size == 1): ## 1 clear reference
                results['Sample'] = name
                results_summary = results_summary.append(results, ignore_index=True)

            else:
                print (colored('\tNo clear strain from database %s has been assigned to sample %s' %(basename_db, name), 'yellow'))
                ## add empty line if no available
                results['Sample'] = name
                results_summary = results_summary.append(results, ignore_index=True)

    print ("+ Finish this step...")

    ## debug message
    if (Debug):
        ## NOTE(review): to_csv without a path returns a string that is
        ## discarded here — no file is written.
        results_summary.to_csv(quotechar='"')

    return (results_summary)
###################################
def send_kma_job(outdir_file, list_files, name, database, threads, Debug):
    """
    Run (or skip) one KMA identification job for a sample/database pair.

    Looks for a hidden success-timestamp file from a previous run; when one
    exists the job is skipped, otherwise the KMA identification wrapper is
    called and a fresh timestamp is written on success.

    :param outdir_file: output folder for this sample.
    :param list_files: read files (fastq) for the sample.
    :param name: sample name.
    :param database: path to the KMA database to screen against.
    :param threads: number of threads for the KMA call.
    :param Debug: True/False for debugging messages.

    .. seealso:: This function depends on other ``BacterialTyper`` functions called:

        - :func:`BacterialTyper.config.set_config.get_exe`

        - :func:`BacterialTyper.scripts.species_identification_KMA.kma_ident_call`

        - :func:`BacterialTyper.module.ident.get_outfile`

        - :func:`BacterialTyper.scripts.functions.read_time_stamp`
    """
    if Debug:
        print(colored("**DEBUG: ident.send_kma_job call**", 'yellow'))
        print("outdir_file")
        print(outdir_file)
        print("list_files")
        print(list_files)
        print("name: " + name)
        print("database: " + database)

    ## per-sample kma results folder and binary
    kma_subfolder = HCGB_files.create_subfolder("kma", outdir_file)
    kma_exe = set_config.get_exe("kma")

    ## absolute prefix for the KMA output files and its success marker
    outfile = get_outfile(kma_subfolder, name, database)
    stamp_file = kma_subfolder + '/.success_' + os.path.basename(outfile)

    if Debug:
        print("Outdir: ", kma_subfolder)
        print("outfile: ", outfile)
        print("Filename_stamp: ", stamp_file)

    ## a previous run already succeeded: report it and stop here
    if os.path.isfile(stamp_file):
        stamp = HCGB_time.read_time_stamp(stamp_file)
        print(colored("\tA previous command generated results on: %s [%s]" % (stamp, name), 'yellow'))
        return

    if Debug:
        print(colored("**DEBUG: species_identification_KMA.kma_ident_module call**", 'yellow'))
        print("outfile = get_outfile(outdir_dict_kma, name, db2use)")
        print("outfile: ", outfile)
        print("species_identification_KMA.kma_ident_module(outfile, list_files, name, database, threads) ")
        print("species_identification_KMA.kma_ident_module" + "\t" + outfile + "\t" + str(list_files) + "\t" + name + "\t" + database + "\t" + str(threads) + "\n")

    ## '-shm 1' makes kma read the database from shared memory, where the
    ## caller pre-loaded it for faster identification
    option = '-shm 1'

    # Call KMA and record success
    species_identification_KMA.kma_ident_call(outfile, list_files, name, database, kma_exe, option, threads)
    stamp = HCGB_time.print_time_stamp(stamp_file)
####################################
def get_outfile(output_dir, name, index_name):
    """
    Build the absolute output-file prefix for a sample/database pair.

    :param output_dir: Absolute path to results folder
    :param name: Name of the sample
    :param index_name: Name (path) of the database

    :type output_dir: string
    :type name: string
    :type index_name: string

    :returns: Output file absolute path ("<dir>/<sample>_<db>")
    """
    db_tag = os.path.basename(index_name)
    ## In project mode (module-level Project flag) results live directly in
    ## output_dir; in detached mode each sample gets its own subfolder.
    target_dir = output_dir if Project else HCGB_files.create_subfolder(name, output_dir)
    return target_dir + '/' + name + '_' + db_tag
####################################
def edirect_ident(dataFrame, outdir_dict, Debug):
    """Connect to NCBI for information retrieval

    This functions uses the software edirect_ to connect to NCBI and retrieve some information regarding samples, assemblies, publications, etc.

    :param dataFrame: pandas dataframe for samples to process. Result from :func:`BacterialTyper.modules.ident.KMA_ident`.
    :param outdir_dict: dictionary containing information for each sample of the output folder for this process.
    :param Debug: True/False for debugging messages.

    :type dataFrame: pandas.DataFrame()
    :type outdir_dict: Dictionary

    :return: Information of the identification
    :rtype: pandas.DataFrame()

    See example of returned dataframe in file :file:`/devel/results/edirect_download_results.csv` here:

    .. include:: ../../devel/results/edirect_download_results.csv
        :literal:

    .. seealso:: This function depends on other ``BacterialTyper`` functions called:

        - :func:`BacterialTyper.scripts.functions.get_info_file`

        - :func:`BacterialTyper.scripts.functions.read_time_stamp`

        - :func:`BacterialTyper.scripts.functions.print_time_stamp`

        - :func:`BacterialTyper.scripts.functions.optimize_threads`

        - :func:`BacterialTyper.scripts.functions.create_subfolder`

        - :func:`BacterialTyper.scripts.functions.boxymcboxface`

        - :func:`BacterialTyper.scripts.functions.is_non_zero_file`

        - :func:`BacterialTyper.scripts.edirect_caller.generate_docsum_call`

        - :func:`BacterialTyper.scripts.edirect_caller.generate_xtract_call`

    .. include:: ../../links.inc
    """
    ################################################
    ## TODO: What to do if multi-isolate sample?
    ################################################

    ## edirect
    HCGB_aes.boxymcboxface("EDirect information")
    print ("+ Connect to NCBI to get information from samples identified...")

    ## create dataframe to return results
    edirect_frame = pd.DataFrame(columns=("sample", "genus", "species", "strain", "BioSample", "genome", "Plasmids"))

    ## debugging messages
    if Debug:
        print ("*******************************************************")
        print ("Dataframe sample_results: ")

    # Group dataframe sample name
    sample_results = dataFrame.groupby(["Sample"])

    for name, grouped in sample_results:

        ## debugging messages
        if Debug:
            print ("Name: ", name)
            print (grouped)

        ## use edirect to get Species_name and entry for later identification
        edirect_folder = HCGB_files.create_subfolder('edirect', outdir_dict[name])

        ## chromosome match
        if (len(grouped.loc[grouped['Database'] == 'bacteria.ATG']['#Template']) == 0):
            if Debug:
                print ("Name: ", name)
                print ("No chromosome match identified by kmer")

            ## NOTE(review): this branch leaves strain, GenbankAcc and
            ## filename_stamp undefined, so the edirect_frame append and the
            ## print_time_stamp call at the bottom of the loop raise NameError
            ## when it is taken — confirm before re-enabling this function
            ## (the call site in run_ident is currently commented out).
            genus = ''
            species = ''
            BioSample_name = ''
            AssemblyAcc = ''

        else:
            nucc_entry = grouped.loc[grouped['Database'] == 'bacteria.ATG']['#Template'].values[0].split()
            ## e.g. NZ_CP029680.1 Staphylococcus aureus strain AR_0215 chromosome, complete genome

            ## output/stamp files for this sample's edirect queries
            out_docsum_file = edirect_folder + '/nuccore_docsum.txt'
            tmp_species_outfile = edirect_folder + '/info.csv'
            filename_stamp = edirect_folder + '/.success_species'

            if os.path.isfile(filename_stamp):
                stamp = HCGB_time.read_time_stamp(filename_stamp)
                print (colored("\tA previous command generated results on: %s [%s]" %(stamp, name), 'yellow'))
                status=True
            else:
                edirect_caller.generate_docsum_call('nuccore', nucc_entry[0], out_docsum_file)
                status = edirect_caller.generate_xtract_call(out_docsum_file, 'DocumentSummary', 'Organism,BioSample,AssemblyAcc,Strain', tmp_species_outfile)

            ########################################
            ## get information from edirect call
            ########################################
            if not status:
                print ("NO INFORMATION")
                continue

            ## first line of info.csv: "Organism,BioSample,AssemblyAcc[,Strain]"
            taxa_name_tmp = HCGB_main.get_info_file(tmp_species_outfile)
            Organism = taxa_name_tmp[0].split(',')[0].split()
            genus = Organism[0] ## genus
            species = Organism[1] ## species
            BioSample_name = taxa_name_tmp[0].split(',')[1] ## BioSample
            AssemblyAcc = taxa_name_tmp[0].split(',')[2] ## AssemblyAcc

            ## sometimes strain is missing
            if len(taxa_name_tmp[0].split(',')) > 3:
                strain = taxa_name_tmp[0].split(',')[3] ## strain
            else:
                strain = 'NaN'

            ## get GenBank accession ID
            out_docsum_file_assembly = edirect_folder + '/assembly_docsum.txt'
            AssemblyAcc_outfile = edirect_folder + '/AssemblyAcc.csv'

            edirect_caller.generate_docsum_call('assembly', AssemblyAcc, out_docsum_file_assembly)
            edirect_caller.generate_xtract_call(out_docsum_file_assembly, 'DocumentSummary', 'Genbank', AssemblyAcc_outfile)

            ## some error occurred
            if not HCGB_main.is_non_zero_file(out_docsum_file_assembly):
                continue

            ## Is it better to download Refseq or Genbank?
            ## https://www.quora.com/What-is-the-difference-between-Refseq-and-Genbank
            GenbankAcc = HCGB_main.get_info_file(AssemblyAcc_outfile)
            if Debug:
                print("Sample: ", name)
                print("Genbank Acc: ", GenbankAcc[0])

        ## plasmid match
        group_plasmid = grouped.loc[grouped['Database'] == 'plasmids.T' ]
        plasmid_entries = group_plasmid['#Template'].tolist()
        ## e.g. NZ_CP029083.1 Staphylococcus aureus strain AR464 plasmid unnamed1, complete sequence
        plasmid_entries_str = ",".join([i.split()[0] for i in plasmid_entries])

        ## save edirect_frame
        #("sample", "taxa", strain, genome "BioSample", "Plasmids"))
        edirect_frame.loc[len(edirect_frame)] = (name, genus, species, strain, BioSample_name, GenbankAcc[0], plasmid_entries_str)

        stamp = HCGB_time.print_time_stamp(filename_stamp)

    ## debugging messages
    if Debug:
        print ("*******************************************************")

    return (edirect_frame)
####################################
def MLST_ident(options, dataFrame, outdir_dict, dataFrame_edirect, retrieve_databases):
    """Generate MLST profile identification

    This functions uses the `MLSTar software`_ to retrieve Multi locus sequence typing (MLST) profiles from PubMLST_ for the given species previously identified by KMA. It generates MLST profiling for each sample.

    :param options: options passed to the :func:`BacterialTyper.modules.ident.run_ident` main function (threads, KMA_cutoff, etc). See details in...
    :param dataFrame: pandas dataframe for samples to process. Result from :func:`BacterialTyper.modules.ident.KMA_ident`.
    :param outdir_dict: dictionary containing information for each sample of the output folder for this process.
    :param dataFrame_edirect: pandas dataframe resulted from :func:`BacterialTyper.modules.ident.edirect_ident`.
    :param retrieve_databases: dataframe of databases (uses rows where 'db' == 'PubMLST').

    :type options: argparse namespace
    :type dataFrame: pandas.DataFrame()
    :type outdir_dict: Dictionary
    :type dataFrame_edirect: pandas.DataFrame()
    :type retrieve_databases: pandas.DataFrame()

    :return: Information of the MLST identification. Dictionary keys are samples and values are the absolute path to file generate by :func:`BacterialTyper.scripts.MLSTar.run_doMLST` containing MLST information.
    :rtype: Dictionary

    See example of returned dataframe in file :file:`/devel/results/doMLST_result_example.csv` here:

    .. include:: ../../devel/results/doMLST_result_example.csv
        :literal:

    .. seealso:: Additional information to PubMLST available datasets.

     - :doc:`PubMLST datasets<../../../data/PubMLST_datasets>`

    .. seealso:: This function depends on other ``BacterialTyper`` functions called:

        - :func:`BacterialTyper.scripts.functions.read_time_stamp`

        - :func:`BacterialTyper.scripts.functions.create_subfolder`

        - :func:`BacterialTyper.scripts.functions.boxymcboxface`

        - :func:`BacterialTyper.scripts.MLSTar.run_MLSTar`

        - :func:`HCGB.sampleParser.files.get_files`

        - :func:`BacterialTyper.scripts.MLSTar.get_MLSTar_species`

    .. include:: ../../links.inc
    """
    ## set config
    rscript = set_config.get_exe("Rscript")

    ## TODO: Samples might not be assembled...to take into account and return 0

    ## TODO: Fix and install MLSTar during installation
    ## NOTE(review): the two lines below look like debug leftovers — they print
    ## the MLSTar package status and then terminate the whole program, so the
    ## rest of this function never runs. Remove them once MLSTar installation
    ## is fixed.
    print(MLSTar.get_MLSTar_package_installed())
    exit()

    ########################################################################################
    ## TODO: What to do if multi-isolate sample?
    ## TODO: Control if a different profile is provided via --MLST_profile
    ## TODO: Check time passed and download again if >?? days passed]

    ## debug message
    if (Debug):
        print (colored("**DEBUG: dataFrame_edirect identified**", 'yellow'))
        print (dataFrame_edirect)

    ## MLST call
    HCGB_aes.boxymcboxface("MLST typing")
    print ("+ Create classical MLST typification of each sample according to species retrieved by kmer...")

    ## get assembly files
    input_dir = os.path.abspath(options.input)
    assembly_samples_retrieved = sampleParser.files.get_files(options, input_dir, "assembly", ["fna"], options.debug)

    ## debug message
    if (Debug):
        print (colored("**DEBUG: assembly_samples_retrieved**", 'yellow'))
        print (assembly_samples_retrieved)

    # init
    MLST_results = {}

    ## get MLST_profile: default or provided; entries are "species,folder" strings
    mlst_profile_list = retrieve_databases.loc[ retrieve_databases['db'] == 'PubMLST']['path'].tolist()

    if (Debug):
        print ("** Debug **")
        print ("mlst_profile_list")
        print (mlst_profile_list)
        print ("dataFrame_edirect")
        print (dataFrame_edirect)

    ## Generate MLST call according to species identified for each sample
    for index, row in dataFrame_edirect.iterrows():
        MLSTar_taxa_name = MLSTar.get_MLSTar_species(row['genus'], row['species'] )

        if (MLSTar_taxa_name == 'NaN'):
            print (colored("\t- Not available PubMLST profile for sample [%s] identified as %s %s" %(row['sample'], row['genus'], row['species']), 'yellow'))

        else:
            for mlst_profile in mlst_profile_list:

                ## species folder
                #species_mlst_folder = functions.create_subfolder(MLSTar_taxa_name, pubmlst_folder)
                species_mlst = mlst_profile.split(',')[0]
                species_mlst_folder = mlst_profile.split(',')[1]

                ## output file
                output_file = species_mlst_folder + '/PubMLST_available_scheme.csv'
                filename_stamp = species_mlst_folder + '/.success_scheme'

                ##
                if MLSTar_taxa_name == species_mlst:
                    if os.path.isfile(filename_stamp):
                        stamp = HCGB_time.read_time_stamp(filename_stamp)
                        print (colored("\tA previous command generated results on: %s" %stamp, 'yellow'))
                    else:
                        ### get scheme available
                        MLSTar.getPUBMLST(MLSTar_taxa_name, rscript, output_file)
                        stamp = HCGB_time.print_time_stamp(filename_stamp)

                    ## parse and get scheme for classical MLST
                    schemes_MLST = pd.read_csv(output_file, sep=',', header=0)

                    ## pick a scheme with fewer than 10 loci
                    ## NOTE(review): this keeps the LAST matching scheme, and
                    ## scheme2use stays unbound (NameError below) when no
                    ## scheme has len < 10 — confirm the intended selection.
                    for item, cluster in schemes_MLST.iterrows():
                        if cluster['len'] < 10:
                            scheme2use = int(cluster['scheme'])
                            continue

                    ###
                    sample = row['sample']
                    MLSTar_folder = HCGB_files.create_subfolder('MLST', outdir_dict[sample])
                    genome_file = assembly_samples_retrieved.loc[assembly_samples_retrieved['name'] == sample]['sample'].values[0]

                    ## call MLST
                    (results, profile_folder) = MLSTar.run_MLSTar(species_mlst_folder, rscript, MLSTar_taxa_name, scheme2use, sample, MLSTar_folder, genome_file, options.threads)
                    MLST_results[sample] = results

    ##
    print ("+ Finish this step...")
    return (MLST_results)
####################################
def get_external_kma(kma_external_files, Debug):
    """
    Index (if needed) user-provided KMA database files and build the
    option string describing them.

    :param kma_external_files: fasta files to use as external KMA databases;
        each is indexed in place in its own folder.
    :param Debug: True/False for debugging messages.

    :type kma_external_files: list
    :type Debug: bool

    :return: option string "kma_external:db1,db2,..." with the indexed databases.
    :rtype: string

    Exits the program when no external file is provided.
    """
    print ('\t- Get additional kma databases:')
    ## external sequences provided are indexed and generated in the same folder provided
    option_db = ""
    if (kma_external_files):
        ## de-duplicate and normalize to absolute paths
        kma_external_files = set(kma_external_files)
        kma_external_files = [os.path.abspath(f) for f in kma_external_files]

        ## check if indexed and/or index if necessary
        external_kma_dbs_list = []

        ## set defaults
        kma_bin = set_config.get_exe("kma")

        for f in kma_external_files:
            file_name = os.path.basename(f)
            fold_name = os.path.dirname(f)
            print (colored('\t\t+ %s' %file_name, 'green'))
            print ()

            ## generate db
            databaseKMA = species_identification_KMA.generate_db([f], file_name, fold_name, 'new', 'single', Debug, kma_bin)
            if not databaseKMA:
                ## FIX: original message had no %s placeholder (raised
                ## "TypeError: not all arguments converted") and used 'orange',
                ## which is not a termcolor color (raised KeyError).
                print (colored("***ERROR: Database provided is not indexed: %s\n" %f, 'red'))
            else:
                external_kma_dbs_list.append(databaseKMA)

        external_kma_dbs_string = ','.join(external_kma_dbs_list)
        option_db = "kma_external:" + external_kma_dbs_string

    else:
        ## rise error & exit
        print (colored("***ERROR: No database provided via --kma_external_file option.\n",'red'))
        exit()

    return(option_db)
####################################
def get_options_db(options):
    """Select databases to use according to the input options.

    :param options: argparse-like namespace carrying the identification flags
        (database, only_kma_db, kma_dbs, only_user_data, only_genbank_data,
        only_external_kma, kma_external_files, user_data, genbank_data,
        all_data, MLST_profile).
    :returns: Dataframe with database information among all databases available
        (concatenation of the KMA and PubMLST selections).
    """
    print("\n\n+ Select databases to use for identification:")

    ### database folder to use
    database2use = os.path.abspath(options.database)

    ## debug message
    # NOTE(review): Debug is read from module/global scope here, not from options — confirm
    if (Debug):
        print(colored("**DEBUG: Database to use: " + database2use + " **", 'yellow'))

    ## according to user input: select databases to use
    option_db = ""

    ############################################################
    ## Default db KMA
    ############################################################
    kma_dbs = []
    if not options.only_kma_db:  ## exclusive
        #kma_dbs = ["bacteria", "plasmids"]
        kma_dbs = ["bacteria"]

    if (options.kma_dbs):
        # merge user databases with the defaults, dropping duplicates
        options.kma_dbs = options.kma_dbs + kma_dbs
        options.kma_dbs = set(options.kma_dbs)
    else:
        options.kma_dbs = kma_dbs

    ## raise error & exit if no dbs provided
    if not (options.kma_dbs):
        print(colored("***ERROR: No database provided via --kma_db option.\n", 'red'))
        exit()

    ############################################################
    ### Options:
    ############
    ## 1) only user data: previously identified and added
    ############
    if (options.only_user_data):
        option_db = "user_data"

    ############
    ## 2) only genbank data: previously download from NCBI reference genomes
    ############
    elif (options.only_genbank_data):
        option_db = "genbank"

    ############
    ## 3) only external kma
    ############
    elif (options.only_external_kma):
        option_db = get_external_kma(options.kma_external_files, Debug)
        ## raise attention
        if (options.kma_dbs):
            # BUGFIX: fixed "Defatult" typo in the user-facing message
            print(colored("***ATTENTION:\nDefault databases and databases provided via --kma_dbs option would not be used as --only_external_kma option provided.\n", 'red'))

    #################
    ## all databases
    #################
    else:
        ####################
        ## default KMA dbs
        ####################
        print('\t- Selecting kma databases:')
        kma_dbs_string = ','.join(options.kma_dbs)
        option_db = "kma:" + kma_dbs_string

        for i in options.kma_dbs:
            print(colored('\t\t+ %s' % i, 'green'))

        #################
        ## External file
        #################
        if (options.kma_external_files):
            option_db_tmp = get_external_kma(options.kma_external_files, Debug)
            option_db = option_db + '#' + option_db_tmp

        #############################
        ## Previously identified data
        #############################
        if any([options.user_data, options.all_data]):
            option_db = option_db + '#kma_user_data:user_data'

        #############################
        ## Genbank reference data
        #############################
        if any([options.genbank_data, options.all_data]):
            option_db = option_db + '#kma_NCBI:genbank'

    ###############
    ### PubMLST ###
    ###############
    print("\n\t - Select MLST profiles")
    option_db_PubMLST = 'MLST:PubMLST'
    print(colored("\t\t + Default MLST profile under database provided: PubMLST", 'green'))

    if options.MLST_profile:
        ## user provides a PubMLST profile
        options.MLST_profile = os.path.abspath(options.MLST_profile)
        option_db_PubMLST = option_db_PubMLST + '#MLST:' + options.MLST_profile
        print(colored("\t\t + User provided MLST profile: %s" % options.MLST_profile, 'green'))

    ###############
    ### get dbs
    ###############
    print("\n+ Parsing information to retrieve databases")
    print("+ Reading from database: " + database2use)
    HCGB_aes.print_sepLine("-", 50, False)

    ###############
    ## debug message
    if (Debug):
        print(colored("**DEBUG: option_db: " + option_db + " **", 'yellow'))
        print(colored("**DEBUG: option_db_PubMLST : " + option_db_PubMLST + " **", 'yellow'))

    pd_KMA = database_generator.getdbs("KMA", database2use, option_db, Debug)
    pd_PubMLST = database_generator.getdbs("MLST", database2use, option_db_PubMLST, Debug)

    HCGB_aes.print_sepLine("-", 50, False)

    ## return both dataframes merged into one
    pd_Merge = pd.concat([pd_KMA, pd_PubMLST], sort=True, ignore_index=True)
    return (pd_Merge)
|
# Print a 6x7 pattern: row 0 stars where col % 3 != 0, row 1 where
# col % 3 == 0, plus the two diagonals row - col == 2 and row + col == 8.
# A starred cell prints "* " (two chars), an empty cell a single space.
for row in range(6):
    line = "".join(
        "* " if ((row == 0 and col % 3 != 0)
                 or (row == 1 and col % 3 == 0)
                 or (row - col == 2)
                 or (row + col == 8))
        else " "
        for col in range(7)
    )
    print(line)
|
import torch.utils.data
from datasets.dataset import DatasetFactory
class CustomDatasetDataLoader:
    """Pairs a DatasetFactory-built dataset with torch DataLoaders."""

    def __init__(self, opt):
        self.opt = opt
        # worker count for the DataLoader (name kept for callers)
        self.num_threds = opt.n_threads_train
        self.create_dataset()

    def create_dataset(self):
        """Instantiate the dataset named by opt.dataset_mode."""
        self.dataset = DatasetFactory.get_by_name(self.opt.dataset_mode, self.opt)

    def split_dataset(self, split_size_percentage=0.9):
        """Randomly split the dataset; returns (left, right) subsets."""
        total = len(self.dataset)
        left_size = round(split_size_percentage * total)
        return torch.utils.data.random_split(
            self.dataset, [left_size, total - left_size])

    def create_dataloader(self, data_loader, shuffle_batches):
        """Build, cache and return a DataLoader over *data_loader*."""
        loader_kwargs = dict(
            batch_size=self.opt.batch_size,
            shuffle=shuffle_batches,
            num_workers=int(self.num_threds),
            drop_last=True,
        )
        # honour a dataset-provided collate function when one exists
        if hasattr(self.dataset, 'collate_fn'):
            loader_kwargs['collate_fn'] = self.dataset.collate_fn
        self.dataloader = torch.utils.data.DataLoader(data_loader, **loader_kwargs)
        return self.dataloader

    def load_data(self):
        """Return the most recently created DataLoader."""
        return self.dataloader

    def __len__(self):
        return len(self.dataset)
|
from kmeans1d.core import cluster, Clustered, __version__
|
from tests.utils import W3CTestCase
class TestAnonymousBoxGeneration(W3CTestCase):
    # Dynamically inject one test method per W3C reference file whose name
    # starts with 'anonymous-box-generation-'. During class-body execution
    # vars() is the class namespace, so update() adds the generated tests
    # directly to this class.
    vars().update(W3CTestCase.find_tests(__file__, 'anonymous-box-generation-'))
|
import pytest
from numpy import array, log
from pandas import DataFrame
from minecraft_learns.common import *
def test_euclidean_distance():
    """Distance from each row of a 2x4 matrix to one point is a length-2 vector."""
    points = array([[0, 1, 2, 3], [2, 0, 2, -1]])
    target = array([1, 3, 4, 1])
    distances = euclidean_distance(points, target)
    assert distances.shape == (2,)
def test_mean_zero_normalize():
    """Normalized data keeps its shape and has (numerically) zero mean."""
    data = array([[0, 1, 2, 3], [2, 0, 2, -1]])
    result = mean_zero_normalize(data)
    assert data.shape == result.shape
    # BUGFIX: exact float equality (== 0) is fragile for floating-point
    # arithmetic; compare within a tolerance instead.
    assert abs(result.mean()) < 1e-9
def test_standardize():
    """Standardization preserves the input shape."""
    data = array([[0, 1, 2, 3], [2, 0, 2, -1]])
    result = standardize(data)
    assert data.shape == result.shape
def test_is_dataframe():
    """Only pandas DataFrames are recognized, not plain ndarrays."""
    nd = array([[0, 1, 2, 3], [2, 0, 2, -1]])
    frame = DataFrame([[0, 1, 2, 3], [2, 0, 2, -1]])
    assert not is_dataframe(nd)
    assert is_dataframe(frame)
def test_log_transform():
    """Log transform preserves shape and does not raise the minimum above the input's."""
    data = array([[0, 1, 2, 3], [2, 0, 2, -1]])
    result = log_transform(data)
    assert data.shape == result.shape
    assert data.min() >= result.min()
def test_interact():
    """interact() appends a product column named 'a*b'."""
    frame = DataFrame([[0, 1, 2, 3], [2, 0, 2, -1]], columns=["a", "b", "c", "d"])
    frame = interact(frame, ["a", "b"])
    assert (frame.columns == ["a", "b", "c", "d", "a*b"]).all()
def test_one_hot_encode():
    """One-hot encoding drops column 'a' and appends its value columns 0 and 1."""
    df = DataFrame([[0, 1, 2], [2, 0, 2]], columns=["a", "b", "c"])
    df = one_hot_encode(df, ["a"])
    # removed leftover debug print(df.columns)
    assert (df.columns == ["b", "c", 0, 1]).all()
def test_pca():
    """pca keeps all components by default and honours an explicit count."""
    frame = DataFrame([[0, 1, 2, 3], [2, 0, 2, -1], [0, 1, 2, 3], [2, 0, 2, -1]])
    assert pca(frame).shape[1] == frame.shape[1]
    assert pca(frame, 2).shape[1] == 2
def test_normalize():
    """Min-max normalization keeps shape and maps values onto [0, 1]."""
    data = array([[0, 1, 2, 3], [2, 0, 2, -1]])
    scaled = normalize(data)
    assert data.shape == scaled.shape
    assert scaled.min() == 0
    assert scaled.max() == 1
def test_label_encoding():
columns=["a","b","label"]
df = DataFrame([[0,1,"a"],[2,0,"b"]], columns=columns)
beginshape = df.shape
le, df = label_encoding(df)
assert df.shape == beginshape
assert le is not None
assert (df.columns == columns).all()
|
import base64
import PySimpleGUI as sg
from sys import platform
import backend
# Window icon, base64-encoded as PySimpleGUI expects.
# BUGFIX: the original open() never closed the file handle; use a context manager.
with open("framework.png", "rb") as _icon_file:
    icon = base64.b64encode(_icon_file.read())
sg.set_options(font=("Courier New", 12))
# Single backend instance shared by all event handlers below.
backend = backend.Backend()
def LedControl(name):
    """One row of widgets controlling a single LED channel *name*."""
    color_input = sg.Input(key="Input " + name, visible=False,
                           enable_events=True, default_text="#000000")
    chooser = sg.ColorChooserButton("Choose Color")
    display = sg.Text('#000000', text_color="#000000", key="Display " + name)
    apply_button = sg.Button("Set " + name)
    return [color_input, chooser, display, apply_button]
def FanFrame():
    """Frame with the manual-fan toggle and a duty-cycle slider (starts disabled)."""
    mode_toggle = sg.Checkbox("Manual Fan Control", enable_events=True, key="FanMode")
    duty_slider = sg.Slider((0, 100), 0, 1, orientation='h',
                            disable_number_display=True, enable_events=True,
                            disabled=True, key="FanDuty")
    return sg.Frame("Fan Controls", [[mode_toggle, duty_slider]])
def CommandFrame():
    """Frame for picking the backend command path via a file browser."""
    path_input = sg.Input(key="CommandPath", enable_events=True)
    browse_button = sg.FileBrowse("Command Path")
    return sg.Frame("Command Controls", [[path_input, browse_button]])
def LedFrame(names):
    """Frame stacking one LedControl row per LED name, in the given order."""
    # comprehension replaces the manual append loop (same row order)
    return sg.Frame("LED Controls", [LedControl(name) for name in names])
# Window layout: command/fan controls on the first row, LED rows below.
layout = [[
    CommandFrame(),
    FanFrame()
], [
    LedFrame(["left", "right", "power"])
]]
window = sg.Window("Framework Control", layout, element_justification="left", icon=icon)

# Blocking event loop: dispatch on the event key produced by each widget.
while True:
    event, values = window.read()
    if isinstance(event, str):
        if event.startswith("Input "):
            # Color text changed: mirror the hex value (and its color) on the display label.
            window["Display " + event.split(" ")[1]].update(values[event], text_color=values[event])
        elif event.startswith("Set "):
            # Apply button: parse "#rrggbb" into ints and push to the backend.
            name = event.split(" ")[1]
            value = values["Input " + name]
            backend.change_color(name, int(value[1:3], 16), int(value[3:5], 16), int(value[5:7], 16))
        elif event == "FanMode":
            if values["FanMode"]:
                # Manual mode: enable the slider and apply its current duty.
                window["FanDuty"].update(disabled=False)
                backend.fan(values["FanDuty"])
            else:
                # Automatic mode: -1 presumably means "auto" to the backend — TODO confirm.
                window["FanDuty"].update(disabled=True)
                backend.fan(-1)
        elif event == "FanDuty":
            backend.fan(values["FanDuty"])
        elif event == "CommandPath":
            # Second argument flags whether we run on Linux.
            backend.set(values["CommandPath"], (platform == "linux" or platform == "linux2"))
    elif event == sg.WIN_CLOSED:
        break
window.close() |
"""
Find the kth largest element in an unsorted array.
Note that it is the kth largest element in the sorted order,
not the kth distinct element.
Example 1:
Input: [3,2,1,5,6,4] and k = 2
Output: 5
Example 2:
Input: [3,2,3,1,2,4,5,5,6] and k = 4
Output: 4
Note:
You may assume k is always valid, 1 ≤ k ≤ array's length.
"""
import heapq
class Solution:
    """LeetCode 215: kth largest element of an unsorted array."""

    def findKthLargest(self, nums, k):
        """Return the kth largest value (counting duplicates).

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        # nlargest yields the k biggest values in descending order,
        # so the last one is the answer.
        top_k = heapq.nlargest(k, nums)
        return top_k[-1]
|
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
sns.set(style="ticks")
# BUGFIX: figure size must be configured BEFORE figures are created; the
# original set rcParams last, where it had no effect on the plots below.
plt.rcParams["figure.figsize"] = (50, 50)

# First 1000 rows of the semicolon-separated cleaned cardio dataset.
df = pd.read_csv('cardio_train_cleaned.csv', sep=';', nrows=1000)
print(df.describe())

df.plot.scatter(x="ap_hi", y="cardio")

# Pairwise Pearson correlations, annotated to two decimals.
heat_map = sns.heatmap(df.corr(method='pearson'), annot=True,
                       fmt='.2f', linewidths=2)
heat_map.set_xticklabels(heat_map.get_xticklabels(), rotation=45)
from __future__ import print_function, absolute_import, division
from collections import Counter
import cobrame
from cobrame.util import massbalance
def get_remaining_complex_elements(model, complex, modification_formulas):
    """Compute the elemental composition of *complex* as a Counter.

    Walks the '_mod_'-separated components of the complex id, starting from
    the unmodified base complex, and accumulates the element changes that
    each modification contributes.

    :param model: ME-model whose metabolites are consulted for formulas.
    :param complex: cobrame.Complex whose id encodes its modifications
        (note: parameter shadows the `complex` builtin — kept for API stability).
    :param modification_formulas: maps modification id -> formula string, or
        -> {'formula': ...} for the 'value:component' form.
    :returns: collections.Counter mapping element symbol -> count.
    """
    # scratch metabolite used only to parse formula strings into element dicts
    tmp_met = cobrame.Metabolite('tmp_met')
    mets = model.metabolites
    components = complex.id.split('_mod_')
    base_complex = components[0]
    elements = Counter()

    # If the completely unmodified complex is present in the model and
    # has a formula, initialize the elements dictionary with that
    if base_complex in mets and mets.get_by_id(base_complex).formula:
        elements.update(mets.get_by_id(base_complex).elements)

    for component in components[1:]:
        new_elements = elements.copy()
        new_complex = '_mod_'.join([base_complex, component])
        if new_complex in mets and mets.get_by_id(new_complex).formula:
            # default to new_complex elements if both new and old exist
            if base_complex in mets and mets.get_by_id(base_complex).formula:
                new_elements = Counter()
            formula = mets.get_by_id(new_complex).formula
            tmp_met.formula = formula
            new_elements.update(tmp_met.elements)
        # Net effect of an SH modification is adding a Sulfur to elements
        elif ':SH' in component:
            new_elements['S'] += 1
        # modifies O- to SH
        elif component == 'cosh':
            new_elements['O'] -= 1
            new_elements['S'] += 1
            new_elements['H'] += 1
        elif component in modification_formulas:
            # modification with a directly supplied formula string
            formula = modification_formulas[component]
            tmp_met.formula = formula
            new_elements.update(tmp_met.elements)
        elif ':' in component:
            # 'value:component' form — formula applied *value* times
            value, component = component.split(':')
            if component in modification_formulas:
                formula = modification_formulas[component]['formula']
            elif component + '_c' in mets:
                formula = mets.get_by_id(component + '_c').formula
            else:
                raise UserWarning('No formula found for modification (%s)'
                                  % component)
            tmp_met.formula = formula
            for e, v in tmp_met.elements.items():
                new_elements[e] += v * float(value)
        elif 'Oxidized' in component and 'FLAVODOXIN' not in base_complex:
            # oxidation removes two hydrogens (except for flavodoxin complexes)
            new_elements.update({'H': -2})

        if elements == new_elements and 'FLAVODOXIN' not in base_complex:
            # modification changed nothing — flag it for inspection
            print(complex.id, base_complex, component)
        base_complex = '_mod_'.join([base_complex, component])
        elements = new_elements.copy()

    return elements
def add_remaining_complex_formulas(model, modification_formulas):
    """
    Add formula to complexes that are not formed from a complex formation
    reaction (ie. complexes involved in metabolic reactions)
    """
    # Phase 1: reset the formula of every complex lacking a formation reaction
    complexes_to_fill = []
    for met in model.metabolites:
        # If not complex or formed by complex formation reaction, do not reset
        if not isinstance(met, cobrame.Complex) or met.id in model.process_data:
            continue
        for rxn in met.reactions:
            if hasattr(rxn, 'update'):
                rxn.update()
        met.formula = ''
        met.elements = {}
        complexes_to_fill.append(met)

    # Phase 2: compute element counts for every such complex BEFORE applying
    # any of them — adding formulas dynamically can change the computation.
    element_dict = {
        met: get_remaining_complex_elements(model, met, modification_formulas)
        for met in complexes_to_fill
    }

    # Phase 3: write the formulas back.
    for met, elements in element_dict.items():
        massbalance.elements_to_formula(met, elements)
|
from django.test import TestCase
from rest_framework.test import APIClient
from django.urls import reverse
from faker import Factory
from .models import Produto, Favorito
from clientes.models import Cliente
from usuarios.tests import getApiCliente, criaUsuarios
def criaFavorito() -> object:
    """
    Create one Cliente, two Produto rows and link them through a Favorito.

    param: None
    return: the created Favorito instance, with both products attached.
    Note: relies on the module-level `faker` factory defined below
    (resolved at call time, so the definition order is fine).
    """
    produto_1 = "1bf0f365-fbdd-4e21-9786-da459d78dd1f"
    tb_produto_1 = Produto.objects.create(
        produto_id=produto_1
    )
    produto_2 = "b66897ea-4f5a-b8a9-dc7b-3011f37a18fc"
    tb_produto_2 = Produto.objects.create(
        produto_id=produto_2
    )
    # random-but-valid client data from the pt_BR faker
    email_cliente = faker.email()
    tb_cliente = Cliente.objects.create(
        email=email_cliente,
        nome=faker.name()
    )
    tb_favorito = Favorito.objects.create(
        cliente=tb_cliente
    )
    tb_favorito.produtos.set([tb_produto_1, tb_produto_2])
    return tb_favorito
# Brazilian-Portuguese faker used by criaFavorito() above.
faker = Factory.create('pt_BR')
# Create your tests here.
class ProdutosModelTest(TestCase):
    """Model-level smoke test: a single Produto row can be persisted."""

    def setUp(self):
        self.produto_id = "1bf0f365-fbdd-4e21-9786-da459d78dd1f"
        Produto.objects.create(
            produto_id=self.produto_id
        )

    def testValidaQtdInserts(self):
        # exactly the one row created in setUp
        saved_models = Produto.objects.count()
        self.assertEqual(saved_models, 1)
class FavoritosModelTest(TestCase):
    """Model-level smoke test: criaFavorito() persists exactly one Favorito."""

    def setUp(self):
        _ = criaFavorito()

    def testValidaQtdInserts(self):
        saved_models = Favorito.objects.count()
        self.assertEqual(saved_models, 1)
class FavoritosAPITest(TestCase):
    """End-to-end tests of the /api/v1/favoritos/ endpoint (GET/POST/DELETE)."""

    def setUp(self):
        self.tb_favorito = criaFavorito()
        self.list_produtos = [
            {"price": 1699.0, "image": "http://challenge-api.luizalabs.com/images/1bf0f365-fbdd-4e21-9786-da459d78dd1f.jpg", "brand": "b\u00e9b\u00e9 confort", "id": "1bf0f365-fbdd-4e21-9786-da459d78dd1f", "title": "Cadeira para Auto Iseos B\u00e9b\u00e9 Confort Earth Brown"},
            {"price": 805.0, "image": "http://challenge-api.luizalabs.com/images/b66897ea-4f5a-b8a9-dc7b-3011f37a18fc.jpg", "brand": "narciso rodriguez", "id": "b66897ea-4f5a-b8a9-dc7b-3011f37a18fc", "title": "Narciso Rodriguez For Her L?absolu"},
            {"price": 667.8, "image": "http://challenge-api.luizalabs.com/images/f8cb4a82-910e-6654-1240-d994c2997d2c.jpg", "brand": "burigotto", "id": "f8cb4a82-910e-6654-1240-d994c2997d2c", "title": "Cadeira para Auto Burigotto Matrix p/ Crian\u00e7as"},
            {"price": 199.0, "image": "http://challenge-api.luizalabs.com/images/b2968188-458c-3860-7729-2e2ec30dabd6.jpg", "brand": "doctor cooler", "id": "b2968188-458c-3860-7729-2e2ec30dabd6", "title": "Cooler 6 Latas Doctor Cooler"}
        ]
        self.endpoint = '/api/v1/favoritos/'
        # create a user and fetch an authenticated API client (token-based)
        email_usuario, email_superusuario, senha = criaUsuarios()
        self.api_client = getApiCliente(email_usuario, senha)

    # BUGFIX (class-wide): replaced the deprecated failUnlessEqual alias
    # (removed in Python 3.12) with assertEqual.
    def testValidaGET(self):
        response = self.api_client.get(self.endpoint)
        self.assertEqual(response.status_code, 200)

    def testValidaPOST(self):
        response = self.api_client.post(
            self.endpoint,
            {
                "cliente": self.tb_favorito.cliente.id,
                "produtos": {"price": 149.9, "image": "http://challenge-api.luizalabs.com/images/93bd9fbf-5cd3-6385-1600-8eb9d9ee705d.jpg", "brand": "love", "id": "93bd9fbf-5cd3-6385-1600-8eb9d9ee705d", "title": "Banheira Infl\u00e1vel"}
            },
            format='json'
        )
        self.assertEqual(response.status_code, 201)
        response = self.api_client.get(self.endpoint)
        response = response.json()
        self.assertEqual(1, len(response.get('results')))
        # expect 3 products: 2 from setUp plus the new one
        self.assertEqual(3, len(response['results'][0].get('produtos')))

    def testValidaPOSTList(self):
        response = self.api_client.post(
            self.endpoint,
            {
                "cliente": self.tb_favorito.cliente.id,
                "produtos": self.list_produtos
            },
            format='json'
        )
        self.assertEqual(response.status_code, 201)
        response = self.api_client.get(self.endpoint)
        response = response.json()
        self.assertEqual(1, len(response.get('results')))
        # expect 4 products: 2 from setUp plus 4 posted, of which 2 are duplicates
        self.assertEqual(4, len(response['results'][0].get('produtos')))

    def testValidaDELETE(self):
        response = self.api_client.delete(f'{self.endpoint}{self.tb_favorito.id}/')
        self.assertEqual(response.status_code, 204)
# Public API of this abc package, declared explicitly so `import *`,
# documentation tools and type checkers see exactly these names.
__all__ = (
    "AsyncResource",
    "IPAddressType",
    "IPSockAddrType",
    "SocketAttribute",
    "SocketStream",
    "SocketListener",
    "UDPSocket",
    "UNIXSocketStream",
    "UDPPacketType",
    "ConnectedUDPSocket",
    "UnreliableObjectReceiveStream",
    "UnreliableObjectSendStream",
    "UnreliableObjectStream",
    "ObjectReceiveStream",
    "ObjectSendStream",
    "ObjectStream",
    "ByteReceiveStream",
    "ByteSendStream",
    "ByteStream",
    "AnyUnreliableByteReceiveStream",
    "AnyUnreliableByteSendStream",
    "AnyUnreliableByteStream",
    "AnyByteReceiveStream",
    "AnyByteSendStream",
    "AnyByteStream",
    "Listener",
    "Process",
    "Event",
    "Condition",
    "Lock",
    "Semaphore",
    "CapacityLimiter",
    "CancelScope",
    "TaskGroup",
    "TaskStatus",
    "TestRunner",
    "BlockingPortal",
)
from typing import Any
from ._resources import AsyncResource
from ._sockets import (
ConnectedUDPSocket,
IPAddressType,
IPSockAddrType,
SocketAttribute,
SocketListener,
SocketStream,
UDPPacketType,
UDPSocket,
UNIXSocketStream,
)
from ._streams import (
AnyByteReceiveStream,
AnyByteSendStream,
AnyByteStream,
AnyUnreliableByteReceiveStream,
AnyUnreliableByteSendStream,
AnyUnreliableByteStream,
ByteReceiveStream,
ByteSendStream,
ByteStream,
Listener,
ObjectReceiveStream,
ObjectSendStream,
ObjectStream,
UnreliableObjectReceiveStream,
UnreliableObjectSendStream,
UnreliableObjectStream,
)
from ._subprocesses import Process
from ._tasks import TaskGroup, TaskStatus
from ._testing import TestRunner
# Re-exported here, for backwards compatibility
# isort: off
from .._core._synchronization import CapacityLimiter, Condition, Event, Lock, Semaphore
from .._core._tasks import CancelScope
from ..from_thread import BlockingPortal
# Re-export imports so they look like they live directly in this package
# (module-level annotations below keep type checkers happy about the loop vars)
key: str
value: Any
# list() snapshots locals() so we don't mutate the namespace while iterating it
for key, value in list(locals().items()):
    if getattr(value, "__module__", "").startswith("anyio.abc."):
        value.__module__ = __name__
|
import time
from ppserver import celery, app
import requests
from random import randint
# from celery import Celery
@celery.task(bind=True)
def long_task(self):
    """My long task: ticks once a second for 100 s, reporting PROGRESS state."""
    c = 0
    while c != 100:
        c += 1
        # NOTE(review): 'current' is a random 0-10 value, presumably demo/placeholder
        # progress data — confirm intended meaning.
        i = randint(0, 10)
        time.sleep(1)
        self.update_state(state='PROGRESS',
                          meta={'current': i,
                                'total': 10,
                                'status': 'working...'}
                          )
    # BUGFIX: the original URL lacked a scheme ('127.0.0.1/pixel/check'),
    # which makes requests raise MissingSchema instead of performing the GET.
    requests.get('http://127.0.0.1/pixel/check')
    return {'current': i,
            'total': i,
            'status': 'Task Complete',
            'result': 'Go Away!'
            }
@celery.task(bind=True)
def pixel_time(self, mins):
    """Turn the pixel device on, wait *mins* minutes (reporting progress), then turn it off.

    :param mins: number of minutes to keep the pixel switched on.
    """
    app.logger.info('Running get request to turn on')
    try:
        requests.get('http://192.168.1.11/pixel/on')
    except Exception as e:
        # best-effort: log and continue counting down even if the device is unreachable
        app.logger.error(e)
    c = 0
    app.logger.info('Delay time is: {t}'.format(t=mins*60))
    # one-second ticks until mins*60 seconds have elapsed
    while c != (mins * 60):
        c += 1
        # NOTE(review): 'current' is a random 0-10 value, not real progress — confirm intent
        i = randint(0, 10)
        time.sleep(1)
        self.update_state(state='PROGRESS',
                          meta={'current': i,
                                'total': 10,
                                'status': 'working...'}
                          )
    requests.get('http://192.168.1.11/pixel/off')
|
import numpy as np
from model import create_custom_model
# models
# model = ENGINE + MODEL DEFINITION
# engine is not cofigurable yet
# you can specify your model definition
# MODEL DEFINITION
# 1. states, transtion types, parameters
model_definition = {
    # define your model states and transition types
    #
    # define model arguments (arguments of constructor) and parameters
    # (also arguments of the constructor)
    # arguments are dictionaries: { arg_name : (default value, description) }
    # init_arguments .... model parameters, single value
    #                     e.g. "p": (0.2, "probability of external contact")
    #
    # model_parameters .... model parameters: single value or np.array
    #                       those that can differ for each node
    #                       i.e. "beta": (0.2, "transmission rate")
    #
    # you do not have to define init_{STATE_NAME} arguments; you can use them
    # by default (they define numbers of individuals in individual states,
    # the rest of the population is assigned to the first state)

    # S: susceptible, S_s: susceptible w/ unrelated symptoms, E: exposed,
    # I_n/I_a/I_s: infectious (no-symptoms / asymptomatic-yet / symptomatic),
    # I_d: detected, R_*: recovered, D_*: dead ("_d" detected / "_u" undetected)
    "states": [
        "S",
        "S_s",
        "E",
        "I_n",
        "I_a",
        "I_s",
        "I_d",
        "R_d",
        "R_u",
        "D_d",
        "D_u"
    ],
    "transitions": [
        ("S", "S_s"),
        ("S", "E"),
        ("S_s", "S"),
        ("S_s", "E"),
        ("E", "I_n"),
        ("E", "I_a"),
        ("I_n", "R_u"),
        ("I_a", "I_s"),
        ("I_s", "R_u"),
        ("I_s", "D_u"),
        ("I_s", "I_d"),
        ("I_d", "R_d"),
        ("I_d", "D_d"),
        ("I_a", "I_d"),
        ("E", "I_d")
    ],
    # absorbing states — no transitions leave them
    "final_states": [
        "R_d",
        "R_u",
        "D_d",
        "D_u"
    ],
    # states excluded from visible population counts
    "invisible_states": [
        "D_u",
        "D_d"
    ],
    "init_arguments": {
        "p": (0, "probability of interaction outside adjacent nodes"),
        "q": (0, " probability of detected individuals interaction outside adjacent nodes"),
        "false_symptoms_rate": (0, ""),
        "false_symptoms_recovery_rate": (1., ""),
        "asymptomatic_rate": (0, ""),
        "symptoms_manifest_rate": (1., ""),
    },
    "model_parameters": {
        "beta": (0, "rate of transmission (exposure)"),
        "sigma": (0, "rate of infection (upon exposure)"),
        "gamma": (0, "rate of recovery (upon infection)"),
        "mu_I": (0, "rate of infection-related death"),
        "beta_D": (0, "rate of transmission (exposure) for detected inds"),
        "gamma_D": (0, "rate of recovery (upon infection) for detected inds"),
        "mu_D": (0, "rate of infection-related death for detected inds"),
        "theta_E": (0, "rate of baseline testing for exposed individuals"),
        "theta_Ia": (0, "rate of baseline testing for Ia individuals"),
        "theta_Is": (0, "rate of baseline testing for Is individuals"),
        "phi_E": (0, "rate of contact tracing testing for exposed individuals"),
        "phi_Ia": (0, "rate of contact tracing testing for Ia individuals"),
        "phi_Is": (0, "rate of contact tracing testing for Is individuals"),
        "psi_E": (0, "probability of positive test results for exposed individuals"),
        "psi_Ia": (0, "probability of positive test results for Ia individuals"),
        "psi_Is": (0, "probability of positive test results for Is individuals")
    }
}
# 2. propensities function
def calc_propensities(model):
    """Compute per-node transition propensities for the extended network model.

    :param model: network epidemic model exposing per-node parameter arrays
        (beta, sigma, ...), node states (model.X), and contact helpers.
    :returns: tuple (stacked_propensities, transitions) where the array has one
        column per transition, in model.transitions order.
    """
    # STEP 1
    # pre-calculate matrix multiplication terms that may be used in multiple
    # propensity calculations, and check whether their computation is
    # necessary before doing the multiplication

    # number of infectious non-detected contacts (sum over all I states)
    numContacts_I = np.zeros(shape=(model.num_nodes, 1))
    if any(model.beta):
        infected = [
            s for s in ("I_n", "I_a", "I_s")
            if model.current_state_count(s)
        ]
        if infected:
            numContacts_I = model.num_contacts(infected)

    # number of detected infectious contacts
    numContacts_Id = np.zeros(shape=(model.num_nodes, 1))
    if any(model.beta_D):
        numContacts_Id = model.num_contacts("I_d")

    # STEP 2
    # create dict of propensities: { transition name: probability values }
    propensities = dict()

    # "S" -> "S_s": unrelated symptoms appear
    propensities[("S", "S_s")] = model.false_symptoms_rate*(model.X == "S")

    # "S" -> "E": global (p) plus local (1-p) transmission pressure
    numI = model.current_state_count(
        "I_n") + model.current_state_count("I_a") + model.current_state_count("I_s")
    S_to_E_koef = (
        model.p * (
            model.beta * numI +
            model.q * model.beta_D * model.current_state_count("I_d")
        ) / model.current_N()
        +
        (1 - model.p) * np.divide(
            model.beta * numContacts_I +
            model.beta_D * numContacts_Id, model.degree, out=np.zeros_like(model.degree), where=model.degree != 0
        )
    )
    propensities[("S", "E")] = S_to_E_koef * (model.X == "S")

    propensities[("S_s", "S")
                 ] = model.false_symptoms_recovery_rate*(model.X == "S_s")

    # becoming exposed does not depend on unrelated symptoms
    propensities[("S_s", "E")] = S_to_E_koef * (model.X == "S_s")

    exposed = model.X == "E"
    propensities[("E", "I_n")] = model.asymptomatic_rate * \
        model.sigma * exposed
    propensities[("E", "I_a")] = (
        1-model.asymptomatic_rate) * model.sigma * exposed

    propensities[("I_n", "R_u")] = model.gamma * (model.X == "I_n")

    asymptomatic = model.X == "I_a"
    propensities[("I_a", "I_s")
                 ] = model.symptoms_manifest_rate * asymptomatic

    symptomatic = model.X == "I_s"
    propensities[("I_s", "R_u")] = model.gamma * symptomatic
    propensities[("I_s", "D_u")] = model.mu_I * symptomatic

    detected = model.X == "I_d"
    propensities[("I_d", "R_d")] = model.gamma_D * detected
    propensities[("I_d", "D_d")] = model.mu_D * detected

    # testing TODO
    propensities[("I_a", "I_d")] = (
        model.theta_Ia + model.phi_Ia * numContacts_Id) * model.psi_Ia * asymptomatic
    propensities[("I_s", "I_d")] = (
        model.theta_Is + model.phi_Is * numContacts_Id) * model.psi_Is * symptomatic
    propensities[("E", "I_d")] = (
        model.theta_E + model.phi_E * numContacts_Id) * model.psi_E * exposed

    # STEP 3
    # return array of all propensities plus the transition names
    # TODO move this step to model.py
    # (comprehension replaces the original manual append loop)
    propensities_list = [propensities[t] for t in model.transitions]
    stacked_propensities = np.hstack(propensities_list)

    return stacked_propensities, model.transitions
# 3. model class
# Build the concrete model class from the declarative definition above plus
# the propensity function; create_custom_model wires states/transitions/params.
ExtendedNetworkModel = create_custom_model("ExtendedNetworkModel",
                                           **model_definition,
                                           calc_propensities=calc_propensities)
# TODO: inherit from ExtendedNetworkModel a new model (high level) that includes the workaround
# about multi-graphs, manages call backs, etc.
|
from typing import List, Union
import cv2
from pathlib import Path, PosixPath
def make_video(
    save_dir: str, img_names: List[Union[str, PosixPath]], width=512, height=512,
    fps: float = 20.0
):
    """Assemble the images in *img_names* into save_dir/result_video.mp4.

    :param save_dir: directory the mp4 file is written to.
    :param img_names: frame image paths, in playback order.
    :param width: frame width the writer is opened with.
    :param height: frame height the writer is opened with.
    :param fps: playback frame rate (generalized from the hard-coded 20.0).
    """
    fourcc = cv2.VideoWriter_fourcc("m", "p", "4", "v")
    save_name = Path(save_dir).joinpath("result_video.mp4")
    video = cv2.VideoWriter(str(save_name), fourcc, fps, (width, height))
    for img_file in img_names:
        img = cv2.imread(str(img_file))
        # BUGFIX: cv2.imread returns None for missing/unreadable files; the
        # original passed None to write() and crashed. Skip such frames.
        if img is None:
            continue
        video.write(img)
    video.release()
if __name__ == "__main__":
    # Demo: build a video from every 10th frame (0.png, 10.png, ... 1990.png)
    # in the current directory.
    img_dir = "./"
    img_names = [Path(img_dir).joinpath("%d.png" % (10 * i)) for i in range(200)]
    make_video(img_dir, img_names)
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Remove tf error warnings
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras import layers
from tensorflow import keras
# Enable memory growth when a GPU is present (prevents TF from grabbing all
# GPU memory up front).
physical_devices = tf.config.list_physical_devices('GPU')
# BUGFIX: original indexed [0] unconditionally -> IndexError on CPU-only hosts.
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

# MNIST digits scaled to [0, 1]; float32 as float64 is unnecessary computation
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0

# Two stacked SimpleRNN layers reading each image as a sequence of 28-pixel
# rows, followed by a 10-way logits layer.
model = keras.Sequential()
model.add(keras.Input(shape=(None, 28)))
model.add(
    layers.SimpleRNN(512, return_sequences=True, activation='tanh')
)
model.add(layers.SimpleRNN(512, activation='relu'))
model.add(layers.Dense(10))

model.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    # BUGFIX: 'lr' is deprecated (removed in recent Keras); use 'learning_rate'
    optimizer=keras.optimizers.Adam(learning_rate=1e-3),
    metrics=["accuracy"],
)
model.fit(x_train, y_train, batch_size=64, epochs=10, verbose=2)
model.evaluate(x_test, y_test)
|
import requests
from requests import cookies
import logging
import json
from urllib3 import disable_warnings
from urllib3 import exceptions
from singleton_decorator import singleton
from typing import List, Dict
from collections import namedtuple
# Shared logger for the exporter package.
LOG = logging.getLogger('apic_exporter.exporter')
# Seconds to wait for data queries vs. the (shorter) login/cookie request.
TIMEOUT = 10
COOKIE_TIMEOUT = 5
# Pairs a requests.Session with a boolean availability flag.
session_tuple = namedtuple('session_tuple', 'session available')
@singleton
class SessionPool(object):
    """Pool of one requests.Session (plus availability flag) per APIC host.

    Decorated with @singleton, so every instantiation returns the same pool.
    """

    def __init__(self, hosts, user, password):
        """Initializes the Session Pool. Sessions contains the session to a host and an Availability flag"""
        self.__sessions = {}
        self.__user = user
        self.__password = password
        for host in hosts:
            self.__sessions[host] = self.createSession(host)

    def getSession(self, host: str) -> session_tuple:
        """Returns the session and availability"""
        return self.__sessions[host]

    def createSession(self, host: str) -> session_tuple:
        """Creates the session and requests the cookie."""
        session = requests.Session()
        # bypass any proxies and skip TLS verification (APIC controllers
        # commonly use self-signed certificates — TODO confirm)
        session.proxies = {'https': '', 'http': '', 'no': '*'}
        session.verify = False
        cookie = self.requestCookie(host, session)
        if cookie is not None:
            session.cookies = cookies.cookiejar_from_dict(
                cookie_dict={"APIC-cookie": cookie})
        # a host is usable only when the login produced a cookie
        available = True if session is not None and cookie is not None else False
        return session_tuple(session, available)

    def reset_unavailable_hosts(self):
        """Reset availability of all sessions and try to repair unavailable sessions."""
        for host, value in self.__sessions.items():
            if value.session is None:
                # no session at all: rebuild from scratch
                self.__sessions[host] = self.createSession(host)
                continue
            if len(value.session.cookies) == 0:
                # session exists but has no cookie yet: retry the login
                cookie = self.requestCookie(host, value.session)
                if cookie is not None:
                    value.session.cookies = cookies.cookiejar_from_dict(
                        cookie_dict={"APIC-cookie": cookie})
                    self.__sessions[host] = session_tuple(value.session, True)
                else:
                    self.__sessions[host] = session_tuple(value.session, False)
            else:
                # session with cookie: optimistically flag as available again
                self.__sessions[host] = session_tuple(value.session, True)

    def get_unavailable_sessions(self) -> List[str]:
        # hosts currently flagged unavailable
        return [k for k, v in self.__sessions.items() if not v.available]

    def set_session_unavailable(self, host: str):
        """Set a given host to be unavailable"""
        if host in self.__sessions:
            LOG.debug("Flag host %s as unavailable", host)
            session, _ = self.__sessions[host]
            self.__sessions[host] = session_tuple(session, False)

    def refreshCookie(self, host: str) -> requests.Session:
        """Clears old cookie and requests a fresh one"""
        session, available = self.__sessions[host]
        cookie = self.requestCookie(host, session)
        if cookie is not None:
            session.cookies.clear_session_cookies()
            session.cookies = cookies.cookiejar_from_dict(
                cookie_dict={"APIC-cookie": cookie}, cookiejar=session.cookies)
        self.__sessions[host] = session_tuple(session, available)
        return session

    def requestCookie(self, host: str, session: requests.Session) -> str:
        """Login to the host and retrieve cookie.

        Returns the APIC token string, or None when the host times out, is
        unreachable (both flag the host unavailable) or responds non-200.
        """
        disable_warnings(exceptions.InsecureRequestWarning)
        LOG.info("Request token for %s", host)
        try:
            url = "https://" + host + "/api/aaaLogin.json?"
            payload = {
                "aaaUser": {
                    "attributes": {
                        "name": self.__user,
                        "pwd": self.__password
                    }
                }
            }
            resp = session.post(url, json=payload, timeout=COOKIE_TIMEOUT)
        except (requests.exceptions.ConnectTimeout,
                requests.exceptions.ReadTimeout, TimeoutError):
            LOG.error("Connection with host %s timed out after %s sec", host,
                      COOKIE_TIMEOUT)
            self.set_session_unavailable(host)
            return None
        except (requests.exceptions.ConnectionError, ConnectionError) as e:
            LOG.error("Cannot connect to %s: %s", url, e)
            self.set_session_unavailable(host)
            return None
        cookie = None
        if resp.status_code == 200:
            res = json.loads(resp.text)
            resp.close()
            cookie = res['imdata'][0]['aaaLogin']['attributes']['token']
        else:
            LOG.error("url %s responds with %s", url, resp.status_code)
        return cookie
class Connection():
    """HTTP facade over the SessionPool: GET requests with automatic token refresh."""

    def __init__(self, hosts: List[str], user: str, password: str):
        # SessionPool is a singleton, so all Connection objects share one pool
        self.__pool = SessionPool(hosts, user, password)

    def getRequest(self, host: str, query: str, timeout: int = TIMEOUT) -> Dict:
        """Perform a GET request against host for the query. Retries if token is invalid.

        Returns the decoded JSON dict, or None for skipped (unavailable)
        hosts, timeouts/connection errors (host is flagged unavailable),
        or non-200 responses.
        """
        disable_warnings(exceptions.InsecureRequestWarning)
        url = "https://" + host + query
        session, available = self.__pool.getSession(host)
        if not available:
            LOG.info("Skipped unavailable host %s query %s", host, query)
            return None
        try:
            LOG.debug('Submitting request %s', url)
            resp = session.get(url, timeout=timeout)
        except (requests.exceptions.ConnectTimeout,
                requests.exceptions.ReadTimeout, TimeoutError):
            LOG.error("Connection with host %s timed out after %s sec", host,
                      timeout)
            self.__pool.set_session_unavailable(host)
            return None
        except (requests.exceptions.ConnectionError, ConnectionError) as e:
            LOG.error("Cannot connect to %s: %s", url, e)
            self.__pool.set_session_unavailable(host)
            return None

        # token is invalid, request a new token and retry once
        if resp.status_code == 403 and ("Token was invalid" in resp.text
                                        or "token" in resp.text):
            session = self.__pool.refreshCookie(host)
            try:
                resp = session.get(url, timeout=timeout)
            except (requests.exceptions.ConnectTimeout,
                    requests.exceptions.ReadTimeout, TimeoutError):
                LOG.error("Connection with host %s timed out after %s sec",
                          host, timeout)
                self.__pool.set_session_unavailable(host)
                return None
            except (requests.exceptions.ConnectionError, ConnectionError) as e:
                LOG.error("Cannot connect to %s: %s", url, e)
                self.__pool.set_session_unavailable(host)
                return None

        if resp.status_code == 200:
            res = json.loads(resp.text)
            resp.close()
            return res
        else:
            LOG.error("url %s responding with %s", url, resp.status_code)
            return None

    def get_unresponsive_hosts(self) -> List[str]:
        """Returns a list of hosts that were not responding since the last reset."""
        return self.__pool.get_unavailable_sessions()

    def reset_unavailable_hosts(self):
        """Unavailable hosts are repaired and the flags are reset"""
        self.__pool.reset_unavailable_hosts()

    def isDataValid(self, data: Dict):
        """Checks if the data is a dict that contains 'imdata'."""
        if data is None:
            return False
        if isinstance(data, dict) and isinstance(data.get('imdata'), list):
            return True
        return False
|
import os
def nyu2_paired_path(nyu2_path):
    '''
    Collect (image, label) path pairs from a NYUv2-style directory tree.

    Walks nyu2_path recursively and pairs each ``.jpg`` training image with
    the ``.png`` label image that shares its sort position, e.g.
    (path_train1, path_label1).

    Args:
        nyu2_path: root directory to walk.

    Returns:
        list of ``[jpg_path, png_path]`` pairs, aligned by sorted path stem.
    '''
    jpg_paths, png_paths = [], []
    for cur_dir, _dirs, files in os.walk(nyu2_path):
        for name in files:
            full = os.path.join(cur_dir, name)
            if full.endswith(".jpg"):
                jpg_paths.append(full)
            elif full.endswith(".png"):
                # fix: only accept .png labels; the original appended ANY
                # non-jpg file (e.g. stray .txt), which misaligns the pairs
                png_paths.append(full)
    # sort on the path minus the 4-char extension so jpg/png order matches
    jpg_paths.sort(key=lambda p: p[:-4])
    png_paths.sort(key=lambda p: p[:-4])
    return [[jpg, png] for jpg, png in zip(jpg_paths, png_paths)]
"""
servers
"""
import functools
from collections import namedtuple
import pandas as pd
@functools.lru_cache(maxsize=None)
def servers_list():
    """
    Download a new server list from awesome-erddap.
    If loading the latest one fails it falls back to the default one shipped with the package.
    """
    from urllib.error import URLError

    remote = (
        "https://raw.githubusercontent.com/IrishMarineInstitute/"
        "awesome-erddap/master/erddaps.json"
    )
    try:
        frame = pd.read_json(remote)
    except URLError:
        # offline / unreachable: fall back to the bundled copy
        from pathlib import Path

        package_dir = Path(__file__).absolute().parent
        frame = pd.read_json(package_dir.joinpath("erddaps.json"))
    server_record = namedtuple("server", ["description", "url"])
    catalog = {}
    for _, entry in frame.iterrows():
        # skip entries with an empty/missing short_name
        if entry["short_name"]:
            catalog[entry["short_name"]] = server_record(entry["name"], entry["url"])
    return catalog
servers = servers_list()
|
import os
import time
# Run the stress-test driver with an increasing number of connections
# (10 to 75 in steps of 5), pausing between batches.
for conn_count in range(10, 80, 5):
    print("=" * 80)
    print(f"Iniciando o teste com a quantidade {conn_count} de conexões")
    os.system(
        f"python main.py logs/1000-urls-para-testes.log https://new.scielo.br --connections {conn_count} --dont-wait-until-request-time --output-file resultados-dos-testes-de-stress/{conn_count}-conexoes.csv"
    )
    print("Dormindo 60 segundos para então iniciar uma nova bateria de testes")
    time.sleep(60)
print("=" * 80)
|
import moviepy.editor as mpy
import moviepy.video.fx.all as vfx
import moviepy.audio.fx.all as afx
import gizeh_title as gt
import os

# get the information to render the final video
videofolder = "/home/joaopedro/Videos/GGTP/Creating Videos/Current_Videos/"
defaultVideos = "/home/joaopedro/Videos/GGTP/Creating Videos/Default_Videos/"
savePath = "/home/joaopedro/Videos/GGTP/Creating Videos/Output_Videos/"
codec = 'libx264'
preset = 'ultrafast'
videoname = 'OutputVideo' + '.mp4'
fps = 30

intro = mpy.VideoFileClip(defaultVideos + "intro.mp4")
outro = mpy.VideoFileClip(defaultVideos + "outro.mp4")

# list where the clips are going to be stored
clips = [intro]

# every odd line of list.txt carries a '#'-prefixed title for the next video
title_list = []
with open("list.txt") as f:
    for num, line in enumerate(f, 1):
        if num % 2 != 0:
            title = line.replace('#', '')
            title = title.replace('\n', '')
            title_list.append(title)

i = 0
# iterate through the video folder, alphanumeric sorted
# every video in the folder are edited and added to the clips list
for path in sorted(os.listdir(videofolder)):
    full_path = os.path.join(videofolder, path)
    if os.path.isfile(full_path):
        currentclip = mpy.VideoFileClip(full_path)
        title_clip = gt.title_maker(title_list[i], currentclip)
        newclip = (title_clip.fx(vfx.fadein, 2.0)
                   .fx(vfx.fadeout, 2.0)
                   .resize(height=720))
        i += 1
        clips.append(newclip)
clips.append(outro)

finalclip = mpy.concatenate_videoclips(clips)
finalclip.audio = finalclip.audio.set_fps(44100)
# fix: clip.fx returns a NEW clip — the result must be kept, otherwise the
# audio normalization was silently discarded
finalclip = finalclip.fx(afx.audio_normalize)
finalclip.write_videofile(
    savePath + videoname,
    codec=codec,
    preset=preset,
    fps=fps
)
|
import numpy as np
from nasbench301.surrogate_models.encodings.encodings_nlp import encode_nlp
def encode(arch_strings, data, search_space, nlp_max_nodes, nb101_api):
    """Encode architectures and learning-curve features for a surrogate model.

    Args:
        arch_strings: iterable of architecture identifier strings (keys of data).
        data: mapping arch_str -> dict with a 'val_losses' list per epoch.
        search_space: only 'nlp' is supported here.
        nlp_max_nodes: max node count forwarded to encode_nlp.
        nb101_api: unused; kept for interface compatibility with other encoders.

    Returns:
        (X, y, None) where X is the list of encodings and y the list of
        per-epoch validation accuracies (100 - loss) as numpy arrays.

    Raises:
        NotImplementedError: for any search_space other than 'nlp'.
    """
    if search_space != 'nlp':
        # fix: previously fell through and returned None implicitly, which
        # crashed later at the caller's tuple unpack with a cryptic error
        raise NotImplementedError(
            "encode() only supports search_space='nlp', got %r" % (search_space,))
    epoch = 3
    X, y = [], []
    # single pass: the original computed lc_acc twice per architecture
    for arch_str in arch_strings:
        lc_acc = np.array([100 - loss for loss in data[arch_str]['val_losses']])
        enc = encode_nlp(compact=arch_str, max_nodes=nlp_max_nodes,
                         accs=lc_acc[:epoch], one_hot=False,
                         lc_feature=True, only_accs=False)
        X.append(enc)
        y.append(lc_acc)
    return X, y, None
|
import random
import os
from Crypto.Util.number import getPrime
from const import description, flag, logo
def log(message):
    """Print message prefixed with a blue "[+]" marker, then reset the color."""
    blue = "\x1b[34m"
    reset = "\x1b[0m"
    print(f"{blue}[+] {message}{reset}")
def rsa_0():
    """Challenge 0: show two random 512-bit numbers, ask for their product n."""
    while True:
        p = random.getrandbits(512)
        q = random.getrandbits(512)
        print(f"{p = }")
        print(f"{q = }")
        try:
            answer = int(input("[n?] > "))
        except ValueError:
            log("Please enter an integer\n\n\n")
            continue
        if answer == p * q:
            log("Correct! Proceed to the next challenge ->")
            return
        log("Wrong... Try again!\n\n\n")
def rsa_1():
    """Challenge 1: show m, e, n and ask for the textbook-RSA ciphertext c."""
    while True:
        p = getPrime(512, randfunc=os.urandom)
        q = getPrime(512, randfunc=os.urandom)
        m = random.getrandbits(128)
        e = 0x10001
        n = p * q
        print(f"{m = }")
        print(f"{e = }")
        print(f"{n = }")
        try:
            answer = int(input("[c?] > "))
        except ValueError:
            log("Please enter an integer\n\n\n")
            continue
        if answer == pow(m, e, n):
            log("Correct! Proceed to the final challenge!")
            return
        log("Wrong... Try again!\n\n\n")
def rsa_2():
    """Challenge 2: show p, q, e and a ciphertext c; ask for the plaintext m."""
    while True:
        p = getPrime(512, randfunc=os.urandom)
        q = getPrime(512, randfunc=os.urandom)
        e = 0x10001
        n = p * q
        d = pow(e, -1, (p - 1) * (q - 1))
        m = random.getrandbits(128)
        # re-draw m until it round-trips through encrypt/decrypt
        while pow(pow(m, e, n), d, n) != m:
            m = random.getrandbits(128)
        c = pow(m, e, n)
        print(f"{p = }")
        print(f"{q = }")
        print(f"{e = }")
        print(f"{c = }")
        try:
            guess = int(input("[m?] > "))
        except ValueError:
            log("Please enter an integer\n\n\n")
            continue
        if guess == m:
            log(f"Correct! Here's your reward: {flag}")
            return
        log("Wrong... Try again!\n\n\n")
if __name__ == "__main__":
print(logo)
challenges = [rsa_0, rsa_1, rsa_2]
for challenge_index in range(3):
print(description[challenge_index])
challenges[challenge_index]()
print("\n")
|
"""
This function takes in file path of clinical notes and outputs HPO terms that are successfully
mapped to the clinical note text. Unlike the common state-of-the-art, this NLP is not relying on the
Unified Medical Language System (UMLS) and therefore does not require a third-party license. Might still
subject to changes while benchmarking.
"""
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
import obo_parser
import itertools
import re
import os
import argparse
# Create recent HPO library of HPO terms only related to phenotypic abnormalities (HP:0000118)
# The parsed HPO library will be saved in the current directory and is used by the function.
hpo_url = "http://purl.obolibrary.org/obo/hp.obo"
# save file in folder called sources if not already existing
sourcedir = os.getcwd()
if not os.path.isdir(sourcedir + "/" + "sources"):
    os.mkdir(sourcedir + "/" + "sources")
output = "./sources/HPO_Terms.tsv"
phenotypic_abnormalities = "HP:0000118"
children_category = True
obo_parser.convert_obo_to_tsv(hpo_url, output, phenotypic_abnormalities, children_category)

# library rows: [HPO id, term name, synonym column (10th TSV field)]
library = []
with open(output) as HPO:
    for line in HPO:
        columns = line.split("\t")
        library.append([columns[0], columns[1], columns[9]])
HPO.close()

# extract HPO term names to keywords (skip the header row)
keywords = [row[1] for row in library[1:]]

# split keywords into single-word and multi-word lists, plus lowercase copies
singlekw, multiplekw = [], []
singlekw_low, multiplekw_low = [], []
for term in keywords:
    if ' ' in term:
        multiplekw.append(term)
        multiplekw_low.append(term.lower())
    else:
        singlekw.append(term)
        singlekw_low.append(term.lower())

# gather synonym strings (only rows whose synonym column ends with "]")
synonyms = []
for row in library[1:]:
    if row[2].endswith("]"):
        # drop parenthesised qualifiers, keep only the double-quoted synonyms
        row[2] = re.sub(r" \([^)]*\)", "", row[2])
        synonyms.append(re.findall('"(.*?)"', row[2]))

# split synonyms: terms with several synonyms go to "morethanone";
# single-synonym terms are split by single vs. multiple words
singlesyn, multiplesyn = [], []
singlesyn_low, multiplesyn_low = [], []
morethanonesyn, morethanonesyn_low = [], []
for group in synonyms:
    if len(group) > 1:
        for entry in group:
            morethanonesyn.append(entry)
            morethanonesyn_low.append(entry.lower())
    else:
        for entry in group:
            if ' ' in entry:
                multiplesyn.append(entry)
                multiplesyn_low.append(entry.lower())
            else:
                singlesyn.append(entry)
                singlesyn_low.append(entry.lower())
def myNLP2hpo(input_path, output_path, negation=False):
    """Main function: map HPO terms onto each clinical note in a folder.

    For every ``.txt`` file in input_path the note is tokenized, optionally
    negation-tagged, matched against the module-level HPO keyword/synonym
    lists (exact stemmed-token match for single words, substring match for
    multi-word terms), and the resulting (HPO id, name) pairs are written
    to ``<note>.HPO.txt`` in output_path.

    Args:
        input_path (str): Local path to the folder with clinical note texts. Title of the clinical text should
        refer to the patient, e.g. ID.
        output_path (str): Local path to folder where extracted HPO for each clinical note should be stored.
        negation (bool): Whether to add negation detection feature to the natural language processor.
    """
    # load in clinical notes one by one from the input_path
    clin_notes = os.listdir(input_path)
    for clin in clin_notes:
        if clin.endswith('.txt'):
            with open(input_path + "/" + clin, encoding="UTF-8") as file:
                note = str.replace(file.read(), "\n", "")
        else:
            # NOTE(review): a single non-.txt entry aborts the whole loop,
            # skipping any remaining notes — confirm this is intended
            print("There is no text file in the given directory.")
            break
        file.close()  # redundant: the `with` block already closed the file
        # tokenize text (split each word and assign it as token for easier processing)
        tokens = word_tokenize(note)
        if negation is True:
            # tag terms that follow negations with the prefix "Not_" until the next punctuation mark
            NEGATION_ADVERBS = ["no", "without", "nil", "not", "n't", "never", "none", "neither", "nor", "non"]
            punctuations = [",", ".", ";", ":", "!", "?", "(", ")", "[", "]", "{", "}", "/"]
            # NOTE(review): tokens[num+1] can raise IndexError when a negation
            # is the last token and no punctuation follows — confirm notes
            # always end with a punctuation mark
            for num, tok in enumerate(tokens, start=0):
                if tok.lower() in NEGATION_ADVERBS:
                    while tokens[num+1] not in punctuations:
                        tokens[num+1] = "Not_"+tokens[num+1]
                        num = num+1
            # in case a sentence starts with negation adverbs, the entire sentence is negated
            for num, tok in enumerate(tokens, start=0):
                if tok.lower() in NEGATION_ADVERBS and tokens[num-1] == ".":
                    while tokens[num+1] != ".":
                        tokens[num+1] = "Not_"+tokens[num+1]
                        num = num+1
            # rebuild the note text so substring matching sees the Not_ tags
            note = " ".join(tokens)
        # Stemming #######################################################################
        # stem the tokens, keywords and synonyms (take only stem of elements, 'model' and 'model'ing, no plural)
        stemmed_tokens = set([PorterStemmer().stem(tok).lower() for tok in tokens])
        stemmed_singlekw_low = set([PorterStemmer().stem(kw).lower() for kw in singlekw_low])
        stemmed_multiplekw_low = set([PorterStemmer().stem(kw).lower() for kw in multiplekw_low])
        stemmed_singlesyn_low = set([PorterStemmer().stem(syn).lower() for syn in singlesyn_low])
        stemmed_multiplesyn_low = set([PorterStemmer().stem(syn).lower() for syn in multiplesyn_low])
        stemmed_morethanonesyn_low = set([PorterStemmer().stem(syn).lower() for syn in morethanonesyn_low])
        # dictionaries mapping stemmed-lowercase form back to the original term
        singlekw_dict_stem = {PorterStemmer().stem(kw).lower(): kw for kw in singlekw}
        multiplekw_dict_stem = {PorterStemmer().stem(kw).lower(): kw for kw in multiplekw}
        singlesyn_dict_stem = {PorterStemmer().stem(syn).lower(): syn for syn in singlesyn}
        multiplesyn_dict_stem = {PorterStemmer().stem(syn).lower(): syn for syn in multiplesyn}
        morethanonesyn_dict_stem = {PorterStemmer().stem(syn).lower(): syn for syn in morethanonesyn}
        # lower dictionary for multiple ones
        multiplekw_dict_low = {kw: kw.capitalize() for kw in multiplekw_low}
        multiplesyn_dict_low = {kw: kw.capitalize() for kw in multiplesyn_low}
        morethanonesyn_dict_low = {syn: syn.capitalize() for syn in morethanonesyn_low}
        #####################################################################################
        #####################################################################################
        # now we have: single keywords, single synonyms, multiple-worded keywords, multiple-worded synonyms
        # and more than one synonym per term
        # and we have: notes tokens
        # all HP id, name and synonyms can be retrieved from the library
        # check if longer keywords, synonyms are in the clinical notes by matching substring
        HP_keywords_list = []
        HP_synonyms_list = []
        # saving the synonyms in the notes
        for syn in stemmed_multiplesyn_low:
            if syn in note.lower():
                HP_synonyms_list.append(syn)
        for syn in multiplesyn_low:
            if syn in HP_synonyms_list:
                continue
            else:
                if syn in note.lower():
                    HP_synonyms_list.append(syn)
        for syn in morethanonesyn_low:
            if syn in HP_synonyms_list:
                continue
            else:
                if syn in note.lower():
                    HP_synonyms_list.append(syn)
        for syn in stemmed_morethanonesyn_low:
            if syn in HP_synonyms_list:
                continue
            else:
                if syn in note.lower():
                    HP_synonyms_list.append(syn)
        # saving the longer keywords in the notes
        for kw in multiplekw_low:
            if kw in note.lower():
                HP_keywords_list.append(kw)
        for kw in stemmed_multiplekw_low:
            if kw in HP_keywords_list:
                continue
            else:
                if kw in note.lower():
                    HP_keywords_list.append(kw)
        # check if single keywords and synonyms are in the clinical note by looking for exact match by token
        HP_keywords = set(stemmed_singlekw_low) & set(stemmed_tokens)
        HP_synonyms = set(stemmed_singlesyn_low) & set(stemmed_tokens)
        # Gather all information together in a list
        HP_keywords_list += list(HP_keywords)
        HP_synonyms_list += list(HP_synonyms)
        # Collect HP_id information for every matched synonym and term
        # (resolve each matched lowercase/stemmed form back to its original term)
        patient_kw = []
        patient_syn = []
        for u in HP_keywords_list:
            if u in singlekw_dict_stem:
                patient_kw.append(singlekw_dict_stem[u])
            else:
                if u in multiplekw_dict_stem:
                    patient_kw.append(multiplekw_dict_stem[u])
                else:
                    if u in multiplekw_dict_low:
                        patient_kw.append(multiplekw_dict_low[u])
                    else:
                        print("The HPO keywords %s could not be found in the clinical note." % u)
        for g in HP_synonyms_list:
            if g in HP_keywords_list:
                continue
            if g in singlesyn_dict_stem:
                patient_syn.append(singlesyn_dict_stem[g])
            else:
                if g in multiplesyn_dict_stem:
                    patient_syn.append(multiplesyn_dict_stem[g])
                else:
                    if g in multiplesyn_dict_low:
                        patient_syn.append(multiplesyn_dict_low[g])
                    else:
                        if g in morethanonesyn_dict_low:
                            patient_syn.append(morethanonesyn_dict_low[g])
                        else:
                            if g in morethanonesyn_dict_stem:
                                patient_syn.append(morethanonesyn_dict_stem[g])
                            else:
                                print("The HPO synonym %s could not be found in the clinical notes." % g)
        # now we have the patient variable filled with HPO keywords or synonyms that fit to HPO terms in the library
        patient_HPO = []
        for row in patient_kw:
            # exclude library header
            for line in library[1:]:
                if row == line[1]:
                    patient_HPO.append([line[0], line[1]])
        for row in patient_syn:
            # exclude library header
            for line in library[1:]:
                if row in re.findall('"(.*?)"', line[2]):
                    patient_HPO.append([line[0], line[1]])
        # remove duplicated patient HPO terms
        patient_HPO.sort()
        patient_HPO_final = list(patient_HPO for patient_HPO, _ in itertools.groupby(patient_HPO))
        # print results in console
        print("\n", patient_HPO_final, "\n\n in total %s HPO terms successfully extracted from the clinical note %s."
              % (len(patient_HPO_final), clin))
        # Output in txt file
        # create output folder if not already exists
        sourcedir = os.getcwd()
        if os.path.isdir(sourcedir + "/" + output_path) is False:
            os.mkdir(sourcedir + "/" + output_path)
        # setup regex compiler to put a white space between HPO_id and HPO_term
        rx = re.compile(r'(?<=\d)(?=[^\d\s])')
        with open(output_path + "/" + clin.replace(".txt", ".HPO.txt"), "w") as out:
            out.write("Patient_HPO_Id\tId_Name\n")
            for item in patient_HPO_final:
                item = str().join(item)
                it = rx.sub('\t', item)
                out.write("%s\n" % it)
        out.close()  # redundant: the `with` block already closed the file
if __name__ == "__main__":
p = argparse.ArgumentParser(description="Map HPO terms to clinical note.")
p.add_argument("-i", "--input_path", help="Local file path to the clinical notes.")
p.add_argument("-o", "--output_path", help="Local path to folder to store the results.")
p.add_argument("-n", "--negation", action="store_true", help="Whether negation should be considered."
"To enable negation put -n.")
args = p.parse_args()
myNLP2hpo(
args.input_path,
args.output_path,
negation=args.negation,
)
|
"""
实在是莫得法了,用用api
create by judy 2019/09/10
"""
|
import torch
from model import UnStructuredModel
import pandas as pd
from os import listdir, mkdir
from os.path import isfile, join, isdir
files_path = "./data_10K_3K/"
#files_path = "./"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
files = ["yoy1.csv", "yoy2.csv", "yoy3.csv", "yoy4.csv", "yoy5.csv", "yoy6.csv", "yoy7.csv", "yoy8.csv", "yoy9.csv", "yoy10.csv","yoy11.csv"]
#files = ["yoy13.csv", "yoy12.csv"]
sections = ["Ticker", "Company Name", "Industry", "Top 100", "item1a", "item7"]
param_sections = ["item1a", "item7"]
max_length = 500
stride = 250
model_name = 'bert-base-uncased'
embeddingsPath = "embeddingdata" + '_'+ str(max_length) + '_' + str(stride)+ str(model_name)
embeddingsPath = join(files_path, embeddingsPath)
if not isdir(embeddingsPath):
mkdir(embeddingsPath)
def main():
    """Compute and cache embeddings for each filing row in the input CSVs.

    For every row, non-text metadata sections are copied as-is and the long
    text sections (param_sections) are embedded; the result dict is saved
    to a per-(Ticker, Year) ``.pt`` file. Rows already cached are skipped.
    """
    unstructuredmodel = UnStructuredModel(model_name, max_length, stride)
    for file in files:
        # fix: build the path with join instead of string concatenation
        df = pd.read_csv(join(files_path, file))
        for i in range(len(df)):
            row = df.iloc[i]
            embeddingDict = {}
            # assumes "Year" holds a 2-digit offset from 2000 — TODO confirm
            embeddingFile = join(embeddingsPath,
                                 row["Ticker"] + '_' + str(int(row["Year"]) + 2000) + ".pt")
            if isfile(embeddingFile):
                continue  # already cached from a previous run
            for section in sections:
                if section not in param_sections and section in row.keys():
                    embeddingDict[section] = row[section]
            for section in param_sections:
                # fix: isinstance instead of type(...) == str;
                # only embed sections with non-trivial text
                if section in row.keys() and isinstance(row[section], str) and len(row[section]) > 100:
                    embeddingDict[section] = unstructuredmodel.getEmbedding(text=row[section], if_pool=True)
            torch.save(embeddingDict, embeddingFile)
if __name__== "__main__":
main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.