f2 = lambda a, b : a * b
print(f2(5, 6))
|
import numpy as np
import math
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
import pickle
from lib.model import *
from lib.zfilter import ZFilter
from lib.util import *
from lib.trpo import trpo_step
from lib.data import *
import scipy.optimize
import gym
def trpo_learn(args):
#env params
env_name, batch_size, vv, als, ex_path, fig_path = args.env_id, args.batch_size, args.vv, args.als, args.ex_path, args.fig_path
#trpo params
max_kl, cr_lr, cg_step_size, damping = args.max_kl, args.cr_lr, args.cg_step_size, args.damping
#data
data_n_steps, max_genert_num, gamma, lambd = args.data_n_steps, args.max_genert_num, args.gamma, args.lambd
#set up
env = gym.make(env_name)
env.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.use_cuda and torch.cuda.is_available() else "cpu")
#device = torch.device("cpu")
zflt = ZFilter((env.observation_space.shape[0],), clip=5)
dtype = torch.float64
torch.set_default_dtype(dtype)
#model and optim
policy_model = ModelActor(env.observation_space.shape[0], env.action_space.shape[0]).to(device)
print(env.observation_space.shape[0])
critic_model = ModelCritic(env.observation_space.shape[0]).to(device)
#opt_policy = optim.Adam(policy_model.parameters(), lr = args.lr_policy)
opt_critic = optim.Adam(critic_model.parameters(), lr = args.lr_critic)
# data generate
gene = generate(policy_model, env, env_name, als, device, data_n_steps, ex_path, fig_path, vv, max_genert_num, zflt)
#train ...
V_loss, P_loss = [], []
for trj in gene:
states, actions, rewards, dones = trj['states'], trj['actions'], trj['rewards'], trj['dones']
states = torch.from_numpy(np.stack(states)).to(dtype).to(device)
actions = torch.from_numpy(np.stack(actions)).to(dtype).to(device)
rewards = torch.from_numpy(np.stack(rewards)).to(dtype).to(device)
dones = torch.from_numpy(np.stack(dones)).to(dtype).to(device)
with torch.no_grad():
values = critic_model(states)
old_logprob = policy_model.get_log_prob(states, actions)
adv, ref = cal_adv_ref(rewards, dones, values, gamma, lambd, device)
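# Note (assumption): cal_adv_ref from lib.util is taken to compute GAE-style
# advantages and value targets, roughly:
#   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t)
#   adv_t   = delta_t + gamma * lambd * (1 - done_t) * adv_{t+1}
#   ref_t   = adv_t + V(s_t)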
opt_iter = int(math.ceil(states.shape[0]/batch_size))
V_loss_, P_loss_ = [], []
#for epoch in range(args.ppo_epoches):
perm = np.arange(states.shape[0])
np.random.shuffle(perm)
perm = torch.LongTensor(perm).to(device)
#states, actions, ref = states[perm].clone(), actions[perm].clone(), ref[perm].clone()
#adv, old_logprob = adv[perm].clone(), old_logprob[perm].clone()
"""update critic, another way to optimize, which uses bfgs"""
v_loss = 0
'''
def get_value_loss(flat_params):
set_params(critic_model, torch.tensor(flat_params))
for param in critic_model.parameters():
if param.grad is not None:
param.grad.data.fill_(0)
values_pred = critic_model(states)
value_loss = (values_pred - ref).pow(2).mean()
print(values_pred)
print(ref)
# weight decay
for param in critic_model.parameters():
value_loss += param.pow(2).sum() * 1e-3
value_loss.backward()
v_loss = value_loss.data.cpu().numpy()
print(v_loss)
return value_loss.item(), get_flat_grad_from(critic_model.parameters()).cpu().numpy()
flat_params, _, opt_info = scipy.optimize.fmin_l_bfgs_b(get_value_loss,get_params(critic_model).detach().cpu().numpy(), maxiter=25)
set_params(critic_model, torch.tensor(flat_params))
'''
#critic optim
for i in range(10):
opt_critic.zero_grad()
values = critic_model(states)
loss_v = F.mse_loss(values, ref)
loss_v.backward()
v_loss = loss_v.data.cpu().numpy()
opt_critic.step()
#print(v_loss)
#actor optim
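# The closure below is the standard TRPO importance-sampled surrogate loss
#   L(theta) = -E[ A * pi_theta(a|s) / pi_old(a|s) ],
# and trpo_step (from lib.trpo) is assumed to maximize it subject to a
# KL(pi_old || pi_theta) <= max_kl constraint via conjugate gradient plus a
# backtracking line search.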
def get_loss():
log_prob = policy_model.get_log_prob(states, actions)
action_loss_v = -adv* torch.exp(log_prob - old_logprob)
return action_loss_v.mean()
def get_kl():
return policy_model.get_kl(states, policy_model)
p_loss = trpo_step(policy_model, get_loss, get_kl, max_kl, cr_lr, cg_step_size, damping, device)
P_loss.append(p_loss)
V_loss.append(v_loss)
pickle.dump((policy_model, critic_model, zflt), open(ex_path+env_name+'_model_'+als+vv+'.p', 'wb'))
plot(0, V_loss, fig_path+'/loss/', env_name+als+vv+'v_loss')
plot(1, P_loss, fig_path+'/loss/', env_name+als+vv+'p_loss')
|
import logging
from fess.test import assert_equal, assert_startswith
from fess.test.ui import FessContext
from playwright.sync_api import Playwright, sync_playwright
logger = logging.getLogger(__name__)
def setup(playwright: Playwright) -> FessContext:
context: FessContext = FessContext(playwright)
context.login()
return context
def destroy(context: FessContext) -> None:
context.close()
def run(context: FessContext) -> None:
logger.info(f"start")
page: "Page" = context.get_admin_page()
label_name: str = context.create_label_name()
# Click text=クローラー (Crawler)
page.click("text=クローラー")
# Click text=重複ホスト (Duplicate Hosts)
page.click("text=重複ホスト")
assert_equal(page.url, context.url("/admin/duplicatehost/"))
# Click the duplicate host entry created for this label
page.click(f"text={label_name}X")
assert_startswith(page.url, context.url("/admin/duplicatehost/details/4/"))
# Click text=削除 (Delete)
page.click("text=削除")
# Click text=キャンセル (Cancel)
page.click("text=キャンセル")
# Click text=削除 (Delete)
page.click("text=削除")
# Confirm deletion: click text=キャンセル 削除 >> button[name="delete"]
page.click("text=キャンセル 削除 >> button[name=\"delete\"]")
assert_equal(page.url, context.url("/admin/duplicatehost/"))
page.wait_for_load_state("domcontentloaded")
table_content: str = page.inner_text("section.content")
assert_equal(table_content.find(label_name), -1,
f"{label_name} in {table_content}")
if __name__ == "__main__":
with sync_playwright() as playwright:
context: FessContext = setup(playwright)
run(context)
destroy(context)
|
import logging
import os
import yaml
import unittest
import numpy as np
from copy import copy
from ISR.models.rdn import RDN
from ISR.predict.predictor import Predictor
from unittest.mock import patch, Mock
class PredictorClassTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
logging.disable(logging.CRITICAL)
cls.setup = yaml.load(open(os.path.join('tests', 'data', 'config.yml'), 'r'), Loader=yaml.FullLoader)
cls.RDN = RDN(arch_params=cls.setup['rdn'], patch_size=cls.setup['patch_size'])
def fake_folders(kind):
return ['data2.gif', 'data1.png', 'data0.jpeg']
def nullifier(*args):
pass
with patch('os.listdir', side_effect=fake_folders):
with patch('os.mkdir', return_value=True):
cls.predictor = Predictor(input_dir='dataname', output_dir='out_dir')
cls.predictor.logger = Mock(return_value=True)
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self.pred = copy(self.predictor)
pass
def tearDown(self):
pass
def test__load_weights_with_no_weights(self):
self.pred.weights_path = None
try:
self.pred._load_weights()
except:
self.assertTrue(True)
else:
self.assertTrue(False)
def test__load_weights_with_valid_weights(self):
def raise_path(path):
raise ValueError(path)
self.pred.model = self.RDN
self.pred.model.model.load_weights = Mock(side_effect=raise_path)
self.pred.weights_path = 'a/path'
try:
self.pred._load_weights()
except ValueError as e:
self.assertTrue(str(e) == 'a/path')
else:
self.assertTrue(False)
def test__make_directory_structure(self):
self.pred.weights_path = 'a/path/arch-weights_session1_session2.hdf5'
self.pred._make_directory_structure()
self.assertTrue(self.pred.basepath == 'arch-weights/session1/session2')
def test__forward_pass_pixel_range_and_type(self):
def valid_sr_output(*args):
sr = np.random.random((1, 20, 20, 3))
sr[0, 0, 0, 0] = 0.5
return sr
self.pred.model = self.RDN
self.pred.model.model.predict = Mock(side_effect=valid_sr_output)
with patch('imageio.imread', return_value=np.random.random((10, 10, 3))):
sr = self.pred._forward_pass('file_path')
self.assertTrue(type(sr[0, 0, 0]) is np.uint8)
self.assertTrue(np.all(sr >= 0.0))
self.assertTrue(np.all(sr <= 255.0))
self.assertTrue(np.any(sr > 1.0))
self.assertTrue(sr.shape == (20, 20, 3))
def test__forward_pass_4_channels(self):
def valid_sr_output(*args):
sr = np.random.random((1, 20, 20, 3))
sr[0, 0, 0, 0] = 0.5
return sr
self.pred.model = self.RDN
self.pred.model.model.predict = Mock(side_effect=valid_sr_output)
with patch('imageio.imread', return_value=np.random.random((10, 10, 4))):
sr = self.pred._forward_pass('file_path')
self.assertTrue(sr is None)
def test__forward_pass_1_channel(self):
def valid_sr_output(*args):
sr = np.random.random((1, 20, 20, 3))
sr[0, 0, 0, 0] = 0.5
return sr
self.pred.model = self.RDN
self.pred.model.model.predict = Mock(side_effect=valid_sr_output)
with patch('imageio.imread', return_value=np.random.random((10, 10, 1))):
sr = self.pred._forward_pass('file_path')
self.assertTrue(sr is None)
def test_get_predictions(self):
self.pred._load_weights = Mock(return_value=True)
self.pred._forward_pass = Mock(return_value=True)
with patch('imageio.imwrite', return_value=True):
self.pred.get_predictions(self.RDN, 'a/path/arch-weights_session1_session2.hdf5')
pass
def test_output_folder_and_dataname(self):
self.assertTrue(self.pred.data_name == 'dataname')
self.assertTrue(self.pred.output_dir == os.path.join('out_dir', 'dataname'))
def test_valid_extensions(self):
self.assertTrue(self.pred.img_ls == ['data1.png', 'data0.jpeg'])
def test_no_valid_images(self):
def invalid_folder(kind):
return ['data2.gif', 'data1.extension', 'data0']
with patch('os.listdir', side_effect=invalid_folder):
with patch('os.mkdir', return_value=True):
try:
self.predictor = Predictor(input_dir='lr', output_dir='sr')
except ValueError as e:
self.assertTrue('image' in str(e))
else:
self.assertTrue(False)
|
import os
import bpy
import anm, binmdl, col, cmn, mdl, pth, cmb  # cmb is assumed to exist alongside the other format modules (used by GrezzoCmbImport)
from bpy_extras.io_utils import ImportHelper, ExportHelper
from bpy.props import StringProperty, BoolProperty, EnumProperty, FloatProperty
from bpy.types import Operator
from bStream import *
class MansionBinImport(bpy.types.Operator, ImportHelper):
bl_idname = "import_model.mansionbin"
bl_label = "Import Bin"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
@classmethod
def poll(cls, context):
return context is not None
filename_ext = ".bin"
filter_glob: StringProperty(
default="*.bin",
options={'HIDDEN'},
maxlen=255,
)
#apply_wrap_modes: BoolProperty(
# default=False
#)
def execute(self, context):
binmdl.bin_model_import(self.filepath)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class MansionBinExport(bpy.types.Operator, ExportHelper):
bl_idname = "export_model.mansionbin"
bl_label = "Export Bin"
bl_description = "Export Bin model using this node as the root"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
@classmethod
def poll(cls, context):
return context is not None
filename_ext = ".bin"
filter_glob: StringProperty(
default="*.bin",
options={'HIDDEN'},
maxlen=255,
)
compat_mode: BoolProperty(
default=True,
name="Console Compatible",
description="Export model as console compatible, disable for smaller models that only work in emulator."
)
use_tristrips: BoolProperty(
default=False,
name="Use Tristrips (EXPERIMENTAL)",
description="Use tristrip primitives. This is an experimental mode export mode."
)
def execute(self, context):
if(os.path.exists(self.filepath)):
os.remove(self.filepath)
binmdl.bin_model_export(self.filepath, self.use_tristrips, self.compat_mode)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class MansionAnmImport(bpy.types.Operator, ImportHelper):
bl_idname = "import_anim.mansionanm"
bl_label = "Import ANM"
bl_description = "Import Bin animation using this node as the root"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
@classmethod
def poll(cls, context):
return context is not None
filename_ext = ".anm"
filter_glob: StringProperty(
default="*.anm",
options={'HIDDEN'},
maxlen=255,
)
def execute(self, context):
anm.load_anim(self.filepath)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class MansionAnmExport(bpy.types.Operator, ExportHelper):
bl_idname = "export_anim.mansionanm"
bl_label = "Export ANM"
bl_description = "Export Bin animation using this node as the root"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
@classmethod
def poll(cls, context):
return context is not None
filename_ext = ".anm"
filter_glob: StringProperty(
default="*.anm",
options={'HIDDEN'},
maxlen=255,
)
Loop: BoolProperty(
default=False
)
def execute(self, context):
if(os.path.exists(self.filepath)):
os.remove(self.filepath)
anm.write_anim(self.filepath, self.Loop)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class MansionMDLImport(bpy.types.Operator, ImportHelper):
bl_idname = "import_model.mansionmdl"
bl_label = "Import MDL"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
@classmethod
def poll(cls, context):
return context is not None
filename_ext = ".mdl"
filter_glob: StringProperty(
default="*.mdl",
options={'HIDDEN'},
maxlen=255,
)
def execute(self, context):
mdl.mdl_model(self.filepath)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class MansionCmnImport(bpy.types.Operator, ImportHelper):
bl_idname = "import_model.mansioncmn"
bl_label = "Import CMN"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
@classmethod
def poll(cls, context):
return context is not None
filename_ext = ".cmn"
filter_glob: StringProperty(
default="*.cmn",
options={'HIDDEN'},
maxlen=255,
)
def execute(self, context):
cmn.load_anim(self.filepath)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class MansionCmnExport(bpy.types.Operator, ExportHelper):
bl_idname = "export_model.mansioncmn"
bl_label = "Export CMN"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
@classmethod
def poll(cls, context):
return context is not None
filename_ext = ".cmn"
filter_glob: StringProperty(
default="*.cmn",
options={'HIDDEN'},
maxlen=255,
)
def execute(self, context):
if(os.path.exists(self.filepath)):
os.remove(self.filepath)
if(cmn.save_anim(self.filepath)):
return {'FINISHED'}
else:
#TODO: Show error
return {'CANCELLED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class MansionPthImport(bpy.types.Operator, ImportHelper):
bl_idname = "import_model.mansionpth"
bl_label = "Import PTH"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
@classmethod
def poll(cls, context):
return context is not None
filename_ext = ".pth"
filter_glob: StringProperty(
default="*.pth",
options={'HIDDEN'},
maxlen=255,
)
def execute(self, context):
pth.load_anim(self.filepath)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class MansionPthExport(bpy.types.Operator, ExportHelper):
bl_idname = "export_model.mansionpth"
bl_label = "Export PTH"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
@classmethod
def poll(cls, context):
return context is not None
filename_ext = ".pth"
filter_glob: StringProperty(
default="*.pth",
options={'HIDDEN'},
maxlen=255,
)
def execute(self, context):
if(os.path.exists(self.filepath)):
os.remove(self.filepath)
if(pth.save_anim(self.filepath)):
return {'FINISHED'}
else:
#TODO: Show error
return {'CANCELLED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class MansionColImport(bpy.types.Operator, ImportHelper):
bl_idname = "import_model.mansioncol"
bl_label = "Import COLMP"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
@classmethod
def poll(cls, context):
return context is not None
filename_ext = ".mp"
filter_glob: StringProperty(
default="*.mp",
options={'HIDDEN'},
maxlen=255,
)
def execute(self, context):
col.load_model(self.filepath)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class GrezzoCmbImport(bpy.types.Operator, ImportHelper):
bl_idname = "import_model.grezzocmb"
bl_label = "Import CMB"
filepath = bpy.props.StringProperty(subtype="FILE_PATH")
@classmethod
def poll(cls, context):
return context is not None
filename_ext = ".cmb"
filter_glob: StringProperty(
default="*.cmb",
options={'HIDDEN'},
maxlen=255,
)
def execute(self, context):
cmb.import_model(self.filepath)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class TOPBAR_MT_file_import_mansion(bpy.types.Menu):
bl_idname = 'import_model.mansion'
bl_label = "Luigi's Mansion"
def draw(self, context):
layout = self.layout
self.layout.operator(MansionBinImport.bl_idname, text="Bin (.bin)")
#self.layout.operator(MansionMDLImport.bl_idname, text="MDL (.mdl)")
self.layout.operator(MansionColImport.bl_idname, text="Collision (.mp)")
self.layout.operator(MansionCmnImport.bl_idname, text="CMN (.cmn)")
self.layout.operator(MansionPthImport.bl_idname, text="PTH (.pth)")
#self.layout.operator(GrezzoCmbImport.bl_idname, text="Grezzo CMB (.cmb)")
class TOPBAR_MT_file_export_mansion(bpy.types.Menu):
bl_idname = 'export_model.mansion'
bl_label = "Luigi's Mansion"
def draw(self, context):
layout = self.layout
self.layout.operator(MansionCmnExport.bl_idname, text="CMN (.cmn)")
self.layout.operator(MansionPthExport.bl_idname, text="PTH (.pth)")
def menu_func_import(self, context):
self.layout.menu(TOPBAR_MT_file_import_mansion.bl_idname)
def menu_func_export(self, context):
self.layout.menu(TOPBAR_MT_file_export_mansion.bl_idname)
def register():
bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
bpy.types.TOPBAR_MT_file_export.append(menu_func_export)
def unregister():
bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)
|
#Sol Courtney, Columbia U Department of Astronomy and Astrophysics, NYC 2016
#swc2124@columbia.edu
#--[DESCRIPTION]---------------------------------------------------------#
'''
Date: May 2016
Handler for Twitter text parsing
'''
#--[IMPORTS]--------------------------------------------------------------#
from nltk.tokenize import word_tokenize
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
plt.ioff()
from time import gmtime, strftime, sleep
from astropy.table import Table
from collections import Counter
import numpy as np
import nltk, time, os,sys,json,socket
from datetime import datetime
import csv
#--[PROGRAM-OPTIONS]------------------------------------------------------#
nltk.data.path.append('/root/SHARED/nltk_data/')
hostname = socket.gethostname()
if hostname == 'sol-Linux':
OUT_PUT_PATH = '/home/sol/CLUSTER_RAID/Tweet_Output/Clean_Words/'
All_Words_PATH = '/home/sol/CLUSTER_RAID/Tweet_Code/dictionary.txt'
Table_PATH = '/home/sol/CLUSTER_RAID/Tweet_Output/'
else:
OUT_PUT_PATH = '/root/SHARED/Tweet_Output/Clean_Words/'
All_Words_PATH = '/root/SHARED/Tweet_Code/dictionary.txt'
Table_PATH = '/root/SHARED/Tweet_Output/'
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
def ct(text, colour=WHITE):
seq = "\x1b[1;%dm" % (30+colour) + text + "\x1b[0m"
return seq
def list_files(path):
# returns a list of names (with extension, without full path) of all files
# in folder path
files = []
for name in os.listdir(path):
if os.path.isfile(os.path.join(path, name)):
files.append(name)
return files
Names = {'PRP$':'pronoun, possessive','VBG':'verb, present participle or gerund',
'VBD':'verb, past tense','VBN':'verb, past participle','VBP':'verb, present tense not 3rd person singular',
'WDT':'determiner, WH','JJ':'adjective or numeral, ordinal','WP': 'pronoun, WH',
'VBZ':'verb, present tense 3rd person singular','DT':'determiner','RP':'particle',
'NN':'noun, common, singular or mass','TO':'"to" as preposition or infinitive marker',
'PRP':'pronoun, personal','RB':'adverb','NNS':'noun, common plural','NNP':'noun, proper singular',
'VB':'verb, base form','WRB':'adverb, WH', 'CC':'conjunction, coordinating', 'RBR':'adverb, comparative',
'CD':'cardinal numeral','-NONE-':'No matching tags found','EX':'existential there',
'IN':'conjunction or subordinating preposition','WP$':'pronoun, possessive WH',
'MD':'modal auxiliary', 'JJS':'adjective, superlative', 'JJR':'adjective, comparative',
'PDT': 'pre-determiner','RBS':'adverb, superlative', 'FW': 'foreign word',
'NNPS': 'noun, proper plural', 'UH': 'interjection'}
Color_Keys = {'NN':GREEN, 'NNS':GREEN, 'NNP':GREEN, 'NNPS':GREEN, 'MD':YELLOW,
'JJR': YELLOW, 'JJS': YELLOW, 'JJ': YELLOW, 'DT': YELLOW,
'VBG':BLUE,'VBD':BLUE,'VBN':BLUE,'VBP':BLUE,'VBZ':BLUE,'VB':BLUE,
'RBS': MAGENTA,'RBR': MAGENTA,'RB': MAGENTA,'WRB': MAGENTA,
'PRP$':CYAN, 'PRP':CYAN, 'WP':CYAN, 'WP$':CYAN, "IN": RED,
}
names = [ 'time', 'weekday','PRP$', 'VBG', 'VBD',
'VBN', 'VBP', 'WDT', 'JJ', 'WP', 'VBZ', 'DT',
'RP', 'NN', 'TO', 'PRP', 'RB', 'NNS', 'NNP',
'VB', 'WRB', 'CC', 'RBR', 'CD', '-NONE-',
'EX', 'IN', 'WP$', 'MD', 'JJS', 'JJR',
'PDT', 'RBS' , 'FW', 'UH']
dtypes = [ 'float','S10','int', 'int', 'int',
'int', 'int', 'int', 'int', 'int', 'int',
'int', 'int', 'int', 'int', 'int', 'int',
'int', 'int', 'int', 'int', 'int', 'int',
'int', 'int', 'int', 'int', 'int', 'int',
'int', 'int', 'int', 'int', 'int', 'int']
Record_book_keys = ['PRP$', 'VBG', 'VBD',
'VBN', 'VBP', 'WDT', 'JJ', 'WP', 'VBZ', 'DT',
'RP', 'NN', 'TO', 'PRP', 'RB', 'NNS', 'NNP',
'VB', 'WRB', 'CC', 'RBR', 'CD', '-NONE-',
'EX', 'IN', 'WP$', 'MD', 'JJS', 'JJR',
'PDT', 'RBS' , 'FW', 'UH']
plt_clrs = ['indigo','gold','hotpink','firebrick','indianred','sage','yellow','mistyrose',
'darkolivegreen','olive','darkseagreen','pink','tomato','lightcoral','orangered','navajowhite','lime','palegreen',
'darkslategrey','greenyellow','burlywood','seashell','fuchsia','papayawhip','chartreuse','dimgray',
'black','peachpuff','springgreen','aquamarine','orange','lightsalmon','darkslategray','brown',
'indigo','gold','hotpink','firebrick','indianred','sage','yellow','mistyrose']
try:
os.system('clear')
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
last_total_words = 0
total_words = 0
while True:
Date = datetime.now().strftime("%a_%b_%d_%Y_%H_%M")
date = Date
count_all = Counter()
words = []
print ct("-" * 70, WHITE)
print ct(datetime.now().strftime("%a %b %d, %Y %H:%M"), BLUE) + "\t -- \tlast n_words: " + ct(str(last_total_words), RED)
while date == Date:
for word_set in list_files(OUT_PUT_PATH):
with open(OUT_PUT_PATH+word_set,'r') as f:
page = f.read()
with open(OUT_PUT_PATH+word_set,'w') as f:
pass
#os.system('cat ~/SHARED/wordsloded.txt')
#os.system('cat ~/SHARED/cleaning.txt')
new_words = [tweet.replace('\n','').replace('[','').replace(']','').replace('"','').replace(' ','') for tweet in page.split(',') ]
words += [i for i in new_words if i != '']
#print word_set, 'has ', len(words)
#sleep(3)
#print words
#os.system('cat ~/SHARED/counting.txt')
sys.stdout.write("\r" + ct(datetime.now().strftime("%H:%M:%S"), BLUE) + " :\t" + ct(str(len(words)), GREEN))
date = datetime.now().strftime("%a_%b_%d_%Y_%H_%M")
last_total_words = total_words
total_words = len(words)
sys.stdout.write("\n")
sys.stdout.flush()
print ct("Tagging...", BLUE)
tagged = nltk.pos_tag(words)
print ct("Counting...", BLUE)
count_all.update(tagged)
os.system('clear')
print "\t", ct("Number", CYAN), "\t", ct("Word", WHITE), " " * 11, ct("Type", WHITE)
print ct("-" * 70, WHITE)
for i in Counter(tagged).most_common(50):
_space = 15 - len(i[0][0])
if i[0][1] in Color_Keys:
color = Color_Keys[i[0][1]]
print("\t", ct(str(i[1]), CYAN), '\t', ct(i[0][0], WHITE), ' ' * _space, ct(Names[i[0][1]], color))
else:
print("\t", ct(str(i[1]), RED), '\t', ct(i[0][0], RED), ' ' * _space, ct(Names[i[0][1]], RED))
sleep(0.01 + (np.random.ranf()/1e1))
wrd_type_keys = []
plot_data = []
for (w, k), n in count_all.most_common():
if k in wrd_type_keys:
continue
else:
wrd_type_keys.append(k)
for wrd_typ in wrd_type_keys:
num = 0
for (w, k), n in count_all.most_common():
if k == wrd_typ:
num += n
plot_data.append((wrd_typ, num))
# print plot_data
for wrd_typ, num in plot_data:
_num = round(np.log10(num), 1)
# print wrd_typ, _num
ax.bar(wrd_typ, _num, align='center')
ax.set_title("Word Type Frequency\ntotal words:" + str(total_words) + "\n" + datetime.now().strftime("%a %b %d, %Y %H:%M"))
ax.axes.tick_params(labelrotation=90)
fig.savefig(Table_PATH + "plot")
ax.clear()
# plt.show()
except KeyboardInterrupt:
os.system('clear')
sys.exit(0)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
import deepy.nn.layer as layer
class OriginalVGG(nn.Module):
"""VGG8, 11, 13, 16, and 19
>>> device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
>>> net = OriginalVGG('VGG8').to(device)
>>> summary(net , (3, 32, 32))
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 64, 32, 32] 1,792
BatchNorm2d-2 [-1, 64, 32, 32] 128
ReLU-3 [-1, 64, 32, 32] 0
Conv2d-4 [-1, 64, 16, 16] 36,928
Conv2d-5 [-1, 128, 16, 16] 73,856
BatchNorm2d-6 [-1, 128, 16, 16] 256
ReLU-7 [-1, 128, 16, 16] 0
Conv2d-8 [-1, 128, 8, 8] 147,584
Conv2d-9 [-1, 256, 8, 8] 295,168
BatchNorm2d-10 [-1, 256, 8, 8] 512
ReLU-11 [-1, 256, 8, 8] 0
Conv2d-12 [-1, 256, 4, 4] 590,080
Conv2d-13 [-1, 512, 4, 4] 1,180,160
BatchNorm2d-14 [-1, 512, 4, 4] 1,024
ReLU-15 [-1, 512, 4, 4] 0
Conv2d-16 [-1, 512, 2, 2] 2,359,808
Conv2d-17 [-1, 512, 2, 2] 2,359,808
BatchNorm2d-18 [-1, 512, 2, 2] 1,024
ReLU-19 [-1, 512, 2, 2] 0
Conv2d-20 [-1, 512, 1, 1] 2,359,808
AvgPool2d-21 [-1, 512, 1, 1] 0
Linear-22 [-1, 10] 5,130
================================================================
Total params: 9,413,066
Trainable params: 9,413,066
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.01
Forward/backward pass size (MB): 3.10
Params size (MB): 35.91
Estimated Total Size (MB): 39.02
----------------------------------------------------------------
"""
def __init__(self, vgg_name, down_sampling_layer=nn.Conv2d):
super().__init__()
self.CFG = {
'VGG8': [64, 'D', 128, 'D', 256, 'D', 512, 'D', 512, 'D'],
'VGG11': [64, 'D', 128, 'D', 256, 256, 'D', 512, 512, 'D', 512, 512, 'D'],
'VGG13': [64, 64, 'D', 128, 128, 'D', 256, 256, 'D', 512, 512, 'D', 512, 512, 'D'],
'VGG16': [64, 64, 'D', 128, 128, 'D', 256, 256, 256, 'D', 512, 512, 512, 'D', 512, 512, 512, 'D'],
'VGG19': [64, 64, 'D', 128, 128, 'D', 256, 256, 256, 256, 'D', 512, 512, 512, 512, 'D', 512, 512, 512, 512, 'D'],
}
self.down_sampling_layer = down_sampling_layer
self.features = self._make_layers(self.CFG[vgg_name])
self.classifier = nn.Linear(512, 10)
def forward(self, x):
out = self.features(x)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def _make_layers(self, cfg):
layers = []
in_channels = 3
for x in cfg:
if x == 'D':
layers += [self.down_sampling_layer(
in_channels, in_channels,
kernel_size=3, stride=2, padding=1)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ReLU(inplace=True)]
in_channels = x
layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
return nn.Sequential(*layers)
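# Config encoding used by _make_layers: an integer entry adds a 3x3 Conv ->
# BatchNorm -> ReLU block with that many output channels, while a 'D' entry adds
# a stride-2 3x3 down_sampling_layer that halves the spatial resolution.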
class ConvNormAct(nn.Module):
""" This module applies
conv => normalization => activation
multiple times.
"""
def __init__(self, in_channels: int, out_channels: int, conv,
normalization, kernel_size: int = 3, padding: int = 1, activation=nn.ReLU,
times: int = 2):
super().__init__()
self.times = times
layers = [
conv(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
padding=padding,
bias=False),
normalization(out_channels),
activation()]
for i in range(self.times - 1):
layers.extend([
conv(in_channels=out_channels,
out_channels=out_channels,
kernel_size=kernel_size,
padding=padding,
bias=False),
normalization(out_channels),
activation()])
self.conv = nn.Sequential(*layers)
def forward(self, x):
x = self.conv(x)
return x
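# Usage sketch (assuming 2D inputs): ConvNormAct(3, 64, conv=nn.Conv2d,
# normalization=nn.BatchNorm2d, times=2) maps an (N, 3, H, W) tensor to
# (N, 64, H, W) through two Conv3x3 -> BatchNorm2d -> ReLU stages.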
class Down(nn.Module):
def __init__(self, in_channels: int, out_channels: int, conv,
down_conv, normalization,
conv_kernel_size=3,
conv_padding=1,
down_kernel_size=3,
down_padding=1,
activation=nn.ReLU):
super().__init__()
self.mpconv = nn.Sequential(
down_conv(in_channels=in_channels, out_channels=in_channels,
padding=down_padding, kernel_size=down_kernel_size,
stride=2, bias=False),
ConvNormAct(in_channels, out_channels,
conv, normalization,
conv_kernel_size, conv_padding,
activation)
)
def forward(self, x):
x = self.mpconv(x)
return x
class _VGGNd(nn.Module):
""" _VGGNd
"""
def __init__(self, in_channels: int, num_classes: int,
base_channels: int, depth: int,
conv, down_conv,
normalization,
global_pool,
max_channels: int=512,
activation=nn.ReLU):
super().__init__()
self.depth = depth
self.inc = ConvNormAct(in_channels=in_channels,
out_channels=base_channels,
conv=conv,
normalization=normalization,
kernel_size=3,
padding=1,
activation=activation)
self.down_blocks = nn.ModuleList(
[
Down(
in_channels=min(base_channels*(2**i), max_channels),
out_channels=min(base_channels*(2**(i+1)), max_channels),
conv=conv,
down_conv=down_conv,
normalization=normalization,
down_kernel_size=3,
down_padding=1,
activation=activation
)
for i in range(depth)
]
)
self.pool = global_pool(1)
self.linear = nn.Sequential(
nn.Linear(min(base_channels*(2**depth), max_channels), 64),
nn.ReLU(inplace=True),
nn.Dropout(p=0.2),
nn.Linear(64, num_classes),
)
def forward(self, x):
x = self.inc(x)
for i, l in enumerate(self.down_blocks):
x = l(x)
x = self.pool(x)
x = x.view(x.size(0), -1)
x = self.linear(x)
return x
class VGG1d(_VGGNd):
def __init__(self, in_channels: int, num_classes: int,
base_channels: int, depth: int,
conv=nn.Conv1d, down_conv=nn.Conv1d,
normalization=nn.BatchNorm1d,
global_pool=nn.AdaptiveAvgPool1d,
max_channels: int=512,
activation=nn.ReLU):
super().__init__(
in_channels=in_channels,
num_classes=num_classes,
base_channels=base_channels,
depth=depth,
conv=conv,
down_conv=down_conv,
normalization=normalization,
global_pool=global_pool,
max_channels=max_channels,
activation=activation,
)
class VGG2d(_VGGNd):
def __init__(self, in_channels: int, num_classes: int,
base_channels: int, depth: int,
conv=nn.Conv2d, down_conv=nn.Conv2d,
normalization=nn.BatchNorm2d,
global_pool=nn.AdaptiveAvgPool2d,
max_channels: int=512,
activation=nn.ReLU):
super().__init__(
in_channels=in_channels,
num_classes=num_classes,
base_channels=base_channels,
depth=depth,
conv=conv,
down_conv=down_conv,
normalization=normalization,
global_pool=global_pool,
max_channels=max_channels,
activation=activation,
)
|
import json
from scripts.user_module_calls import \
read_user_passwords, USERS_PER_LOAD_BALANCER
def read_load_balancer_dns():
with open("output.json", 'r') as f:
return json.load(f)['load_balancer_dnss']['value']
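# Assumed layout of output.json (e.g. a terraform output dump):
#   {"load_balancer_dnss": {"value": ["lb-0.example.com", "lb-1.example.com", ...]}}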
def create_password_file_content(number_of_users: int):
user_passwords = read_user_passwords()
dns_names = read_load_balancer_dns()
user_strings = []
for counter, up in enumerate(user_passwords):
if counter >= number_of_users:
break # Done
dns_name = dns_names[int(up.number / USERS_PER_LOAD_BALANCER)]
user_strings.append(
f'jupyter: {dns_name}/{up.user}/jupyter\n'
f'airflow: {dns_name}/{up.user}/airflow\n'
f'username: {up.user}\n'
f'password: {up.password}')
with open("user_credentials.txt", "w") as f:
for line in user_strings:
f.write(line + "\n")
f.write('\n\n\n')
|
"""Tests for models of contributions (comments)."""
from django.test import TestCase
from nose.tools import raises
from geokey.contributions.models import Comment, post_save_count_update
from ..model_factories import ObservationFactory, CommentFactory
class TestCommentPostSave(TestCase):
def test_post_save_comment_count_update(self):
observation = ObservationFactory()
CommentFactory.create_batch(5, **{'commentto': observation})
comment = CommentFactory.create(**{
'commentto': observation,
'status': 'deleted'
})
post_save_count_update(
Comment,
instance=comment,
created=True)
observation.refresh_from_db()
self.assertEqual(observation.num_media, 0)
self.assertEqual(observation.num_comments, 5)
class CommentTest(TestCase):
@raises(Comment.DoesNotExist)
def test_delete_comment(self):
comment = CommentFactory()
comment.delete()
Comment.objects.get(pk=comment.id)
def test_delete_nested(self):
comment = CommentFactory()
response = CommentFactory(**{'respondsto': comment})
CommentFactory.create_batch(3, **{'respondsto': response})
self.assertEqual(len(Comment.objects.all()), 5)
comment.delete()
self.assertEqual(len(Comment.objects.all()), 0)
|
import inspect
import string
import hypothesis.strategies as st
import pytest
from hypothesis import assume, example, given
from hydra_zen import builds, make_custom_builds_fn, to_yaml
from tests.custom_strategies import partitions, valid_builds_args
_builds_sig = inspect.signature(builds)
BUILDS_DEFAULTS = {
name: p.default
for name, p in _builds_sig.parameters.items()
if p.kind is p.KEYWORD_ONLY
}
del _builds_sig
BUILDS_NAMES = set(BUILDS_DEFAULTS)
@example(args=[], kwargs=dict(__b=len))
@given(
args=st.lists(st.none() | st.booleans()),
kwargs=st.dictionaries(
st.text(string.ascii_lowercase, min_size=1).filter(
lambda x: x not in BUILDS_DEFAULTS
),
st.none() | st.booleans(),
),
)
def test_make_custom_builds_doesnt_accept_args_not_named_by_builds(args, kwargs):
assume(args or kwargs)
with pytest.raises(TypeError):
# arbitrary args & kwargs
make_custom_builds_fn(*args, **kwargs)
class BadGuy:
pass
def _corrupt_kwargs(kwargs: dict):
return {k: BadGuy for k in kwargs}
@given(
bad_kwargs=valid_builds_args().map(_corrupt_kwargs),
)
def test_raises_on_bad_defaults(bad_kwargs):
try:
builds(f1, **bad_kwargs)
except Exception as e:
with pytest.raises(type(e)):
make_custom_builds_fn(**bad_kwargs)
def f1(x: int):
return
def f2(x, y: str):
return
@pytest.mark.filterwarnings(
"ignore:A structured config was supplied for `zen_wrappers`"
)
@given(
kwargs=partitions(valid_builds_args(), ordered=False),
target=st.sampled_from([f1, f2]),
)
def test_make_builds_fn_produces_builds_with_expected_defaults_and_behaviors(
kwargs, target
):
kwargs_as_defaults, kwargs_passed_through = kwargs
# set a random partition of args as defaults to a custom builds
custom_builds = make_custom_builds_fn(**kwargs_as_defaults)
# pass the remainder of args directly to the customized builds
via_custom = custom_builds(target, **kwargs_passed_through)
# this should be the same as passing all of these args directly to vanilla builds
via_builds = builds(target, **kwargs_passed_through, **kwargs_as_defaults)
assert to_yaml(via_custom) == to_yaml(via_builds)
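# In other words, per the equivalence asserted above: make_custom_builds_fn(**defaults)
# returns a builds-like callable with those keyword defaults pre-seeded, so
# custom_builds(target, **rest) matches builds(target, **rest, **defaults).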
|
from unittest import mock
import pytest
from dmscripts.helpers.updated_by_helpers import get_user
@pytest.fixture(autouse=True)
def environ():
with mock.patch.dict("os.environ", {}, clear=True):
import os
yield os.environ
@pytest.fixture
def subprocess_run():
with mock.patch("dmscripts.helpers.updated_by_helpers.subprocess.run") as run:
yield run
def test_get_user_can_use_jenkins_envvars(environ):
environ["JENKINS_HOME"] = "/var/lib/jenkins"
environ["BUILD_TAG"] = "jenkins-Do a thing-100"
environ["BUILD_USER"] = "Reginald Jeeves"
assert get_user() == "jenkins-Do a thing-100 started by Reginald Jeeves"
# BUILD_USER relies on a plugin that needs to be enabled for each job, so
# we should check get_user() works even if BUILD_USER isn't present for
# some reason
del environ["BUILD_USER"]
assert get_user() == "jenkins-Do a thing-100"
def test_get_user_can_use_git_user_email(subprocess_run):
proc = mock.Mock()
proc.returncode = 0
proc.stdout = "user.name@example.com"
subprocess_run.return_value = proc
assert get_user() == "user.name@example.com"
def test_get_user_fallsback_to_username_if_git_not_installed(environ, subprocess_run):
environ["USERNAME"] = "cheeseshop"
subprocess_run.side_effect = FileNotFoundError
assert get_user() == "cheeseshop"
|
#ATS:test(SELF, label="Polygon unit tests")
# Unit tests for the Polygon class
import unittest
from math import *
from SpheralTestUtilities import fuzzyEqual
from Spheral2d import *
# Create a global random number generator.
import random
rangen = random.Random()
plots = []
#===============================================================================
# Generate random points in the given box, optionally rotating the results to
# create a non-axis aligned distribution.
#===============================================================================
def randomPoints(numPoints,
xmin, xmax,
ymin, ymax,
theta = None):
result = vector_of_Vector()
# Determine the rotational transform.
if theta is None:
theta = rangen.uniform(0.0, 2.0*pi)
R = rotationMatrix(Vector(cos(theta), sin(theta)))
for i in xrange(numPoints):
result.append(R*Vector(rangen.uniform(xmin, xmax),
rangen.uniform(ymin, ymax)))
return R, result
#===============================================================================
# A local class for sorting points in a counter-clockwise fashion around a
# point.
#===============================================================================
class SortCounterClockwise:
def __init__(self, p0):
self.p0 = p0
return
def __call__(self, p1, p2):
t = (p1 - self.p0).cross(p2 - self.p0).z
if t > 0.0:
return 1
elif t < 0.0:
return -1
else:
return 0
#===============================================================================
# Test class
#===============================================================================
class TestPolygon(unittest.TestCase):
#---------------------------------------------------------------------------
# setUp
#---------------------------------------------------------------------------
def setUp(self):
self.ntests = 5000
self.npoints = 1000
self.xmin, self.xmax = -5.0, 2.0
self.ymin, self.ymax = -1.0, 3.0
self.R, self.points = randomPoints(self.npoints,
self.xmin, self.xmax,
self.ymin, self.ymax)
self.polygon = Polygon(self.points)
#self.plot = append(plotPolygon(self.polygon, False, True))
return
#---------------------------------------------------------------------------
# Find the inner & outer radii of the polygon.
#---------------------------------------------------------------------------
def innerOuterRadii(self, polygon):
rinner = 1e10
router = 0.0
centroid = polygon.centroid
for f in self.polygon.facets:
r = abs((f.point1 - centroid).dot(f.normal))
rinner = min(rinner, r)
for v in polygon.vertices:
r = (v - centroid).magnitude()
router = max(router, r)
router *= 1.0 + 1.0e-5
return rinner, router
#---------------------------------------------------------------------------
# Check that all the seed points are contained in the polygon.
#---------------------------------------------------------------------------
def testContainSeeds(self):
for p in self.points:
self.failUnless(self.polygon.contains(p),
"Polygon does not contain seed point: %s" % str(p))
return
#---------------------------------------------------------------------------
# Check that all the seed points are contained in the polygon using generic
# contain method.
#---------------------------------------------------------------------------
def testGenericContainSeeds(self):
for p in self.points:
self.failUnless(pointInPolygon(p, self.polygon, True),
"Polygon does not contain seed point: %s" % str(p))
return
#---------------------------------------------------------------------------
# Generate random points in the polygon and test they are contained.
#---------------------------------------------------------------------------
def testRandomInnerPoints(self):
rinner, router = self.innerOuterRadii(self.polygon)
centroid = self.polygon.centroid
for i in xrange(self.ntests):
theta = rangen.uniform(0.0, 2.0*pi)
p = centroid + rangen.uniform(0.0, rinner) * Vector(cos(theta), sin(theta))
self.failUnless(self.polygon.contains(p),
"Polygon should contain %s but reports it does not." % str(p))
return
#---------------------------------------------------------------------------
# Generate random points outside the polygon and test they are not contained.
#---------------------------------------------------------------------------
def testRandomOuterPoints(self):
rinner, router = self.innerOuterRadii(self.polygon)
centroid = self.polygon.centroid
for i in xrange(self.ntests):
theta = rangen.uniform(0.0, 2.0*pi)
p = centroid + rangen.uniform(router, 2.0*router) * Vector(cos(theta), sin(theta))
self.failUnless(not self.polygon.contains(p),
"%s should be outside polygon but polygon reports it is contained." % str(p))
return
#---------------------------------------------------------------------------
# Test vertex containment.
#---------------------------------------------------------------------------
def testVertexPointContainment(self):
vertices = self.polygon.vertices
for v in vertices:
self.failUnless(self.polygon.contains(v),
"%s vertex position should be contained." % str(v))
self.failUnless(not self.polygon.contains(v, False),
"%s vertex position should not be contained." % str(v))
return
#---------------------------------------------------------------------------
# Test that nested polygons intersect.
#---------------------------------------------------------------------------
def testIntersectInterior(self):
centroid = self.polygon.centroid
vertices = vector_of_Vector(self.polygon.vertices)
numVerts = len(vertices)
for i in xrange(numVerts):
vertices[i] = 0.5*(centroid + vertices[i])
assert self.polygon.contains(vertices[i])
polygon2 = Polygon(vertices)
self.failUnless(self.polygon.intersect(polygon2),
"Failed to intersect with a contained polygon.")
self.failUnless(polygon2.intersect(self.polygon),
"Failed to intersect when entirely contained within a polygon.")
return
#---------------------------------------------------------------------------
# Test that two polygons just touching intersect.
#---------------------------------------------------------------------------
def testIntersectTouchingPolygons(self):
xmin = self.polygon.xmin.x
vertices = vector_of_Vector(self.polygon.vertices)
numVerts = len(vertices)
for i in xrange(numVerts):
xv = vertices[i].x
vertices[i].x -= 2.0*(xv - xmin)
polygon2 = Polygon(vertices)
self.failUnless(self.polygon.intersect(polygon2),
"Failed to intersect with polygon touching at a vertex")
self.failUnless(polygon2.intersect(self.polygon),
"Failed to intersect with polygon touching at a vertex")
return
#---------------------------------------------------------------------------
# Test that two separate polygons do not intersect.
#---------------------------------------------------------------------------
def testNotIntersectPolygons(self):
xmin = self.polygon.xmin.x
xlength = self.polygon.xmax.x - self.polygon.xmin.x
vertices = vector_of_Vector(self.polygon.vertices)
numVerts = len(vertices)
for i in xrange(numVerts):
xv = vertices[i].x
vertices[i].x -= 2.0*(xv - xmin) + xlength
polygon2 = Polygon(vertices)
#plots.append(plotPolygon(self.polygon))
#plots.append(plotPolygon(polygon2))
self.failUnless(not self.polygon.intersect(polygon2),
"Erroneously claiming polygons intersect : [[%g,%g], [%g,%g], [[%g,%g], [%g,%g]]" % (self.polygon.xmin.x,
self.polygon.xmax.x,
self.polygon.xmin.y,
self.polygon.xmax.y,
polygon2.xmin.x,
polygon2.xmax.x,
polygon2.xmin.y,
polygon2.xmax.y))
self.failUnless(not polygon2.intersect(self.polygon),
"Erroneously claiming polygons intersect : [[%g,%g], [%g,%g], [[%g,%g], [%g,%g]]" % (self.polygon.xmin.x,
self.polygon.xmax.x,
self.polygon.xmin.y,
self.polygon.xmax.y,
polygon2.xmin.x,
polygon2.xmax.x,
polygon2.xmin.y,
polygon2.xmax.y))
return
#---------------------------------------------------------------------------
# Test that a nested box intersects.
#---------------------------------------------------------------------------
def testIntersectBoxInterior(self):
rinner, router = self.innerOuterRadii(self.polygon)
centroid = self.polygon.centroid
box = (centroid - 0.95*rinner*Vector.one,
centroid + 0.95*rinner*Vector.one)
self.failUnless(self.polygon.intersect(box),
"Failed to intersect with a contained box.")
return
#---------------------------------------------------------------------------
# Test that a circumscribing box intersects.
#---------------------------------------------------------------------------
def testIntersectBoxExterior(self):
rinner, router = self.innerOuterRadii(self.polygon)
centroid = self.polygon.centroid
box = (centroid - 2.0*router*Vector.one,
centroid + 2.0*router*Vector.one)
self.failUnless(self.polygon.intersect(box),
"Failed to intersect with a box we are in.")
return
#---------------------------------------------------------------------------
# Test that a box just touching a polygon intersects.
#---------------------------------------------------------------------------
def testIntersectTouchingBox(self):
xmin = self.polygon.xmin.x
vertex = None
for v in self.polygon.vertices:
if fuzzyEqual(v.x, xmin, 1.0e-10):
vertex = v
assert not vertex is None
box = (Vector(vertex.x - 2.0, vertex.y - 2.0), vertex)
self.failUnless(self.polygon.intersect(box),
"Failed to intersect with a box touching on one side.")
#---------------------------------------------------------------------------
# Test that we don't intersect a box outside the polygon.
#---------------------------------------------------------------------------
def testNotIntersectBox(self):
xmin, xmax = self.polygon.xmin, self.polygon.xmax
delta = xmax - xmin
box = (xmin - 2.0*delta, xmax - 2.0*delta)
self.failUnless(not self.polygon.intersect(box),
"Erroneously intersect external box.")
#---------------------------------------------------------------------------
# Test reconstructing.
#---------------------------------------------------------------------------
def testReconstruct(self):
polygon2 = Polygon()
polygon2.reconstruct(self.polygon.vertices,
self.polygon.facetVertices)
self.failUnless(polygon2 == self.polygon,
"Failed to properly reconstruct polygon from vertices and facets.")
return
#---------------------------------------------------------------------------
# Test volume
#---------------------------------------------------------------------------
def testVolume(self):
verts0 = self.polygon.vertices
c = self.polygon.centroid
cmpmethod = SortCounterClockwise(c)
verts = sorted(list(verts0), cmpmethod)
p0 = verts[0]
answer = 0.0
for i in xrange(2, len(verts)):
answer += (verts[i] - p0).cross(verts[i - 1] - p0).z
answer *= 0.5
vertstring = [str(x) for x in verts]
self.failUnless(fuzzyEqual(self.polygon.volume, answer, 1.0e-10),
"Failed volume computation: %g != %g\n verts = %s" % (self.polygon.volume,
answer,
vertstring))
#---------------------------------------------------------------------------
# Closest point to vertices.
#---------------------------------------------------------------------------
def testClosestPointToVertices(self):
verts = self.polygon.vertices
for p in verts:
cp = self.polygon.closestPoint(p)
self.failUnless(fuzzyEqual((cp - p).magnitude(), 0.0, 1.0e-10),
"Closest point to vertex %s : %s" % (p, cp))
#---------------------------------------------------------------------------
# Closest point on facets.
#---------------------------------------------------------------------------
def testClosestPointOnFacets(self):
facets = self.polygon.facets
for f in facets:
p = f.position
cp = self.polygon.closestPoint(p)
self.failUnless(fuzzyEqual((cp - p).magnitude(), 0.0, 1.0e-10),
"Closest point to facet position %s : %s" % (p, cp))
#---------------------------------------------------------------------------
# Closest point above facets.
#---------------------------------------------------------------------------
def testClosestPointAboveFacets(self):
facets = self.polygon.facets
for f in facets:
chi = rangen.uniform(0.1, 10.0)
cp0 = f.position
p = cp0 + chi*f.normal
cp = self.polygon.closestPoint(p)
self.failUnless(fuzzyEqual((cp0 - cp).magnitude(), 0.0, 1.0e-10),
"Closest point to position off of facet position %s : %s" % (cp0, cp))
#---------------------------------------------------------------------------
# Test ==
#---------------------------------------------------------------------------
def testEqual(self):
self.failUnless(self.polygon == self.polygon,
"Failed self equivalence.")
#---------------------------------------------------------------------------
# Test !=
#---------------------------------------------------------------------------
def testNotEqual(self):
polygon2 = Polygon()
self.failUnless(polygon2 != self.polygon,
"Failed not equal.")
#---------------------------------------------------------------------------
# Test copy constructor
#---------------------------------------------------------------------------
def testCopy(self):
polygon2 = Polygon(self.polygon)
self.failUnless(polygon2 == self.polygon,
"Failed to copy construct.")
#---------------------------------------------------------------------------
# Test shift in-place
#---------------------------------------------------------------------------
def testShiftInPlace(self):
shift = Vector(rangen.uniform(-10.0, 10.0),
rangen.uniform(-10.0, 10.0))
polygon2 = Polygon(self.polygon)
polygon2 += shift
for p0, p1 in zip([self.polygon.xmin, self.polygon.xmax] + list(self.polygon.vertices),
[polygon2.xmin, polygon2.xmax] + list(polygon2.vertices)):
pshift = p0 + shift
self.failUnless(pshift == p1, "In-place shift point comparison failed: %s != %s" % (pshift, p1))
if __name__ == "__main__":
unittest.main()
|
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See frontends/pytorch/LICENSE for license information.
import torch
import torch_mlir
import npcomp
from npcomp.compiler.pytorch.backend import refjit, frontend_lowering, iree  # "iree" is assumed to provide the IreeNpcompBackend used below
from npcomp.compiler.utils import logging
import test_utils
logging.enable()
lhs = torch.ones((4, 6, 1))
rhs = torch.ones((1, 1, 3)) * 0.6
bias = torch.ones((1, 1, 3)) * 0.2
threshold = torch.tensor((0.75, 0.25, 0.10))
def mul_maximum(lhs, rhs, threshold, bias):
return torch.maximum(lhs * rhs, threshold) + bias
mb = torch_mlir.ModuleBuilder()
with mb.capture_function("mul_maximum", [lhs, rhs, threshold, bias]) as f:
result = mul_maximum(lhs, rhs, threshold, bias)
f.returns([result])
backend = iree.IreeNpcompBackend()
jit_module = backend.load(backend.compile(frontend_lowering.lower_module(mb.module)))
test_utils.compare_outputs(mul_maximum, jit_module.mul_maximum, lhs, rhs,
threshold, bias)
test_utils.compare_outputs(mul_maximum, jit_module.mul_maximum, lhs + 1,
rhs + 2, threshold, bias)
|
from app.models import Comment, User
from app import db
def setUp(self):
self.user_Rotich = User(username = 'Rotich',password = 'potato', email = 'rotichtitus12@gmail.com')
self.new_comment = Comment(pitch_title='movie',pitch="the heritage was from a history",pitch_comment='This pitch is the best thing since sliced bread',user = self.user_Rotich )
def tearDown(self):
Comment.query.delete()
User.query.delete()
def test_check_instance_variables(self):
self.assertEqual(self.new_comment.pitch_title, 'movie')
self.assertEqual(self.new_comment.pitch, "the heritage was from a history")
self.assertEqual(self.new_comment.pitch_comment, 'This pitch is the best thing since sliced bread')
self.assertEqual(self.new_comment.user, self.user_Rotich)
def test_get_Comment_by_id(self):
self.new_comment.save_comment()
got_comments = Comment.get_comments(12345)
self.assertTrue(len(got_comments) == 1)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 28 16:53:59 2017
@author: kaihong
"""
import numpy as np
def huber_w(y):
return np.fmin(1, 1/np.abs(y))
def huber_w_smooth(y):
return 1/np.sqrt(1 + y**2)
def exp_w(c=2):
def _exp_w(y):
return np.exp(-0.5*y**2/c**2)
return _exp_w
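# These are robust M-estimator weight functions of a normalized residual y:
# huber_w downweights residuals with |y| > 1 by 1/|y|, huber_w_smooth is its
# differentiable approximation 1/sqrt(1 + y^2), and exp_w(c) is a Gaussian-shaped
# weight exp(-y^2 / (2 c^2)).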
def SolveGaussHelmertProblem(g, x, lm, Cov_ll, maxiter=100, thres=1e-5):
""" Solve Gauss-Helmert Problem:
argmin ||dl||^2, subject to g(x+dx, l+dl)=0, where x is unknown parameters
and l are observations.
Input
--------
g: callable object
Represents the constraint function g(x,l)=0.
g should take two arguments (x and l) and return a tuple (error, Jx, Jl),
where error is g(x,l), Jx is the Jacobian dg/dx and Jl is the Jacobian dg/dl.
x: 1xP array
Parameter initial guess x0 of size P
lm: NxQ array
Observation matrix, each row represent one observation l instance of size Q
Cov_ll: NxQxQ array
Covariance matrice for each observation instance l1 ... lN
maxiter: int
Terminate when maximum number of iteration reached.
thres: float
Terminate when max(|dx|) < thres
Return
--------
xnu: 1xP array
Estimated parameter vector x
Cov_xx: PxP array
Covariance matrix of estimated x
sigma_0: float
Estimated variance factor; should be close to 1
vv: NxQ array
Observation corrections dl
w: 1xN array
Weights for each observation l; a lower weight indicates an outlier
"""
N, Q = lm.shape[:2]
err_i, A_i, B_i = g(x, lm[0,:])
M = len(err_i) # residual size
P = len(x) # parameter size
R = N * Q - P # estimation redundancy
if R<0:
raise RuntimeError('Not enough observations')
''' init '''
xnu = x.copy()
lnu = lm.copy() # current updated observations
vv = np.zeros_like(lm) # observations correction
Am = np.empty( (N,) + A_i.shape )
Bm = np.empty( (N,) + B_i.shape )
W_gg = np.empty( (N,) + (M, M) )
Cg = np.empty( (N, M) )
Nm = np.empty( (P, P) )
nv = np.empty( P )
X_gg = np.empty( N )
W_ll = np.empty_like(Cov_ll)
for i in range(N):
W_ll[i] = np.linalg.pinv(Cov_ll[i])
for it in range(maxiter):
Nm[:,:] = 0
nv[:] = 0
for i in range(N):
# update Jacobian and residual
err_i, Am[i], Bm[i] = g(xnu, lnu[i,:])
A_i, B_i = Am[i], Bm[i]
Cg[i] = B_i.dot(vv[i]) - err_i
# weights of constraints
W_gg[i] = np.linalg.pinv( B_i.dot(Cov_ll[i]).dot(B_i.T) )
X_gg[i] = np.sqrt( Cg[i].T.dot(W_gg[i]).dot(Cg[i]) )
sigma_gg = np.median(X_gg)/0.6745
w = huber_w_smooth( X_gg/sigma_gg )
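# Robust reweighting (IRLS-style): X_gg holds per-constraint test statistics
# sqrt(c_i^T W_i c_i); median/0.6745 is the usual MAD-style constant giving a
# robust scale estimate under a Gaussian model, and the smooth Huber weights then
# downweight constraints with unusually large residuals.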
for i in range(N):
# normal equation
ATBWB = Am[i].T.dot(w[i]*W_gg[i])
Nm += ATBWB.dot(Am[i])
nv += ATBWB.dot(Cg[i])
lambda_N = np.sort( np.linalg.eigvalsh(Nm) )
if np.abs(np.log( lambda_N[-1]/lambda_N[0] ) ) > 10:
print( 'normal equation matrix nearly singular')
# solve
dx = np.linalg.solve(Nm, nv)
# update
xnu = xnu + dx
omega = 0
for i in range(N):
lam = W_gg[i].dot( Am[i].dot(dx) - Cg[i] )
dl = -Cov_ll[i].dot( Bm[i].T.dot(lam) ) - vv[i]
lnu[i,:] = lnu[i,:] + dl
# calculate observation-corrections
vv[i] = lnu[i,:] - lm[i,:]
Cov_xx = np.linalg.pinv(Nm)
omega = np.sum([vv[i].dot(W_ll[i]).dot(vv[i]) for i in range(N)])
sigma_0 = omega/R
print('GH Iter %d: %f' % (it, sigma_0))
if np.abs(dx).max() < thres:
break
return xnu, Cov_xx, sigma_0, vv, w
def test_SolveGaussHelmertProblem():
''' implicit test model: g(x,l) = dot(x,l) - 10 = 0 '''
def g(x,l):
return np.atleast_1d(np.dot(x,l)-10), np.atleast_2d(l), np.atleast_2d(x)
x_true = np.array([1.,1.,1.])
# data
sigma_l = 1e-3
sigma_x = 1e-2
T = 500
lm = np.empty((T,3))
x0 = x_true + sigma_x*np.random.randn(3)
l_true = np.empty_like(x0)
for t in range(T):
l_true[:2] = 3*np.random.randn(2)
l_true[2] = 10 - np.sum(l_true[:2])
lm[t,:] = l_true + sigma_l*np.random.randn(3)
Cov_ll = np.tile( sigma_l**2*np.eye(3), (T,1,1) )
# solve
xe, Cov_xx, sigma_0, vv, w = SolveGaussHelmertProblem(g, x0, lm, Cov_ll)
np.testing.assert_array_almost_equal(x_true, xe, 4)
print('test: solving implicit test model passed')
'''explicit test model f(x,l): A*x = l'''
A = np.diag([3.,2.,1.])
x_true = np.array([1.,2.,3.])
l_true = A.dot(x_true)
def f(x, l):
return (A.dot(x)-l, A, -np.eye(len(l)))
for t in range(T):
lm[t,:] = l_true + sigma_l*np.random.randn(3)
Cov_ll = np.tile( sigma_l**2*np.eye(3), (T,1,1) )
x0 = x_true + sigma_x*np.random.randn(3)
# solve
xe, Cov_xx, sigma_0, vv, w = SolveGaussHelmertProblem(f, x0, lm, Cov_ll)
np.testing.assert_array_almost_equal(x_true, xe, 4)
print('test: solving explicit test model passed')
# batch test
ntest = 100
sigmas = np.empty(ntest)
for i in range(ntest):
x0 = x_true + sigma_x*np.random.randn(3)
for t in range(T):
lm[t,:] = l_true + sigma_l*np.random.randn(3)
xe, Cov_xx, sigmas[i], vv, w = SolveGaussHelmertProblem(f, x0, lm, Cov_ll)
print("Mean sigma_0: %f (which should be close to 1)" % np.mean(sigmas))
if __name__ == "__main__":
test_SolveGaussHelmertProblem()
|
from PyQt5.QtCore import QEvent, pyqtSignal
from PyQt5.QtGui import QMouseEvent, QPalette
from PyQt5.QtWidgets import QWidget, QLabel, QPushButton
from PyQt5.uic import loadUi
from brainframe_qt.ui.resources.paths import qt_ui_paths
class EncodingEntry(QWidget):
encoding_entry_selected_signal = pyqtSignal(bool, str)
"""Emitted when the widget (excluding delete button) is selected (clicked)
Only emitted when self.selectable == True
Connected to:
- EncodingList <-- Dynamic
[parent].encoding_entry_selected_slot
"""
delete_encoding_class_signal = pyqtSignal(str)
"""Emitted when the delete button is pressed
Connected to:
- EncodingList <-- Dynamic
[parent].delete_encoding_class_signal
"""
def __init__(self, encoding_class_name: str, parent=None, ):
super().__init__(parent=parent)
loadUi(qt_ui_paths.encoding_entry_ui, self)
self.encoding_class_name = encoding_class_name
self.encoding_class_name_label: QLabel
self.encoding_class_name_label.setText(self.encoding_class_name)
self.delete_button: QPushButton
self.selectable = True
self._selected = False
self._hovered = False
self.init_ui()
self.init_slots_and_signals()
def init_ui(self):
self.delete_button.setToolTip(
f"Delete all [{self.encoding_class_name}] encodings from the "
f"database")
# Set this before hiding the button, as the button is taller than the
# rest of the widget. Not doing this will make this widget height
# change when the button is hidden/shown
self.setMinimumHeight(self.sizeHint().height())
self.delete_button.hide()
def init_slots_and_signals(self):
# noinspection PyUnresolvedReferences
self.delete_button.clicked.connect(
lambda: self.delete_encoding_class_signal.emit(self.encoding_class_name))
def mouseReleaseEvent(self, event: QMouseEvent):
if self.selectable:
self.selected = not self.selected
self._update_background_color()
# noinspection PyUnresolvedReferences
self.encoding_entry_selected_signal.emit(self.selected,
self.encoding_class_name)
def enterEvent(self, event: QEvent):
self.delete_button.show()
self.hovered = True
super().enterEvent(event)
def leaveEvent(self, event: QEvent):
self.delete_button.hide()
self.hovered = False
super().leaveEvent(event)
@property
def hovered(self):
return self._hovered
@hovered.setter
def hovered(self, hovered):
self._hovered = hovered
self._update_background_color()
@property
def selected(self):
return self._selected
@selected.setter
def selected(self, selected):
self._selected = selected
self._update_background_color()
def _update_background_color(self):
palette = self.parent().palette()
if not self.selected and not self.hovered:
background_color = palette.alternateBase().color()
elif self.hovered and not self.selected:
background_color = palette.button().color()
elif self.selected and not self.hovered:
background_color = palette.dark().color()
else:
background_color = palette.shadow().color()
palette.setColor(QPalette.Window, background_color)
self.setPalette(palette)
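# Usage sketch (illustrative, not from the original file; assumes the .ui file at
# qt_ui_paths.encoding_entry_ui is available and a QApplication already exists):
#
#     entry = EncodingEntry("person")
#     entry.delete_encoding_class_signal.connect(
#         lambda name: print(f"delete all encodings for {name}"))
#     entry.encoding_entry_selected_signal.connect(
#         lambda selected, name: print(name, "selected" if selected else "deselected"))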
|
import os
from math import log10
import cv2
import numpy as np
from .mask_editor import MaskEditor
from .partially_labelled_dataset import (
PartiallyLabelledDataset, ObjectAnnotation,
create_rgb_mask, save_annotations
)
from ..base import (
DragInterpreter, ImageGroupViewer,
random_colors, grabcut, ConnectedComponents,
hide_axes_labels, on_caps_lock_off, overlay_mask
)
MAX_NUM_OBJECTS = 100
class LabelHelper(ImageGroupViewer):
'''
<Object Actions>
w or up arrow: select the next object
s or down arrow: select the previous object
Shift + w or Shift + up arrow: swap current object with the next object
Shift + s or Shift + down arrow: swap current object with the previous object
Ctrl + e: edit the current object mask
Ctrl + a: add a bounding box enclosing entire image
Ctrl + d: delete the current object
Ctrl + Shift + d: delete the current image and label files
j: jump to the first image with no label file
m: rename current object
mouse right + dragging: add a new object with grabcut
Shift + mouse right + dragging: add a new object without grabcut
'''
def __init__(self, dataset: PartiallyLabelledDataset, info=None):
assert dataset.root is not None, 'The dataset passed to LabelHelper is not loaded properly'
super().__init__(
[os.path.basename(image_file) for image_file in dataset.image_files],
dataset.root,
(0.05, 0.125, 0.75, 0.75)
)
self.dataset = dataset
self.info = info or {}
info_panel_top = 0.64
if len(self.info) > 0:
num_items = len(list(self.info.values())[0])
info_panel_top = 0.14 + min(0.07 * num_items, 0.5)
self.info_panel = self.fig.add_axes((0.81, 0.125, 0.14, min(0.07 * num_items, 0.5)))
self.info_panel.set_facecolor('lightgoldenrodyellow')
hide_axes_labels(self.info_panel)
self.rgb_mask_panel = self.fig.add_axes((0.81, info_panel_top + 0.01, 0.125, 0.875 - info_panel_top))
self.rgb_mask = np.array(())
hide_axes_labels(self.rgb_mask_panel)
self.mode = cv2.GC_INIT_WITH_RECT
self.obj_pallete = random_colors(MAX_NUM_OBJECTS)
self.cls_pallete = random_colors(self.dataset.num_classes, bright=False, seed=6, uint8=True)
self.rect_ipr = DragInterpreter()
self.auto_grabcut = True
self.obj_id = 0
self.display()
def set_items(self):
return self.dataset.image_files
def display(self):
super().display()
self.clear_patches()
filename = os.path.basename(self.dataset.image_files[self.id])
if self.should_update():
self.img = self.dataset.load_image(self.id)
self.annotations = self.dataset.load_annotations(self.id)
self.set_image(self.img)
self.obj_id = 0
if filename in self.info:
self.info_panel.clear()
def resolve_lines(s, max_len, max_lines=6):
q, r = len(s).__divmod__(max_len)
lines = [s[i * max_len:(i + 1) * max_len] for i in range(q)]
if r > 0:
lines.append(s[-r:])
if len(lines) > max_lines:
lines = lines[:max_lines]
                    lines[-1] = lines[-1][:-3] + '...'
return '\n'.join(lines)
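            # e.g. resolve_lines("abcdefgh", 3) -> "abc\ndef\ngh"; strings longer than
            # max_lines * max_len are cut at max_lines with a trailing "..." (illustrative note).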
keys = list(sorted(self.info[filename].keys()))
values = [self.info[filename][key] for key in keys]
for i, items in enumerate(zip(keys, values)):
self.info_panel.text(
0.02, 0.9 - i * 0.2,
resolve_lines(items[0], 14),
bbox=dict(
linewidth=1, alpha=0.0,
edgecolor='none',
facecolor='none',
)
)
self.info_panel.text(
0.48, 0.93 - i * 0.2,
resolve_lines(items[1], 20),
bbox=dict(
linewidth=1, alpha=0.0,
edgecolor='none',
facecolor='none',
),
verticalalignment='top'
)
overlayed = np.copy(self.img)
for obj_id, annotation in enumerate(self.annotations):
overlay_mask(overlayed, annotation.mask(self.img.shape[:2]), self.obj_pallete[obj_id], 0.3 if obj_id == self.obj_id else 0.1)
self.set_image(overlayed)
for obj_id, annotation in enumerate(self.annotations):
alpha = 0.3 if obj_id == self.obj_id else 0.1
linewidth = 3 if obj_id == self.obj_id else 1
color = self.obj_pallete[obj_id]
bbox = annotation.bbox
self.add_patch(bbox.to_patch(
linewidth=linewidth,
edgecolor=color,
facecolor='none'
))
self.patches.append(self.ax.text(
*bbox.tl_corner,
'{}. {}'.format(obj_id, self.dataset.class_id2name[self.annotations[obj_id].class_id]),
bbox=dict(facecolor=color, alpha=alpha)
))
self.rgb_mask = create_rgb_mask(self.annotations, self.cls_pallete, self.img.shape)
self.rgb_mask_panel.clear()
self.rgb_mask_panel.imshow(self.rgb_mask)
self.ax.set_title(
'FILENAME: {} | IMAGE ID: {} | OBJECT ID: {} | OBJECT CLASS: {}'.format(
filename, self.id, self.obj_id,
self.dataset.class_id2name[self.annotations[self.obj_id].class_id]
) if len(self.annotations) > self.obj_id else 'FILENAME: {} | IMAGE ID: {} | NO OBJECT'.format(
filename, self.id
)
)
def on_image_menubar_select(self, event):
self.save_current_labels()
super().on_image_menubar_select(event)
def save_current_labels(self):
prev_annotations = self.dataset.load_annotations(self.id)
if prev_annotations != self.annotations:
label_path = self.dataset.infer_label_path(self.id)
if len(self.annotations) == 0:
os.remove(label_path)
else:
save_annotations(label_path, self.annotations)
def remove_current_object(self):
if self.obj_id < len(self.annotations):
del self.annotations[self.obj_id]
if len(self.annotations) > 0:
self.obj_id = (self.obj_id - 1) % len(self.annotations)
def ask_class_id(self):
return self.ask_multiple_choice_question(
'Which class does this object belong to?',
tuple(self.dataset.class_id2name)
)
def mask_editor_session(self):
self.disable_callbacks()
if self.obj_id < len(self.annotations):
self.disable_menubar()
self.iconify()
mask_touch_helper = MaskEditor(
self.img,
self.annotations[self.obj_id].mask(self.img.shape[:2]),
win_title=os.path.basename(self.dataset.image_files[self.id])
)
mask = mask_touch_helper.mainloop()
self.deiconify()
self.enable_menubar()
if mask is not None:
if np.array_equal(mask % 2, np.zeros_like(mask)):
answer = self.ask_yes_no_question('Would you like to delete the current object?')
if answer:
self.remove_current_object()
else:
answer = self.ask_multiple_choice_question(
'Save edited mask as:',
(
'Overwrite the current object mask',
'Add as a new object',
'Add each component as a new object',
'Split current object into two different objects',
'Do not save'
)
)
if answer == 0:
self.annotations[self.obj_id] = ObjectAnnotation(
np.where(mask % 2 == 1, 255, 0).astype(np.uint8),
self.annotations[self.obj_id].class_id
)
elif answer == 1:
class_id = self.ask_class_id()
if class_id != -1:
self.annotations.insert(
self.obj_id + 1,
ObjectAnnotation(
np.where(mask % 2 == 1, 255, 0).astype(np.uint8),
class_id
)
)
self.obj_id += 1
elif answer == 2:
class_id = self.ask_class_id()
if class_id != -1:
comps = ConnectedComponents(np.where(mask % 2 == 1, 255, 0).astype(np.uint8))
for i in range(len(comps)):
self.annotations.insert(
self.obj_id + i + 1,
ObjectAnnotation(
comps.mask(i),
class_id
)
)
elif answer == 3:
current_mask = self.annotations[self.obj_id].mask(self.img.shape[:2]) // 255
new_mask = np.where(mask % 2 == 1, 1, 0).astype(np.uint8)
class_id = self.annotations[self.obj_id].class_id
others = current_mask * (1 - new_mask)
if not np.array_equal(others, np.zeros_like(others)):
self.annotations[self.obj_id] = ObjectAnnotation(
others,
class_id
)
self.annotations.insert(
self.obj_id + 1,
ObjectAnnotation(
new_mask,
class_id
)
)
self.obj_id += 1
else:
self.show_message('Please add an object by drawing a rectangle first', 'Guide')
self.enable_callbacks()
self.force_focus()
@on_caps_lock_off
def on_key_press(self, event):
if event.key in ['left', 'right', 'a', 'd', 'escape']:
self.save_current_labels()
self.obj_id = 0
super().on_key_press(event)
if event.key != 'escape':
self.display()
else:
super().on_key_press(event)
if event.key in ['w', 'up']:
if len(self.annotations) > 0:
self.obj_id = (self.obj_id + 1) % len(self.annotations)
elif event.key in ['s', 'down']:
if len(self.annotations) > 0:
self.obj_id = (self.obj_id - 1) % len(self.annotations)
elif event.key in ['W', 'shift+up']:
if len(self.annotations) > 1:
next_obj_id = (self.obj_id + 1) % len(self.annotations)
self.annotations[self.obj_id], self.annotations[next_obj_id] = self.annotations[next_obj_id], self.annotations[self.obj_id]
self.obj_id = next_obj_id
elif event.key in ['S', 'shift+down']:
if len(self.annotations) > 1:
                prev_obj_id = (self.obj_id - 1) % len(self.annotations)
self.annotations[self.obj_id], self.annotations[prev_obj_id] = self.annotations[prev_obj_id], self.annotations[self.obj_id]
self.obj_id = prev_obj_id
elif event.key == 'ctrl+d':
self.remove_current_object()
elif event.key == 'ctrl+e':
self.mask_editor_session()
elif event.key == 'ctrl+a':
class_id = self.ask_class_id()
if class_id != -1:
self.annotations.append(ObjectAnnotation(
self.img_rect.to_mask(self.img.shape[:2]),
class_id
))
elif event.key == 'ctrl+D':
answer = self.ask_yes_no_question('Do you want to delete the current image and label file?')
if answer:
# delete image and label files
os.remove(self.dataset.image_files[self.id])
label_path = self.dataset.infer_label_path(self.id)
if os.path.isfile(label_path):
os.remove(label_path)
self.dataset.load(self.dataset.root)
self.remove_current_item()
elif event.key == 'm':
class_id = self.ask_class_id()
if class_id != -1:
self.annotations[self.obj_id].class_id = class_id
elif event.key == 'j':
first_unlabeled = -1
for i in range(self.num_items):
if os.path.isfile(self.dataset.infer_label_path(i)):
continue
else:
first_unlabeled = i
break
if first_unlabeled == -1:
self.show_message(
'Every image is labelled',
'Guide'
)
else:
self.prev_id, self.id = self.id, first_unlabeled % self.num_items
self.display()
def on_mouse_press(self, event):
super().on_mouse_press(event)
p = self.get_axes_coordinates(event)
if event.button == 3 and event.key != 'control' and event.inaxes is self.ax:
self.rect_ipr.start_dragging(p)
self.auto_grabcut = (event.key != 'shift')
def on_mouse_move(self, event):
super().on_mouse_move(event)
p = self.get_axes_coordinates(event)
if self.rect_ipr.on_dragging:
self.rect_ipr.update(p)
self.add_transient_patch(self.rect_ipr.rect.to_patch(
linewidth=1,
linestyle='--',
edgecolor='b',
facecolor='none'
))
elif event.inaxes is self.rgb_mask_panel and p in self.img_rect:
match = np.where((self.cls_pallete == self.rgb_mask[p.y][p.x]).all(axis=-1))[0]
class_name = 'Background' if len(match) == 0 else self.dataset.class_id2name[match[0]]
self.transient_patches.append(
self.rgb_mask_panel.text(
0, -1,
class_name,
bbox=dict(
linewidth=1, alpha=0.0,
edgecolor='none',
facecolor='none',
)
)
)
def on_mouse_release(self, event):
super().on_mouse_release(event)
p = self.get_axes_coordinates(event)
if self.rect_ipr.on_dragging:
self.rect_ipr.finish_dragging(p)
self.clear_transient_patch()
rect = self.rect_ipr.rect.intersect(self.img_rect)
if rect.tl_corner != rect.br_corner:
class_id = self.ask_class_id()
if class_id != -1:
rect_mask = rect.to_mask(self.img.shape[:2])
if self.auto_grabcut:
try:
mask = grabcut(self.img, cv2.GC_INIT_WITH_RECT, rect=self.rect_ipr.rect)
except ValueError:
mask = rect_mask
self.annotations.append(ObjectAnnotation(
np.where(mask % 2 == 1, 255, 0).astype('uint8'),
class_id
))
else:
self.annotations.append(ObjectAnnotation(
rect_mask, class_id
))
self.obj_id = len(self.annotations) - 1
self.display()
|
import os
import numpy as np
import pandas as pd
import pickle
import xarray as xr
def save_data_CCO(fit, folder, data_start, data_dec):
print(fit)
if not os.path.exists(folder):
os.makedirs(folder)
os.chdir(folder)
print(os.getcwd())
np.save("data_start", data_start)
np.save("data_dec", data_dec)
try:
        sampling_data = pd.DataFrame(fit.extract(["rates", "ratio"], permuted=True))
sampling_data.to_csv("samples")
        print("successfully saved samples from the posterior")
except:
print("cannot make a pandas data frame")
try:
sampling_data.to_csv("sampling_daten")
except:
print("could not save fit_data")
equi_values = fit.extract("equi_values", permuted=True)["equi_values"]
theta = fit.extract("rates", permuted=True)
ratio = fit.extract("ratio", permuted=True)
lp__ = fit.extract("lp__", permuted=True)["lp__"]
lp__ = pd.DataFrame(data=lp__)
lp__.to_csv("lp__")
occupat = fit.extract("occupat", permuted=True)["occupat"]
try:
latent_time = fit.extract("LATENT_TIME", permuted = True)["LATENT_TIME"]
np.save("latent_time", np.array(latent_time))
except:
        print("LATENT_TIME doesn't exist")
try:
latent_time_decay = fit.extract("LATENT_TIME_DECAY", permuted=True)["LATENT_TIME_DECAY"]
np.save("latent_time_decay", np.array(latent_time_decay))
except:
        print("LATENT_TIME_DECAY doesn't exist")
try:
occupat_dec = fit.extract("occupat_decay", permuted=True)["occupat_decay"]
np.save("occupat_dec2", np.array(occupat_dec))
except:
        print("occupat_decay doesn't exist")
#mu = fit.extract("mu", permuted = True)["mu"]
#np.save("mu", np.array(mu))
np.save("equi_values2", np.array(equi_values))
np.save("occupat2",np.array(occupat))
log_lik_t = fit.extract("log_lik_t", permuted=True)["log_lik_t"]
log_lik_h = fit.extract("log_lik_h", permuted=True)["log_lik_h"]
np.save("log_lik_t2", np.array(log_lik_t))
np.save("log_lik_h2", np.array(log_lik_h))
print(occupat)
column_names = list()
N_free_param = 3
for id in range(1,N_free_param):
column_names.append("theta["+str(id)+"]")
try:
theta = pd.DataFrame(data = theta["rates"], columns =column_names)
theta.to_csv("test")
except:
print("could not save theta")
for id in range(1,N_free_param):
column_names.append("rates["+str(id)+"]")
try:
ratio = pd.DataFrame(data=ratio["ratio"])
except:
        print("could not convert ratio to a data frame")
try:
ratio.to_csv("ratio")
except:
print("could not save")
def save_data_fluorescenc(fit, data_start, data_dec, N_free_param, execution_time, chains, sampling_iter, seed):
#if not os.path.exists(folder):
# os.makedirs(folder)
#os.chdir(folder)
try:
LogLikeLihood = fit.extract("LogLikeLihood", permuted=True)["LogLikeLihood"]
np.save("marginalLikelihood", np.array(LogLikeLihood))
except:
pass
try:
stepsize = fit.get_stepsize()[0]
print("step size"+str(stepsize))
# by default .get_inv_metric returns a list
inv_metric = fit.get_inv_metric(as_dict=True)[0]
init_last_pos = fit.get_last_position()[0]
last = pd.DataFrame.from_dict(init_last_pos, orient='index')
pd.to_pickle(last, "last_param_position")
np.save("inv_metric_sampler", inv_metric)
np.save("seed", seed)
np.save("setp_size", stepsize)
except:
print("could not save control params")
pass
print("saving in: "+os.getcwd())
np.save("data_start", data_start)
np.save("data_dec", data_dec)
exec_time = pd.DataFrame({"exec_time": execution_time,"chains": chains, "samplin_iter":sampling_iter}, index = [0])
exec_time.to_csv("execution_time_in_seconds")
#try:
# sampling_data = pd.DataFrame(fit.extract(["rates","ratio", ], permuted=True))
#except:
# print("cannot make a pandas data frame")
#try:
# sampling_data.to_csv("sampling_daten")
#except:
# print("could not save fit_data")
try:
param_likelihood_hold= fit.extract("param_likelihood_start_hold", permuted=True)["param_likelihood_start_hold"]
print(param_likelihood_hold)
param_likelihood_hold = np.swapaxes(param_likelihood_hold, 0,1)
print("param_like.shape: " + str(param_likelihood_hold.shape))
param_hold = xr.DataArray(data=param_likelihood_hold[:, :, :, :, :],
dims=("N_conc_time_series", "samples_posterior", "signal_type",
"mean_and_correlations", "data_point"),
coords={
# "N_conc_time_series":["0.0625", "0.125", "0.25", "0.5", "1", "2","4","8","16","64"],
"signal_type": ["fluores", "current"],
"mean_and_correlations": ["mean", "corr_1", "corr_2"]})
param_hold.to_netcdf("param_likelihood_start_hold")
param_likelihood_decay_hold = np.array(
fit.extract("param_likelihood_decay_hold", permuted=True)["param_likelihood_decay_hold"])
param_likelihood_decay_hold = np.swapaxes(param_likelihood_decay_hold, 0, 1)
param_hold = xr.DataArray(data=param_likelihood_decay_hold[:, :, :, :, :],
dims=("N_conc_time_series", "samples_posterior", "signal_type",
"mean_and_correlations", "data_point"),
coords={
# "N_conc_time_series": ["0.0625", "0.125", "0.25", "0.5", "1", "2", "4", "8", "16", "64"],
"signal_type": ["fluores", "current"],
"mean_and_correlations": ["mean", "corr_1", "corr_2"]})
param_hold.to_netcdf("param_likelihood_decay_hold")
SummandsLogLikTraces_hold = fit.extract("SummandsLogLikTraces_hold", permuted=True)["SummandsLogLikTraces_hold"]
np.save("SummandsLogLikTraces_hold",SummandsLogLikTraces_hold)
except:
print("no hold out set")
try:
param_likelihood= fit.extract("param_likelihood_start", permuted=True)["param_likelihood_start"]
print(param_likelihood)
param_likelihood = np.swapaxes(param_likelihood, 0,1)
print("param_like.shape: " + str(param_likelihood.shape))
except:
pass
major_axis = list()
for i in range( 1 , 21):
major_axis.append(str(i))
try:
param = xr.DataArray(data=param_likelihood[:,:,:,:,:],
dims= ("N_conc_time_series","samples_posterior","signal_type",
"mean_and_correlations","data_point"),
coords={#"N_conc_time_series":["0.0625", "0.125", "0.25", "0.5", "1", "2","4","8","16","64"],
"signal_type": ["fluores", "current"],
"mean_and_correlations":["mean", "corr_1", "corr_2"]})
param.to_netcdf("param_likelihood_start")
param_likelihood_decay = np.array(fit.extract("param_likelihood_decay", permuted=True)["param_likelihood_decay"])
param_likelihood_decay = np.swapaxes(param_likelihood_decay, 0, 1)
param = xr.DataArray(data=param_likelihood_decay[:, :, :, :,:],
dims=("N_conc_time_series", "samples_posterior", "signal_type",
"mean_and_correlations", "data_point"),
coords={
#"N_conc_time_series": ["0.0625", "0.125", "0.25", "0.5", "1", "2", "4", "8", "16", "64"],
"signal_type": ["fluores", "current"],
"mean_and_correlations": ["mean", "corr_1", "corr_2"]})
param.to_netcdf("param_likelihood_decay")
except:
print("likelihood wasnt saved")
pass
SummandsLogLikTraces = fit.extract("SummandsLogLikTraces", permuted=True)["SummandsLogLikTraces"]
np.save("SummandsLogLikTraces",SummandsLogLikTraces)
## Splitted Kalman filter stuff
try:
param = xr.DataArray(data=param_likelihood[:,:, :, :, :, :],
dims=("N_conc_time_series","Splits", "samples_posterior", "signal_type",
"mean_and_correlations", "data_point"),
coords={
# "N_conc_time_series":["0.0625", "0.125", "0.25", "0.5", "1", "2","4","8","16","64"],
"signal_type": ["fluores", "current"],
"mean_and_correlations": ["mean", "corr_1", "corr_2"]})
param.to_netcdf("param_likelihood_start")
param_likelihood_decay = np.array(
fit.extract("param_likelihood_decay", permuted=True)["param_likelihood_decay"])
param_likelihood_decay = np.swapaxes(param_likelihood_decay, 0, 1)
param = xr.DataArray(data=param_likelihood_decay[:,:, :, :, :, :],
dims=("N_conc_time_series","Splits", "samples_posterior", "signal_type",
"mean_and_correlations", "data_point"),
coords={
# "N_conc_time_series": ["0.0625", "0.125", "0.25", "0.5", "1", "2", "4", "8", "16", "64"],
"signal_type": ["fluores", "current"],
"mean_and_correlations": ["mean", "corr_1", "corr_2"]})
param.to_netcdf("param_likelihood_decay")
except:
print("likelihood wasnt saved")
pass
try:
backround_sigma = np.array(fit.extract("var_exp", permuted=True)["var_exp"])
np.save("measurement_sigma",np.array(backround_sigma))
except:
        print("could not save background noise")
try:
N_traces = fit.extract("N_ion_trace", permuted=True)["N_ion_trace"]
np.save("N_traces",np.array(N_traces))
except:
        print("N_ion_trace is not a parameter of the fit")
try:
lp__ = fit.extract("lp__", permuted=True)["lp__"]
lp__ = pd.DataFrame(data=lp__)
lp__.to_csv("lp__")
except:
        print("lp__ saving doesn't work")
try:
OpenVar = fit.extract("OpenVar", permuted = True)["OpenVar"]
np.save("var_open", np.array(OpenVar ))
except:
        print("var_open doesn't exist")
try:
latent_time = fit.extract("LATENT_TIME", permuted = True)["LATENT_TIME"]
np.save("latent_time", np.array(latent_time))
except:
        print("LATENT_TIME doesn't exist")
try:
latent_time_decay = fit.extract("LATENT_TIME_DECAY", permuted=True)["LATENT_TIME_DECAY"]
np.save("latent_time_decay", np.array(latent_time_decay))
except:
        print("LATENT_TIME_DECAY doesn't exist")
try:
occupat_dec = fit.extract("occupat_decay", permuted=True)["occupat_decay"]
np.save("occupat_dec2", np.array(occupat_dec))
except:
        print("occupat_decay doesn't exist")
#mu = fit.extract("mu", permuted = True)["mu"]
#np.save("mu", np.array(mu))
try:
equi_values = fit.extract("equi_values", permuted=True)["equi_values"]
np.save("equi_values2", np.array(equi_values))
except:
print("could not open equi_values")
try:
occupat = fit.extract("occupat", permuted=True)["occupat"]
print(occupat)
np.save("occupat2",np.array(occupat))
except:
print("could not save occupat")
try:
log_lik_t = fit.extract("log_lik_t", permuted=True)["log_lik_t"]
np.save("log_lik_t2", np.array(log_lik_t))
except:
print("could not save log_lik_t")
try:
log_lik_h = fit.extract("log_lik_h", permuted=True)["log_lik_h"]
np.save("log_lik_h2", np.array(log_lik_h))
except:
        print("could not save log_lik_h")
column_names = list()
for id in range(1,np.int(N_free_param/2+1)):
column_names.append("theta["+str(id)+"]")
try:
i_single =fit.extract("i_single_channel", permuted = True)["i_single_channel"]
np.save("i_single", np.array(i_single))
except:
print("i_single problems")
theta = fit.extract("rates", permuted=True)
theta = pd.DataFrame(data = theta["rates"], columns =column_names)
theta.to_csv("test")
for id in range(1,np.int(N_free_param/2+1)):
column_names.append("rates["+str(id)+"]")
try:
ratio = fit.extract("ratio", permuted=True)
ratio = pd.DataFrame(data=ratio["ratio"])
except:
print("ratio to data frame ratio did not work")
try:
ratio.to_csv("ratio")
except:
print("could not save ratio")
try:
lamb = fit.extract("lambda_fluoresc", permuted=True)
lamb = pd.DataFrame(data=lamb["lambda_fluoresc"])
except:
        print("could not convert lambda_fluoresc to a data frame")
try:
lamb.to_csv("lambda_fluoresc")
except:
        print("could not save lambda_fluoresc")
try:
var_fluoresc = fit.extract("var_fluoresc", permuted=True)
var_fluoresc = pd.DataFrame(data=var_fluoresc["var_fluoresc"])
var_fluoresc.to_csv("var_fluoresc")
except Exception as e:
print(e)
        print("could not save var_fluoresc")
def save_data_new(fit, data_start, data_dec,dataStartHold, dataDecHold, N_free_param, execution_time,seed):
try:
        stepsize = fit.get_stepsize()[0]
        print("step size " + str(stepsize))
# by default .get_inv_metric returns a list
inv_metric = fit.get_inv_metric(as_dict=True)[0]
init = fit.get_last_position()[0]
# increment seed by 1
control = {"stepsize": stepsize,
"inv_metric": inv_metric,
"adapt_engaged": False
}
np.save("inv_metric_sampler", inv_metric)
np.save("last_param_position", init)
np.save("seed", seed)
np.save("setp_size", stepsize)
except:
print("could not save control params")
pass
# if not os.path.exists(folder):
# os.makedirs(folder)
# os.chdir(folder)
print("saving in: " + os.getcwd())
np.save("data_start", data_start)
np.save("data_dec", data_dec)
np.save("data_start_hold", dataStartHold)
np.save("data_dec_hold", dataDecHold)
exec_time = np.array(execution_time)
np.save("execution_time_in_seconds", exec_time)
try:
sampling_data = pd.DataFrame(fit.extract(["rates", "ratio", ], permuted=True))
except:
print("cannot make a pandas data frame")
try:
sampling_data.to_csv("sampling_daten")
except:
print("could not save fit_data")
for name in ("param_likelihood_start","ParamLikeliStartHoldout"):
try:
param_likelihood = np.array(fit.extract(name, permuted=True)[name])
param_likelihood = np.swapaxes(param_likelihood, 0, 1)
            print("param_like.shape: " + str(param_likelihood.shape))
except:
            print("param likelihood does not exist")
try:
major_axis = list()
for i in range(1, 21):
major_axis.append(str(i))
param = xr.DataArray(data=param_likelihood[:, :, :, :],
dims=("N_conc_time_series", "samples_posterior", "data_point", "parameter_likelihood"),
coords={
"N_conc_time_series": ["0.0625", "0.125", "0.25", "0.5", "1", "2", "4", "8", "16",
"64"],
"parameter_likelihood": ["mean", "sigma"]})
param.to_netcdf(name)
except:
print("could not save likelihood")
for fname in ("param_likelihood_decay", "ParamLikeliDecayHoldout"):
try:
param_likelihood_decay = np.array(
fit.extract(fname, permuted=True)[fname])
param_likelihood_decay = np.swapaxes(param_likelihood_decay, 0, 1)
param = xr.DataArray(data=param_likelihood_decay[:, :, :, :],
dims=("N_conc_time_series", "samples_posterior", "data_point", "parameter_likelihood"),
coords={
"N_conc_time_series": ["0.0625", "0.125", "0.25", "0.5", "1", "2", "4", "8", "16",
"64"],
"parameter_likelihood": ["mean", "sigma"]})
param.to_netcdf(fname)
except:
print("could not save likelihood")
try:
backround_sigma = np.array(fit.extract("var_exp", permuted=True)["var_exp"])
np.save("measurement_sigma", np.array(backround_sigma))
except:
        print("could not save background noise")
try:
N_traces = fit.extract("N_ion_trace", permuted=True)["N_ion_trace"]
np.save("N_traces", np.array(N_traces))
except:
        print("N_ion_trace is not a parameter of the fit")
try:
hyper_mu_N = fit.extract("hyper_mu_N", permuted=True)["hyper_mu_N"]
sigma_N = fit.extract("sigma_N", permuted=True)["sigma_N"]
np.save("hyper_mu_N", hyper_mu_N)
np.save("sigma_N", sigma_N)
except:
pass
try:
mu_i = fit.extract("mu_i", permuted=True)["mu_i"]
sigma_i = fit.extract("sigma_i", permuted=True)["sigma_i"]
np.save("mu_i", mu_i)
np.save("sigma_i", sigma_i)
except:
pass
try:
N_traces = fit.extract("mu_N", permuted=True)["mu_N"]
np.save("mu_N", np.array(N_traces))
except:
        print("mu_N is not a parameter of the fit")
try:
N_traces = fit.extract("var_N", permuted=True)["var_N"]
np.save("var_N", np.array(N_traces))
except:
        print("var_N is not a parameter of the fit")
try:
mu_k = fit.extract("mu_k", permuted=True)["mu_k"]
np.save("mu_k", np.array(mu_k))
sigma_k = fit.extract("sigma_k", permuted=True)["sigma_k"]
np.save("sigma_k", np.array(sigma_k))
except:
pass
try:
open_variance = fit.extract("open_variance", permuted=True)["open_variance"]
np.save("open_variance", np.array(open_variance))
except:
print("could not save open_variance param to fit")
try:
lp__ = fit.extract("lp__", permuted=True)["lp__"]
lp__ = pd.DataFrame(data=lp__)
lp__.to_csv("lp__")
except:
        print("lp__ saving doesn't work")
try:
latent_time = fit.extract("LATENT_TIME", permuted=True)["LATENT_TIME"]
np.save("latent_time", np.array(latent_time))
except:
        print("LATENT_TIME doesn't exist")
try:
latent_time_decay = fit.extract("LATENT_TIME_DECAY", permuted=True)["LATENT_TIME_DECAY"]
np.save("latent_time_decay", np.array(latent_time_decay))
except:
        print("LATENT_TIME_DECAY doesn't exist")
try:
occupat_dec = fit.extract("occupat_decay", permuted=True)["occupat_decay"]
np.save("occupat_dec2", np.array(occupat_dec))
except:
        print("occupat_decay doesn't exist")
# mu = fit.extract("mu", permuted = True)["mu"]
# np.save("mu", np.array(mu))
try:
equi_values = fit.extract("equi_values", permuted=True)["equi_values"]
np.save("equi_values2", np.array(equi_values))
except:
print("could not open equi_values")
try:
occupat = fit.extract("occupat", permuted=True)["occupat"]
print(occupat)
np.save("occupat2", np.array(occupat))
except:
print("could not save occupat")
try:
log_lik_t = fit.extract("log_lik_t", permuted=True)["log_lik_t"]
np.save("log_lik_t2", np.array(log_lik_t))
except:
print("could not save log_lik_t")
try:
log_lik_h = fit.extract("logLikHoldout", permuted=True)["logLikHoldout"]
np.save("logLikHoldout", np.array(log_lik_h))
except:
        print("could not save logLikHoldout")
column_names = list()
for id in range(1, np.int(N_free_param / 2 + 1)):
column_names.append("theta[" + str(id) + "]")
try:
lambda_brigthness = fit.extract("lambda_brigthness", permuted=True)["lambda_brigthness"]
np.save("lambda_brigthness", np.array(lambda_brigthness))
except:
        print("could not save lambda_brigthness")
try:
time_error = fit.extract("time_error", permuted=True)["time_error"]
np.save("time_error", np.array(time_error))
except:
        print("could not save time_error")
theta = fit.extract("rates", permuted=True)
theta = pd.DataFrame(data=theta["rates"], columns=column_names)
theta.to_csv("test")
for id in range(1, np.int(N_free_param / 2 + 1)):
column_names.append("rates[" + str(id) + "]")
try:
ratio = fit.extract("ratio", permuted=True)
ratio = pd.DataFrame(data=ratio["ratio"])
except:
print("ratio to data frame ratio did not work")
try:
ratio.to_csv("ratio")
except:
print("could not save ratio")
def save_data_cross(fit, data_start, data_dec, N_free_param, execution_time, chains, sampling_iter):
#if not os.path.exists(folder):
# os.makedirs(folder)
#os.chdir(folder)
print("saving in: "+os.getcwd())
np.save("data_start", data_start)
np.save("data_dec", data_dec)
exec_time = np.array([execution_time, chains, sampling_iter])
np.save("execution_time_in_seconds", exec_time)
try:
sampling_data = pd.DataFrame(fit.extract(["rates","ratio", ], permuted=True))
except:
print("cannot make a pandas data frame")
try:
sampling_data.to_csv("sampling_daten")
except:
print("could not save fit_data")
try:
param_likelihood= np.array(fit.extract("param_likelihood_start", permuted=True)["param_likelihood_start"])
param_likelihood = np.swapaxes(param_likelihood, 0,1)
print(param_likelihood.shape)
except:
        print("param likelihood does not exist")
try:
major_axis = list()
for i in range( 1 , 21):
major_axis.append(str(i))
param = xr.DataArray(data=param_likelihood[:,:,:,:],
dims= ("N_conc_time_series","samples_posterior","data_point","parameter_likelihood"),
coords={"N_conc_time_series":["0.0625", "0.125", "0.25", "0.5", "1", "2","4","8","16","64"],
"parameter_likelihood": ["mean", "sigma"]})
param.to_netcdf("param_likelihood_start")
param_likelihood_decay = np.array(fit.extract("param_likelihood_decay", permuted=True)["param_likelihood_decay"])
param_likelihood_decay = np.swapaxes(param_likelihood_decay, 0, 1)
param = xr.DataArray(data=param_likelihood_decay[:, :, :, :],
dims=("N_conc_time_series", "samples_posterior", "data_point", "parameter_likelihood"),
coords={
"N_conc_time_series": ["0.0625", "0.125", "0.25", "0.5", "1", "2", "4", "8", "16", "64"],
"parameter_likelihood": ["mean", "sigma"]})
param.to_netcdf("param_likelihood_decay")
except:
print("could not save likelihood")
try:
backround_sigma = np.array(fit.extract("var_exp", permuted=True)["var_exp"])
np.save("measurement_sigma",np.array(backround_sigma))
except:
        print("could not save background noise")
try:
N_traces = fit.extract("N_ion_trace", permuted=True)["N_ion_trace"]
np.save("N_traces",np.array(N_traces))
except:
        print("N_ion_trace is not a parameter of the fit")
try:
lp__ = fit.extract("lp__", permuted=True)["lp__"]
lp__ = pd.DataFrame(data=lp__)
lp__.to_csv("lp__")
except:
        print("lp__ saving doesn't work")
try:
latent_time = fit.extract("LATENT_TIME", permuted = True)["LATENT_TIME"]
np.save("latent_time", np.array(latent_time))
except:
        print("LATENT_TIME doesn't exist")
try:
latent_time_decay = fit.extract("LATENT_TIME_DECAY", permuted=True)["LATENT_TIME_DECAY"]
np.save("latent_time_decay", np.array(latent_time_decay))
except:
        print("LATENT_TIME_DECAY doesn't exist")
try:
occupat_dec = fit.extract("occupat_decay", permuted=True)["occupat_decay"]
np.save("occupat_dec2", np.array(occupat_dec))
except:
        print("occupat_decay doesn't exist")
#mu = fit.extract("mu", permuted = True)["mu"]
#np.save("mu", np.array(mu))
try:
equi_values = fit.extract("equi_values", permuted=True)["equi_values"]
np.save("equi_values2", np.array(equi_values))
except:
print("could not open equi_values")
try:
occupat = fit.extract("occupat", permuted=True)["occupat"]
print(occupat)
np.save("occupat2",np.array(occupat))
except:
print("could not save occupat")
try:
log_lik_t = fit.extract("log_lik_t", permuted=True)["log_lik_t"]
np.save("log_lik_t", np.array(log_lik_t)[::4,:,::])
except Exception as e:
print(str(e))
print("could not save log_lik_t")
log_lik_h = fit.extract("logLikeHold", permuted=True)["logLikeHold"]
np.save("log_lik_h" , np.array(log_lik_h)[::4,:,::])
#except Exception as e:
# print(str(e))
# print("cold not save log_lik_h")
column_names = list()
for id in range(1,np.int(N_free_param/2+1)):
column_names.append("theta["+str(id)+"]")
try:
i_single =fit.extract("i_single_channel", permuted = True)["i_single_channel"]
np.save("i_single", np.array(i_single))
except:
print("i_single problems")
theta = fit.extract("rates", permuted=True)
theta = pd.DataFrame(data = theta["rates"], columns =column_names)
theta.to_csv("test")
for id in range(1,np.int(N_free_param/2+1)):
column_names.append("rates["+str(id)+"]")
try:
ratio = fit.extract("ratio", permuted=True)
ratio = pd.DataFrame(data=ratio["ratio"])
except:
print("ratio to data frame ratio did not work")
try:
ratio.to_csv("ratio")
except:
print("could not save ratio")
try:
lamb = fit.extract("lambda_fluoresc", permuted=True)
lamb = pd.DataFrame(data=lamb["lambda_fluoresc"])
except:
        print("could not convert lambda_fluoresc to a data frame")
try:
lamb.to_csv("lambda_fluoresc")
except:
        print("could not save lambda_fluoresc")
def main():
    # Placeholder entry point: call one of the save_data_* functions above with a
    # fitted Stan object and the corresponding data arrays.
    pass
if __name__ == "__main__":
main()
|
from django.utils.translation import ugettext_lazy as _
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
__all__ = ['Carousel', 'Parallax']
class Carousel(blocks.ListBlock):
"""Carousel ('div' tag) to cycle through different images."""
def __init__(self, child_block=None, **kwargs):
if child_block is None:
child_block = ImageChooserBlock()
super().__init__(child_block, **kwargs)
class Meta:
label = _('Carousel')
icon = 'image'
template = 'wagtail_materializecss/javascript/carousel.html'
class Parallax(blocks.StructBlock):
"""Multiple parallaxes on a page will make a fun scroll effect."""
image = ImageChooserBlock(required=True)
class Meta:
label = _('Parallax')
icon = 'image'
template = 'wagtail_materializecss/javascript/parallax.html'
|
import aria2p
import logging
import sys
from ravager.config import LOGS_DIR, LOG_LEVEL
logging.basicConfig(format='"%(asctime)s — %(name)s — %(levelname)s — %(funcName)s:%(lineno)d — %(message)s"',
level=LOG_LEVEL,
handlers=[logging.FileHandler("{}/ravager.log".format(LOGS_DIR)),
logging.StreamHandler(sys.stdout)])
logger = logging.getLogger(__file__)
aria2c = aria2p.Client(
host="http://localhost",
port=6801,
secret="qwerty"
)
aria2 = aria2p.API(aria2c)
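# Usage sketch (illustrative, not part of the original module): with the API object
# above, a download could be queued roughly like this, assuming an aria2c daemon is
# listening on localhost:6801 with RPC secret "qwerty":
#
#     download = aria2.add_uris(["https://example.com/some/file.iso"])
#     logger.info("queued %s (gid=%s)", download.name, download.gid)
#
# aria2p.API.add_uris returns a Download object that can later be refreshed to poll
# progress.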
|
from datetime import datetime
import sys
import re
from urlparse import urljoin
import iso8601
from lxml import etree
from jparser import PageModel
from ocd_backend.extractors import HttpRequestMixin
from ocd_backend.items import BaseItem
from ocd_backend.utils.misc import html_cleanup, html_cleanup_with_structure
from ocd_backend.utils.voc import VocabularyMixin
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import WebDriverException
class FeedItem(BaseItem, VocabularyMixin):
def get_original_object_id(self):
return unicode(self.original_item['link'])
def get_original_object_urls(self):
return {
'html': self.original_item['link']
}
def get_rights(self):
return unicode(self.original_item.get('rights', 'Undefined'))
def get_collection(self):
return unicode(self.source_definition.get('collection', 'Unknown'))
def get_combined_index_data(self):
combined_index_data = {
'hidden': self.source_definition['hidden'],
'source': unicode(
self.source_definition.get('source', 'Partij nieuws')),
'type': unicode(self.source_definition.get('type', 'Partij')),
'parties': [unicode(self.source_definition['collection'])]
}
# TODO: provide easier way for default mapping
mappings = {
'summary': 'description'
}
mappings.update(self.source_definition.get('mappings', {}))
for fld in ['title', 'summary']:
if self.original_item.get(fld, None) is not None:
mapping_fld = mappings.get(fld, fld)
combined_index_data[mapping_fld] = unicode(self.original_item[fld])
# try to get the full content, if available
try:
combined_index_data['description'] = unicode(self.original_item[
'content'][0]['value'])
except LookupError:
pass
try:
combined_index_data['date'] = iso8601.parse_date(
self.original_item['published_parsed'])
except LookupError:
pass
if self.source_definition.get('location', None) is not None:
combined_index_data['location'] = unicode(self.source_definition[
'location'].decode('utf-8'))
combined_index_data['date_granularity'] = 12
return combined_index_data
def get_index_data(self):
return {}
def get_all_text(self):
text_items = []
return u' '.join(text_items)
class FeedFullTextItem(FeedItem, HttpRequestMixin):
def get_combined_index_data(self):
combined_index_data = super(
FeedFullTextItem, self).get_combined_index_data()
r = self.http_session.get(self.original_item['link'])
print >>sys.stderr, "Got %s with status code : %s" % (
self.original_item['link'], r.status_code)
# only continue if we got the page
if r.status_code < 200 or r.status_code >= 300:
return combined_index_data
try:
html = etree.HTML(r.content)
        except etree.XMLSyntaxError as e:
return combined_index_data
output = u''
for elem in html.xpath(self.source_definition['content_xpath']):
output += unicode(etree.tostring(elem))
if output.strip() != u'':
combined_index_data['description'] = output
return combined_index_data
class FeedContentFromPageItem(FeedItem, HttpRequestMixin):
def get_combined_index_data(self):
combined_index_data = super(
FeedContentFromPageItem, self).get_combined_index_data()
if re.match(r'^https?\:\/\/', self.original_item['link']):
page_link = self.original_item['link']
else:
page_link = urljoin(self.source_definition['file_url'], self.original_item['link'])
r = self.http_session.get(page_link, timeout=5)
print >>sys.stderr, "Got %s with status code : %s" % (
self.original_item['link'], r.status_code)
# only continue if we got the page
if r.status_code < 200 or r.status_code >= 300:
return combined_index_data
try:
full_content = r.content
        except etree.XMLSyntaxError as e:
return combined_index_data
# TODO: Fix byte 0xff problem: 'utf8' codec can't decode byte 0xff in position <x>: invalid start byte
# TODO: Fix Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.
# TODO: remove things like: Share on Facebook Share Share on Twitter Tweet Share on Pinterest Share Share on LinkedIn Share Send email Mail Print Print
try:
cleaned = PageModel(full_content.decode(r.encoding)).extract()
except Exception as e:
print >>sys.stderr, e
cleaned = {}
output = u''
for elem in cleaned.get('content', []):
if elem['type'] == 'text':
# if it starts with these words it's probably garbage
if re.match('^\s*(Share|Deel|Delen|Send|Print)\s*', elem['data']) is None:
output += '<p>%s</p>' % (elem['data'],)
if elem['type'] == 'image':
output += '<img src="%s" />' % (elem['data']['src'],)
if output.strip() != u'':
combined_index_data['description'] = unicode(output)
return combined_index_data
|
import pandas as pd
import cv2
import torch
import torch.optim as optim
import numpy as np
from vel.rl.metrics import EpisodeRewardMetric
from vel.storage.streaming.stdout import StdoutStreaming
from vel.util.random import set_seed
from vel.rl.models.policy_gradient_model import PolicyGradientModelFactory, PolicyGradientModel
from vel.rl.models.backbone.nature_cnn_two_tower import NatureCnnTwoTowerFactory
from vel.rl.models.deterministic_policy_model import DeterministicPolicyModel
from vel.rl.reinforcers.on_policy_iteration_reinforcer import OnPolicyIterationReinforcer, OnPolicyIterationReinforcerSettings
from vel.schedules.linear import LinearSchedule
from vel.rl.algo.policy_gradient.ppo import PpoPolicyGradient
from vel.rl.env_roller.vec.step_env_roller import StepEnvRoller
from vel.api.info import TrainingInfo, EpochInfo
from vel.rl.commands.rl_train_command import FrameTracker
from vel.openai.baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from bc_gym_planning_env.envs.synth_turn_env import ColoredEgoCostmapRandomAisleTurnEnv
from bc_gym_planning_env.envs.base.action import Action
def train_model():
"""a sample training script, that creates a PPO instance and train it with bc-gym environment
:return: None
"""
device = torch.device('cpu')
seed = 1001
# Set random seed in python std lib, numpy and pytorch
set_seed(seed)
env_function = lambda: ColoredEgoCostmapRandomAisleTurnEnv()
vec_env = DummyVecEnv([env_function])
# Again, use a helper to create a model
# But because model is owned by the reinforcer, model should not be accessed using this variable
# but from reinforcer.model property
model = PolicyGradientModelFactory(
backbone=NatureCnnTwoTowerFactory(input_width=133, input_height=117, input_channels=1)
).instantiate(action_space=vec_env.action_space)
# Set schedule for gradient clipping.
cliprange = LinearSchedule(
initial_value=0.01,
final_value=0.0
)
# Reinforcer - an object managing the learning process
reinforcer = OnPolicyIterationReinforcer(
device=device,
settings=OnPolicyIterationReinforcerSettings(
discount_factor=0.99,
batch_size=256,
experience_replay=4
),
model=model,
algo=PpoPolicyGradient(
entropy_coefficient=0.01,
value_coefficient=0.5,
max_grad_norm=0.02,
cliprange=cliprange
),
env_roller=StepEnvRoller(
environment=vec_env,
device=device,
gae_lambda=0.95,
number_of_steps=128,
discount_factor=0.99,
)
)
# Model optimizer
optimizer = optim.Adam(reinforcer.model.parameters(), lr=5e-6, eps=1.0e-5)
# Overall information store for training information
training_info = TrainingInfo(
metrics=[
EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode
],
callbacks=[
StdoutStreaming(), # Print live metrics every epoch to standard output
FrameTracker(1.1e8) # We need frame tracker to track the progress of learning
]
)
# A bit of training initialization bookkeeping...
training_info.initialize()
reinforcer.initialize_training(training_info)
training_info.on_train_begin()
# Let's make 10 batches per epoch to average metrics nicely
    # Rollout size is 1 environment (the DummyVecEnv above) times 128 steps
num_epochs = int(1.1e8 / (128 * 1) / 10)
# Normal handrolled training loop
for i in range(1, num_epochs+1):
epoch_info = EpochInfo(
training_info=training_info,
global_epoch_idx=i,
batches_per_epoch=10,
optimizer=optimizer
)
reinforcer.train_epoch(epoch_info)
if i % 1000 == 0:
torch.save(model.state_dict(), 'tmp_checkout.data')
evaluate_model(model, vec_env, device, takes=1)
training_info.on_train_end()
def evaluate_model(model, env, device, takes=1, debug=False):
"""evaluate the performance of a rl model with a given environment
:param model: a trained rl model
:param env: environment
:param device: cpu or gpu
:param takes: number of trials/rollout
:param debug: record a video in debug mode
:return: None
"""
model.eval()
rewards = []
lengths = []
frames = []
for i in range(takes):
result = record_take(model, env, device)
rewards.append(result['r'])
lengths.append(result['l'])
frames.append(result['frames'])
if debug:
save_as_video(frames)
print(pd.DataFrame({'lengths': lengths, 'rewards': rewards}).describe())
model.train(mode=True)
@torch.no_grad()
def record_take(model, env_instance, device, debug=False):
"""run one rollout of the rl model with the environment, until done is true
:param model: rl policy model
:param env_instance: an instance of the environment to be evaluated
:param device: cpu or gpu
:param debug: debug mode has gui output
:return: some basic metric info of this rollout
"""
frames = []
steps = 0
rewards = 0
observation = env_instance.reset()
print("Evaluating environment...")
while True:
observation_tensor = _dict_to_tensor(observation, device)
if isinstance(model, PolicyGradientModel):
actions = model.step(observation_tensor, argmax_sampling=False)['actions'].to(device)[0]
elif isinstance(model, DeterministicPolicyModel):
actions = model.step(observation_tensor)['actions'].to(device)[0]
else:
raise NotImplementedError
action_class = Action(command=actions.cpu().numpy())
observation, reward, done, epinfo = env_instance.step(action_class)
steps += 1
rewards += reward
if debug or device.type == 'cpu':
frames.append(env_instance.render(mode='human'))
if done:
print("episode reward: {}, steps: {}".format(rewards, steps))
return {'r': rewards, 'l': steps, 'frames': frames}
def _dict_to_tensor(numpy_array_dict, device):
"""Convert numpy array to a tensor
:param numpy_array_dict dict: a dictionary of np.array
:param device: put tensor on cpu or gpu
:return: a dictionary of torch tensors
"""
if isinstance(numpy_array_dict, dict):
torch_dict = {}
for k, v in numpy_array_dict.items():
torch_dict[k] = torch.from_numpy(numpy_array_dict[k]).to(device)
return torch_dict
else:
return torch.from_numpy(numpy_array_dict).to(device)
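# Example (illustrative): _dict_to_tensor({"costmap": np.zeros((1, 133, 117),
# dtype=np.float32)}, torch.device("cpu")) returns a dict with the same key and a
# CPU float tensor of shape (1, 133, 117); a bare np.ndarray is converted directly.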
def eval_model():
"""load a checkpoint data and evaluate its performance
:return: None
"""
device = torch.device('cpu')
seed = 1001
# Set random seed in python std lib, numpy and pytorch
set_seed(seed)
env_function = lambda: ColoredEgoCostmapRandomAisleTurnEnv()
vec_env = DummyVecEnv([env_function])
vec_env.reset()
model = PolicyGradientModelFactory(
backbone=NatureCnnTwoTowerFactory(input_width=133, input_height=117, input_channels=1)
).instantiate(action_space=vec_env.action_space)
model_checkpoint = torch.load('tmp_checkout.data', map_location='cpu')
model.load_state_dict(model_checkpoint)
evaluate_model(model, vec_env, device, takes=10)
def save_as_video(frames):
"""function to record a demo video
:param frames list[np.array]: a list of images
:return: None, video saved as a file
"""
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video_shape = (400, 600)
out = cv2.VideoWriter('output.avi', fourcc, 400.0, video_shape)
for trial in frames:
for frame in trial:
frame = frame[0]
frame = cv2.resize(frame, video_shape)
# write the flipped frame
out.write(frame)
cv2.imshow('frame', frame)
cv2.waitKey(1)
# Release everything if job is finished
out.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
train_model()
eval_model()
|
# -*- coding: utf-8 -*-
"""
A program to describe the treatment and recovery process for an incident Stage I oral cancer.
Entities with a diagnosed stage I cancer will be treated either surgically (with/without RT),
with some other treatment type (usually Chemo+RT), or may receive no treatment. Their
post-treatment survival is calculated based on treatment type, stage, and other individual
characteristics.
After recovery (from surgery, chemo, and/or RT), entities are routed to the follow-up
surveillance model component. Entities with less than three months survival are routed to
the "End of Life" component.
Time of recurrence and death of disease (without further treatment) are set here. As with
"SysP_Followup", the base case of the model is using the date of DIAGNOSIS as the starting
point, rather than the date cancer is developed. This is consistent with the use of
retrospective survival data from diagnosed patients.
@author: icromwell
"""
import random
from Glb_CompTime import CompTime
class StageOneTx:
def __init__(self, estimates, regcoeffs):
self._estimates = estimates
self._regcoeffs = regcoeffs
self.tx_time_treatment = estimates.Tx_time_treatment.sample()
self.prob_other_RT = estimates.Tx_other_RT.sample()
self.prob_other_chemo = estimates.Tx_other_chemo.sample()
def Process(self, entity):
start = entity.allTime
entity.time_Sysp = entity.allTime
# Schedule next event
# Generate random time to event - either recurrence or death
makeEvent = CompTime(self._estimates, self._regcoeffs)
nextEvent = makeEvent.Process(entity, 'FirstEvent', 'FirstEvent_death')
if nextEvent[0] < 3650: # Entity experiences some event between 3 months and 10 years
            entity.utility.append(("Stage I Cancer Under Treatment", self._estimates.Util_StageI_Tx.sample(), entity.allTime))
            # Schedule next event
if nextEvent[1] == 1: #Event is recurrence
entity.time_Recurrence = start + nextEvent[0]
entity.time_DeadofDisease = 99999
elif nextEvent[1] == 2: #Event is death
entity.time_Recurrence = 99999
entity.time_DeadofDisease = start + nextEvent[0]
"""If death or recurrence occurs before 3 months, schedule at that time.
Otherwise, follow-up starts at 3 months"""
if nextEvent[0] < 90:
                if nextEvent[1] == 2: # Entity dies before 90 days
                    entity.stateNum = 5.0 # Entity is in EoL care state
entity.currentState = "Terminal Disease"
entity.endOfLife = 1
entity.time_Sysp += nextEvent[0]
else:
entity.time_Sysp += self.tx_time_treatment
entity.stateNum = 4.0
entity.currentState = "Post-treatment follow-up"
else: # Entity does not experience another disease event
entity.time_DeadofDisease = 99999
entity.time_Recurrence = 99999
entity.time_Sysp += self.tx_time_treatment
entity.stateNum = 4.0
entity.currentState = "Post-treatment follow-up"
# Resource utilization according to treatment type
if entity.tx_prim == 'Surgery':
entity.hadSurgery = 1
entity.surgery = 1
entity.resources.append(("Treatment - Stage I - Surgery", entity.allTime))
entity.events.append(("Treatment - Stage I - Surgery", entity.allTime))
elif entity.tx_prim == 'SurgeryRT':
entity.hadSurgery = 1
entity.resources.append(("Treatment - Stage I - Surgery + RT", entity.allTime))
entity.events.append(("Treatment - Stage I - Surgery + RT", entity.allTime))
entity.hadRT = 1
entity.RTCount += 1
elif entity.tx_prim == 'Other':
probRT = random.random()
probChemo = random.random()
entity.resources.append(("Treatment - Stage I - Other", entity.allTime))
entity.events.append(("Treatment - Stage I - Other", entity.allTime))
if probRT < self.prob_other_RT:
entity.hadRT = 1
entity.RTCount += 1
if probChemo < self.prob_other_chemo:
entity.chemoCount += 1
entity.hadChemo = 1
else:
entity.stateNum = 99
entity.currentState = "Error: Entity has not been assigned a valid treatment"
|
#
# @lc app=leetcode id=205 lang=python3
#
# [205] Isomorphic Strings
#
# @lc code=start
class Solution:
def isIsomorphic(self, s: str, t: str) -> bool:
ss, tt = {}, {}
for i in range(len(s)): # map s to t ,map t to s
if s[i] not in ss:
ss[s[i]] = t[i]
            elif ss[s[i]] != t[i]:
return False
if t[i] not in tt:
tt[t[i]] = s[i]
            elif tt[t[i]] != s[i]:
return False
return True
# @lc code=end
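# Example (illustrative):
#   Solution().isIsomorphic("egg", "add")  # True  ('e'->'a', 'g'->'d')
#   Solution().isIsomorphic("foo", "bar")  # False ('o' would map to both 'a' and 'r')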
# Accepted
# 30/30 cases passed(36 ms)
# Your runtime beats 96.83 % of python3 submissions
# Your memory usage beats 100 % of python3 submissions(12.9 MB)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import urllib.request
import json
from cookbook.util.logging import LoggerFactory
logger = LoggerFactory.getLogger(__name__)
def title():
logger.debug('# Http')
def cook():
host = 'weather.livedoor.com'
api = '/forecast/webservice/json/v1?city=130010'
    ## HTTP request
    with urllib.request.urlopen('http://' + host + api) as res:
        ## Parse the HTTP response (JSON)
data = json.loads(res.read().decode('utf-8'))
location = data['location']
logger.debug("%s, %s" % (location[''], location['city']))
for forecast in data['forecasts']:
logger.debug("%s[%s]: %s" % (forecast['dateLabel'], forecast['date'], forecast['telop']))
if __name__ == '__main__':
title()
cook()
|
#!/usr/bin/env python
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import click
from functools import partial
import gzip
from multiprocessing import Pool
import os
import pandas as pd
from pybedtools import BedTool
from pybedtools.helpers import cleanup
import re
from tqdm import tqdm
from urllib.request import urlretrieve
bar_format = "{percentage:3.0f}%|{bar:20}{r_bar}"
# Globals
scripts_dir = os.path.dirname(os.path.realpath(__file__))
CONTEXT_SETTINGS = {
"help_option_names": ["-h", "--help"],
}
@click.command(no_args_is_help=True, context_settings=CONTEXT_SETTINGS)
@click.argument(
"input_file", type=click.Path(exists=True, resolve_path=True)
)
@click.option(
"-g", "--genome",
help="Genome.",
type=click.Choice(["hg38", "mm10"]),
required=True
)
@click.option(
"-i", "--input",
help="Input type.",
type=click.Choice(["bed", "fasta"]),
required=True
)
@click.option(
"-l", "--length",
help="Sequence length (in bp).",
type=int,
required=True
)
@click.option(
"-o", "--output-file",
help="Output file. [default: stdout]",
type=click.File(mode="w"),
default="-"
)
@click.option(
"-t", "--threads",
help="Threads to use.",
type=int,
default=1,
show_default=True
)
def main(**params):
seqs = get_background_seqs(params["input_file"], params["genome"],
params["input"], params["length"], params["threads"])
# # Get FASTA sequences
# kwargs = {"total": len(intensity_files), "bar_format": bar_format}
# pool = Pool(params["threads"])
# p = partial(__get_FASTA_sequences, no_linker=params["no_linker"],
# output_dir=params["output_dir"])
# for _ in tqdm(pool.imap(p, intensity_files), **kwargs):
# pass
def get_background_seqs(input_file, genome, input_type, length, threads=1):
# Get cis-regulatory regions (CREs)
cres = __get_CREs(genome, length)
def __get_CREs(genome, length):
# Initialize
bed_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), ".%s-ccREs.bed" % genome
)
url = "https://api.wenglab.org/screen_v13/fdownloads/"
if genome == "hg38":
url += "GRCh38-ccREs.bed"
else:
url += "mm10-ccREs.bed"
# Download CREs
if not os.path.exists(bed_file):
urlretrieve(url, bed_file)
# BedTool
b = BedTool(bed_file)
# Get centers
centers = []
for i in b:
center = int((i.start+i.end)/2)
centers.append([i.chrom, str(center - 1), str(center)])
c = BedTool("\n".join(["\t".join(c) for c in centers]), from_string=True)
    # Remaining steps (extending the centers to the requested length and fetching
    # sequences) are not implemented yet; return the centered regions for now.
    return c
def __get_FASTA_sequences(intensity_file, no_linker=False, output_dir="./"):
# Initialize
sequences = []
prefix = re.search("^(\S+).spatialDetrend_quantNorm.pbm.\S+.txt$",
os.path.split(intensity_file)[1]).group(1)
sequences_file = os.path.join(output_dir, "%s.fa.gz" % prefix)
# Intensities as pandas DataFrame
df = pd.read_csv(intensity_file, sep="\t", skiprows=1, usecols=[4, 5, 6, 7],
names=["name", "sequence", "linker_sequence", "signal"])
# Save sequences
for _, row in df.iterrows():
if no_linker:
s = Seq(row["sequence"])
else:
s = Seq(row["sequence"] + row["linker_sequence"])
record = SeqRecord(s, row["name"], description=str(row["signal"]))
sequences.append(record)
with gzip.open(sequences_file, "wt") as handle:
SeqIO.write(sequences, handle, "fasta")
if __name__ == "__main__":
main()
|
from django.contrib.auth.models import User, Group
from testapp.models import CmbUser
from rest_framework import serializers
# Serializer classes that play the role of Java-style DTOs
# class UserSerializer(serializers.HyperlinkedModelSerializer):
# class Meta:
# model = User
# # url 필드는 상세페이지 링크를 출력해준다.
# fields = ['url', 'username', 'email']
# class UserSerializer(serializers.Serializer):
# username = serializers.CharField(max_length=200)
# email = serializers.EmailField()
#
# def update(self, instance, validated_data):
# instance.username = validated_data.get('username', instance.username)
# instance.email = validated_data.get('email', instance.email)
# instance.save()
# return instance
#
# def create(self, validated_data):
# return User.objects.create(**validated_data)
#
#
# class GroupSerializer(serializers.HyperlinkedModelSerializer):
# class Meta:
# model = Group
# fields = ['url', 'name']
class CmbUserSerializer(serializers.Serializer):
username = serializers.CharField(max_length=63)
country = serializers.CharField(max_length=63)
def update(self, instance, validated_data):
pass
def create(self, validated_data):
return CmbUser.objects.create(**validated_data)
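# Usage sketch (illustrative, not part of the original file; assumes CmbUser has
# 'username' and 'country' fields):
#
#     serializer = CmbUserSerializer(data={"username": "alice", "country": "KR"})
#     if serializer.is_valid():
#         user = serializer.save()  # dispatches to create() above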
|
# Baseball Practice
# https://web.archive.org/web/20000118165317/http://geocities.com/SoHo/Gallery/6446/bball.htm
name = "Baseball"
duration = 200
frames = [
" \n" +
" / \n" +
" / \n" +
" <<O O \n" +
" | o/|> \n" +
" /| | \n" +
" / | | \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" / \n" +
" / \n" +
" <<O O \n" +
" \\ o/\\ \n" +
" /\\ / \n" +
" / / \\ \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" / \n" +
" / o \n" +
" <<O _O> \n" +
" \\ _\\ \n" +
" /\\ | / \n" +
" / / \\ \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" / \n" +
" / o \n" +
" <<O <O \n" +
" \\ _\\ \n" +
" /\\ / / \n" +
" / / \\ \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" / \n" +
" / o \n" +
" <<O \\O__ \n" +
" \\ \\ \n" +
" /\\ /| \n" +
" / / | \\ \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" / \n" +
" / \n" +
" <<O o _O \n" +
" \\ /\\ \n" +
" /\\ /| \n" +
" / / | \\ \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" /\n" +
" / \n" +
" <<O o . . _O \n" +
" \\ /\\ \n" +
" /\\ /\\__ \n" +
" / / | \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" \\ \n" +
" \\ \n" +
" <<O o . . . . \n" +
" \\ __O__ \n" +
" /\\ / /\\/ \n" +
" / / | \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" \n" +
" \n" +
" ___,_O . . . . . O \n" +
" \\ o ' ' /\\ \n" +
" |\\ /\\ \n" +
" / \\ | \\ \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" \n" +
" \n" +
" O . . . . . O_ \n" +
" \\\\____o ' ' /\\ \n" +
" |\\ /| \n" +
" / \\ | \\ \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" / o \n" +
" / . \n" +
" O>> . O_ \n" +
" \\ /\\ \n" +
" |\\ /| \n" +
" / \\ || \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" . ' ' o \n" +
" / . \n" +
" / . \n" +
" O>> . O\n" +
" \\ /|\\ \n" +
" |\\ /| \n" +
" / \\ || \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" . ' ' ' .\n" +
" . ' o\n" +
" . \n" +
" O . _O/\n" +
" /|\\ |\n" +
" |\\| //\n" +
" / || \\\\ \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" . ' ' ' .\n" +
" . ' .\n" +
" . ' . o\n" +
" O . _O/\n" +
" /|\\ |\n" +
" |\\| //\n" +
" |/| \\\\ \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" . ' ' ' .\n" +
" . ' .\n" +
" ' .\n" +
" O _O_o\n" +
" /|\\ \\ \n" +
" | |\\ //\n" +
" | |/ \\\\ \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" ' ' .\n" +
" ' .\n" +
" ' .\n" +
" O _O\n" +
" /|> |\\o \n" +
" / |\\ /| \n" +
" / |/ \\| \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" .\n" +
" ' .\n" +
" O O\n" +
" /|> /|\\o \n" +
" / |\\ /| \n" +
" / |/ \\| \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" \n" +
" \n" +
" _O O\n" +
" / |> /|\\o \n" +
" / /| /| \n" +
" / | \\| \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" /\n" +
" /\n" +
" <<O O\n" +
" | o/|> \n" +
" /| /| \n" +
" / | \\| \n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
" \n" +
" /\n" +
" /\n" +
" <<O O\n" +
" | o/|>\n" +
" /| |\n" +
" / | |\n" +
"jgs^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
]
|
from .joblib.test import test_memory
from .joblib.test import test_hashing
|
from typing import Tuple
from PySide2.QtCore import Qt
from PySide2.QtWidgets import QGridLayout, QSplitter, QWidget
from ..state import GlobalState
from ..utils import is_image_file
from ..widgets import FileTagView, FileTreeView, ImageView, wrap_image
class FileTab(QWidget):
"""Combines a filesystem view, tag adding entry, tag list, and image display."""
title = 'Filesystem'
def __init__(self, global_state: GlobalState):
super().__init__()
self.global_state = global_state
layout, self._file_tree, self._tagging, self._image = self._layout()
self.setLayout(layout)
# -- Initialization
def _layout(self) -> Tuple[QGridLayout, FileTreeView, FileTagView, ImageView]:
layout = QGridLayout()
layout.setContentsMargins(5, 5, 5, 5)
splitter = QSplitter()
layout.addWidget(splitter, 0, 0, 0, 0)
left_splitter = QSplitter(Qt.Vertical)
splitter.addWidget(left_splitter)
# Top left
file_tree = FileTreeView(self._on_file_tree_selection_changed)
left_splitter.addWidget(file_tree)
# Bottom left
tagging = FileTagView(self.global_state)
left_splitter.addWidget(tagging)
# Right
image = ImageView()
splitter.addWidget(wrap_image(image))
# Initialize equal widths (needs to be set at the end)
splitter.setSizes([1000000, 1000000])
return layout, file_tree, tagging, image
def _on_file_tree_selection_changed(self, new_selection, _old_selection):
indices = new_selection.indexes()
if len(indices) == 0:
return
filepath = self._file_tree.model().filePath(indices[0])
if filepath and is_image_file(filepath):
self._tagging.load(filepath)
self._image.load(filepath)
|
from .node import Node
from .graph import Graph
from .sorter import Sorter
from .topological_sorter import TopologicalSorter
from .bfs_topological_sorter import BFSTopologicalSorter
from .dfs_topological_sorter import DFSTopologicalSorter
from .exhaustive_bfs_topological_sorter import ExhaustiveBFSTopologicalSorter
from .exhaustive_dfs_topological_sorter import ExhaustiveDFSTopologicalSorter
from .analyzer import Analyzer
from .cycle_analyzer import CycleAnalyzer
from .dominator_analyzer import DominatorAnalyzer
|
# DeepSlide
# Jason Wei, Behnaz Abdollahi, Saeed Hassanpour
# Run the resnet on generated patches.
import utils_model
from utils_model import *
#validation patches
get_predictions( patches_eval_folder = config.patches_eval_val,
auto_select = config.auto_select,
eval_model = config.eval_model,
checkpoints_folder = config.checkpoints_folder,
output_folder = config.preds_val)
#test patches
get_predictions( patches_eval_folder = config.patches_eval_test,
auto_select = config.auto_select,
eval_model = config.eval_model,
checkpoints_folder = config.checkpoints_folder,
output_folder = config.preds_test)
|
import sys
sys.path.append('../')
from datetime import date
from main import db
class Pusheen(db.Model):
__tablename__ = 'pusheen'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(120), unique=False, nullable=True)
date_of_birth = db.Column(db.Date, unique=False, nullable=True)
pusheen_fav_food = db.relationship('PusheenFavFood', backref='pusheen', lazy='dynamic')
def __repr__(self):
return '<Pusheen %r>' % self.name
def __init__(self, name, date_of_birth):
self.name = name
self.date_of_birth = date_of_birth
class Food(db.Model):
__tablename__ = 'fav_food'
id = db.Column(db.Integer, primary_key=True)
food = db.Column(db.String(120), unique=False, nullable=True)
pusheen_fav_food = db.relationship('PusheenFavFood', backref='fav_food', lazy='dynamic')
def __repr__(self):
        return '<Food %r>' % self.food
def __init__(self, food):
self.food = food
class PusheenFavFood(db.Model):
__tablename__ = 'pusheen_fav_food'
id = db.Column(db.Integer, primary_key=True)
pusheen_id = db.Column(db.Integer, db.ForeignKey('pusheen.id'))
fav_food_id = db.Column(db.Integer, db.ForeignKey('fav_food.id'))
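# Illustrative usage sketch (assumption, not part of the original models): creating
# a cat, a food, and the linking row inside an application context that provides
# the shared `db` session.
#
# pusheen = Pusheen(name='Pusheen', date_of_birth=date(2010, 2, 18))
# cookie = Food(food='cookie')
# db.session.add_all([pusheen, cookie])
# db.session.commit()
# db.session.add(PusheenFavFood(pusheen_id=pusheen.id, fav_food_id=cookie.id))
# db.session.commit()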
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='amadash',
version='1.0.0',
description='Amazon Dash button monitor',
author='Igor Partola',
author_email='igor@igorpartola.com',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'conf', 'tmp']),
entry_points={
'console_scripts': [
'amadash = amadash.main:main',
'amadash-discover = amadash.discover:main',
]
},
#install_requires=['pypcap'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries',
],
)
|
"""Synthesizes speech from the input string of text or ssml.
Make sure to be working in a virtual environment.
Note: ssml must be well-formed according to:
https://www.w3.org/TR/speech-synthesis/
"""
from google.cloud import texttospeech
from pydub import AudioSegment
from pydub.playback import play
def create_client():
# Instantiates a client
client = texttospeech.TextToSpeechClient()
return client
def create_speech(text, client):
# Set the text input to be synthesized
synthesis_input = texttospeech.SynthesisInput(text=text)
# Build the voice request, select the language code ("en-US") and the ssml
# voice gender ("neutral")
voice = texttospeech.VoiceSelectionParams(
language_code="en-US", ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL
)
# Select the type of audio file you want returned
audio_config = texttospeech.AudioConfig(
audio_encoding=texttospeech.AudioEncoding.MP3
)
# Perform the text-to-speech request on the text input with the selected
# voice parameters and audio file type
response = client.synthesize_speech(
input=synthesis_input, voice=voice, audio_config=audio_config
)
# The response's audio_content is binary.
with open("output.mp3", "wb") as out:
# Write the response to the output file.
out.write(response.audio_content)
print('Audio content written to file "output.mp3"')
return True
def play_speech():
song = AudioSegment.from_mp3("output.mp3")
play(song)
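# Illustrative usage sketch (assumption, not part of the original script): chains the
# helpers above; it requires Google Cloud credentials to be configured, e.g. via the
# GOOGLE_APPLICATION_CREDENTIALS environment variable.
if __name__ == "__main__":
    tts_client = create_client()
    if create_speech("Hello from the text-to-speech demo.", tts_client):
        play_speech()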
|
import sys
packages = []
final_cost = 0
factories, total = map(int, sys.stdin.readline().split())
for i in range(factories):
cost, boxes = map(int, sys.stdin.readline().split())
packages.append([cost, boxes])
packages.sort()
for i in packages:
if total - i[1] > 0:
total -= i[1]
final_cost += i[0]*i[1]
else:
final_cost += i[0]*(total)
break
print(final_cost)
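# Worked example (illustrative, not from the original): for the input
#   3 10
#   2 5
#   1 4
#   3 8
# the factories sort to [[1, 4], [2, 5], [3, 8]]; the loop buys 4 boxes at cost 1
# and 5 boxes at cost 2, then only 1 box remains, bought at cost 3, so the
# printed total is 4*1 + 5*2 + 1*3 = 17.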
|
import rospy
from naoqi_2d_simulator import *
if __name__ == '__main__':
rospy.init_node("naoqi_2d_simulator_node")
sim = Naoqi2DSimulator()
sim.run_main_thread()
rospy.spin()
|
import logging
from contextlib import contextmanager
from functools import lru_cache
from typing import Iterator, Optional
import sqlalchemy as sa
from sqlalchemy.orm import Session
from fastapi_auth.fastapi_util.settings.database_settings import DatabaseBackend, get_database_settings
@lru_cache()
def get_engine() -> sa.engine.Engine:
db_settings = get_database_settings()
uri = db_settings.sqlalchemy_uri
log_sqlalchemy_sql_statements = db_settings.log_sqlalchemy_sql_statements
database_backend = db_settings.backend
return get_new_engine(uri, log_sqlalchemy_sql_statements, database_backend)
@lru_cache()
def get_sessionmaker() -> sa.orm.sessionmaker:
return get_sessionmaker_for_engine(get_engine())
def get_new_engine(
uri: str,
log_sqlalchemy_sql_statements: bool = False,
database_backend: DatabaseBackend = DatabaseBackend.postgresql,
) -> sa.engine.Engine:
if log_sqlalchemy_sql_statements:
logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
else:
logging.getLogger("sqlalchemy.engine").setLevel(logging.ERROR)
kwargs = {}
if database_backend == DatabaseBackend.sqlite:
kwargs.update({"connect_args": {"check_same_thread": False}})
return sa.create_engine(uri, pool_pre_ping=True, **kwargs)
def get_sessionmaker_for_engine(engine: sa.engine.Engine) -> sa.orm.sessionmaker:
return sa.orm.sessionmaker(autocommit=False, autoflush=False, bind=engine)
def get_session() -> sa.orm.Session:
return get_sessionmaker()()
@contextmanager
def context_session(engine: Optional[sa.engine.Engine] = None) -> Iterator[Session]:
yield from _get_db(engine)
def get_db() -> Iterator[Session]:
"""
Intended for use as a FastAPI dependency
"""
yield from _get_db()
def _get_db(engine: Optional[sa.engine.Engine] = None) -> Iterator[Session]:
if engine is None:
session = get_session()
else:
session = get_sessionmaker_for_engine(engine)()
try:
yield session
session.commit()
except Exception as exc:
session.rollback()
raise exc
finally:
session.close()
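# Illustrative usage sketch (assumption, not part of the original module): `get_db`
# is meant to be injected as a FastAPI dependency, while `context_session` suits
# scripts and background jobs. The endpoint below is hypothetical and nothing here
# runs at import time.
def _example_usage() -> None:  # pragma: no cover - documentation sketch
    from fastapi import Depends, FastAPI

    app = FastAPI()

    @app.get("/example")
    def read_example(db: Session = Depends(get_db)):
        # The dependency yields a session, commits on success and rolls back on error.
        return db.execute(sa.text("SELECT 1")).scalar()

    # Script-style usage with an explicit context manager.
    with context_session() as session:
        session.execute(sa.text("SELECT 1"))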
|
#!/usr/bin/python
# uuidgen.py script to generate a UUIDv4 for guests
#
#
# Any domain specific UUID rules can be added to this generic example
#
import uuid
#
print(uuid.uuid4())
|
from threading import Thread
import threading
from tkinter import *
from tkinter import ttk
import datetime
from time import sleep
import peewee as pw
from src.dependencies.functions import *
|
import re
import sys
PASSWORD_RE = re.compile(r'(\d+)-(\d+) (\w): (\w+)')
if __name__ == '__main__':
correct_count = 0
for line in sys.stdin:
min_count, max_count, letter, password = PASSWORD_RE.match(line).groups()
min_count = int(min_count)
max_count = int(max_count)
letter_count = len(re.findall(letter, password))
if min_count <= letter_count <= max_count:
correct_count += 1
print(correct_count)
|
import re
from .exceptions import LoginRequired
__all__ = (
"CODINGAMER_HANDLE_REGEX",
"CLASH_OF_CODE_HANDLE_REGEX",
)
CODINGAMER_HANDLE_REGEX = re.compile(r"[0-9a-f]{32}[0-9]{7}")
CLASH_OF_CODE_HANDLE_REGEX = re.compile(r"[0-9]{7}[0-9a-f]{32}")
def validate_leaderboard_type(type: str) -> str:
"""Validates that the leaderboard type is one of ``"GENERAL"``,
``"CONTESTS"``, ``"BOT_PROGRAMMING"``, ``"OPTIM"`` or ``"CODEGOLF"``.
Parameters
----------
type : :class:`str`
The type to validate.
Returns
-------
:class:`str`
The valid type.
Raises
------
ValueError
The type is invalid.
"""
type = type.upper()
if type not in [
"GENERAL",
"CONTESTS",
"BOT_PROGRAMMING",
"OPTIM",
"CODEGOLF",
]:
raise ValueError(
"type argument must be one of: GENERAL, CONTESTS, "
f"BOT_PROGRAMMING, OPTIM, CODEGOLF. Got: {type}"
)
return type
def validate_leaderboard_group(group: str, logged_in: bool) -> str:
"""Validates that the leaderboard group is one of ``"global"``,
``"country"``, ``"company"``, ``"school"`` or ``"following"`` and that the
user is logged in except for ``"global"``.
Parameters
----------
    group : :class:`str`
        The group to validate.
logged_in : :class:`bool`
Whether the user is logged in.
Returns
-------
:class:`str`
The valid group.
Raises
------
    ValueError
        The group is invalid.
    LoginRequired
        The user needs to be logged in for this group.
"""
group = group.lower()
if group not in [
"global",
"country",
"company",
"school",
"following",
]:
raise ValueError(
"group argument must be one of: global, country, company, "
f"school, following. Got: {group}"
)
    if group in ["country", "company", "school", "following"] and not logged_in:
        raise LoginRequired()
    return group
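# Illustrative usage sketch (not part of the original module): the validators
# normalize their input and raise on anything unexpected, e.g.
#   validate_leaderboard_type("general")           -> "GENERAL"
#   validate_leaderboard_group("global", False)    -> "global" (no login required)
#   validate_leaderboard_group("country", False)   -> raises LoginRequired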
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities used by tests
"""
import copy
import logging
import os
import shutil
import string
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Union
from omegaconf import DictConfig, OmegaConf
from typing_extensions import Protocol
from hydra._internal.hydra import Hydra
from hydra._internal.utils import detect_task_name
from hydra.core.global_hydra import GlobalHydra
from hydra.core.utils import JobReturn, split_config_path
from hydra.types import TaskFunction
# CircleCI does not have the environment variable USER, breaking the tests.
os.environ["USER"] = "test_user"
log = logging.getLogger(__name__)
@contextmanager
def does_not_raise(enter_result: Any = None) -> Iterator[Any]:
yield enter_result
class TaskTestFunction:
"""
Context function
"""
def __init__(self) -> None:
self.temp_dir: Optional[str] = None
self.overrides: Optional[List[str]] = None
self.calling_file: Optional[str] = None
self.calling_module: Optional[str] = None
self.config_path: Optional[str] = None
self.config_name: Optional[str] = None
self.strict: Optional[bool] = None
self.hydra: Optional[Hydra] = None
self.job_ret: Optional[JobReturn] = None
def __call__(self, cfg: DictConfig) -> Any:
"""
Actual function being executed by Hydra
"""
return 100
def __enter__(self) -> "TaskTestFunction":
try:
config_dir, config_name = split_config_path(
self.config_path, self.config_name
)
job_name = detect_task_name(self.calling_file, self.calling_module)
self.hydra = Hydra.create_main_hydra_file_or_module(
calling_file=self.calling_file,
calling_module=self.calling_module,
config_path=config_dir,
job_name=job_name,
strict=self.strict,
)
self.temp_dir = tempfile.mkdtemp()
overrides = copy.deepcopy(self.overrides)
assert overrides is not None
overrides.append(f"hydra.run.dir={self.temp_dir}")
self.job_ret = self.hydra.run(
config_name=config_name, task_function=self, overrides=overrides
)
return self
finally:
GlobalHydra().clear()
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
# release log file handles
logging.shutdown()
assert self.temp_dir is not None
shutil.rmtree(self.temp_dir)
class TTaskRunner(Protocol):
def __call__(
self,
calling_file: Optional[str],
calling_module: Optional[str],
config_path: Optional[str],
config_name: Optional[str],
overrides: Optional[List[str]] = None,
strict: Optional[bool] = None,
) -> TaskTestFunction:
...
class SweepTaskFunction:
"""
Context function
"""
def __init__(self) -> None:
"""
if sweep_dir is None, we use a temp dir, else we will create dir with the path from sweep_dir.
"""
self.temp_dir: Optional[str] = None
self.overrides: Optional[List[str]] = None
self.calling_file: Optional[str] = None
self.calling_module: Optional[str] = None
self.task_function: Optional[TaskFunction] = None
self.config_path: Optional[str] = None
self.config_name: Optional[str] = None
self.strict: Optional[bool] = None
self.sweeps = None
self.returns = None
def __call__(self, cfg: DictConfig) -> Any:
"""
Actual function being executed by Hydra
"""
if self.task_function is not None:
return self.task_function(cfg)
return 100
def __enter__(self) -> "SweepTaskFunction":
overrides = copy.deepcopy(self.overrides)
assert overrides is not None
if self.temp_dir:
Path(self.temp_dir).mkdir(parents=True, exist_ok=True)
else:
self.temp_dir = tempfile.mkdtemp()
overrides.append(f"hydra.sweep.dir={self.temp_dir}")
try:
config_dir, config_name = split_config_path(
self.config_path, self.config_name
)
job_name = detect_task_name(self.calling_file, self.calling_module)
hydra_ = Hydra.create_main_hydra_file_or_module(
calling_file=self.calling_file,
calling_module=self.calling_module,
config_path=config_dir,
job_name=job_name,
strict=self.strict,
)
self.returns = hydra_.multirun(
config_name=config_name, task_function=self, overrides=overrides
)
finally:
GlobalHydra().clear()
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
assert self.temp_dir is not None
shutil.rmtree(self.temp_dir)
class TSweepRunner(Protocol):
returns: List[List[JobReturn]]
def __call__(
self,
calling_file: Optional[str],
calling_module: Optional[str],
task_function: Optional[TaskFunction],
config_path: Optional[str],
config_name: Optional[str],
overrides: Optional[List[str]],
strict: Optional[bool] = None,
temp_dir: Optional[Path] = None,
) -> SweepTaskFunction:
...
def chdir_hydra_root() -> None:
"""
Change the cwd to the root of the hydra project.
used from unit tests to make them runnable from anywhere in the tree.
"""
_chdir_to_dir_containing(target="ATTRIBUTION")
def chdir_plugin_root() -> None:
"""
Change the cwd to the root of the plugin (location of setup.py)
"""
_chdir_to_dir_containing(target="setup.py")
def _chdir_to_dir_containing(target: str, max_up: int = 6) -> None:
cur = os.getcwd()
while not os.path.exists(os.path.join(cur, target)) and max_up > 0:
cur = os.path.relpath(os.path.join(cur, ".."))
max_up = max_up - 1
if max_up == 0:
raise IOError(f"Could not find {target} in parents of {os.getcwd()}")
os.chdir(cur)
def verify_dir_outputs(
job_return: JobReturn, overrides: Optional[List[str]] = None
) -> None:
"""
Verify that directory output makes sense
"""
assert isinstance(job_return, JobReturn)
assert job_return.working_dir is not None
assert job_return.task_name is not None
assert job_return.hydra_cfg is not None
assert os.path.exists(
os.path.join(job_return.working_dir, job_return.task_name + ".log")
)
hydra_dir = os.path.join(
job_return.working_dir, job_return.hydra_cfg.hydra.output_subdir
)
assert os.path.exists(os.path.join(hydra_dir, "config.yaml"))
assert os.path.exists(os.path.join(hydra_dir, "overrides.yaml"))
assert OmegaConf.load(
os.path.join(hydra_dir, "overrides.yaml")
) == OmegaConf.create(overrides or [])
def _get_statements(indent: str, statements: Union[None, str, List[str]]) -> str:
if isinstance(statements, str):
statements = [statements]
code = ""
if statements is None or len(statements) == 0:
code = "pass"
else:
for p in statements:
code += f"{indent}{p}\n"
return code
def integration_test(
tmpdir: Path,
task_config: DictConfig,
overrides: List[str],
prints: Union[str, List[str]],
expected_outputs: Union[str, List[str]],
prolog: Union[None, str, List[str]] = None,
filename: str = "task.py",
env_override: Dict[str, str] = {},
clean_environment: bool = False,
) -> str:
Path(tmpdir).mkdir(parents=True, exist_ok=True)
if isinstance(expected_outputs, str):
expected_outputs = [expected_outputs]
if isinstance(task_config, (list, dict)):
task_config = OmegaConf.create(task_config)
if isinstance(prints, str):
prints = [prints]
prints = [f'f.write({p} + "\\n")' for p in prints]
s = string.Template(
"""import hydra
import os
from hydra.core.hydra_config import HydraConfig
$PROLOG
@hydra.main($CONFIG_NAME)
def experiment(cfg):
with open("$OUTPUT_FILE", "w") as f:
$PRINTS
if __name__ == "__main__":
experiment()
"""
)
print_code = _get_statements(indent=" ", statements=prints)
prolog_code = _get_statements(indent="", statements=prolog)
config_name = ""
if task_config is not None:
cfg_file = tmpdir / "config.yaml"
with open(str(cfg_file), "w") as f:
f.write("# @package _global_\n")
OmegaConf.save(task_config, f)
config_name = "config_name='config'"
output_file = str(tmpdir / "output.txt")
# replace Windows path separator \ with an escaped version \\
output_file = output_file.replace("\\", "\\\\")
code = s.substitute(
PRINTS=print_code,
CONFIG_NAME=config_name,
OUTPUT_FILE=output_file,
PROLOG=prolog_code,
)
task_file = tmpdir / filename
task_file.write_text(str(code), encoding="utf-8")
cmd = [sys.executable, str(task_file)]
cmd.extend(overrides)
orig_dir = os.getcwd()
try:
os.chdir(str(tmpdir))
if clean_environment:
modified_env = {}
else:
modified_env = os.environ.copy()
modified_env.update(env_override)
subprocess.check_call(cmd, env=modified_env)
with open(output_file, "r") as f:
file_str = f.read()
output = str.splitlines(file_str)
if expected_outputs is not None:
assert len(output) == len(
expected_outputs
), f"Unexpected number of output lines from {task_file}, output lines:\n\n{file_str}"
for idx in range(len(output)):
assert (
output[idx] == expected_outputs[idx]
), f"Unexpected output for {prints[idx]} : expected {expected_outputs[idx]}, got {output[idx]}"
# some tests are parsing the file output for more specialized testing.
return file_str
finally:
os.chdir(orig_dir)
|
import numpy as np
import gym
import matplotlib.pyplot as plt
class Model(object):
def __init__(self, alpha, stateSpace):
self.ALPHA = alpha
self.weights = {}
self.stateSpace = stateSpace
for state in stateSpace:
self.weights[state] = 0
def calculateV(self, state):
v = self.weights[state]
return v
def updateWeights(self, G, state, t):
value = self.calculateV(state)
self.weights[state] += self.ALPHA/t*(G - value)
def aggregateState(posBins, velBins, obs):
pos = int(np.digitize(obs[0], posBins))
vel = int(np.digitize(obs[1], velBins))
state = (pos, vel)
return state
def policy(vel):
#_, velocity = state
# 0 - backward, 1 - none, 2 - forward
if vel < 4:
return 0
elif vel >= 4:
return 2
if __name__ == '__main__':
GAMMA = 1.0
env = gym.make('MountainCar-v0')
posBins = np.linspace(-1.2, 0.5, 8)
velBins = np.linspace(-0.07, 0.07, 8)
stateSpace = []
for i in range(1,9):
for j in range(1,9):
stateSpace.append((i,j))
numEpisodes = 20000
nearExit = np.zeros((3, int(numEpisodes/1000)))
leftSide = np.zeros((3, int(numEpisodes/1000)))
x = [i for i in range(nearExit.shape[1])]
for k, LR in enumerate([0.1, 0.01, 0.001]):
dt = 1.0
model = Model(LR, stateSpace)
for i in range(numEpisodes):
if i % 1000 == 0:
print('start episode', i)
idx = i // 1000
state = aggregateState(posBins, velBins, (0.43, 0.054))
nearExit[k][idx] = model.calculateV(state)
state = aggregateState(posBins, velBins, (-1.1, 0.001))
leftSide[k][idx] = model.calculateV(state)
dt += 0.1
observation = env.reset()
done = False
memory = []
while not done:
state = aggregateState(posBins, velBins, observation)
action = policy(state[1])
observation_, reward, done, _ = env.step(action)
memory.append((state, action, reward))
observation = observation_
state = aggregateState(posBins, velBins, observation)
memory.append((state, action, reward))
G = 0
last = True
statesReturns = []
for state, action, reward in reversed(memory):
if last:
last = False
else:
statesReturns.append((state, G))
G = GAMMA*G + reward
statesReturns.reverse()
statesVisited = []
for state, G in statesReturns:
if state not in statesVisited:
model.updateWeights(G, state, dt)
statesVisited.append(state)
plt.subplot(221)
plt.plot(x, nearExit[0], 'r--')
plt.plot(x, nearExit[1], 'g--')
plt.plot(x, nearExit[2], 'b--')
plt.title('near exit, moving right')
plt.subplot(222)
plt.plot(x, leftSide[0], 'r--')
plt.plot(x, leftSide[1], 'g--')
plt.plot(x, leftSide[2], 'b--')
plt.title('left side, moving right')
plt.legend(('alpha = 0.1', 'alpha = 0.01', 'alpha = 0.001'))
plt.show()
|
"""Nvim API subpackage.
This package implements a higher-level API that wraps msgpack-rpc `Session`
instances.
"""
from .buffer import Buffer
from .common import DecodeHook, SessionHook
from .nvim import Nvim, NvimError
from .tabpage import Tabpage
from .window import Window
__all__ = ('Nvim', 'Buffer', 'Window', 'Tabpage', 'NvimError', 'SessionHook',
'DecodeHook')
|
language = {
'so': 'So language - Congo',
'Afrikaans': 'Afrikaans',
'العربية': 'Arabic',
'беларуская': 'Belarusian',
'Български език': 'Bulgarian',
'বাংলা': 'Bengali',
'Català': 'Catalan',
'Čeština': 'Czech',
'Cymraeg': 'Welsh',
'Dansk': 'Danish',
'Deutsch': 'German',
'Ελληνικά': 'Greek',
'English': 'English',
'Esperanto': 'Esperanto',
'Español': 'Spanish',
'eesti keel': 'Estonian',
'فارسی': 'Persian',
'Suomi': 'Finnish',
'Wikang Filipino': 'Filipino',
'Français': 'French',
'Gaeilge': 'Irish',
'Gàidhlig': 'Scottish Gaelic',
'Sprēkō Þiudiskō': '?',
'עברית': 'Hebrew',
'हिन्दी': 'Hindi',
'Hrvatski': 'Croatian',
'Magyar': 'Hungarian',
'Bahasa Indonesia': 'Indonesian',
'Íslenska': 'Icelandic',
'Italiano': 'Italian',
'日本語': 'Japanese',
'한국어': 'Korean',
'Lingua latina': 'Latin',
'Lietuvių': 'Lithuanian',
'Latviešu valoda': 'Latvian',
'मराठी': 'Marathi',
'بهاس ملايو': 'Kelantanese Malay',
'Plattdüütsch': 'Low German',
'Nederlands': 'Dutch',
'Norsk': 'Norwegian',
'ਪੰਜਾਬੀ': 'Punjabi language',
'Polski': 'Polish',
'Português brasileiro': 'Brazil Portuguese',
'Português europeu': 'European Portuguese',
'Khuzdul': 'Khuzdul - Tolkien',
'Quenya': 'Quenya - Tolkien',
'Română': 'Romanian',
'Русский': 'Russian',
'Sindarin': 'Sindarin - Tolkien',
'Slovenčina': 'Slovak',
'Slovenščina': 'Slovenian',
'af Soomaali': 'Somali',
'Shqip': 'Albanian',
'српски': 'Serbian',
'Svenska': 'Swedish',
'Kiswahili': 'Swahili',
'ไทย': 'Thai',
'tlhIngan-Hol': 'Klingon',
'Thermian': 'Thermian - Galaxy Quest',
'Türkçe': 'Turkish',
'українська': 'Ukrainian',
'Bahasa Malaysia': 'Malaysian',
'Tiếng Việt': 'Vietnamese',
'中文': 'Chinese'
}
|
class Solution:
def nextGreaterElement(self, n):
"""
:type n: int
:rtype: int
"""
s = list(map(int, str(n)))
i = len(s) - 1
while i > 0 and s[i] <= s[i - 1]:
i -= 1
# 21
if i == 0:
return -1
# S[i] > S[i - 1]
j = i
while j + 1 < len(s) and s[j + 1] > s[i - 1]:
j += 1
# find s[j + 1] <= s[i - 1]
s[i-1], s[j] = s[j], s[i-1]
s[i:] = reversed(s[i:])
ans = int(''.join(map(str, s)))
return ans if ans <= ((1<<31)-1) else -1
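# Illustrative sanity checks (not part of the original solution): 12 -> 21, 21 has
# no greater permutation, and 1999999999's next permutation (9199999999) exceeds
# the 32-bit signed limit, so it maps to -1.
if __name__ == "__main__":
    sol = Solution()
    assert sol.nextGreaterElement(12) == 21
    assert sol.nextGreaterElement(21) == -1
    assert sol.nextGreaterElement(1999999999) == -1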
|
import argparse,math,os,sys,tables
import numpy as np
from operator import itemgetter
from sklearn import linear_model, model_selection, metrics
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
UNCHANGED,DOWN,UP,TARGET = 0,1,2,3
statdict = { UNCHANGED:'.', DOWN:'DOWN', UP:'UP', TARGET:'TARGET' }
class dataset(object):
def __init__(self):
self.index = None
self.info = None # include chrom and TSS and gene symbol
self.x = None
self.y = None
self.bwdir = None
self.bwfiles = None
self.rpfiles = None
def gene_sym(symfile):
"""
One representative of each gene symbol is chosen.
"""
# TODO: change the selection from the first to the longest gene body
# return {"refseq":["symbol","chr"]}
fp = open(symfile)
symdict = {} # {"gene_symbol": ["refseq_ID", "chr"]} the first refseq ID in the TSS file
for line in fp:
f = line.strip().split('\t')
g = f[3]
IDs = g.split(':')
if IDs[1] not in symdict:
symdict[IDs[1]] = [IDs[0], f[0]]
rsymdict = {}
for elem in symdict:
rsymdict[symdict[elem][0]] = [elem,symdict[elem][1]]
fp.close()
return rsymdict
def logtransform(x):
xt = np.array(x)
pcount = 1
xt += pcount
med = np.median( xt,0 )
x = np.log2(xt) - np.log2(med)
return x
def sqrttransform(x):
xt = np.array(x)
med = np.median( xt,0 )
x = np.sqrt(xt) - np.sqrt(med)
return x
def read_hdf5( h5file, sample_name):
""" Apply motif stat function to all data in motif_file_name.
Data in numpy array val corresponds to idx entries. If idx if None all entries are used."""
a = h5file.get_node("/", sample_name )
m = a.read()
return m
def getSampleNames_hdf5(h5file):
samplenames = []
for array in h5file.walk_nodes("/","Array"):
if array.name not in samplenames:
samplenames.append(array.name)
else:
continue
#samplenames = samplenames[340:]
return samplenames
def readregpotfiles(sym,genome,samplenames,h5file):
# make list of regulatory potential file
# read in regulatory potential from files in directory
index = None
x = None
nsamples = len(samplenames)
refseqID = read_hdf5( h5file, 'refseqID' )
print(refseqID)
symID = read_hdf5(h5file, 'symbol')
print(symID)
chrom = read_hdf5(h5file, 'chr')
start = read_hdf5(h5file, 'start')
for k,name in enumerate(samplenames):
        if index is None:
index = {}
info = {}
i = 0
for j,geneid in enumerate(refseqID):
geneid = geneid.decode("utf-8") # gene symbol
if geneid in sym:
symid = sym[geneid][0]
if symid not in index:
index[symid] = i
info[symid] = [chrom[j].decode("utf-8"),start[j]]
i += 1
ngenes = len(index)
x = np.zeros((ngenes,nsamples))
print(np.shape(x))
RP = read_hdf5( h5file, name )
for i,geneid in enumerate(refseqID):
geneid = geneid.decode("utf-8")
if geneid in sym:
symid = sym[geneid][0]
rp = RP[i]
try:
x[index[symid],k] = rp ### float num was ignored here, e.g., 'chr', 'refseqID', 'start', 'symbol'
except:
pass
z = dataset()
z.rpfiles = samplenames
z.x = x # x.shape = ngenes,nsamples # x is RP not relative RP, change to relative RP
print(np.median(x, axis=1))
z.index = index # {symbol:'start position'}
z.info = info # {'symbol':[chr,start]}
return z
def read_genelistOnly(sym, fname, index, exptype):
status = np.zeros( len(index) )
genenames = np.ndarray(shape=(len(index)),dtype=object)
print(list(index.keys())[0:20])
train_chroms = ['chr1','chr3','chr5','chr7','chr9','chr11','chr13','chr15','chr17','chr19','chr21']
test_chroms = ['chr2','chr4','chr6','chr8','chr10','chr12','chr14','chr16','chr18','chr20','chr22']
train_index = []
test_index = []
fp = open(fname).readlines()
genes = [g.strip() for g in fp]
allgenes = list(sym.keys())
print(allgenes[0:20])
for ag in allgenes:
if exptype == 'Gene_Only':
try:
i = index[sym[ag][0]]
if sym[ag][1] in train_chroms:
train_index.append(i)
elif sym[ag][1] in test_chroms:
test_index.append(i)
#print i
if sym[ag][0] in genes:
#print sym[ag][0]
status[i] = TARGET
else:
status[i] = UNCHANGED
genenames[i] = ag
except:
continue
else:
try:
i = index[sym[ag][0]]
if sym[ag][1] in train_chroms:
train_index.append(i)
elif sym[ag][1] in test_chroms:
test_index.append(i)
else:
pass
if ag in genes:
status[i] = TARGET
else:
status[i] = UNCHANGED
genenames[i] = ag
except:
continue
print('file: %s\ttarget: %d\tunchanged: %d\n' % ( fname, sum( status == TARGET ), sum( status == UNCHANGED ) ))
# print(genenames[0:20])
return (genenames, status,train_index,test_index)
def dataset_annotation(annotationf):
# get the cell annotation for each datasetID
    inf = open(annotationf, 'r')
ann = {}
for line in inf:
if line.startswith('datasetID'):
pass
else:
line = line.strip().split('\t')
ID = line[0] # dataset id -> GSM id
info = [line[4],line[5],line[7]] # CellLineName, Tissue/Organ, DetailedTissue
try:
ann[ID] = info
except:
ann[ID] = 'NA'
return ann
def lasso_test(x,y):
# given x,y, return auc score
LR_l1 = linear_model.LogisticRegression(penalty='l1', tol=0.01)
LR_l1.fit(x,y)
#np.mean( model_selection.cross_val_score( LR_l1, X_train, y_train, scoring='roc_auc', cv=5 ))
yhat = LR_l1.predict_log_proba(x)
fpr, tpr, thresholds = metrics.roc_curve(y, yhat[:,1], pos_label=1)
auc = metrics.auc(fpr,tpr)
selected_features = len([i for i in LR_l1.coef_[0] if i !=0])
return auc,selected_features
def lasso_test_best_alpha(x,y,prename):
# given x,y, return alpha used for adaptive lasso
alphas = [i for i in np.logspace(-2,1,10)]
alpha_cvs = []
plt.figure()
for alpha in alphas:
LR_l1 = linear_model.LogisticRegression(penalty='l1', tol=0.01,fit_intercept=True,C=alpha);print(alpha)
cvs_scores = model_selection.cross_val_score( LR_l1, x, y, scoring='roc_auc', cv=5 )
alpha_cvs.append(cvs_scores)
LR_l1.fit(x,y)
yhat = LR_l1.predict_log_proba(x)
fpr, tpr, thresholds = metrics.roc_curve(y, yhat[:,1], pos_label=1)
auc = metrics.auc(fpr,tpr)
        selected_features = len([i for i in LR_l1.coef_[0] if i !=0])
print(alpha,np.mean(cvs_scores),auc,selected_features)
# plot the auc figs
y_mean = np.mean(cvs_scores)
y_err = np.std(cvs_scores)
plt.errorbar(alpha,y_mean,y_err,color='r',ecolor='grey',fmt='o',capsize=4)
plt.ylim([0,1])
plt.xscale('log')
plt.savefig(prename+'_alpha_auc.png',bbox_inches='tight',pad_inches=0.1,transparent=True)
plt.close()
#alpha_cvs_mean = [i.mean() for i in alpha_cvs]
#best_alpha = alphas[alpha_cvs_mean.index(max(alpha_cvs_mean))]
return alphas,alpha_cvs
def best_alpha(x,y):
# given x,y, return alpha used for adaptive lasso
alphas = [i for i in np.logspace(-2,1,10)]
#alphas = [0.005,0.01,0.02,0.05,0.1,0.2,0.5,1,2]
alpha_cvs = []
for alpha in alphas:
LR_l1 = linear_model.LogisticRegression(penalty='l1', tol=0.01,fit_intercept=True,C=alpha)
cvs_scores = model_selection.cross_val_score( LR_l1, x, y, scoring='roc_auc', cv=5 )
alpha_cvs.append(cvs_scores)
print(' =best-alpha= ',alpha, '==mean-cvs==',np.mean(cvs_scores))
alpha_cvs_mean = [i.mean() for i in alpha_cvs]
best_alpha = alphas[alpha_cvs_mean.index(max(alpha_cvs_mean))]
return best_alpha,max(alpha_cvs_mean)
def adaptive_lasso(x,y,samplefiles,name,maxsamples,ann,genenames):
# test of adaptive lasso
g = lambda w:np.sqrt(np.abs(w))
gprime = lambda w: 1.0/(2.*np.sqrt(np.abs(w))+np.finfo(float).eps)
n_samples,n_features = x.shape
n_lasso_iterations = 10
weights = np.ones(n_features)
selected_features = n_features
print('Run adaptive lasso for 10 rounds...')
print('Round, Alpha, Features number ')
for k in range(n_lasso_iterations):
if selected_features >maxsamples:
alpha=0.02
else:
alpha=0.2
X_w = x / weights
#alpha,best_cvs = best_alpha(X_w,y) # TODO: if you need to select best alpha for each step later
#alpha = 0.1
# set fixed seed and default solver
estimator = linear_model.LogisticRegression(penalty='l1', tol=0.01, fit_intercept=True, C=alpha, random_state=2019, solver="liblinear")
estimator.fit(X_w,y)
coef_ = estimator.coef_/weights
weights = gprime(coef_)
selected_features = len([i for i in coef_[0] if i !=0])
print('{}, {}, {}'.format(k,alpha,selected_features))
rand_idx = list(range(x.shape[0]))
random.shuffle( rand_idx )
# xt = np.multiply(x,coef_);print(xt.shape)
xt,yt = X_w[rand_idx,:], y[rand_idx]
cvs_scores = model_selection.cross_val_score(estimator ,xt,yt, scoring='roc_auc', cv=5 )
best_cvs = np.mean(cvs_scores)
yhat = estimator.predict_log_proba(xt)
fpr, tpr, thresholds = metrics.roc_curve(yt, yhat[:,1], pos_label=1)
auc = metrics.auc(fpr,tpr)
# print(k,'alpha',alpha)
# print(k,'best_cvs',best_cvs)
# print(k,'auc',auc)
# print(k,'selected_features',selected_features)
outf = open('{}_adaptive_lasso_Info.txt'.format(name),'w')
for coef in sorted([ i for i in coef_[0] if i!=0], key=abs, reverse=True):
samplefile = samplefiles[list(coef_[0]).index(coef)]
dataID = samplefile.split('_')[0]
if dataID in list(ann.keys()):
annInfo = ann[dataID]
else:
annInfo = ['NA','NA','NA']
outf.write('{}\t{}\t{}\n'.format(dataID, coef, '\t'.join(annInfo)))
outf.write('AUC = {}\n'.format(auc))
outf.write('best_cvs = {}\n'.format(best_cvs))
outf.write('selected_features = {}\n'.format(selected_features))
return auc,selected_features
def main(args):
'''
Input arguments from command line.
'''
# read all parameters from arguments
gxfile = args.expr # input gene symbols/refseqID
name = args.name # output name
exptype = args.exptype
genome = args.genome # species
symfile = args.sym
annotation = args.annotation
rp_hdf5 = args.histRP
transform = args.transform
maxsamples = args.maxsamples
# TODO: symfile is the refseqID annotation file, change to gene symbol file?
sym = gene_sym(symfile) # {"resfseq": {"gene_symbol", "chr"}}
h5file = tables.open_file( rp_hdf5, driver="H5FD_CORE")
samplenames = getSampleNames_hdf5(h5file)
z = readregpotfiles(sym,genome,samplenames,h5file)
h5file.close()
if transform == 'log':
z.x = logtransform(z.x)
if transform == 'sqrt':
z.x = sqrttransform(z.x)
(genenames,z.y,train_index,test_index) = read_genelistOnly(sym, gxfile, z.index, exptype)
sys.stdout.flush()
    print('Do regression with TARGET genes...')
y = 1*( z.y == TARGET )
x = z.x[:,:-5] # remove the last few columns: refseq, start, chr, etc...
print("Adaptive lasso RP matrix shape...")
print("{}\n".format(np.shape(x)))
ann = dataset_annotation(annotation)
    try:
        auc,selected_features = adaptive_lasso(x,y,z.rpfiles,name,maxsamples,ann,genenames)
    except:
        sys.stderr.write("""\nERROR: bart2 exited with errors!
        Please check whether you selected the correct species or uploaded the correct gene list!\n""")
        sys.exit(1)
print("Adaptive lasso regression AUC score and selected features...")
print(auc)
print(selected_features)
sys.stdout.flush()
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser(description="""Regression of regulatory potential to gene expression changes.""")
# related to input file type
parser.add_argument( '-e','--expr', dest='expr', required = True, type = str, help = 'The related differential expression file')
parser.add_argument( '--exptype', dest='exptype', required = True, choices=['Gene_Response','Gene_Only'], type = str, \
help = 'Gene_Response includes 2 columns, one is the geneID, and the other is 1/0, 1 for target and 0 for un-target; \
Gene_Only includes 1 column, only the gene list of the targets. \
Only official gene symbol or refseqID are allowd for the geneID.')
parser.add_argument( '-r','--historicalRP', dest='histRP', required = True, type = str, \
help = 'The file with hdf5 format which contain the H3K27ac RP information')
# transform method
parser.add_argument('-t', '--transform', dest="transform", type=str, default='sqrt', choices=['sqrt', 'log'], required=True, \
help='Use sqrt transform or log transform on RP ')
parser.add_argument( '-a', dest='annotation', required=True, type=str, help='The annotation file for each dataset' )
        parser.add_argument( '-m', dest='sym', type=str, required=True, help='refseqTSS is six columns: <chromosome name> <TSS> <TSS+1> <refseq:genesymbol> <score> <strand>')
# parser.add_argument( '-m', dest='sym', type=str, required=True, help='genesymbolTSS is six columns: <chromosome name> <TSS-1> <TSS+1> <genesymbol> <score> <strand>')
parser.add_argument( '-g','--genome', dest="genome", type=str, default='hg38', choices=['mm9','hg19','hg38','mm10'], required=False, help='genome')
parser.add_argument( '--maxsamples', dest='maxsamples', type=int, default=20, required=False, \
help='Maximum number of samples to include in regression model.' )
parser.add_argument( '-n','--name', dest='name',required = True, type = str, help = 'The prefix of the output names')
args = parser.parse_args()
main(args)
except KeyboardInterrupt:
sys.stderr.write("User interrupted me!\n")
sys.exit(0)
|
import machine
import ssd1306
import network
import time
from micropython import const
SSID = 'myhomewifi'
PWD = 'myhomewifipassword'
WIDTH = const(128)
HEIGHT = const(32)
pscl = machine.Pin(5, machine.Pin.OUT)
psda = machine.Pin(4, machine.Pin.OUT)
i2c = machine.I2C(scl=pscl, sda=psda)
ssd = ssd1306.SSD1306_I2C(WIDTH, HEIGHT, i2c)
ssd.text(' Hello, World!', 0, 5, 1)
ssd.hline(10, 16, 107, 1) #Line in middle
# Box around screen
ssd.hline(0, 0, 127, 1)
ssd.hline(0, 31, 127, 1)
ssd.vline(0, 0, 31, 1)
ssd.vline(127, 0, 31, 1)
ssd.show()
# Turn off access point and turn on station WiFi. When WLAN connected,
# show the IP address.
ap_if = network.WLAN(network.AP_IF)
ap_if.active(False)
sta_if = network.WLAN(network.STA_IF)
sta_if.active(True)
sta_if.connect(SSID, PWD)
while not sta_if.isconnected():
pass
WLAN_IP = sta_if.ifconfig()[0]
ssd.text(WLAN_IP, 2, 20, 1)
ssd.show()
# Show time
while True:
time_str = str(time.time())
ssd.fill_rect(1, 5, 126, 8, 0)
ssd.text(time_str, 38, 4, 1)
ssd.show()
time.sleep(0.98)
|
"""
Simple script that shows the video-predictor API in action
"""
from robonet.video_prediction.testing.model_evaluation_interface import VPredEvaluation
import numpy as np
test_hparams = {}
test_hparams['designated_pixel_count'] = 1 # number of selected pixels
test_hparams['run_batch_size'] = 200 # number of predictions run through model concurrently
N_ACTIONS = 300 # total actions to predict: can be different from run_batch_size!
# feed in restore path and test specific hyperparams
model = VPredEvaluation('~/Downloads/franka_sanity/sanity_check_model/checkpoint_170000', test_hparams)
model.restore()
# context tensors needed for prediction
context_tensors = {}
context_tensors['context_actions'] = np.zeros((model.n_context - 1, model.adim))
context_tensors['context_states'] = np.zeros((model.n_context, model.sdim)) # not needed for all models
height, width = model.img_size
context_tensors['context_frames'] = np.zeros((model.n_context, model.n_cam, height, width, 3)) # inputs should be RGB float \in [0, 1]
context_tensors['context_pixel_distributions'] = np.zeros((model.n_context, model.n_cam, height, # spatial distributions (sum across image should be 1)
width, test_hparams['designated_pixel_count']))
context_tensors['context_pixel_distributions'][:, :, 24, 32, :] = 1.0
# actions for frames to be predicted
action_tensors = {}
action_tensors['actions'] = np.zeros((N_ACTIONS, model.horizon, model.adim))
results = model(context_tensors, action_tensors)
predicted_frames = results['predicted_frames'] # RGB images, shape (N_ACTIONS, HORIZON, N_CAMS, 48, 64, 3)
predicted_distributions = results['predicted_pixel_distributions'] # pixel distributions, shape (N_ACTIONS, HORIZON, N_CAMS, 48, 64, designated_pixel_count)
print('predicted_frames has shape', predicted_frames.shape)
|
from xml.etree.ElementTree import Element
from frappe.model.document import Document
from trebelge.TRUBLCommonElementsStrategy.TRUBLAttachment import TRUBLAttachment
from trebelge.TRUBLCommonElementsStrategy.TRUBLCommonElement import TRUBLCommonElement
from trebelge.TRUBLCommonElementsStrategy.TRUBLParty import TRUBLParty
from trebelge.TRUBLCommonElementsStrategy.TRUBLPeriod import TRUBLPeriod
class TRUBLDocumentReference(TRUBLCommonElement):
_frappeDoctype: str = 'UBL TR DocumentReference'
def process_element(self, element: Element, cbcnamespace: str, cacnamespace: str) -> Document:
frappedoc: dict = {}
        # ['ID'] = ('cbc', '', 'Mandatory (1)', 'id')
id_: Element = element.find('./' + cbcnamespace + 'ID')
if id_ is not None:
if id_.text is not None:
frappedoc['id'] = id_.text.strip()
        # ['IssueDate'] = ('cbc', '', 'Mandatory (1)', 'issuedate')
issuedate_ = element.find('./' + cbcnamespace + 'IssueDate')
# if id_.attrib.keys() is not None:
# return None
if issuedate_ is not None:
if issuedate_.text is not None:
frappedoc['issuedate'] = issuedate_.text.strip()
        # ['DocumentTypeCode'] = ('cbc', '', 'Optional (0...1)', 'documenttypecode')
        # ['DocumentType'] = ('cbc', '', 'Optional (0...1)', 'documenttype')
cbcsecimli01: list = ['DocumentTypeCode', 'DocumentType']
for elementtag_ in cbcsecimli01:
field_: Element = element.find('./' + cbcnamespace + elementtag_)
if field_ is not None:
if field_.text is not None:
frappedoc[elementtag_.lower()] = field_.text.strip()
        # ['Attachment'] = ('cac', 'Attachment', 'Optional (0...1)', 'attachment')
attachment_ = element.find('./' + cacnamespace + 'Attachment')
if attachment_ is not None:
tmp = TRUBLAttachment().process_element(attachment_, cbcnamespace, cacnamespace)
if tmp is not None:
frappedoc['attachment'] = tmp.name
        # ['ValidityPeriod'] = ('cac', 'Period', 'Optional (0...1)', 'validityperiod')
validityperiod_ = element.find('./' + cacnamespace + 'ValidityPeriod')
if validityperiod_ is not None:
tmp = TRUBLPeriod().process_elementasdict(validityperiod_, cbcnamespace, cacnamespace)
if tmp != {}:
for key in ['startdate', 'starttime', 'enddate', 'endtime', 'durationmeasure',
'durationmeasure_unitcode', 'description']:
try:
frappedoc[key] = tmp[key]
except KeyError:
pass
        # ['IssuerParty'] = ('cac', 'Party', 'Optional (0...1)', 'issuerparty')
issuerparty_ = element.find('./' + cacnamespace + 'IssuerParty')
if issuerparty_ is not None:
tmp = TRUBLParty().process_element(issuerparty_, cbcnamespace, cacnamespace)
if tmp is not None:
frappedoc['issuerparty'] = tmp.name
if frappedoc == {}:
return None
document: Document = self._get_frappedoc(self._frappeDoctype, frappedoc)
        # ['DocumentDescription'] = ('cbc', '', 'Optional (0..n)', 'documentdescription')
documentdescriptions_: list = element.findall('./' + cbcnamespace + 'DocumentDescription')
if len(documentdescriptions_) != 0:
for documentdescription_ in documentdescriptions_:
element_ = documentdescription_.text
if element_ is not None and element_.strip() != '':
document.append("documentdescription", dict(note=element_.strip()))
document.save()
return document
def process_elementasdict(self, element: Element, cbcnamespace: str, cacnamespace: str) -> dict:
pass
|
print("Program to print Armstrong Numbers\n");
n=int(input("Enter any number: "));
i=n
count=0
while(i>0):
i=i//10
count=count+1
sum=0
i=n
while(i>0):
digit=i%10
x=1
pro=1
while(x<=count):
pro=pro*digit
x=x+1
sum=sum+pro
i=i//10
if(sum==n):
print("The given number",n,"is an armstrong number");
else:
print("The number is not an armstrong number");
|
import builtins
import numpy as np
import pytest
import autofit as af
import autogalaxy as ag
from autofit import Paths
class MockAnalysis:
def __init__(self, number_galaxies, shape, value):
self.number_galaxies = number_galaxies
self.shape = shape
self.value = value
# noinspection PyUnusedLocal
def galaxy_images_for_model(self, model):
return self.number_galaxies * [np.full(self.shape, self.value)]
class MockMask:
pass
class Optimizer:
def __init__(self, name="dummy_phase"):
self.name = name
self.phase_path = ""
class DummyPhaseImaging(af.AbstractPhase):
def make_result(self, result, analysis):
pass
def __init__(self, search):
super().__init__(search=search)
self.dataset = None
self.results = None
self.mask = None
def run(self, dataset, results, mask=None, info=None, **kwargs):
self.save_metadata(dataset)
self.dataset = dataset
self.results = results
self.mask = mask
return af.Result(af.ModelInstance(), 1)
class MockImagingData(af.Dataset):
def __init__(self, metadata=None):
self._metadata = metadata or dict()
@property
def metadata(self) -> dict:
return self._metadata
@property
def name(self) -> str:
return "data_name"
class MockFile:
def __init__(self):
self.text = None
self.filename = None
def write(self, text):
self.text = text
def __enter__(self):
return self
def __exit__(self, *args):
pass
@pytest.fixture(name="mock_files", autouse=True)
def make_mock_file(monkeypatch):
files = []
def mock_open(filename, flag, **kwargs):
assert flag in ("w+", "w+b", "a")
file = MockFile()
file.filename = filename
files.append(file)
return file
monkeypatch.setattr(builtins, "open", mock_open)
yield files
class MockSearch:
def __init__(self, name):
self.name = name
self.paths = Paths(name)
#
# class TestMetaData:
# def test_files(self, mock_files):
# pipeline = ag.PipelineDataset(
# "pipeline_name", DummyPhaseImaging(search=MockSearch("name"))
# )
# pipeline.run(dataset=MockImagingData(), mask=MockMask())
#
# assert (
# mock_files[2].text
# == "phase=name\nphase_tag=\npipeline=pipeline_name\npipeline_tag=\nnon_linear_search=search\ndataset_name=data_name"
# )
#
# assert "name////non_linear.pickle" in mock_files[3].filename
# class TestPassMask:
# def test_pass_mask(self):
# mask = MockMask()
# phase1 = DummyPhaseImaging("one")
# phase2 = DummyPhaseImaging("two")
# pipeline = ag.PipelineDataset("", phase_1, phase_2)
# pipeline.run(dataset=MockImagingData(), mask=mask)
#
# assert phase1.mask is mask
# assert phase2.mask is mask
#
#
# class TestPipelineImaging:
# def test_run_pipeline(self):
# phase1 = DummyPhaseImaging("one")
# phase2 = DummyPhaseImaging("two")
#
# pipeline = ag.PipelineDataset("", phase_1, phase_2)
#
# pipeline.run(dataset=MockImagingData(), mask=MockMask())
#
# assert len(phase2.results) == 2
#
# def test_addition(self):
# phase1 = DummyPhaseImaging("one")
# phase2 = DummyPhaseImaging("two")
# phase3 = DummyPhaseImaging("three")
#
# pipeline1 = ag.PipelineDataset("", phase_1, phase_2)
# pipeline2 = ag.PipelineDataset("", phase_3)
#
# assert (phase_1, phase_2, phase_3) == (pipeline1 + pipeline2).phases
class DummyPhasePositions(af.AbstractPhase):
def make_result(self, result, analysis):
pass
def __init__(self, name):
super().__init__(MockSearch(name=name))
self.results = None
self.pixel_scales = None
self.search = Optimizer(name)
def run(self, pixel_scales, results):
self.save_metadata(MockImagingData())
self.pixel_scales = pixel_scales
self.results = results
return af.Result(af.ModelInstance(), 1)
|
# Class for opsim field based slicer.
import numpy as np
from functools import wraps
import warnings
from rubin_sim.maf.plots.spatialPlotters import OpsimHistogram, BaseSkyMap
from .baseSpatialSlicer import BaseSpatialSlicer
__all__ = ['OpsimFieldSlicer']
class OpsimFieldSlicer(BaseSpatialSlicer):
"""A spatial slicer that evaluates pointings based on matched IDs between the simData and fieldData.
Note that this slicer uses the fieldId of the simulated data fields to generate the spatial matches.
Thus, it is not suitable for use in evaluating dithering or high resolution metrics
(use the HealpixSlicer instead for those use-cases).
When the slicer is set up, it takes two arrays: fieldData and simData. FieldData is a numpy.recarray
containing the information about the fields - this is the basis for slicing.
The simData is a numpy.recarray that holds the information about the pointings - this is the data that
is matched against the fieldData.
Parameters
----------
    simDataFieldIdColName : str, optional
Name of the column in simData for the fieldId
Default fieldId.
simDataFieldRaColName : str, optional
Name of the column in simData for the RA.
Default fieldRA.
simDataFieldDecColName : str, optional
Name of the column in simData for the fieldDec.
Default fieldDec.
    latLonDeg : bool, optional
Whether the RA/Dec values in *fieldData* are in degrees.
If using a standard metricBundleGroup to run the metric, FieldData is fetched
by utils.getFieldData, which always returns radians (so the default here is False).
fieldIdColName : str, optional
Name of the column in the fieldData for the fieldId (to match with simData).
Default fieldId.
fieldRaColName : str, optional
Name of the column in the fieldData for the RA (used for plotting).
Default fieldRA.
fieldDecColName : str, optional
Name of the column in the fieldData for the Dec (used for plotting).
Default fieldDec.
verbose : `bool`, optional
Flag to indicate whether or not to write additional information to stdout during runtime.
Default True.
badval : float, optional
Bad value flag, relevant for plotting. Default -666.
"""
def __init__(self, simDataFieldIdColName='fieldId',
simDataFieldRaColName='fieldRA', simDataFieldDecColName='fieldDec', latLonDeg=False,
fieldIdColName='fieldId', fieldRaColName='fieldRA', fieldDecColName='fieldDec',
verbose=True, badval=-666):
super(OpsimFieldSlicer, self).__init__(verbose=verbose, badval=badval)
self.fieldId = None
self.simDataFieldIdColName = simDataFieldIdColName
self.fieldIdColName = fieldIdColName
self.fieldRaColName = fieldRaColName
self.fieldDecColName = fieldDecColName
self.latLonDeg = latLonDeg
self.columnsNeeded = [simDataFieldIdColName, simDataFieldRaColName, simDataFieldDecColName]
while '' in self.columnsNeeded:
self.columnsNeeded.remove('')
self.fieldColumnsNeeded = [fieldIdColName, fieldRaColName, fieldDecColName]
self.slicer_init = {'simDataFieldIdColName': simDataFieldIdColName,
'simDataFieldRaColName': simDataFieldRaColName,
'simDataFieldDecColName': simDataFieldDecColName,
'fieldIdColName': fieldIdColName,
'fieldRaColName': fieldRaColName,
'fieldDecColName': fieldDecColName, 'badval': badval}
self.plotFuncs = [BaseSkyMap, OpsimHistogram]
self.needsFields = True
def setupSlicer(self, simData, fieldData, maps=None):
"""Set up opsim field slicer object.
Parameters
-----------
simData : numpy.recarray
Contains the simulation pointing history.
fieldData : numpy.recarray
Contains the field information (ID, Ra, Dec) about how to slice the simData.
For example, only fields in the fieldData table will be matched against the simData.
RA and Dec should be in degrees.
maps : list of rubin_sim.maf.maps objects, optional
Maps to run and provide additional metadata at each slicePoint. Default None.
"""
if 'ra' in self.slicePoints:
warning_msg = 'Warning: this OpsimFieldSlicer was already set up once. '
warning_msg += 'Re-setting up an OpsimFieldSlicer can change the field information. '
warning_msg += 'Rerun metrics if this was intentional. '
warnings.warn(warning_msg)
# Set basic properties for tracking field information, in sorted order.
idxs = np.argsort(fieldData[self.fieldIdColName])
# Set needed values for slice metadata.
self.slicePoints['sid'] = fieldData[self.fieldIdColName][idxs]
if self.latLonDeg:
self.slicePoints['ra'] = np.radians(fieldData[self.fieldRaColName][idxs])
self.slicePoints['dec'] = np.radians(fieldData[self.fieldDecColName][idxs])
else:
self.slicePoints['ra'] = fieldData[self.fieldRaColName][idxs]
self.slicePoints['dec'] = fieldData[self.fieldDecColName][idxs]
self.nslice = len(self.slicePoints['sid'])
self._runMaps(maps)
# Set up data slicing.
self.simIdxs = np.argsort(simData[self.simDataFieldIdColName])
simFieldsSorted = np.sort(simData[self.simDataFieldIdColName])
self.left = np.searchsorted(simFieldsSorted, self.slicePoints['sid'], 'left')
self.right = np.searchsorted(simFieldsSorted, self.slicePoints['sid'], 'right')
self.spatialExtent = [simData[self.simDataFieldIdColName].min(),
simData[self.simDataFieldIdColName].max()]
self.shape = self.nslice
@wraps(self._sliceSimData)
def _sliceSimData(islice):
idxs = self.simIdxs[self.left[islice]:self.right[islice]]
# Build dict for slicePoint info
slicePoint = {}
for key in self.slicePoints:
if (np.shape(self.slicePoints[key])[0] == self.nslice) & \
(key != 'bins') & (key != 'binCol'):
slicePoint[key] = self.slicePoints[key][islice]
else:
slicePoint[key] = self.slicePoints[key]
return {'idxs': idxs, 'slicePoint': slicePoint}
setattr(self, '_sliceSimData', _sliceSimData)
def __eq__(self, otherSlicer):
"""Evaluate if two grids are equivalent."""
result = False
if isinstance(otherSlicer, OpsimFieldSlicer):
if np.all(otherSlicer.shape == self.shape):
# Check if one or both slicers have been setup
if (self.slicePoints['ra'] is not None) or (otherSlicer.slicePoints['ra'] is not None):
if (np.array_equal(self.slicePoints['ra'], otherSlicer.slicePoints['ra']) &
np.array_equal(self.slicePoints['dec'], otherSlicer.slicePoints['dec']) &
np.array_equal(self.slicePoints['sid'], otherSlicer.slicePoints['sid'])):
result = True
# If they have not been setup, check that they have same fields
elif ((otherSlicer.fieldIdColName == self.fieldIdColName) &
(otherSlicer.fieldRaColName == self.fieldRaColName) &
(otherSlicer.fieldDecColName == self.fieldDecColName)):
result = True
return result
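# Illustrative setup sketch (assumption, not part of the original module): both
# inputs are numpy recarrays sharing the fieldId column, using the default column
# names documented in the class docstring.
#
# fieldData = np.array([(1, 30.0, -10.0), (2, 31.0, -11.0)],
#                      dtype=[('fieldId', int), ('fieldRA', float), ('fieldDec', float)])
# simData = np.array([(1, 30.0, -10.0), (1, 30.0, -10.0), (2, 31.0, -11.0)],
#                    dtype=[('fieldId', int), ('fieldRA', float), ('fieldDec', float)])
# slicer = OpsimFieldSlicer()
# slicer.setupSlicer(simData, fieldData)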
|
from django.contrib import admin
from .models import Document, Test_Data, Question_Data, Question_Attempt, Student_Data, Class_Section, Class_Assignment, Category
# Register your models here.
admin.site.register(Test_Data)
admin.site.register(Student_Data)
admin.site.register(Question_Data)
admin.site.register(Question_Attempt)
admin.site.register(Class_Section)
admin.site.register(Class_Assignment)
admin.site.register(Category)
admin.site.register(Document)
|
import boto3
import datetime
from datetime import date, timedelta
import io
import os
import zipfile
import pathos.multiprocessing as mp
class DQCountExtractor(object):
"""Extracts xml counts from DQ for a range of given dates"""
def __init__(self):
self.aws_secret_access_key = os.environ['AWS_SECRET_ACCESS_KEY']
self.aws_access_key_id = os.environ['AWS_ACCESS_KEY_ID']
self.aws_region_name = os.environ['AWS_DEFAULT_REGION']
self.bucket = os.environ['BUCKET']
self.start_date = os.environ['START_DATE']
self.end_date = os.environ['END_DATE']
self.date_separator = "/"
if self.start_date.find("-") > 0 and self.end_date.find("-") > 0:
self.date_separator = "-"
start_year, start_mon, start_day = self.start_date.split(self.date_separator)
end_year, end_mon, end_day = self.end_date.split(self.date_separator)
self.d1 = date(int(start_year), int(start_mon), int(start_day))
d2 = date(int(end_year), int(end_mon), int(end_day))
self.delta = d2 - self.d1
def count_extractor(self, should_multi_process=True):
        session = boto3.Session(aws_access_key_id=self.aws_access_key_id,
                                aws_secret_access_key=self.aws_secret_access_key,
                                region_name=self.aws_region_name)
print("AWS session started...")
get_result_args = []
for i in range(self.delta.days + 1):
get_result_args.append((i, self.d1, session, self.bucket, self.date_separator))
return_list = []
if should_multi_process:
pool_size = int(os.environ['POOL_SIZE']) # Number of processes that should run in parallel
            p = mp.Pool(pool_size)
            try:
                p_map = p.map(get_results, get_result_args)
                return_list = p_map
            finally:
                p.close()
else: # Test mode - no parallel processing
for d in get_result_args:
r = get_results(d)
return_list.append(r)
        # for e in return_list:
        #     print(e)
        return return_list
def get_results(t):
day = t[0]
start_date = t[1]
aws_session = t[2]
bucket_name = t[3]
date_separator = t[4]
if date_separator == "/":
file_received_date = datetime.datetime.strptime(str(start_date + timedelta(day)), '%Y-%m-%d').\
strftime('%Y/%m/%d')
key_prefix = 's4/parsed/' + str(file_received_date) + '/'
else:
file_received_date = datetime.datetime.strptime(str(start_date + timedelta(day)), '%Y-%m-%d').\
strftime('%Y-%m-%d')
key_prefix = 'parsed/' + str(file_received_date) + '/'
s3 = aws_session.resource('s3')
s3_bucket = s3.Bucket(bucket_name)
filtered_objects = s3_bucket.objects.filter(Prefix=key_prefix)
return get_counts(filtered_objects, file_received_date)
def get_counts(filtered_objects, file_received_date):
zip_file_counter = 0
xml_file_counter = 0
uncompressed_xml_size = 0
for filtered_object in filtered_objects:
if filtered_object.key.endswith('.zip'):
zip_file_counter = zip_file_counter + 1
with io.BytesIO(filtered_object.get()["Body"].read()) as tf:
tf.seek(0)
with zipfile.ZipFile(tf, mode='r') as zip_file:
for xml_info in zip_file.infolist():
if ".xml" in str(xml_info):
xml_file_counter = xml_file_counter + 1
deflate_file_size = xml_info.file_size
uncompressed_xml_size = uncompressed_xml_size + deflate_file_size
count_list = [file_received_date, zip_file_counter, xml_file_counter, uncompressed_xml_size]
print(count_list)
return count_list
if __name__ == '__main__':
DQCountExtractor().count_extractor()
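# --- Hedged usage note (not part of the original script) ---
# All configuration is read from environment variables in __init__, so an
# illustrative invocation (all values and the script name are placeholders)
# might be:
#   export AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=... AWS_DEFAULT_REGION=...
#   export BUCKET=my-dq-bucket START_DATE=2020-01-01 END_DATE=2020-01-07 POOL_SIZE=4
#   python dq_count_extractor.py
# POOL_SIZE is only consulted when count_extractor() runs with
# should_multi_process=True (the default).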
|
#!/usr/bin/python
import click
import os
from sidr import default
from sidr import runfile
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
def validate_taxdump(value, method):
if (os.path.isfile("%s/%s" % (value, "names.dmp")) and
os.path.isfile("%s/%s" % (value, "nodes.dmp")) and
os.path.isfile("%s/%s" % (value, "merged.dmp")) and
os.path.isfile("%s/%s" % (value, "delnodes.dmp"))):
return value
else:
with click.Context(method) as ctx:
click.echo(ctx.get_help())
            raise click.BadParameter("Could not find names.dmp, nodes.dmp, merged.dmp, and delnodes.dmp in the taxdump directory; specify a path or make sure the files are present")
@click.group()
def cli():
"""
Analyzes genomic data and attempts to classify contigs using a machine learning framework.
    SIDR uses data from BLAST (or similar classifiers) to train a Decision Tree model to classify sequence data as either belonging to a target organism, or belonging to something else. This classification can be used to filter the data for later assembly.
To use SIDR, you will need to construct a preliminary assembly, align your reads back to that assembly, then use BLAST to classify the assembly contigs.
"""
pass
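# Hedged example invocation (illustrative only; file paths and the target taxon
# are placeholders). The flags correspond to the click options declared on the
# commands below:
#   sidr default -b reads_vs_asm.bam -f assembly.fasta -r blast_hits.txt \
#       -d /path/to/taxdump -t Nematoda -k keep.txt -x remove.txt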
@cli.command(name="default", context_settings=CONTEXT_SETTINGS)
@click.option('--bam', '-b', type=click.Path(exists=True), help="Alignment of reads to preliminary assembly, in BAM format.")
@click.option('--fasta', '-f', type=click.Path(exists=True), help="Preliminary assembly, in FASTA format.")
@click.option('--blastresults', '-r', type=click.Path(exists=True), help="Classification of preliminary assembly from BLAST (or similar tools).")
@click.option('--taxdump', '-d', type=click.Path(), default=os.environ.get('BLASTDB'), help="Location of the NCBI Taxonomy dump. Default is $BLASTDB.")
#@click.option('--model', '-m', 'modelOutput', type=click.Path(), default="", help="Location to save a graphical representation of the trained decision tree (optional). Output is in the form of a DOT file.")
@click.option('--output', '-o', type=click.Path(), default="%s/classifications.txt" % os.getcwd())
@click.option('--tokeep', '-k', type=click.Path(), default="", help="Location to save the contigs identified as the target organism (optional).")
@click.option('--toremove', '-x', type=click.Path(), default="", help="Location to save the contigs identified as not belonging to the target organism (optional).")
@click.option('--binary', is_flag=True, help="Use binary target/nontarget classification.")
@click.option('--target', '-t', help="The identity of the target organism at the chosen classification level. It is recommended to use the organism's phylum.")
@click.option('--level', '-l', default="phylum", help="The classification level to use when constructing the model. Default is 'phylum'.")
# @click.option('--verbose', '-v', count=True, help="Output more debugging options, repeat to increase verbosity (unimplemented).")
def default_runner(bam, fasta, blastresults, taxdump, output, tokeep, toremove, binary, target, level):
"""
Runs the default analysis using raw preassembly data.
"""
modelOutput = False
    validate_taxdump(taxdump, default_runner)
default.runAnalysis(bam, fasta, blastresults, taxdump, modelOutput, output, tokeep, toremove, binary, target, level)
@cli.command(name="runfile", context_settings=CONTEXT_SETTINGS)
@click.option('--infile', '-i', type=click.Path(exists=True), help="Tab-delimited input file.")
@click.option('--taxdump', '-d', type=click.Path(), default=os.environ.get('BLASTDB'), help="Location of the NCBI Taxonomy dump. Default is $BLASTDB.")
@click.option('--output', '-o', type=click.Path(), default="%s/classifications.txt" % os.getcwd())
#@click.option('--model', '-m', 'modelOutput', type=click.Path(), default="", help="Location to save a graphical representation of the trained decision tree (optional). Output is in the form of a DOT file.")
@click.option('--tokeep', '-k', type=click.Path(), default="", help="Location to save the contigs identified as the target organism (optional).")
@click.option('--toremove', '-x', type=click.Path(), default="", help="Location to save the contigs identified as not belonging to the target organism (optional).")
@click.option('--target', '-t', help="The identity of the target organism at the chosen classification level. It is recommended to use the organism's phylum.")
@click.option('--binary', is_flag=True, help="Use binary target/nontarget classification.")
@click.option('--level', '-l', default="phylum", help="The classification level to use when constructing the model. Default is 'phylum'.")
def runfile_runner(infile, taxdump, output, tokeep, toremove, binary, target, level):
"""
Runs a custom analysis using pre-computed data from BBMap or other sources.
Input data will be read for all variables which will be used to construct a Decision Tree model.
"""
modelOutput = False
validate_taxdump(taxdump, runfile_runner)
runfile.runAnalysis(taxdump, infile, level, modelOutput, output, tokeep, toremove, binary, target)
""" WIP
@cli.command(name="filter", context_settings=CONTEXT_SETTINGS)
@click.option('--tokeep', '-k', type=click.Path(), default="", help="File containing list of contigs from the alignment to keep.")
@click.option('--bam', '-b', type=click.Path(exists=True), help="Alignment of reads to preliminary assembly, in BAM format.")
@click.option('-i1', type=click.Path(exists=True), help="Right read fastq to extract reads from.")
@click.option('-i2', type=click.Path(exists=True), help="Left read fastq to extract reads from.")
@click.option('-o1', type=click.Path(), help="Right read fastq to extract reads to.")
@click.option('-o2', type=click.Path(), help="Left read fastq to extract reads to.")
def filter_runner(tokeep, bam, i1, i2, o1, o2):
"""
#Filters reads aligning to the given contigs.
"""
filterReads.runFilter(tokeep, bam, i1, i2, o1, o2)
"""
if __name__ == "__main__":  # TODO Setuptools
    cli(prog_name="sidr")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
from json import dumps
import datetime
import pandas as pd
import os
import random
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Update import DatasetSplit
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
dateNow = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
if __name__ == '__main__':
# parse args
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
# load dataset and split users
if args.dataset == 'mnist':
trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True, transform=trans_mnist)
dataset_test = datasets.MNIST('../data/mnist/', train=False, download=True, transform=trans_mnist)
# sample users
if args.iid:
# allocate the dataset index to users
dict_users = mnist_iid(dataset_train, args.num_users)
else:
dict_users = mnist_noniid(dataset_train, args.num_users)
elif args.dataset == 'cifar':
trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_train = datasets.CIFAR10('../data/cifar', train=True, download=True, transform=trans_cifar)
dataset_test = datasets.CIFAR10('../data/cifar', train=False, download=True, transform=trans_cifar)
if args.iid:
dict_users = cifar_iid(dataset_train, args.num_users)
else:
exit('Error: only consider IID setting in CIFAR10')
else:
exit('Error: unrecognized dataset')
img_size = dataset_train[0][0].shape
# build model
if args.model == 'cnn' and args.dataset == 'cifar':
net_glob = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_glob = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
len_in *= x
net_glob = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
print(net_glob)
net_glob.train()
# copy weights
w_glob = net_glob.state_dict()
tmp_glob = torch.load('./data/genesisGPUForCNN.pkl')
# training
loss_train = []
m = max(int(args.frac * args.num_users), 1) # args.frac is the fraction of users
idxs_users = np.random.choice(range(args.num_users), m, replace=False)
# malicious node select randomly
random.seed(10)
maliciousN = random.sample(idxs_users.tolist(), 2)
print("The malicious nodes are " + str(maliciousN))
workerIterIdx = {}
for item in idxs_users:
workerIterIdx[item] = 0
realEpoch = 0
currentEpoch = 0
acc_test_list = []
loss_test_list = []
acc_train_list = []
loss_train_list = []
while currentEpoch <= args.epochs:
currentEpoch += 1
w_fAvg = []
base_glob = tmp_glob
net_glob.load_state_dict(base_glob)
print('# of current epoch is ' + str(currentEpoch))
workerNow = np.random.choice(idxs_users, 1, replace=False).tolist()[0]
staleFlag = np.random.randint(-1,4,size=1)
print('The staleFlag of worker ' + str(workerNow) + ' is ' + str(staleFlag))
if staleFlag <= 4:
# judge the malicious node
if workerNow not in maliciousN:
local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[workerNow])
w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))
else:
w = torch.load('./data/genesisGPUForCNN.pkl', map_location=torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu'))
print('Training of malicious node device '+str(workerNow)+' in iteration '+str(currentEpoch)+' has done!')
# means that the alpha is 0.5
w_fAvg.append(copy.deepcopy(base_glob))
w_fAvg.append(copy.deepcopy(w))
tmp_glob = FedAvg(w_fAvg)
net_glob.load_state_dict(tmp_glob)
net_glob.eval()
acc_test, loss_test = test_img(net_glob, dataset_test, args)
# acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test_list.append(acc_test.cpu().numpy().tolist()/100)
loss_test_list.append(loss_test)
# acc_train_list.append(acc_train.cpu().numpy().tolist())
# loss_train_list.append(loss_train)
accDfTest = pd.DataFrame({'baseline':acc_test_list})
accDfTest.to_csv("D:\\ChainsFLexps\\asynFL\\normal-10users\\AsynFL-idd{}-{}-{}localEpochs-{}users-{}Rounds_ACC_{}.csv".format(args.iid, args.model, args.local_ep, str(int(float(args.frac)*100)), args.epochs, dateNow),index=False,sep=',')
lossDfTest = pd.DataFrame({'baseline':loss_test_list})
lossDfTest.to_csv("D:\\ChainsFLexps\\asynFL\\normal-10users\\AsynFL-idd{}-{}-{}localEpochs-{}users-{}Rounds_Loss_{}.csv".format(args.iid, args.model, args.local_ep, str(int(float(args.frac)*100)), args.epochs, dateNow),index=False,sep=',')
# accDfTrain = pd.DataFrame({'baseline':acc_train_list})
# accDfTrain.to_csv("D:\\ChainsFLexps\\asynFL\\AsynFL-Train-idd{}-{}-{}localEpochs-{}users-{}Rounds_ACC_{}.csv".format(args.iid, args.model, args.local_ep, str(int(float(args.frac)*100)), args.epochs, dateNow),index=False,sep=',')
# lossDfTrain = pd.DataFrame({'baseline':loss_train_list})
# lossDfTrain.to_csv("D:\\ChainsFLexps\\asynFL\\AsynFL-Train-idd{}-{}-{}localEpochs-{}users-{}Rounds_Loss_{}.csv".format(args.iid, args.model, args.local_ep, str(int(float(args.frac)*100)), args.epochs, dateNow),index=False,sep=',')
print('# of real epoch is ' + str(realEpoch))
print("Testing accuracy: {:.2f}".format(acc_test))
print("Testing loss: {:.2f}".format(loss_test))
workerIterIdx[workerNow] += 1
realEpoch += 1
|
from urllib.parse import quote_plus
from openai import util
from openai.api_resources.abstract.api_resource import APIResource
class DeletableAPIResource(APIResource):
@classmethod
def _cls_delete(cls, sid, **params):
url = "%s/%s" % (cls.class_url(), quote_plus(sid))
return cls._static_request("delete", url, **params)
@util.class_method_variant("_cls_delete")
def delete(self, **params):
self.refresh_from(self.request("delete", self.instance_url(), params))
return self
|
"""
Exercise 4
Create a list, x, consisting of the numbers [1, 2, 3, 4]. Then call the shuffle()
function, passing this list as an argument. You'll see that the numbers in x
have been shuffled. Note that the list is shuffled "in place." That is, the
original order is lost.
But what if you wanted to use this program in a card game? There, it's not
enough to simply output the shuffled list of integers. You'll also need a way
to map the integers back to the specific suit and rank of each card.
"""
import random
def initialize_deck():
suits = ["clubs", "diamonds", "hearts", "spades"]
ranks = [
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"jack",
"queen",
"king",
"ace",
]
return [(suit, rank) for suit in suits for rank in ranks]
def print_deck(deck):
for card in deck:
print("{0} of {1}".format(card[0], card[1]))
deck = initialize_deck()
print("Before shuffelig: ")
print_deck(deck)
random.shuffle(deck)
print("After shuffeling: ")
print_deck(deck)
|
"""Day 13 Part 2 of Advent of Code 2021"""
import sys
from pprint import PrettyPrinter
def process_input(input_file):
dots = []
folds = []
with open(input_file, encoding="utf8") as input_data:
current = True
while current:
current = input_data.readline()
if current == "\n":
break
dots.append(tuple([int(x) for x in current.split(",")]))
current = input_data.readline()
while current:
location = current.split()[-1].split("=")
fold = (location[0], int(location[1]))
folds.append(fold)
current = input_data.readline()
return dots, folds
def create_matrix(dots):
cols = max([x[0] for x in dots]) + 1
rows = max([x[1] for x in dots]) + 1
matrix = [[False] * cols for i in range(rows)]
for x, y in dots:
matrix[y][x] = True
return matrix
def create_fold(matrix, axis, position):
if axis == "x":
new_matrix = [row[:position] for row in matrix]
for i, j in zip(range(position), range(len(matrix[0]) - 1, position, -1)):
for y in range(len(matrix)):
new_matrix[y][i] = new_matrix[y][i] or matrix[y][j]
else:
new_matrix = matrix[:position]
for i, j in zip(range(position), range(len(matrix) - 1, position, -1)):
for x in range(len(matrix[0])):
new_matrix[i][x] = matrix[i][x] or matrix[j][x]
return new_matrix
def count_dots(matrix):
counter = 0
for row in matrix:
for value in row:
if value:
counter += 1
return counter
def print_matrix(matrix):
for row in matrix:
line = ""
for col in row:
if col:
line += "#"
else:
line += " "
print(line)
def main(input_file):
dots, folds = process_input(input_file)
matrix = create_matrix(dots)
for fold in folds:
axis, position = fold
matrix = create_fold(matrix, axis, position)
print_matrix(matrix)
if __name__ == "__main__":
main(sys.argv[1])
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, Event, State
from flask import Flask
import flask
import webbrowser
import os
import pdb
#------------------------------------------------------------------------------------------------------------------------
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
#------------------------------------------------------------------------------------------------------------------------
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
@app.server.route('/PROJECTS/<path:urlpath>')
def serve_static_file(urlpath):
print("--- serve_static_file")
print("urlpath: %s" % urlpath)
fullPath = os.path.join("PROJECTS", urlpath)
dirname = os.path.dirname(fullPath)
filename = os.path.basename(fullPath)
print("about to send %s, %s" % (dirname, filename))
return flask.send_from_directory(dirname, filename)
#------------------------------------------------------------------------------------------------------------------------
app.scripts.config.serve_locally = True
menu = dcc.Dropdown(id="menu",
options=[
{'label': 'fubar', 'value': 'fubar'},
{'label': 'fubar2', 'value': 'fubar2'}
],
value="fubar",
style={"width": "100px", "margin": "5px"}
)
app.layout = html.Div([
html.Button('Display IJAL Text', id='displayIJALTextButton', style={"margin": "5px", "margin-top": "0px"}),
menu,
html.Br(),
html.Iframe(id="storyIFrame", width=1200, height=800)]
)
#------------------------------------------------------------------------------------------------------------------------
@app.callback(
Output('storyIFrame', 'src'),
[Input('displayIJALTextButton', 'n_clicks'),
Input('menu', 'value')]
)
def displayText(n_clicks, storyName):
if n_clicks is None:
return("")
print("storyName: %s" % storyName)
if(storyName == "fubar"):
return('/PROJECTS/fubar/test.html')
elif(storyName=="fubar2"):
return('/PROJECTS/fubar2/daylight/test.html')
#------------------------------------------------------------------------------------------------------------------------
server = app.server
if __name__ == "__main__":
webbrowser.open('http://127.0.0.1:8068/', new=0, autoraise=True)
app.run_server(host='0.0.0.0', port=8068)
#------------------------------------------------------------------------------------------------------------------------
|
#!/usr/bin/python3
#-*- coding: utf-8 -*-
#====================================================================
# author: Chancerel Codjovi (aka codrelphi)
# date: 2019-09-12
# source: https://www.hackerrank.com/challenges/py-hello-world/problem
#=====================================================================
print("Hello, World!")
|
# This file is part of ReACORN, a reimplementation by Élie Michel of the ACORN
# paper by Martel et al. published at SIGGRAPH 2021.
#
# Copyright (c) 2021 -- Télécom Paris (Élie Michel <elie.michel@telecom-paris.fr>)
#
# The MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The Software is provided “as is”, without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and non-infringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising
# from, out of or in connection with the software or the use or other dealings
# in the Software.
from argparse import ArgumentParser
from datetime import datetime
import torch
parser = ArgumentParser(description=
'''Display info from a checkpoint created by run_acorn.py''')
# Basic arguments
parser.add_argument('checkpoint', type=str, help='filename of the checkpoint to display')
def main(args):
checkpoint = torch.load(args.checkpoint)
if "timestamp" in checkpoint:
dt = datetime.fromtimestamp(checkpoint["timestamp"])
checkpoint_datetime = dt.strftime("%m/%d/%Y, %H:%M:%S")
else:
checkpoint_datetime = "(unknown date time)"
cmd_args = checkpoint.get("cmd_args", {})
print(f"Checkpoint taken on {checkpoint_datetime}:")
for k, v in cmd_args.items():
print(f" - {k} = {v}")
encoder_state = checkpoint["model_state_dict"]["encoder"]
print(encoder_state['B'])
if __name__ == "__main__":
args = parser.parse_args()
main(args)
|
"""
Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
import urllib3
from virl2_client import ClientLibrary
import logging
import os, sys
# env is in parent directory
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import env
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s')
log = logging.getLogger(__name__)
# Read environment variables
cml_server_url = env.config['CML_SERVER_URL']
cml_username = env.config['CML_USERNAME']
cml_password = env.config['CML_PASSWORD']
LAB_TITLE = env.config['LAB_NAME']
IMAGE_DEFINITION = env.config['IMAGE_DEFINITION']
log.info("LOGGING INFO: Successfully read in the environment variables")
# Connect with the CML API
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
client = ClientLibrary(cml_server_url, cml_username, cml_password, ssl_verify=False, raise_for_auth_failure=True, allow_http=True)
log.info("LOGGING INFO: Successfully connected with CML through the API")
# Read in the config files
config_files = os.listdir(path='./config')
config_files = [file for file in config_files if ".txt" in file]
log.info("LOGGING INFO: Successfully read in the config files")
routers = []
for file in config_files:
routers.append(file[:-4])
# Create a new lab in CML
lab = client.create_lab(title=LAB_TITLE)
log.info("LOGGING INFO: Successfully created the lab in CML")
# Create the nodes in the lab
coordinates = [(0,0), (200, 0), (200,200), (0, 200)]
coordinates_counter = 0
for router in routers:
x, y = coordinates[coordinates_counter]
lab.create_node(label=router, node_definition='csr1000v', populate_interfaces=8, x=x, y=y)
coordinates_counter += 1
log.info("LOGGING INFO: Successfully created the nodes in the lab")
# Configure the nodes in the lab
for node in lab.nodes():
config = open(f"./config/{node.label}.txt", 'r').read()
node.config = config
node.image_definition = IMAGE_DEFINITION
log.info("LOGGING INFO: Successfully configured the nodes in the lab")
# Connect the nodes to each other
interface_pairs = [('010', '020'), ('020', '030'), ('030', '040'), ('040', '010')]
for intf1, intf2 in interface_pairs:
for interface in lab.interfaces():
if intf1 in interface.node.label and '2' in interface.label:
interface1 = interface
if intf2 in interface.node.label and '3' in interface.label:
interface2 = interface
lab.create_link(interface1, interface2)
log.info("LOGGING INFO: Successfully created links between the nodes")
#get lab testbed
pyats_testbed = lab.get_pyats_testbed()
# Write the YAML testbed out to a file
with open("lab_testbed.yaml", "w") as f:
f.write(pyats_testbed)
log.info("LOGGING INFO: Successfully obtained a testbed file")
|
from __future__ import print_function
# this is written to retrieve IMPROVE data, concatenate it, and add it to a pandas DataFrame for usage
from builtins import zip
from builtins import object
from datetime import datetime
import pandas as pd
from numpy import NaN, array
class improve(object):
def __init__(self):
self.datestr = []
self.df = None
self.se_states = array(['AL', 'FL', 'GA', 'MS', 'NC', 'SC', 'TN', 'VA', 'WV'], dtype='|S2')
self.ne_states = array(['CT', 'DE', 'DC', 'ME', 'MD', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT'], dtype='|S2')
self.nc_states = array(['IL', 'IN', 'IA', 'KY', 'MI', 'MN', 'MO', 'OH', 'WI'], dtype='|S2')
self.sc_states = array(['AR', 'LA', 'OK', 'TX'], dtype='|S2')
self.r_states = array(['AZ', 'CO', 'ID', 'KS', 'MT', 'NE', 'NV', 'NM', 'ND', 'SD', 'UT', 'WY'], dtype='|S2')
self.p_states = array(['CA', 'OR', 'WA'], dtype='|S2')
def open_file(self, fname, output=''):
""" This assumes that you have downloaded the data from
http://views.cira.colostate.edu/fed/DataWizard/Default.aspx
The data is the IMPROVE Aerosol dataset
Any number of sites
Parameters included are All
Fields include Dataset,Site,Date,Parameter,POC,Data_value,Unit,Latitude,Longitude,State,EPA Site Code
Options are delimited ',' data only and normalized skinny format
Parameters
----------
fname : type
Description of parameter `fname`.
output : type
Description of parameter `output` (the default is '').
Returns
-------
type
Description of returned object.
"""
self.df = pd.read_csv(fname, delimiter=',', parse_dates=[2], infer_datetime_format=True)
self.df.rename(columns={'EPACode': 'SCS'}, inplace=True)
self.df.rename(columns={'Value2': 'Obs'}, inplace=True)
self.df.rename(columns={'State': 'State_Name'}, inplace=True)
self.df.rename(columns={'ParamCode': 'Species'}, inplace=True)
self.df.rename(columns={'SiteCode': 'Site_Code'}, inplace=True)
self.df.rename(columns={'Unit': 'Units'}, inplace=True)
self.df.rename(columns={'Date': 'datetime'}, inplace=True)
self.df.drop('Dataset', axis=1, inplace=True)
print('Adding in some Meta-Data')
print('Calculating local time')
self.df = self.get_local_datetime(self.df)
self.df = self.df.copy().drop_duplicates()
self.df.dropna(subset=['Species'], inplace=True)
self.df.Species.loc[self.df.Species == 'MT'] = 'PM10'
self.df.Species.loc[self.df.Species == 'MF'] = 'PM2.5'
self.df.datetime = [datetime.strptime(i, '%Y%m%d') for i in self.df.datetime]
if output == '':
output = 'IMPROVE.hdf'
print('Outputing data to: ' + output)
self.df.Obs.loc[self.df.Obs < 0] = NaN
self.df.dropna(subset=['Obs'], inplace=True)
self.df.to_hdf(output, 'df', format='fixed', complevel=9, complib='zlib')
def load_hdf(self, fname, dates):
"""Short summary.
Parameters
----------
fname : type
Description of parameter `fname`.
dates : type
Description of parameter `dates`.
Returns
-------
type
Description of returned object.
"""
self.df = pd.read_hdf(fname)
        self.get_date_range(dates)
def get_date_range(self, dates):
"""Short summary.
Parameters
----------
dates : type
Description of parameter `dates`.
Returns
-------
type
Description of returned object.
"""
self.dates = dates
con = (self.df.datetime >= dates[0]) & (self.df.datetime <= dates[-1])
self.df = self.df.loc[con]
def set_daterange(self, begin='', end=''):
"""Short summary.
Parameters
----------
begin : type
Description of parameter `begin` (the default is '').
end : type
Description of parameter `end` (the default is '').
Returns
-------
type
Description of returned object.
"""
dates = pd.date_range(start=begin, end=end, freq='H').values.astype('M8[s]').astype('O')
self.dates = dates
def get_local_datetime(self, df):
"""Short summary.
Parameters
----------
df : type
Description of parameter `df`.
Returns
-------
type
Description of returned object.
"""
import pytz
from numpy import unique
from tzwhere import tzwhere
tz = tzwhere.tzwhere(forceTZ=True, shapely=True)
df.dropna(subset=['Latitude', 'Longitude'], inplace=True)
lons, index = unique(df.Longitude.values, return_index=True)
lats = df.Latitude.values[index]
dates = df.datetime.values[index].astype('M8[s]').astype('O')
df['utcoffset'] = 0
for i, j, d in zip(lons, lats, dates):
l = tz.tzNameAt(j, i, forceTZ=True)
timezone = pytz.timezone(l)
n = d.replace(tzinfo=pytz.UTC)
r = d.replace(tzinfo=timezone)
rdst = timezone.normalize(r)
offset = (rdst.utcoffset()).total_seconds() // 3600
df['utcoffset'].loc[df.Longitude == i] = offset
df['datetime_local'] = df.datetime + pd.to_timedelta(df.utcoffset, 'H')
return df
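# --- Hedged usage sketch (not part of the original module) ---
# Assumes an IMPROVE Aerosol export downloaded from the FED DataWizard in the
# skinny, comma-delimited format described in open_file(); the file name and
# date range below are placeholders.
#
# imp = improve()
# imp.open_file('improve_aerosol.csv', output='IMPROVE.hdf')
# imp.set_daterange(begin='2018-01-01', end='2018-01-31')
# imp.get_date_range(imp.dates)
# print(imp.df.head())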
|
import tyoudao
import tgoogle
f = open('dic.txt', encoding='utf-8')
fi = f.read()
f.close()
ls = eval(fi)
for li in ls[:3000-1385]:
try:
s1 = tgoogle.get(li[0])
s2 = tyoudao.get(li[0])
except KeyboardInterrupt:
quit()
except:
        print('[Error] {}'.format(li[0]))
continue
s = '【【a1】】{}【【a2】】| 【【a3】】{}【【a4】】\n【【a5】】{}【【a6】】\n{}【【a7】】\n【【a8】】{}【【a9】】\n【【a10】】{}【【a11】】'.format(li[1], s2[0], s1[0], s2[1], s2[2], s1[1])
print(s)
f = open('english.txt', 'a+', encoding='utf-8')
f.write(s + '\n')
f.close()
|
import gzip
import json
import pickle
input_file = 'ner_wikiner/corpus/dev.spacy'
# with open(input_file, 'rb') as fp:
# TRAIN_DATA = json.load(fp)
# for example in TRAIN_DATA:
# print(example)
# exit()
with open(input_file, 'rb') as f:
TRAIN_DATA = json.load(f)
|
from .platform_list import PlatformListAPIView
from .platform_detail import PlatformDetail
from .platform_create import PlatformCreate
|
import numpy as np
import cv2
from Arduino import Arduino
import time
# Python-Arduino Command API
board = Arduino("9600", port="/dev/cu.usbmodem14301")
board.pinMode(13, "OUTPUT")
cap = cv2.VideoCapture(0)
# OpenCV API
while(True):
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if gray[200][200] < 50:
board.digitalWrite(13, "HIGH")
else:
board.digitalWrite(13, "LOW")
cv2.imshow('frame', gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2017-2018, Leo Moll
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -- Imports ------------------------------------------------
from __future__ import unicode_literals # ,absolute_import, division
# from future import standard_library
# from builtins import *
# standard_library.install_aliases()
import os
import re
import sys
import time
import urlparse
import datetime
import xbmcgui
import xbmcvfs
import xbmcplugin
import resources.lib.mvutils as mvutils
from contextlib import closing
from resources.lib.kodi.KodiAddon import KodiPlugin
from resources.lib.kodi.KodiUI import KodiBGDialog
from resources.lib.store import Store
from resources.lib.notifier import Notifier
from resources.lib.settings import Settings
from resources.lib.filmui import FilmUI
from resources.lib.channelui import ChannelUI
from resources.lib.initialui import InitialUI
from resources.lib.showui import ShowUI
from resources.lib.ttml2srt import ttml2srt
# -- Classes ------------------------------------------------
class MediathekView( KodiPlugin ):
def __init__( self ):
super( MediathekView, self ).__init__()
self.settings = Settings()
self.notifier = Notifier()
self.db = Store( self.getNewLogger( 'Store' ), self.notifier, self.settings )
def showMainMenu( self ):
# Search
self.addFolderItem( 30901, { 'mode': "search" } )
# Search all
self.addFolderItem( 30902, { 'mode': "searchall" } )
# Browse livestreams
self.addFolderItem( 30903, { 'mode': "livestreams" } )
# Browse recently added
self.addFolderItem( 30904, { 'mode': "recent", 'channel': 0 } )
# Browse recently added by channel
self.addFolderItem( 30905, { 'mode': "recentchannels" } )
# Browse by Initial->Show
self.addFolderItem( 30906, { 'mode': "initial", 'channel': 0 } )
# Browse by Channel->Initial->Shows
self.addFolderItem( 30907, { 'mode': "channels" } )
# Database Information
self.addActionItem( 30908, { 'mode': "action-dbinfo" } )
# Manual database update
if self.settings.updmode == 1 or self.settings.updmode == 2:
self.addActionItem( 30909, { 'mode': "action-dbupdate" } )
self.endOfDirectory()
self._check_outdate()
def showSearch( self, extendedsearch = False ):
settingid = 'lastsearch2' if extendedsearch is True else 'lastsearch1'
headingid = 30902 if extendedsearch is True else 30901
# are we returning from playback ?
searchText = self.addon.getSetting( settingid )
if len( searchText ) > 0:
# restore previous search
self.db.Search( searchText, FilmUI( self ), extendedsearch )
else:
# enter search term
searchText = self.notifier.GetEnteredText( '', headingid )
if len( searchText ) > 2:
if self.db.Search( searchText, FilmUI( self ), extendedsearch ) > 0:
self.addon.setSetting( settingid, searchText )
else:
self.info( 'The following ERROR can be ignored. It is caused by the architecture of the Kodi Plugin Engine' )
self.endOfDirectory( False, cacheToDisc = True )
# self.showMainMenu()
def showDbInfo( self ):
info = self.db.GetStatus()
heading = self.language( 30907 )
infostr = self.language( {
'NONE': 30941,
'UNINIT': 30942,
'IDLE': 30943,
'UPDATING': 30944,
'ABORTED': 30945
}.get( info['status'], 30941 ) )
infostr = self.language( 30965 ) % infostr
totinfo = self.language( 30971 ) % (
info['tot_chn'],
info['tot_shw'],
info['tot_mov']
)
updatetype = self.language( 30972 if info['fullupdate'] > 0 else 30973 )
if info['status'] == 'UPDATING' and info['filmupdate'] > 0:
updinfo = self.language( 30967 ) % (
updatetype,
datetime.datetime.fromtimestamp( info['filmupdate'] ).strftime( '%Y-%m-%d %H:%M:%S' ),
info['add_chn'],
info['add_shw'],
info['add_mov']
)
elif info['status'] == 'UPDATING':
updinfo = self.language( 30968 ) % (
updatetype,
info['add_chn'],
info['add_shw'],
info['add_mov']
)
elif info['lastupdate'] > 0 and info['filmupdate'] > 0:
updinfo = self.language( 30969 ) % (
updatetype,
datetime.datetime.fromtimestamp( info['lastupdate'] ).strftime( '%Y-%m-%d %H:%M:%S' ),
datetime.datetime.fromtimestamp( info['filmupdate'] ).strftime( '%Y-%m-%d %H:%M:%S' ),
info['add_chn'],
info['add_shw'],
info['add_mov'],
info['del_chn'],
info['del_shw'],
info['del_mov']
)
elif info['lastupdate'] > 0:
updinfo = self.language( 30970 ) % (
updatetype,
datetime.datetime.fromtimestamp( info['lastupdate'] ).strftime( '%Y-%m-%d %H:%M:%S' ),
info['add_chn'],
info['add_shw'],
info['add_mov'],
info['del_chn'],
info['del_shw'],
info['del_mov']
)
else:
updinfo = self.language( 30966 )
xbmcgui.Dialog().textviewer(
heading,
infostr + '\n\n' +
totinfo + '\n\n' +
updinfo
)
def doDownloadFilm( self, filmid, quality ):
if self.settings.downloadpath:
film = self.db.RetrieveFilmInfo( filmid )
if film is None:
# film not found - should never happen
return
# check if the download path is reachable
if not xbmcvfs.exists( self.settings.downloadpath ):
self.notifier.ShowError( self.language( 30952 ), self.language( 30979 ) )
return
# get the best url
if quality == '0' and film.url_video_sd:
videourl = film.url_video_sd
elif quality == '2' and film.url_video_hd:
videourl = film.url_video_hd
else:
videourl = film.url_video
# prepare names
showname = mvutils.cleanup_filename( film.show )[:64]
filestem = mvutils.cleanup_filename( film.title )[:64]
extension = os.path.splitext( videourl )[1]
if not extension:
extension = u'.mp4'
if not filestem:
filestem = u'Film-{}'.format( film.id )
if not showname:
showname = filestem
# prepare download directory and determine episode number
dirname = self.settings.downloadpath + showname + '/'
episode = 1
if xbmcvfs.exists( dirname ):
( _, epfiles, ) = xbmcvfs.listdir( dirname )
for epfile in epfiles:
match = re.search( '^.* [eE][pP]([0-9]*)\.[^/]*$', epfile )
if match and len( match.groups() ) > 0:
if episode <= int( match.group(1) ):
episode = int( match.group(1) ) + 1
else:
xbmcvfs.mkdir( dirname )
# prepare resulting filenames
fileepi = filestem + u' - EP%04d' % episode
movname = dirname + fileepi + extension
srtname = dirname + fileepi + u'.srt'
ttmname = dirname + fileepi + u'.ttml'
nfoname = dirname + fileepi + u'.nfo'
# download video
bgd = KodiBGDialog()
bgd.Create( self.language( 30974 ), fileepi + extension )
try:
bgd.Update( 0 )
mvutils.url_retrieve_vfs( videourl, movname, bgd.UrlRetrieveHook )
bgd.Close()
self.notifier.ShowNotification( 30960, self.language( 30976 ).format( videourl ) )
except Exception as err:
bgd.Close()
self.error( 'Failure downloading {}: {}', videourl, err )
self.notifier.ShowError( 30952, self.language( 30975 ).format( videourl, err ) )
# download subtitles
if film.url_sub:
bgd = KodiBGDialog()
bgd.Create( 30978, fileepi + u'.ttml' )
try:
bgd.Update( 0 )
mvutils.url_retrieve_vfs( film.url_sub, ttmname, bgd.UrlRetrieveHook )
try:
ttml2srt( xbmcvfs.File( ttmname, 'r' ), xbmcvfs.File( srtname, 'w' ) )
except Exception as err:
self.info( 'Failed to convert to srt: {}', err )
bgd.Close()
except Exception as err:
bgd.Close()
self.error( 'Failure downloading {}: {}', film.url_sub, err )
# create NFO Files
self._make_nfo_files( film, episode, dirname, nfoname, videourl )
else:
self.notifier.ShowError( 30952, 30958 )
def doEnqueueFilm( self, filmid ):
self.info( 'Enqueue {}', filmid )
def _check_outdate( self, maxage = 172800 ):
if self.settings.updmode != 1 and self.settings.updmode != 2:
# no check with update disabled or update automatic
return
if self.db is None:
# should never happen
self.notifier.ShowOutdatedUnknown()
return
status = self.db.GetStatus()
if status['status'] == 'NONE' or status['status'] == 'UNINIT':
# should never happen
self.notifier.ShowOutdatedUnknown()
return
elif status['status'] == 'UPDATING':
# great... we are updating. nuthin to show
return
# lets check how old we are
tsnow = int( time.time() )
tsold = int( status['lastupdate'] )
if tsnow - tsold > maxage:
self.notifier.ShowOutdatedKnown( status )
def _make_nfo_files( self, film, episode, dirname, filename, videourl ):
# create NFO files
if not xbmcvfs.exists( dirname + 'tvshow.nfo' ):
try:
with closing( xbmcvfs.File( dirname + 'tvshow.nfo', 'w' ) ) as file:
file.write( b'<tvshow>\n' )
file.write( b'<id></id>\n' )
file.write( bytearray( '\t<title>{}</title>\n'.format( film.show ), 'utf-8' ) )
file.write( bytearray( '\t<sorttitle>{}</sorttitle>\n'.format( film.show ), 'utf-8' ) )
# TODO: file.write( bytearray( '\t<year>{}</year>\n'.format( 2018 ), 'utf-8' ) )
file.write( bytearray( '\t<studio>{}</studio>\n'.format( film.channel ), 'utf-8' ) )
file.write( b'</tvshow>\n' )
except Exception as err:
self.error( 'Failure creating show NFO file for {}: {}', videourl, err )
try:
with closing( xbmcvfs.File( filename, 'w' ) ) as file:
file.write( b'<episodedetails>\n' )
file.write( bytearray( '\t<title>{}</title>\n'.format( film.title ), 'utf-8' ) )
file.write( b'\t<season>1</season>\n' )
file.write( b'\t<autonumber>1</autonumber>\n' )
file.write( bytearray( '\t<episode>{}</episode>\n'.format( episode ), 'utf-8' ) )
file.write( bytearray( '\t<showtitle>{}</showtitle>\n'.format( film.show ), 'utf-8' ) )
file.write( bytearray( '\t<plot>{}</plot>\n'.format( film.description ), 'utf-8' ) )
file.write( bytearray( '\t<aired>{}</aired>\n'.format( film.aired ), 'utf-8' ) )
if film.seconds > 60:
file.write( bytearray( '\t<runtime>{}</runtime>\n'.format( int( film.seconds / 60 ) ), 'utf-8' ) )
                file.write( bytearray( '\t<studio>{}</studio>\n'.format( film.channel ), 'utf-8' ) )
file.write( b'</episodedetails>\n' )
except Exception as err:
self.error( 'Failure creating episode NFO file for {}: {}', videourl, err )
def Init( self ):
self.args = urlparse.parse_qs( sys.argv[2][1:] )
self.db.Init()
if self.settings.HandleFirstRun():
# TODO: Implement Issue #16
pass
def Do( self ):
# save last activity timestamp
self.settings.ResetUserActivity()
# process operation
mode = self.args.get( 'mode', None )
if mode is None:
self.showMainMenu()
elif mode[0] == 'search':
self.showSearch()
elif mode[0] == 'searchall':
self.showSearch( extendedsearch = True )
elif mode[0] == 'livestreams':
self.db.GetLiveStreams( FilmUI( self, [ xbmcplugin.SORT_METHOD_LABEL ] ) )
elif mode[0] == 'recent':
channel = self.args.get( 'channel', [0] )
self.db.GetRecents( channel[0], FilmUI( self ) )
elif mode[0] == 'recentchannels':
self.db.GetRecentChannels( ChannelUI( self, nextdir = 'recent' ) )
elif mode[0] == 'channels':
self.db.GetChannels( ChannelUI( self, nextdir = 'shows' ) )
elif mode[0] == 'action-dbinfo':
self.showDbInfo()
elif mode[0] == 'action-dbupdate':
self.settings.TriggerUpdate()
self.notifier.ShowNotification( 30963, 30964, time = 10000 )
elif mode[0] == 'initial':
channel = self.args.get( 'channel', [0] )
self.db.GetInitials( channel[0], InitialUI( self ) )
elif mode[0] == 'shows':
channel = self.args.get( 'channel', [0] )
initial = self.args.get( 'initial', [None] )
self.db.GetShows( channel[0], initial[0], ShowUI( self ) )
elif mode[0] == 'films':
show = self.args.get( 'show', [0] )
self.db.GetFilms( show[0], FilmUI( self ) )
elif mode[0] == 'download':
filmid = self.args.get( 'id', [0] )
quality = self.args.get( 'quality', [1] )
self.doDownloadFilm( filmid[0], quality[0] )
elif mode[0] == 'enqueue':
self.doEnqueueFilm( self.args.get( 'id', [0] )[0] )
# cleanup saved searches
if mode is None or mode[0] != 'search':
self.addon.setSetting( 'lastsearch1', '' )
if mode is None or mode[0] != 'searchall':
self.addon.setSetting( 'lastsearch2', '' )
def Exit( self ):
self.db.Exit()
# -- Main Code ----------------------------------------------
if __name__ == '__main__':
addon = MediathekView()
addon.Init()
addon.Do()
addon.Exit()
del addon
|
"""Resources module."""
import abc
from typing import TypeVar, Generic, Optional
T = TypeVar('T')
class Resource(Generic[T], metaclass=abc.ABCMeta):
@abc.abstractmethod
def init(self, *args, **kwargs) -> Optional[T]:
...
def shutdown(self, resource: Optional[T]) -> None:
...
class AsyncResource(Generic[T], metaclass=abc.ABCMeta):
@abc.abstractmethod
async def init(self, *args, **kwargs) -> Optional[T]:
...
async def shutdown(self, resource: Optional[T]) -> None:
...
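# --- Hedged example (not part of the original module) ---
# A minimal concrete Resource illustrating the intended contract: init() builds
# the managed object and shutdown() releases it. StringBufferResource is an
# illustrative name, not part of any existing API.
import io
class StringBufferResource(Resource[io.StringIO]):
    def init(self, *args, **kwargs) -> Optional[io.StringIO]:
        # Create and return the managed object.
        return io.StringIO()
    def shutdown(self, resource: Optional[io.StringIO]) -> None:
        # Release the managed object if it was created.
        if resource is not None:
            resource.close()
if __name__ == '__main__':
    res = StringBufferResource()
    buf = res.init()
    buf.write('hello')
    res.shutdown(buf)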
|
from django.contrib.gis.db.models.sql.conversion import AreaField, DistanceField, GeomField, GMLField
__all__ = [
'AreaField', 'DistanceField', 'GeomField', 'GMLField'
]
|
# 4K
you.shape = 'polygon'
color.type = 'mono'
for tear in me.face:
me.pixel /= 2
print('A required audio driver is missing.')
me.knock('floppy disk', target=screen)
screen.resolution = 3840 * 2160
|
# -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_
class A(object):
pass
class B(A, np.float64):
pass
class C(B):
pass
class D(C, B):
pass
class B0(np.float64, A):
pass
class C0(B0):
pass
class TestInherit(object):
def test_init(self):
x = B(1.0)
assert_(str(x) == '1.0')
y = C(2.0)
assert_(str(y) == '2.0')
z = D(3.0)
assert_(str(z) == '3.0')
def test_init2(self):
x = B0(1.0)
assert_(str(x) == '1.0')
y = C0(2.0)
assert_(str(y) == '2.0')
class TestCharacter(object):
def test_char_radd(self):
# GH issue 9620, reached gentype_add and raise TypeError
np_s = np.string_('abc')
np_u = np.unicode_('abc')
s = b'def'
u = u'def'
assert_(np_s.__radd__(np_s) is NotImplemented)
assert_(np_s.__radd__(np_u) is NotImplemented)
assert_(np_s.__radd__(s) is NotImplemented)
assert_(np_s.__radd__(u) is NotImplemented)
assert_(np_u.__radd__(np_s) is NotImplemented)
assert_(np_u.__radd__(np_u) is NotImplemented)
assert_(np_u.__radd__(s) is NotImplemented)
assert_(np_u.__radd__(u) is NotImplemented)
assert_(s + np_s == b'defabc')
assert_(u + np_u == u'defabc')
class Mystr(str, np.generic):
# would segfault
pass
ret = s + Mystr('abc')
assert_(type(ret) is type(s))
def test_char_repeat(self):
np_s = np.string_('abc')
np_u = np.unicode_('abc')
res_s = b'abc' * 5
res_u = u'abc' * 5
assert_(np_s * 5 == res_s)
assert_(np_u * 5 == res_u)
|
from Observer import observer
import time
class weatherMonitor(observer):
def __init__(self, client, location):
self.client = client
self.location = location
self.latest_timestamp = 0
        self.data = []
self.content = ''
def Update(self):
pass
def set_timestamp(self):
self.latest_timestamp = time.time()
|
# Copyright 2020 The GenoML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pandas as pd
from genoml.continuous import supervised
def main(run_prefix, max_iter, cv_count):
# TUNING
# Create a dialogue with the user
print("Here is some basic info on the command you are about to run.")
print("CLI argument info...")
print(f"Working with the dataset and best model corresponding to prefix {run_prefix} the timestamp from the merge is the prefix in most cases.")
print(f"Your maximum number of tuning iterations is {max_iter} and if you are concerned about runtime, make this number smaller.")
print(f"You are running {cv_count} rounds of cross-validation, and again... if you are concerned about runtime, make this number smaller.")
print("Give credit where credit is due, for this stage of analysis we use code from the great contributors to python packages: argparse, xgboost, sklearn, pandas, numpy, time, matplotlib and seaborn.")
print("As a note, in all exported probabilities and other graphics, case status is treated as a 0 or 1, with 1 representing a positive case.")
print("")
infile_h5 = run_prefix + ".dataForML.h5"
df = pd.read_hdf(infile_h5, key = "dataForML")
y_tune = df.PHENO
X_tune = df.drop(columns=['PHENO'])
IDs_tune = X_tune.ID
X_tune = X_tune.drop(columns=['ID'])
best_algo_name_in = run_prefix + '.best_algorithm.txt'
best_algo_df = pd.read_csv(best_algo_name_in, header=None, index_col=False)
best_algo = str(best_algo_df.iloc[0,0])
# Communicate to the user the best identified algorithm
print(f"From previous analyses in the training phase, we've determined that the best algorithm for this application is {best_algo}... so let's tune it up and see what gains we can make!")
# Tuning
## This calls on the functions made in the tune class (tuning.py) at the genoml.continuous.supervised
model_tune = supervised.tune(df, run_prefix, max_iter, cv_count)
model_tune.select_tuning_parameters() # Returns algo, hyperparameters, and scoring_metric
model_tune.apply_tuning_parameters() # Randomized search with CV to tune
model_tune.report_tune() # Summary of the top 10 iterations of the hyperparameter tune
model_tune.summarize_tune() # Summary of the cross-validation
    model_tune.compare_performance() # Compares tuned performance to baseline
model_tune.export_tuned_data() # Export the newly tuned predictions
model_tune.export_tune_regression() # Export the tuned and fitted regression model
print("")
print("End of tuning stage with GenoML.")
print("")
|
from contextlib import contextmanager
from logging import getLogger
from pathlib import Path, PurePath
from typing import Dict, Iterator, Union, overload, Set, List
from modularconfig.errors import ConfigNotFoundError, ConfigFileNotFoundError
from modularconfig.loaders import load_file
logger = getLogger(__name__)
# loaded objects:
# a tree mocking the filesystem starting from _common_configs_path
_common_configs_path: Union[Path, None] = None
_configs: Union[Dict[str, object], None] = None
_loaded_paths: Set[Path] = set()
# config base directory
_config_directory: Path = Path.cwd()
# --- Path Management ---
def _split_real_file(config: PurePath) -> Path:
"""Return the file or directory containing the config (first part of path) and the remaining attributes
>>> from tempfile import NamedTemporaryFile
>>> with NamedTemporaryFile() as fil:
... print(_split_real_file(PurePath(fil.name, "foo/bar")) == Path(fil.name))
True
    If the path refers to a directory, the directory itself is returned
>>> from tempfile import TemporaryDirectory
>>> with TemporaryDirectory() as dir:
... print(_split_real_file(PurePath(dir)) == Path(dir))
True
Directories can't contain values if not inside files:
>>> with TemporaryDirectory() as dir:
... try:
... _split_real_file(PurePath(dir, "foo/bar"))
... except ConfigFileNotFoundError:
... print(True)
True
"""
existing_file = Path(config)
if existing_file.exists() and existing_file.is_dir():
return existing_file
while not existing_file.exists(): # until we don't find a true file, or directory
existing_file = existing_file.parent
if existing_file.is_file():
return existing_file
    raise ConfigFileNotFoundError(f"{config} does not refer to any file")
def _split_config_attributes(config: PurePath) -> PurePath:
"""Return the attributes from _common_configs_path
>>> _split_config_attributes(_common_configs_path.joinpath("foo/bar")) #doctest: +SKIP
PurePosixPath('foo/bar')
"""
return config.relative_to(_common_configs_path)
def _relative_to_config_directory(config):
config = _config_directory.joinpath(config).resolve() # make it relative to the prefix (still permit absolutes)
return config
@overload
def _common_path(path: Path, *paths: Path) -> Path:
...
@overload
def _common_path(path: PurePath, *paths: PurePath) -> PurePath:
...
def _common_path(path, *paths):
"""Find the longest common path
>>> _common_path(PurePath("/etc/base"), PurePath("/etc/common"))
PurePosixPath('/etc')
"""
common_path = path.anchor
if not all(common_path == other_path.anchor for other_path in paths):
raise OSError("The paths have different anchors")
common_path = PurePath(common_path)
for i, part in enumerate(path.parts):
if not all(other_path.parts[i] == part for other_path in paths):
break # we come to the splitting
common_path /= part # add to common path
assert all(common_path in other_path.parents for other_path in paths), \
"Found common path is not parent of some path"
return common_path
def _rebase(new_common_config_path: Path) -> None:
"""Change the _common_config_path and adapt _config in accord to the new base.
Can only go up in the directory tree"""
global _configs, _common_configs_path
assert new_common_config_path in _common_configs_path.parents, "Rebase can go only up in the directory tree"
while _common_configs_path != new_common_config_path:
_configs = {
_common_configs_path.name: _configs
}
_common_configs_path = _common_configs_path.parent
# --- File Loading ---
def _load_path(config_file: Path, reload: bool):
"""Load (or reload) the file/directory in the memory
>>> import tempfile; tmp_file = tempfile.mktemp()
>>> with open(tmp_file, "w") as out:
... out.write('{"answer": 42}')
14
>>> get(tmp_file)["answer"]
42
>>> with open(tmp_file, "w") as out:
... out.write('{"answer": 54}')
14
>>> _load_path(Path(tmp_file), reload=False)
>>> get(tmp_file)["answer"]
42
>>> _load_path(Path(tmp_file), reload=True)
>>> get(tmp_file)["answer"]
54
"""
global _loaded_paths, _common_configs_path, _configs
def recursive_load_path(config_file: Path):
"""Recursive reload all files"""
if (not reload) and (config_file in _loaded_paths):
return # this path is already loaded
config_attributes = _split_config_attributes(config_file)
if config_file.is_file():
with open(config_file, "br") as fil:
data = load_file(fil)
_set_attr(_configs, config_attributes, data)
else:
            assert config_file.is_dir(), "There are existing paths that are neither files nor directories?"
# _set_attr(_configs, config_attributes, {}) # create empty dir
# no empty dir is created, they will be done if a file is generated inside their sub-tree
for child in config_file.iterdir():
recursive_load_path(child) # recursive load
assert config_file.exists(), "This function should be called only on existing paths"
if _configs is None: # first loading
if config_file.is_dir():
_common_configs_path = config_file
else:
_common_configs_path = config_file.parent # the path is always a directory, so the file can be any file
_configs = {}
elif _common_configs_path not in config_file.parents:
_rebase(_common_path(config_file, _common_configs_path)) # moving so it can include the new configs
if (not reload) and _loaded_paths.intersection(config_file.parents):
        return  # already inside a loaded path (one of its parents was loaded)
recursive_load_path(config_file)
_loaded_paths = set(
path for path in _loaded_paths
if path not in config_file.parents # select only the one that wasn't loaded
)
_loaded_paths.add(config_file) # signing this path as loaded
# --- Recursive Get and Set ---
def _get_attr(obj: object, attrs: PurePath):
"""Recursively get attributes from an object.
>>> dct = {"baz":{"bar":42}}
>>> _get_attr(dct, PurePath("baz/bar")) # equivalent to dct["baz"]["bar"]
42
KeyError is raised if an attribute isn't found
>>> _get_attr(dct, PurePath("baz/foo/bac"))
Traceback (most recent call last):
...
KeyError: PurePosixPath('baz/foo')
"""
def _recursive_get(found_obj: object, remaining_attrs: Iterator[str]):
try:
            attr = next(remaining_attrs)  # the attribute we need to open at this level
except StopIteration:
return found_obj # we got to the end of the path
child_obj = found_obj[attr]
try:
return _recursive_get(child_obj, remaining_attrs)
except LookupError as e: # an attribute wasn't found
e.args = (attr,) + e.args # adding the full path to the exception
raise
try:
return _recursive_get(obj, iter(attrs.parts))
except LookupError as e:
e.args = (PurePath(*e.args),)
raise
def _set_attr(obj: object, attrs: PurePath, value: object):
"""Recursively set attributes to an object.
>>> dct = {"baz":{"bar":42}}
>>> _set_attr(dct, PurePath("baz/bar"), 12) # equivalent to dct["baz"]["bar"] = 12
>>> dct["baz"]["bar"]
12
KeyError is raised if an attribute that is not the last isn't found
>>> _get_attr(dct, PurePath("baz/foo/bac"))
Traceback (most recent call last):
...
KeyError: PurePosixPath('baz/foo')
"""
def _recursive_set(found_obj: object, remaining_attrs: List[str]):
attr = remaining_attrs.pop(0) # the attribute to open at this level
if len(remaining_attrs) == 0: # we arrived at the end
found_obj[attr] = value
return
if attr not in found_obj:
found_obj[attr] = {} # creating parent dirs as needed
child_obj = found_obj[attr]
try:
_recursive_set(child_obj, remaining_attrs)
except LookupError as e: # an attribute wasn't found
e.args = (attr,) + e.args # adding the full path to the exception
raise
try:
_recursive_set(obj, list(attrs.parts))
except LookupError as e:
e.args = (PurePath(*e.args),)
raise
# --- End User Entry Points ---
@contextmanager
def using_config_directory(relative_config_directory: Union[str, PurePath]):
"""Temporanely set a new config directory. Can be relative to the old"""
global _config_directory
old_dir = _config_directory
set_config_directory(relative_config_directory)
yield
_config_directory = old_dir # restore the previous config directory
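# Usage sketch (illustrative only; the directory name "settings" and the file
# "db.json" below are hypothetical, not part of this module):
#
#     with using_config_directory("settings"):
#         host = get("db.json/host")
#     # the previous config directory is restored when the block exits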
def set_config_directory(relative_config_directory: Union[str, PurePath]):
"""Change the config directory. Can be relative to the old"""
global _config_directory
_config_directory = _relative_to_config_directory(relative_config_directory)
def get_config_directory():
"""Return the config directory"""
return _config_directory
def ensure(config_file: Union[Path, str, bytes], reload: bool = False):
"""Load (or reload) the file/directory in the memory
>>> import tempfile; tmp_file = tempfile.mktemp()
>>> with open(tmp_file, "w") as out:
... out.write('{"answer": 42}')
14
>>> get(tmp_file)["answer"]
42
>>> with open(tmp_file, "w") as out:
... out.write('{"answer": 54}')
14
>>> ensure(tmp_file)
>>> get(tmp_file)["answer"]
42
>>> ensure(tmp_file, reload=True)
>>> get(tmp_file)["answer"]
54
"""
_load_path(_relative_to_config_directory(config_file), reload)
def get(config: Union[str, PurePath]):
"""Return the requested config
>>> from tempfile import NamedTemporaryFile; from json import dump; from os import remove
>>> with NamedTemporaryFile(mode="w", delete=False) as fil:
... dump({"bar":"foo"}, fil)
... filename = fil.name
>>> get(PurePath(filename, "./bar"))
'foo'
>>> remove(filename)
"""
config = _relative_to_config_directory(config)
_load_path(_split_real_file(config), reload=False) # ensure the file is loaded
try:
return _get_attr(_configs, _split_config_attributes(config))
except LookupError as e:
raise ConfigNotFoundError(f"Can't find the config {e.args[0]}") from e
|
from ishuhui import create_app
app = create_app('env')
if __name__ == '__main__':
app.run(host=app.config['HOST'], port=int(app.config['PORT']), debug=True)
|
import jsonpickle
# import simplejson
load_success = jsonpickle.load_backend('json')
print (load_success)
jsonpickle.set_preferred_backend('json')
json_path_in = "examples/process_design_example/frame_ortho_lap_joints_no_rfl_process.json"
json_path_out = "examples/process_design_example/frame_ortho_lap_joints_no_rfl_prepathplan.json"
# Read process
with open(json_path_in, 'r') as f:
    json_str = f.read()
print("json_str len:", len(json_str))
data = jsonpickle.decode(json_str, keys=True)
data._clamps = None
data._grippers = None
data.pickup_station = None
json_str = jsonpickle.encode(data, keys=True)  # Somehow IronPython refuses to deserialize if make_refs=True
print("json_str len:", len(json_str))
with open(json_path_out, 'w') as f:
    f.write(json_str)
data2 = jsonpickle.decode(json_str, keys=True)
print (data2)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/markaligbe/Documents/PlatformIO/Projects/leviathans_breath_pio/gui/gui.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_mw_main(object):
def setupUi(self, mw_main):
mw_main.setObjectName("mw_main")
mw_main.resize(997, 616)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("logo.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
mw_main.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(mw_main)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.sp_main = QtWidgets.QSplitter(self.centralwidget)
self.sp_main.setOrientation(QtCore.Qt.Horizontal)
self.sp_main.setObjectName("sp_main")
self.sa_chart = QtWidgets.QScrollArea(self.sp_main)
self.sa_chart.setWidgetResizable(True)
self.sa_chart.setObjectName("sa_chart")
self.sawc_chart = QtWidgets.QWidget()
self.sawc_chart.setGeometry(QtCore.QRect(0, -112, 681, 669))
self.sawc_chart.setObjectName("sawc_chart")
self.gl_sawc_chart = QtWidgets.QGridLayout(self.sawc_chart)
self.gl_sawc_chart.setContentsMargins(0, 0, 0, 0)
self.gl_sawc_chart.setSpacing(0)
self.gl_sawc_chart.setObjectName("gl_sawc_chart")
self.gb_fan_curve_editor = QtWidgets.QGroupBox(self.sawc_chart)
self.gb_fan_curve_editor.setCheckable(True)
self.gb_fan_curve_editor.setObjectName("gb_fan_curve_editor")
self.gl_fan_curve_editor = QtWidgets.QGridLayout(self.gb_fan_curve_editor)
self.gl_fan_curve_editor.setContentsMargins(0, 0, 0, 0)
self.gl_fan_curve_editor.setSpacing(0)
self.gl_fan_curve_editor.setObjectName("gl_fan_curve_editor")
self.f_fan_curve_editor_anim = QtWidgets.QFrame(self.gb_fan_curve_editor)
self.f_fan_curve_editor_anim.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_fan_curve_editor_anim.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_fan_curve_editor_anim.setObjectName("f_fan_curve_editor_anim")
self.gl_fan_curve_editor_anim = QtWidgets.QGridLayout(self.f_fan_curve_editor_anim)
self.gl_fan_curve_editor_anim.setContentsMargins(0, 0, 0, 0)
self.gl_fan_curve_editor_anim.setSpacing(0)
self.gl_fan_curve_editor_anim.setObjectName("gl_fan_curve_editor_anim")
self.gb_fan_curve_options = QtWidgets.QGroupBox(self.f_fan_curve_editor_anim)
self.gb_fan_curve_options.setCheckable(True)
self.gb_fan_curve_options.setObjectName("gb_fan_curve_options")
self.formLayout = QtWidgets.QFormLayout(self.gb_fan_curve_options)
self.formLayout.setContentsMargins(0, 0, 0, 0)
self.formLayout.setSpacing(0)
self.formLayout.setObjectName("formLayout")
self.f_fan_curve_options_anim = QtWidgets.QFrame(self.gb_fan_curve_options)
self.f_fan_curve_options_anim.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_fan_curve_options_anim.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_fan_curve_options_anim.setObjectName("f_fan_curve_options_anim")
self.verticalLayout = QtWidgets.QVBoxLayout(self.f_fan_curve_options_anim)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.f_fan_options_preset = QtWidgets.QFrame(self.f_fan_curve_options_anim)
self.f_fan_options_preset.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_fan_options_preset.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_fan_options_preset.setObjectName("f_fan_options_preset")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.f_fan_options_preset)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_6 = QtWidgets.QLabel(self.f_fan_options_preset)
self.label_6.setObjectName("label_6")
self.horizontalLayout.addWidget(self.label_6)
self.cb_fan_options_preset = QtWidgets.QComboBox(self.f_fan_options_preset)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cb_fan_options_preset.sizePolicy().hasHeightForWidth())
self.cb_fan_options_preset.setSizePolicy(sizePolicy)
self.cb_fan_options_preset.setEditable(True)
self.cb_fan_options_preset.setInsertPolicy(QtWidgets.QComboBox.InsertAlphabetically)
self.cb_fan_options_preset.setObjectName("cb_fan_options_preset")
self.horizontalLayout.addWidget(self.cb_fan_options_preset)
self.pb_fan_options_save = QtWidgets.QPushButton(self.f_fan_options_preset)
self.pb_fan_options_save.setObjectName("pb_fan_options_save")
self.horizontalLayout.addWidget(self.pb_fan_options_save)
self.pb_fan_options_load = QtWidgets.QPushButton(self.f_fan_options_preset)
self.pb_fan_options_load.setObjectName("pb_fan_options_load")
self.horizontalLayout.addWidget(self.pb_fan_options_load)
self.pb_fan_options_delete = QtWidgets.QPushButton(self.f_fan_options_preset)
self.pb_fan_options_delete.setObjectName("pb_fan_options_delete")
self.horizontalLayout.addWidget(self.pb_fan_options_delete)
self.verticalLayout.addWidget(self.f_fan_options_preset)
self.f_fan_curve_temperature_source = QtWidgets.QFrame(self.f_fan_curve_options_anim)
self.f_fan_curve_temperature_source.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_fan_curve_temperature_source.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_fan_curve_temperature_source.setObjectName("f_fan_curve_temperature_source")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.f_fan_curve_temperature_source)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_4 = QtWidgets.QLabel(self.f_fan_curve_temperature_source)
self.label_4.setObjectName("label_4")
self.horizontalLayout_3.addWidget(self.label_4)
self.cb_fan_curve_temperature_source_selection = QtWidgets.QComboBox(self.f_fan_curve_temperature_source)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cb_fan_curve_temperature_source_selection.sizePolicy().hasHeightForWidth())
self.cb_fan_curve_temperature_source_selection.setSizePolicy(sizePolicy)
self.cb_fan_curve_temperature_source_selection.setEditable(False)
self.cb_fan_curve_temperature_source_selection.setInsertPolicy(QtWidgets.QComboBox.NoInsert)
self.cb_fan_curve_temperature_source_selection.setObjectName("cb_fan_curve_temperature_source_selection")
self.horizontalLayout_3.addWidget(self.cb_fan_curve_temperature_source_selection)
self.verticalLayout.addWidget(self.f_fan_curve_temperature_source)
self.f_pwm_controlled = QtWidgets.QFrame(self.f_fan_curve_options_anim)
self.f_pwm_controlled.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_pwm_controlled.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_pwm_controlled.setObjectName("f_pwm_controlled")
self.formLayout_4 = QtWidgets.QFormLayout(self.f_pwm_controlled)
self.formLayout_4.setFormAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.formLayout_4.setContentsMargins(0, 0, 0, 0)
self.formLayout_4.setSpacing(0)
self.formLayout_4.setObjectName("formLayout_4")
self.cb_pwm_controlled = QtWidgets.QCheckBox(self.f_pwm_controlled)
self.cb_pwm_controlled.setObjectName("cb_pwm_controlled")
self.formLayout_4.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.cb_pwm_controlled)
self.verticalLayout.addWidget(self.f_pwm_controlled)
self.formLayout.setWidget(0, QtWidgets.QFormLayout.SpanningRole, self.f_fan_curve_options_anim)
self.gl_fan_curve_editor_anim.addWidget(self.gb_fan_curve_options, 2, 0, 1, 1)
self.f_fan_curve_ph = QtWidgets.QFrame(self.f_fan_curve_editor_anim)
self.f_fan_curve_ph.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_fan_curve_ph.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_fan_curve_ph.setObjectName("f_fan_curve_ph")
self.gl_fan_curve_ph = QtWidgets.QGridLayout(self.f_fan_curve_ph)
self.gl_fan_curve_ph.setContentsMargins(0, 0, 0, 0)
self.gl_fan_curve_ph.setSpacing(0)
self.gl_fan_curve_ph.setObjectName("gl_fan_curve_ph")
self.gl_fan_curve_editor_anim.addWidget(self.f_fan_curve_ph, 1, 0, 1, 1)
self.cb_fan_curve_selection = QtWidgets.QComboBox(self.f_fan_curve_editor_anim)
self.cb_fan_curve_selection.setEditable(True)
self.cb_fan_curve_selection.setInsertPolicy(QtWidgets.QComboBox.NoInsert)
self.cb_fan_curve_selection.setObjectName("cb_fan_curve_selection")
self.gl_fan_curve_editor_anim.addWidget(self.cb_fan_curve_selection, 0, 0, 1, 1)
self.gl_fan_curve_editor.addWidget(self.f_fan_curve_editor_anim, 0, 0, 1, 1)
self.gl_sawc_chart.addWidget(self.gb_fan_curve_editor, 1, 0, 1, 1)
self.gb_temperature_display = QtWidgets.QGroupBox(self.sawc_chart)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gb_temperature_display.sizePolicy().hasHeightForWidth())
self.gb_temperature_display.setSizePolicy(sizePolicy)
self.gb_temperature_display.setCheckable(True)
self.gb_temperature_display.setObjectName("gb_temperature_display")
self.gridLayout_6 = QtWidgets.QGridLayout(self.gb_temperature_display)
self.gridLayout_6.setContentsMargins(0, 0, 0, 0)
self.gridLayout_6.setSpacing(0)
self.gridLayout_6.setObjectName("gridLayout_6")
self.f_temperature_display_anim = QtWidgets.QFrame(self.gb_temperature_display)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.f_temperature_display_anim.sizePolicy().hasHeightForWidth())
self.f_temperature_display_anim.setSizePolicy(sizePolicy)
self.f_temperature_display_anim.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_temperature_display_anim.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_temperature_display_anim.setObjectName("f_temperature_display_anim")
self.vl_temperature_display_anim = QtWidgets.QVBoxLayout(self.f_temperature_display_anim)
self.vl_temperature_display_anim.setContentsMargins(0, 0, 0, 0)
self.vl_temperature_display_anim.setSpacing(0)
self.vl_temperature_display_anim.setObjectName("vl_temperature_display_anim")
self.cb_temperature_display_selection = QtWidgets.QComboBox(self.f_temperature_display_anim)
self.cb_temperature_display_selection.setEditable(True)
self.cb_temperature_display_selection.setInsertPolicy(QtWidgets.QComboBox.NoInsert)
self.cb_temperature_display_selection.setObjectName("cb_temperature_display_selection")
self.vl_temperature_display_anim.addWidget(self.cb_temperature_display_selection)
self.f_temperature_display_ph = QtWidgets.QFrame(self.f_temperature_display_anim)
self.f_temperature_display_ph.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_temperature_display_ph.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_temperature_display_ph.setObjectName("f_temperature_display_ph")
self.gl_temperature_display_ph = QtWidgets.QGridLayout(self.f_temperature_display_ph)
self.gl_temperature_display_ph.setContentsMargins(0, 0, 0, 0)
self.gl_temperature_display_ph.setSpacing(0)
self.gl_temperature_display_ph.setObjectName("gl_temperature_display_ph")
self.vl_temperature_display_anim.addWidget(self.f_temperature_display_ph)
self.gridLayout_6.addWidget(self.f_temperature_display_anim, 0, 0, 1, 1)
self.gl_sawc_chart.addWidget(self.gb_temperature_display, 0, 0, 1, 1)
self.gb_led_curve_editor = QtWidgets.QGroupBox(self.sawc_chart)
self.gb_led_curve_editor.setCheckable(True)
self.gb_led_curve_editor.setObjectName("gb_led_curve_editor")
self.gridLayout_14 = QtWidgets.QGridLayout(self.gb_led_curve_editor)
self.gridLayout_14.setContentsMargins(0, 0, 0, 0)
self.gridLayout_14.setSpacing(0)
self.gridLayout_14.setObjectName("gridLayout_14")
self.f_led_curve_editor_anim = QtWidgets.QFrame(self.gb_led_curve_editor)
self.f_led_curve_editor_anim.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_led_curve_editor_anim.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_led_curve_editor_anim.setObjectName("f_led_curve_editor_anim")
self.gl_led_curve_editor_anim = QtWidgets.QGridLayout(self.f_led_curve_editor_anim)
self.gl_led_curve_editor_anim.setContentsMargins(0, 0, 0, 0)
self.gl_led_curve_editor_anim.setSpacing(0)
self.gl_led_curve_editor_anim.setObjectName("gl_led_curve_editor_anim")
self.tw_led_curve_channel = QtWidgets.QTabWidget(self.f_led_curve_editor_anim)
self.tw_led_curve_channel.setObjectName("tw_led_curve_channel")
self.tw_led_channel_t_r = QtWidgets.QWidget()
self.tw_led_channel_t_r.setObjectName("tw_led_channel_t_r")
self.gridLayout_15 = QtWidgets.QGridLayout(self.tw_led_channel_t_r)
self.gridLayout_15.setContentsMargins(0, 0, 0, 0)
self.gridLayout_15.setSpacing(0)
self.gridLayout_15.setObjectName("gridLayout_15")
self.f_tw_led_channel_t_r_ph = QtWidgets.QFrame(self.tw_led_channel_t_r)
self.f_tw_led_channel_t_r_ph.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_tw_led_channel_t_r_ph.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_tw_led_channel_t_r_ph.setLineWidth(0)
self.f_tw_led_channel_t_r_ph.setObjectName("f_tw_led_channel_t_r_ph")
self.gl_tw_led_channel_t_r_ph = QtWidgets.QGridLayout(self.f_tw_led_channel_t_r_ph)
self.gl_tw_led_channel_t_r_ph.setContentsMargins(0, 0, 0, 0)
self.gl_tw_led_channel_t_r_ph.setSpacing(0)
self.gl_tw_led_channel_t_r_ph.setObjectName("gl_tw_led_channel_t_r_ph")
self.gridLayout_15.addWidget(self.f_tw_led_channel_t_r_ph, 0, 0, 1, 1)
self.tw_led_curve_channel.addTab(self.tw_led_channel_t_r, "")
self.tw_led_channel_t_g = QtWidgets.QWidget()
self.tw_led_channel_t_g.setObjectName("tw_led_channel_t_g")
self.gridLayout_17 = QtWidgets.QGridLayout(self.tw_led_channel_t_g)
self.gridLayout_17.setContentsMargins(0, 0, 0, 0)
self.gridLayout_17.setSpacing(0)
self.gridLayout_17.setObjectName("gridLayout_17")
self.f_tw_led_channel_t_g_ph = QtWidgets.QFrame(self.tw_led_channel_t_g)
self.f_tw_led_channel_t_g_ph.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_tw_led_channel_t_g_ph.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_tw_led_channel_t_g_ph.setLineWidth(0)
self.f_tw_led_channel_t_g_ph.setObjectName("f_tw_led_channel_t_g_ph")
self.gl_tw_led_channel_t_g_ph = QtWidgets.QGridLayout(self.f_tw_led_channel_t_g_ph)
self.gl_tw_led_channel_t_g_ph.setContentsMargins(0, 0, 0, 0)
self.gl_tw_led_channel_t_g_ph.setSpacing(0)
self.gl_tw_led_channel_t_g_ph.setObjectName("gl_tw_led_channel_t_g_ph")
self.gridLayout_17.addWidget(self.f_tw_led_channel_t_g_ph, 0, 0, 1, 1)
self.tw_led_curve_channel.addTab(self.tw_led_channel_t_g, "")
self.tw_led_channel_t_b = QtWidgets.QWidget()
self.tw_led_channel_t_b.setObjectName("tw_led_channel_t_b")
self.gridLayout_18 = QtWidgets.QGridLayout(self.tw_led_channel_t_b)
self.gridLayout_18.setContentsMargins(0, 0, 0, 0)
self.gridLayout_18.setSpacing(0)
self.gridLayout_18.setObjectName("gridLayout_18")
self.f_tw_led_channel_t_b_ph = QtWidgets.QFrame(self.tw_led_channel_t_b)
self.f_tw_led_channel_t_b_ph.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_tw_led_channel_t_b_ph.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_tw_led_channel_t_b_ph.setLineWidth(0)
self.f_tw_led_channel_t_b_ph.setObjectName("f_tw_led_channel_t_b_ph")
self.gl_tw_led_channel_t_b_ph = QtWidgets.QGridLayout(self.f_tw_led_channel_t_b_ph)
self.gl_tw_led_channel_t_b_ph.setContentsMargins(0, 0, 0, 0)
self.gl_tw_led_channel_t_b_ph.setSpacing(0)
self.gl_tw_led_channel_t_b_ph.setObjectName("gl_tw_led_channel_t_b_ph")
self.gridLayout_18.addWidget(self.f_tw_led_channel_t_b_ph, 0, 0, 1, 1)
self.tw_led_curve_channel.addTab(self.tw_led_channel_t_b, "")
self.gl_led_curve_editor_anim.addWidget(self.tw_led_curve_channel, 1, 0, 1, 1)
self.gb_led_curve_options = QtWidgets.QGroupBox(self.f_led_curve_editor_anim)
self.gb_led_curve_options.setCheckable(True)
self.gb_led_curve_options.setObjectName("gb_led_curve_options")
self.gridLayout_16 = QtWidgets.QGridLayout(self.gb_led_curve_options)
self.gridLayout_16.setContentsMargins(0, 0, 0, 0)
self.gridLayout_16.setSpacing(0)
self.gridLayout_16.setObjectName("gridLayout_16")
self.f_led_options_anim = QtWidgets.QFrame(self.gb_led_curve_options)
self.f_led_options_anim.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_led_options_anim.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_led_options_anim.setObjectName("f_led_options_anim")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.f_led_options_anim)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.f_led_options_preset = QtWidgets.QFrame(self.f_led_options_anim)
self.f_led_options_preset.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_led_options_preset.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_led_options_preset.setObjectName("f_led_options_preset")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.f_led_options_preset)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_7 = QtWidgets.QLabel(self.f_led_options_preset)
self.label_7.setObjectName("label_7")
self.horizontalLayout_2.addWidget(self.label_7)
self.cb_led_options_preset = QtWidgets.QComboBox(self.f_led_options_preset)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cb_led_options_preset.sizePolicy().hasHeightForWidth())
self.cb_led_options_preset.setSizePolicy(sizePolicy)
self.cb_led_options_preset.setEditable(True)
self.cb_led_options_preset.setInsertPolicy(QtWidgets.QComboBox.InsertAlphabetically)
self.cb_led_options_preset.setObjectName("cb_led_options_preset")
self.horizontalLayout_2.addWidget(self.cb_led_options_preset)
self.pb_led_options_save = QtWidgets.QPushButton(self.f_led_options_preset)
self.pb_led_options_save.setObjectName("pb_led_options_save")
self.horizontalLayout_2.addWidget(self.pb_led_options_save)
self.pb_led_options_load = QtWidgets.QPushButton(self.f_led_options_preset)
self.pb_led_options_load.setObjectName("pb_led_options_load")
self.horizontalLayout_2.addWidget(self.pb_led_options_load)
self.pb_led_options_delete = QtWidgets.QPushButton(self.f_led_options_preset)
self.pb_led_options_delete.setObjectName("pb_led_options_delete")
self.horizontalLayout_2.addWidget(self.pb_led_options_delete)
self.verticalLayout_2.addWidget(self.f_led_options_preset)
self.f_speed_multiplier = QtWidgets.QFrame(self.f_led_options_anim)
self.f_speed_multiplier.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_speed_multiplier.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_speed_multiplier.setObjectName("f_speed_multiplier")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.f_speed_multiplier)
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_5.setSpacing(0)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_8 = QtWidgets.QLabel(self.f_speed_multiplier)
self.label_8.setObjectName("label_8")
self.horizontalLayout_5.addWidget(self.label_8)
self.dsb_speed_multiplier = QtWidgets.QDoubleSpinBox(self.f_speed_multiplier)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.dsb_speed_multiplier.sizePolicy().hasHeightForWidth())
self.dsb_speed_multiplier.setSizePolicy(sizePolicy)
self.dsb_speed_multiplier.setDecimals(3)
self.dsb_speed_multiplier.setMinimum(0.001)
self.dsb_speed_multiplier.setSingleStep(0.01)
self.dsb_speed_multiplier.setProperty("value", 0.25)
self.dsb_speed_multiplier.setObjectName("dsb_speed_multiplier")
self.horizontalLayout_5.addWidget(self.dsb_speed_multiplier)
self.verticalLayout_2.addWidget(self.f_speed_multiplier)
self.f_led_curve_temperature = QtWidgets.QFrame(self.f_led_options_anim)
self.f_led_curve_temperature.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_led_curve_temperature.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_led_curve_temperature.setObjectName("f_led_curve_temperature")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.f_led_curve_temperature)
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_4.setSpacing(0)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_5 = QtWidgets.QLabel(self.f_led_curve_temperature)
self.label_5.setObjectName("label_5")
self.horizontalLayout_4.addWidget(self.label_5)
self.cb_led_curve_temperature_source_selection = QtWidgets.QComboBox(self.f_led_curve_temperature)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cb_led_curve_temperature_source_selection.sizePolicy().hasHeightForWidth())
self.cb_led_curve_temperature_source_selection.setSizePolicy(sizePolicy)
self.cb_led_curve_temperature_source_selection.setEditable(False)
self.cb_led_curve_temperature_source_selection.setInsertPolicy(QtWidgets.QComboBox.NoInsert)
self.cb_led_curve_temperature_source_selection.setObjectName("cb_led_curve_temperature_source_selection")
self.horizontalLayout_4.addWidget(self.cb_led_curve_temperature_source_selection)
self.verticalLayout_2.addWidget(self.f_led_curve_temperature)
self.f_led_curve_control_selection = QtWidgets.QFrame(self.f_led_options_anim)
self.f_led_curve_control_selection.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_led_curve_control_selection.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_led_curve_control_selection.setObjectName("f_led_curve_control_selection")
self.formLayout_2 = QtWidgets.QFormLayout(self.f_led_curve_control_selection)
self.formLayout_2.setFormAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.formLayout_2.setContentsMargins(0, 0, 0, 0)
self.formLayout_2.setSpacing(0)
self.formLayout_2.setObjectName("formLayout_2")
self.rb_time_controlled = QtWidgets.QRadioButton(self.f_led_curve_control_selection)
self.rb_time_controlled.setChecked(True)
self.rb_time_controlled.setObjectName("rb_time_controlled")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.rb_time_controlled)
self.rb_temperature_controlled = QtWidgets.QRadioButton(self.f_led_curve_control_selection)
self.rb_temperature_controlled.setObjectName("rb_temperature_controlled")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.rb_temperature_controlled)
self.verticalLayout_2.addWidget(self.f_led_curve_control_selection)
self.f_channel_sync_options = QtWidgets.QFrame(self.f_led_options_anim)
self.f_channel_sync_options.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_channel_sync_options.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_channel_sync_options.setObjectName("f_channel_sync_options")
self.gridLayout_13 = QtWidgets.QGridLayout(self.f_channel_sync_options)
self.gridLayout_13.setContentsMargins(0, 0, 0, 0)
self.gridLayout_13.setSpacing(0)
self.gridLayout_13.setObjectName("gridLayout_13")
self.label = QtWidgets.QLabel(self.f_channel_sync_options)
self.label.setObjectName("label")
self.gridLayout_13.addWidget(self.label, 0, 0, 1, 1)
self.cb_r_channel_sync = QtWidgets.QComboBox(self.f_channel_sync_options)
self.cb_r_channel_sync.setObjectName("cb_r_channel_sync")
self.gridLayout_13.addWidget(self.cb_r_channel_sync, 1, 0, 1, 1)
self.cb_g_channel_sync = QtWidgets.QComboBox(self.f_channel_sync_options)
self.cb_g_channel_sync.setObjectName("cb_g_channel_sync")
self.gridLayout_13.addWidget(self.cb_g_channel_sync, 1, 1, 1, 1)
self.label_2 = QtWidgets.QLabel(self.f_channel_sync_options)
self.label_2.setObjectName("label_2")
self.gridLayout_13.addWidget(self.label_2, 0, 1, 1, 1)
self.label_3 = QtWidgets.QLabel(self.f_channel_sync_options)
self.label_3.setObjectName("label_3")
self.gridLayout_13.addWidget(self.label_3, 0, 2, 1, 1)
self.cb_b_channel_sync = QtWidgets.QComboBox(self.f_channel_sync_options)
self.cb_b_channel_sync.setObjectName("cb_b_channel_sync")
self.gridLayout_13.addWidget(self.cb_b_channel_sync, 1, 2, 1, 1)
self.verticalLayout_2.addWidget(self.f_channel_sync_options)
self.gridLayout_16.addWidget(self.f_led_options_anim, 2, 0, 1, 1)
self.gl_led_curve_editor_anim.addWidget(self.gb_led_curve_options, 2, 0, 1, 1)
self.cb_led_curve_selection = QtWidgets.QComboBox(self.f_led_curve_editor_anim)
self.cb_led_curve_selection.setEditable(True)
self.cb_led_curve_selection.setInsertPolicy(QtWidgets.QComboBox.NoInsert)
self.cb_led_curve_selection.setObjectName("cb_led_curve_selection")
self.gl_led_curve_editor_anim.addWidget(self.cb_led_curve_selection, 0, 0, 1, 1)
self.gridLayout_14.addWidget(self.f_led_curve_editor_anim, 1, 0, 1, 1)
self.gl_sawc_chart.addWidget(self.gb_led_curve_editor, 2, 0, 1, 1)
self.sa_chart.setWidget(self.sawc_chart)
self.f_fan_and_preview = QtWidgets.QFrame(self.sp_main)
self.f_fan_and_preview.setMinimumSize(QtCore.QSize(285, 0))
self.f_fan_and_preview.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.f_fan_and_preview.setFrameShadow(QtWidgets.QFrame.Raised)
self.f_fan_and_preview.setObjectName("f_fan_and_preview")
self.gridLayout = QtWidgets.QGridLayout(self.f_fan_and_preview)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.gb_fan_status = QtWidgets.QGroupBox(self.f_fan_and_preview)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gb_fan_status.sizePolicy().hasHeightForWidth())
self.gb_fan_status.setSizePolicy(sizePolicy)
self.gb_fan_status.setObjectName("gb_fan_status")
self.gridLayout_10 = QtWidgets.QGridLayout(self.gb_fan_status)
self.gridLayout_10.setContentsMargins(0, 0, 0, 0)
self.gridLayout_10.setSpacing(0)
self.gridLayout_10.setObjectName("gridLayout_10")
self.sa_fan_status = QtWidgets.QScrollArea(self.gb_fan_status)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sa_fan_status.sizePolicy().hasHeightForWidth())
self.sa_fan_status.setSizePolicy(sizePolicy)
self.sa_fan_status.setWidgetResizable(True)
self.sa_fan_status.setObjectName("sa_fan_status")
self.sawc_fan_status = QtWidgets.QWidget()
self.sawc_fan_status.setGeometry(QtCore.QRect(0, 0, 257, 507))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sawc_fan_status.sizePolicy().hasHeightForWidth())
self.sawc_fan_status.setSizePolicy(sizePolicy)
self.sawc_fan_status.setObjectName("sawc_fan_status")
self.gridLayout_9 = QtWidgets.QGridLayout(self.sawc_fan_status)
self.gridLayout_9.setContentsMargins(0, 0, 0, 0)
self.gridLayout_9.setSpacing(0)
self.gridLayout_9.setObjectName("gridLayout_9")
self.sa_fan_status.setWidget(self.sawc_fan_status)
self.gridLayout_10.addWidget(self.sa_fan_status, 0, 0, 1, 1)
self.gridLayout.addWidget(self.gb_fan_status, 0, 0, 1, 1)
self.gb_led_preview = QtWidgets.QGroupBox(self.f_fan_and_preview)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gb_led_preview.sizePolicy().hasHeightForWidth())
self.gb_led_preview.setSizePolicy(sizePolicy)
self.gb_led_preview.setObjectName("gb_led_preview")
self.gl_led_preview = QtWidgets.QGridLayout(self.gb_led_preview)
self.gl_led_preview.setContentsMargins(0, 0, 0, 0)
self.gl_led_preview.setSpacing(0)
self.gl_led_preview.setObjectName("gl_led_preview")
self.gridLayout.addWidget(self.gb_led_preview, 1, 0, 1, 1)
self.gridLayout_2.addWidget(self.sp_main, 0, 0, 1, 1)
mw_main.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(mw_main)
self.menubar.setGeometry(QtCore.QRect(0, 0, 997, 24))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuPreferences = QtWidgets.QMenu(self.menubar)
self.menuPreferences.setObjectName("menuPreferences")
mw_main.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(mw_main)
self.statusbar.setObjectName("statusbar")
mw_main.setStatusBar(self.statusbar)
self.actionExit = QtWidgets.QAction(mw_main)
self.actionExit.setObjectName("actionExit")
self.actionSet_Refresh_Rate = QtWidgets.QAction(mw_main)
self.actionSet_Refresh_Rate.setObjectName("actionSet_Refresh_Rate")
self.menuFile.addAction(self.actionExit)
self.menuPreferences.addAction(self.actionSet_Refresh_Rate)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuPreferences.menuAction())
self.retranslateUi(mw_main)
self.tw_led_curve_channel.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(mw_main)
mw_main.setTabOrder(self.sa_chart, self.gb_temperature_display)
mw_main.setTabOrder(self.gb_temperature_display, self.cb_temperature_display_selection)
mw_main.setTabOrder(self.cb_temperature_display_selection, self.gb_fan_curve_editor)
mw_main.setTabOrder(self.gb_fan_curve_editor, self.cb_fan_curve_selection)
mw_main.setTabOrder(self.cb_fan_curve_selection, self.gb_fan_curve_options)
mw_main.setTabOrder(self.gb_fan_curve_options, self.cb_fan_options_preset)
mw_main.setTabOrder(self.cb_fan_options_preset, self.pb_fan_options_save)
mw_main.setTabOrder(self.pb_fan_options_save, self.pb_fan_options_load)
mw_main.setTabOrder(self.pb_fan_options_load, self.pb_fan_options_delete)
mw_main.setTabOrder(self.pb_fan_options_delete, self.cb_fan_curve_temperature_source_selection)
mw_main.setTabOrder(self.cb_fan_curve_temperature_source_selection, self.cb_pwm_controlled)
mw_main.setTabOrder(self.cb_pwm_controlled, self.gb_led_curve_editor)
mw_main.setTabOrder(self.gb_led_curve_editor, self.cb_led_curve_selection)
mw_main.setTabOrder(self.cb_led_curve_selection, self.tw_led_curve_channel)
mw_main.setTabOrder(self.tw_led_curve_channel, self.gb_led_curve_options)
mw_main.setTabOrder(self.gb_led_curve_options, self.cb_led_options_preset)
mw_main.setTabOrder(self.cb_led_options_preset, self.pb_led_options_save)
mw_main.setTabOrder(self.pb_led_options_save, self.pb_led_options_load)
mw_main.setTabOrder(self.pb_led_options_load, self.pb_led_options_delete)
mw_main.setTabOrder(self.pb_led_options_delete, self.dsb_speed_multiplier)
mw_main.setTabOrder(self.dsb_speed_multiplier, self.cb_led_curve_temperature_source_selection)
mw_main.setTabOrder(self.cb_led_curve_temperature_source_selection, self.rb_time_controlled)
mw_main.setTabOrder(self.rb_time_controlled, self.rb_temperature_controlled)
mw_main.setTabOrder(self.rb_temperature_controlled, self.cb_r_channel_sync)
mw_main.setTabOrder(self.cb_r_channel_sync, self.cb_g_channel_sync)
mw_main.setTabOrder(self.cb_g_channel_sync, self.cb_b_channel_sync)
mw_main.setTabOrder(self.cb_b_channel_sync, self.sa_fan_status)
def retranslateUi(self, mw_main):
_translate = QtCore.QCoreApplication.translate
mw_main.setWindowTitle(_translate("mw_main", "Leviathan\'s Breath"))
self.gb_fan_curve_editor.setTitle(_translate("mw_main", "Fan Curve Editor"))
self.gb_fan_curve_options.setTitle(_translate("mw_main", "Options"))
self.label_6.setText(_translate("mw_main", "Preset"))
self.pb_fan_options_save.setText(_translate("mw_main", "Save"))
self.pb_fan_options_load.setText(_translate("mw_main", "Load"))
self.pb_fan_options_delete.setText(_translate("mw_main", "Delete"))
self.label_4.setText(_translate("mw_main", "Temperature Source"))
self.cb_pwm_controlled.setText(_translate("mw_main", "PWM Controlled"))
self.gb_temperature_display.setTitle(_translate("mw_main", "Temperature"))
self.gb_led_curve_editor.setTitle(_translate("mw_main", "LED Curve Editor"))
self.tw_led_curve_channel.setTabText(self.tw_led_curve_channel.indexOf(self.tw_led_channel_t_r), _translate("mw_main", "R"))
self.tw_led_curve_channel.setTabText(self.tw_led_curve_channel.indexOf(self.tw_led_channel_t_g), _translate("mw_main", "G"))
self.tw_led_curve_channel.setTabText(self.tw_led_curve_channel.indexOf(self.tw_led_channel_t_b), _translate("mw_main", "B"))
self.gb_led_curve_options.setTitle(_translate("mw_main", "Options"))
self.label_7.setText(_translate("mw_main", "Preset"))
self.pb_led_options_save.setText(_translate("mw_main", "Save"))
self.pb_led_options_load.setText(_translate("mw_main", "Load"))
self.pb_led_options_delete.setText(_translate("mw_main", "Delete"))
self.label_8.setText(_translate("mw_main", "Speed Multiplier"))
self.label_5.setText(_translate("mw_main", "Temperature Source"))
self.rb_time_controlled.setText(_translate("mw_main", "Time Controlled"))
self.rb_temperature_controlled.setText(_translate("mw_main", "Temperature Controlled"))
self.label.setText(_translate("mw_main", "Sync R Channel To..."))
self.label_2.setText(_translate("mw_main", "Sync G Channel To..."))
self.label_3.setText(_translate("mw_main", "Sync B Channel To..."))
self.gb_fan_status.setTitle(_translate("mw_main", "Fan Status"))
self.gb_led_preview.setTitle(_translate("mw_main", "LED Preview"))
self.menuFile.setTitle(_translate("mw_main", "File"))
self.menuPreferences.setTitle(_translate("mw_main", "Preferences"))
self.actionExit.setText(_translate("mw_main", "Exit"))
self.actionSet_Refresh_Rate.setText(_translate("mw_main", "Set Refresh Rate..."))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
mw_main = QtWidgets.QMainWindow()
ui = Ui_mw_main()
ui.setupUi(mw_main)
mw_main.show()
sys.exit(app.exec_())
|
import json
import torch
import pickle
import itertools
import numpy as np
import pandas as pd
import torch.nn as nn
from pathlib import Path
from tabulate import tabulate
from nested_lookup import nested_lookup
from transformers import BertTokenizer
from haversine import haversine, haversine_vector, Unit
# File Utils
def getProjectRootPath() -> Path:
return Path(__file__).parent.parent
def create(path) -> None:
path = Path(path)
path.mkdir(parents = True, exist_ok = True)
def dumpJSON(data, path, sort_keys = False) -> None:
create(Path(path).parent)
json.dump(data, open(path, "w"), indent = 4, ensure_ascii = False, sort_keys = sort_keys)
# Distance Utils
def getDistance(a, b):
return haversine(a, b, Unit.KILOMETERS)
def getCandidateDistances(candidate_locations, locations):
num_candidates = len(candidate_locations)
num_locations = len(locations)
candidate_distances = np.empty([num_candidates, 0])
for location in locations:
distances = haversine_vector(np.repeat([location], num_candidates, axis = 0), candidate_locations, Unit.KILOMETERS)
candidate_distances = np.hstack((candidate_distances, np.expand_dims(distances, axis = 1)))
return candidate_distances.tolist()
def getSortedCandidatesByDistance(candidates, locations, k):
candidate_ids = list(candidates.keys())
candidate_locations = np.array(list(candidates.values()))
candidate_distances = np.array(getCandidateDistances(candidate_locations, locations))
candidate_distances = np.sum(np.sort(candidate_distances, axis = 1)[:, :k], axis = 1).tolist()
sorted_candidates = sorted(list(zip(candidate_ids, candidate_distances)), key = lambda item: item[1])
return sorted_candidates
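# Note (added for clarity): getCandidateDistances returns a num_candidates x num_locations
# matrix of haversine distances in kilometres, and getSortedCandidatesByDistance ranks each
# candidate by the sum of its k smallest distances to the given locations (ascending), so
# the first entry is the candidate closest to its k nearest reference locations.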
# Dataset Utils
class Word2Vec():
def __init__(self, vocab_file_path, word2vec_file_path):
self.vocab = pickle.load(open(vocab_file_path, "rb"))
self.pretrained_embeddings = pickle.load(open(word2vec_file_path, "rb"))
self.vocab_size = len(self.vocab)
self.word_embedding_dim = 128
self.embedding_model = nn.Embedding(self.vocab_size, self.word_embedding_dim)
self.embedding_model.weight = nn.Parameter(torch.tensor(self.pretrained_embeddings).float(), requires_grad = False)
def __call__(self, tokens):
indexes = torch.tensor([[self.vocab[token.lower()] if token.lower() in self.vocab else self.vocab_size for token in tokens]])
word_embeddings = self.embedding_model(indexes)[0]
return word_embeddings.tolist()
class TokensList2BertTokensIdsList():
def __init__(self):
self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
def __call__(self, tokens):
bert_token_ids = self.tokenizer.encode(tokens, add_special_tokens = False, is_pretokenized = True)
return list(bert_token_ids)
def getBIencoding(num_tokens, chosen_positions):
bi_encoding = torch.zeros((2, num_tokens))
b_positions = [positions[0] for positions in chosen_positions]
bi_encoding[0][b_positions] = 1
i_positions = list(itertools.chain.from_iterable([positions[1:] for positions in chosen_positions]))
bi_encoding[1][i_positions] = 1
return bi_encoding.transpose(1, 0).tolist()
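# Worked example (added for clarity): with num_tokens=5 and chosen_positions=[[1, 2]],
# token 1 is flagged "B" and token 2 "I", so the result is
# [[0, 0], [1, 0], [0, 1], [0, 0], [0, 0]] -- one [B, I] pair per token.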
def getDistanceEncodings(num_tokens, chosen_positions, candidate_distances):
num_candidates = len(candidate_distances)
candidate_distances = torch.tensor(candidate_distances)
candidate_distance_encodings = torch.zeros(num_candidates, num_tokens)
for index, positions in enumerate(chosen_positions):
candidate_distance_encodings[:, positions] = candidate_distances[:, index].unsqueeze(1)
return candidate_distance_encodings.tolist()
# Analysis Utils
def getMetrics(ranks, distances):
results = {"N": len(ranks)}
ranks = np.array(ranks)
results["Acc@1"] = np.mean(ranks == 1) * 100
results["Acc@3"] = np.mean(ranks <= 3) * 100
results["Acc@5"] = np.mean(ranks <= 5) * 100
results["Acc@10"] = np.mean(ranks <= 10) * 100
results["Acc@30"] = np.mean(ranks <= 30) * 100
results["Acc@50"] = np.mean(ranks <= 50) * 100
results["MR"] = np.mean(ranks)
results["MRR"] = np.mean(1 / ranks)
results["DistG"] = np.mean(list(map(lambda _distances: np.mean(np.min(_distances, axis = 1)), distances)))
df = pd.DataFrame.from_dict(results, orient = "index")
return df
def getTable(df):
return tabulate(df, headers = "keys", tablefmt = "psql")
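# Usage sketch (added): print(getTable(getMetrics(ranks, distances))) renders the metrics
# DataFrame as a psql-style table via tabulate.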
|
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.Sql_create.Tipo_Constraint import Tipo_Constraint, Tipo_Dato_Constraint
from Instrucciones.Excepcion import Excepcion
#from storageManager.jsonMode import *
class AlterTableAddConstraintFK(Instruccion):
def __init__(self, tabla, id_constraint, lista_id1,tabla2, lista_id2, strGram, linea, columna):
Instruccion.__init__(self,None,linea,columna,strGram)
self.tabla = tabla
self.id_constraint = id_constraint
self.lista_id1 = lista_id1
self.tabla2 = tabla2
self.lista_id2 = lista_id2
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
if arbol.bdUsar != None:
objetoTabla = arbol.devolviendoTablaDeBase(self.tabla)
if objetoTabla != 0:
tablaForanea = arbol.devolviendoTablaDeBase(self.tabla2)
if tablaForanea != 0:
listaTabla1 = []
tabla1Nombres = []
for c in self.lista_id1:
for columnas in objetoTabla.lista_de_campos:
if columnas.nombre == c:
listaTabla1.append(columnas)
tabla1Nombres.append(columnas.nombre)
if(len(listaTabla1)==len(self.lista_id1)):
listaForaneas = []
tabla2Nombres = []
for c in self.lista_id2:
for columnas in tablaForanea.lista_de_campos:
if columnas.nombre == c:
listaForaneas.append(columnas)
tabla2Nombres.append(columnas.nombre)
if(len(listaForaneas)==len(self.lista_id2)):
listaPrimarias = 0
for columna in listaForaneas:
if columna.constraint != None:
for i in columna.constraint:
if i.tipo == Tipo_Dato_Constraint.PRIMARY_KEY:
listaPrimarias += 1
else:
error = Excepcion('42P01',"Semántico","No hay restricción unique que coincida con las columnas dadas en la tabla referida «"+self.tabla2+"»",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
if listaPrimarias == len(self.lista_id2):
for c in range(0,len(listaTabla1)):
if listaTabla1[c].constraint != None:
restriccion = Tipo_Constraint(self.id_constraint, Tipo_Dato_Constraint.FOREIGN_KEY, listaForaneas[c])
restriccion.referencia = self.tabla2
listaTabla1[c].constraint.append(restriccion)
else:
listaTabla1[c].constraint = []
restriccion = Tipo_Constraint(self.id_constraint, Tipo_Dato_Constraint.FOREIGN_KEY, listaForaneas[c])
restriccion.referencia = self.tabla2
listaTabla1[c].constraint.append(restriccion)
arbol.consola.append("Consulta devuelta correctamente.")
else:
error = Excepcion('42P01',"Semántico","No hay restricción unique que coincida con las columnas dadas en la tabla referida «"+self.tabla2+"»",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
else:
lista = set(self.lista_id2) - set(tabla2Nombres)
#print(tabla2Nombres,self.lista_id2)
#print(lista)
for i in lista:
error = Excepcion('42P01',"Semántico","No existe la columna «"+i+"» en la llave",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
else:
lista = set(self.lista_id1) - set(tabla1Nombres)
#print(tabla1Nombres,self.lista_id1)
#print(lista)
for i in lista:
error = Excepcion('42P01',"Semántico","No existe la columna «"+i+"» en la llave",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
else:
error = Excepcion('42P01',"Semántico","No existe la relación "+self.tabla2,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
else:
error = Excepcion('42P01',"Semántico","No existe la relación "+self.tabla,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
else:
error = Excepcion("100","Semantico","No ha seleccionado ninguna Base de Datos.",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
def analizar(self, tabla, arbol):
pass
def traducir(self, tabla, arbol):
#ALTER TABLE ID ADD CONSTRAINT ID FOREIGN KEY PARIZQ lista_id PARDER REFERENCES ID PARIZQ lista_id PARDER PUNTO_COMA
cadena = "\"alter table "
if(self.tabla):
cadena += self.tabla
cadena += " add constraint "
if(self.id_constraint):
cadena += self.id_constraint
cadena += " foreign key ( "
if(self.lista_id1):
for x in range(0,len(self.lista_id1)):
if(x > 0):
cadena += ", "
cadena += self.lista_id1[x]
cadena += " ) "
cadena += "references "
if(self.tabla2):
cadena += self.tabla2
cadena += " ( "
if(self.lista_id2):
for y in range(0,len(self.lista_id2)):
if(y > 0):
cadena += ", "
cadena += self.lista_id2[y]
cadena += " )"
cadena += ";\""
arbol.addComen("Asignar cadena")
temporal1 = tabla.getTemporal()
arbol.addc3d(f"{temporal1} = { cadena }")
arbol.addComen("Entrar al ambito")
temporal2 = tabla.getTemporal()
arbol.addc3d(f"{temporal2} = P+2")
temporal3 = tabla.getTemporal()
arbol.addComen("parametro 1")
arbol.addc3d(f"{temporal3} = { temporal2}+1")
arbol.addComen("Asignacion de parametros")
arbol.addc3d(f"Pila[{temporal3}] = {temporal1}")
arbol.addComen("Llamada de funcion")
arbol.addc3d(f"P = P+2")
arbol.addc3d(f"funcionintermedia()")
arbol.addComen("obtener resultado")
temporalX = tabla.getTemporal()
arbol.addc3d(f"{temporalX} = P+2")
temporalR = tabla.getTemporal()
arbol.addc3d(f"{temporalR} = Pila[{ temporalX }]")
arbol.addComen("Salida de funcion")
arbol.addc3d(f"P = P-2")
|
"""
@no 21
@name Merge Two Sorted Lists
"""
# Minimal ListNode definition added so the solution runs standalone
# (LeetCode normally provides this class).
class ListNode:
    def __init__(self, val=0):
        self.val = val
        self.next = None
class Solution:
def mergeTwoLists(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
answer = ListNode(0)
p = l1
q = l2
r = answer
while p or q:
if not p:
temp = q.val
q = q.next
elif not q:
temp = p.val
p = p.next
else:
if p.val > q.val:
temp = q.val
q = q.next
else:
temp = p.val
p = p.next
r.next = ListNode(temp)
r = r.next
return answer.next
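# A small standalone check (not part of the original LeetCode submission); it builds the
# lists 1->2->4 and 1->3->4 with a hypothetical helper and prints the merged values.
if __name__ == "__main__":
    def build(values):
        head = ListNode(0)
        tail = head
        for v in values:
            tail.next = ListNode(v)
            tail = tail.next
        return head.next
    merged = Solution().mergeTwoLists(build([1, 2, 4]), build([1, 3, 4]))
    out = []
    while merged:
        out.append(merged.val)
        merged = merged.next
    print(out)  # expected: [1, 1, 2, 3, 4, 4]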
|
from .futurasciences_crawling_job import FuturaSciencesCrawlingJob
from .liberation_crawling_job import LiberationCrawlingJob
from .nouvelobs_crawling_job import NouvelObsCrawlingJob
from .telerama_crawling_job import TeleramaCrawlingJob
from .lefigaro_crawling_job import LeFigaroCrawlingJob
from .lemonde_crawling_job import LeMondeCrawlingJob
|
from mongoengine import *
from mongoConfig import *
import math
import time
import sys
import re
from threading import Thread
sys.path.append('..')
from helpers import log
letters = []
def tfidfWorker():
while(len(letters) > 0):
currentLetter = letters.pop()
startTime = time.time()
log('tfidf', 'Calculating TFIDF for all terms starting with ' + currentLetter.upper())
regex = re.compile('^'+currentLetter, re.IGNORECASE)
terms = InvertedIndex.objects(term=regex)
for termEntry in terms:
term = termEntry['term']
idf = termEntry['idf']
for docKey in termEntry["doc_info"]:
tf = termEntry['doc_info'][docKey]['termCount']
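# log-scaled term frequency below: 1 + log2(tf), multiplied by the precomputed idf
# (a standard tf-idf weighting variant)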
log_tf = 0
if (tf != 0):
log_tf = math.log(tf, 2) + 1
tf_idf = log_tf * idf
url = termEntry['doc_info'][docKey]['url']
if url[0:8] == 'https://':
try:
document = Crawler.objects.get(url=url)
except DoesNotExist:
continue
if 'tfidf' not in document:
document['tfidf'] = {}
document['tfidf'][term.replace('.', ',')] = tf_idf
document.save()
log('time', 'Finished calculating tfidf for letter ' + currentLetter.upper() + ' in ' + str(time.time() - startTime) + ' seconds')
def calculateTFIDF(threads):
startTime = time.time()
connect(databaseName, host=databaseAddr, port=27017)
log('tfidf', 'Calculating TFIDF scores for all terms and documents')
for i in range(ord('a'), ord('z')+1):
letters.append(chr(i))
threadPool = []
for i in range(0, threads):
newThread = Thread(name='tfidf_'+str(i), target=tfidfWorker)
threadPool.append(newThread)
for i in range(0, threads):
threadPool[i].start()
for i in range(0, threads):
threadPool[i].join()
log("time", 'Execution finished in '+str(time.time()-startTime)+' seconds.')
if __name__ == "__main__":
calculateTFIDF(4)
|
# Given an alphanumeric string in which each single digit (0-9) is paired with the
# sequence of characters that follows it, output a string in which each character
# sequence is repeated its digit's number of times.
def multi_num_by_string(s):
curr_num=0
curr_chars=''
output=''
nums = set(['1','2','3','4','5','6','7','8','9','0'])
for char in s:
if char in nums:
output+=curr_num*curr_chars
curr_num=int(char)
curr_chars=''
else:
curr_chars+=char
output+=curr_num*curr_chars
return output
print(multi_num_by_string('2a3bc4def'))
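# The call above should print: aabcbcbcdefdefdefdef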
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
#
"""
Kube network manager DB
"""
from cfgm_common.vnc_object_db import VncObjectDBClient
class KubeNetworkManagerDB(VncObjectDBClient):
def __init__(self, args, logger):
self._db_logger = logger
cred = None
if (args.cassandra_user is not None and
args.cassandra_password is not None):
cred={'username':args.cassandra_user,
'password':args.cassandra_password}
super(KubeNetworkManagerDB, self).__init__(args.cassandra_server_list,
args.cluster_id, None, None, self._db_logger.log,
reset_config=False, credential=cred)
|
from timeit import default_timer as timer
class ProgressBar():
def __init__(self, task='task'):
self.task = task
self.started = False
self.finished = False
def reset(self):
self.started = True
self.finished = False
self.prev_progress = 1e-8
self.start = timer()
def __call__(self, progress, message=''):
if progress < 0.0 or progress > 1.0:
raise ValueError('progress must be between 0 and 1')
if not self.finished:
if not self.started or progress < self.prev_progress:
self.reset()
self.prev_progress = progress
msg = '|'
msg += '█'*round(progress*30)
msg += ' '*round((1-progress)*30)
msg += f'| {progress:.1%} '
msg += self.task
if progress > 1e-4:
t = timer()
elapsed = t - self.start
time_left = elapsed/progress-elapsed
msg += f' Remaining: {time_left:8.1f} s'
msg += message
if round(progress*1000) == 1000:
spaces = ' '*50
elapsed = t - self.start
print(f'\rFinished {self.task} in {elapsed:.1f} seconds {spaces}')
self.finished = True
else:
print(f'\r{msg}', end='')
elif progress < self.prev_progress:
self.reset()
if __name__ == "__main__":
a = []
pb = ProgressBar('kacsa')
for i in range(1000):
pb(i/999)
for x in range(50000):
a.append(x)
a[x] = 0
b = a[x]
a[x] = b
a = []
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 29 15:19:36 2020
@author: Francesco Di Lauro
@mail: F.Di-Lauro@sussex.ac.uk
Copyright 2020 Francesco Di Lauro. All Rights Reserved.
See LICENSE file for details
"""
import networkx as nx
import numpy as np
from heapq import *
from datetime import datetime
#0:00:23.466188
class Node():
def __init__(self,index,status, time):
self.index = index
self.status = status
self.rec_time = time
class Event():
def __init__(self,node,time,action, source=None):
self.time = time
self.node = node
self.action = action
self.source=source
def __lt__(self, other):
'''
heapq uses this comparison to order events by time in the priority queue
'''
return self.time < other.time
class fast_Gillespie():
'''
This algorithm is inspired by Joel Miller's algorithm for Fast Gillespie described in the book
Mathematics of Epidemics on Networks by Kiss, Miller, Simon, 2017, Springer. Section A.1.2 page 384
'''
def __init__(self, A, tau=1.0, gamma=2.0, i0=10, tauf=4, discretestep=500):
if type(A)==nx.classes.graph.Graph:
self.N = nx.number_of_nodes(A)
self.A = A
else:
raise BaseException("Input networkx object only.")
# Model Parameters (See Istvan paper).
self.tau = tau
self.gamma = gamma
self.tauf = tauf
# Time-keeping.
self.cur_time = 0
#output time vector
self.time_grid =np.linspace(0,tauf,discretestep)
self.current_index=0
#Node numbers.
self.I = np.zeros(discretestep)
#number of SI links
self.SI=np.zeros(self.N+1)
#time in each state
self.tk = np.zeros(self.N+1)
#node state is [0] if not infected and [1] if infected
X = np.array([0]*(self.N-i0) +[1]*i0)
#nodes initialisation
self.nodes = [Node(i,'susceptible', 0) for i in range(self.N)]
#keeps count of how many infected, useful for self.I and self.SI updates
self.num_I = 0
#randomly distribute the initial infected nodes
np.random.shuffle(X)
#Queue of Events, here each node has its own event
self.queue=[]
self.times=[]
self.infected=[]
self.cur_time=0
for index in np.where(X==1)[0]:
event = Event(self.nodes[index],0,'transmit', source=Node(-1,'infected',0))
heappush(self.queue,event)
def run_sim(self):
'''first round outside to determine SI'''
num_SI=0
while self.queue:
'''
condition to stop
'''
event = heappop(self.queue)
#dt is used only to update SI
'''
If node is susceptible and it has an event it must be an infection
'''
if event.action=='transmit':
if event.node.status =='susceptible':
dt = event.time -self.cur_time
#set new time accordingly
'''
check if time grid needs to be updated
'''
if self.cur_time <self.tauf:
while self.time_grid[self.current_index] <= self.cur_time:
self.I[self.current_index] = self.num_I
self.current_index +=1
'''
AFTER finding dt you can update SI
'''
self.SI[self.num_I] += num_SI*dt
self.tk[self.num_I] += dt
num_SI +=self.process_trans(event.node, event.time)
self.find_next_trans(event.source, event.node, event.time)
else:
if self.cur_time <self.tauf:
while self.time_grid[self.current_index] <= self.cur_time:
self.I[self.current_index] = self.num_I
self.current_index +=1
dt = event.time -self.cur_time
self.SI[self.num_I] += num_SI*dt
self.tk[self.num_I] += dt
num_SI +=self.process_rec(event.node,event.time)
self.I[self.current_index:] = self.I[self.current_index-1]
def process_trans(self,node,time):
'''
Utility for transmission events:
it also processes the infected node's neighbours.
Returns the net change in the number of SI links.
'''
#self.times.append(time)
self.cur_time=time
self.num_I +=1
'''
if len(self.infected) >0:
self.infected.append(self.infected[-1]+1)
else:
self.infected.append(1)
'''
node.status='infected'
r1 = np.random.rand()
rec_time = time -1.0/self.gamma *np.log(r1)
node.rec_time = rec_time
if rec_time < self.tauf:
event = Event(node,rec_time,'recover', None)
heappush(self.queue,event)
num_SI=0
for index in self.A.neighbors(node.index):
neighbor = self.nodes[index]
if neighbor.status=='susceptible':
num_SI+=1
else:
num_SI-=1
self.find_next_trans(source = node, target = neighbor, time = time)
return num_SI
def find_next_trans(self,source,target,time):
if target.rec_time < source.rec_time:
r1 = np.random.rand()
trans_time = max(time,target.rec_time) -1.0/self.tau *np.log(r1)
if trans_time < source.rec_time and trans_time<self.tauf:
event = Event(node=target, time=trans_time, action='transmit', source=source)
heappush(self.queue,event)
def process_rec(self, node, time):
node.status='susceptible'
node.rec_time = 0
num_SI=0
self.num_I -=1
for index in self.A.neighbors(node.index):
neighbor = self.nodes[index]
if neighbor.status=='susceptible':
num_SI-=1
else:
num_SI+=1
#self.times.append(time)
self.cur_time=time
#self.infected.append(self.infected[-1]-1)
return num_SI
if __name__=="__main__":
N=100000
k=8
from matplotlib import pyplot as plt
#A = nx.erdos_renyi_graph(int(N),k/float(N-1.0),seed = 100)
SI_threads=np.zeros(N+1)
tk_threads=np.zeros(N+1)
startTime = datetime.now()
#model = fast_Gillespie(A, tau =1, gamma =5, i0 =10)
#model.run_sim()
'''
#This generates the data to fit the C,a,p curves
ERgamma = [5, 4.5, 7]
ERtau =[1, 1, 4]
ERk = [8.0,10.0,7.0]
taufv = [4,3,0.8]
number_of_networkgen = 100
number_of_epid = 200
from datetime import datetime
from matplotlib import pyplot as plt
startTime = datetime.now()
for i in range(3):
for N in [1000,100000]:
k = ERk[i]
gamma = ERgamma[i]
tau = ERtau[i]
tauf = taufv[i]
R0_er = tau*(k*(N-2)/(N-1.0))/(tau+gamma)
print(R0_er)
#print(tauf)
networkchoice='E-R'
seed_vector=np.array([j*(124)+23 for j in range(number_of_networkgen*number_of_epid)])
A = nx.fast_gnp_random_graph(N,k/float(N-1.0))
fig = plt.figure()
for j in range(5):
model = fast_Gillespie(A, tau =tau, gamma =gamma, i0 =10)
model.run_sim() # Run the simulation.
plt.plot(model.time_grid,model.I)
model = fast_Gillespie(A,tau=tau, gamma=gamma, i0=N)
model.run_sim()
plt.plot(model.time_grid,model.I)
plt.title("k=%d"%k)
plt.show()
'''
#This bit is to produce a single realisation of the Gillespie algo used
#for maximum likelihood and inference.
import numpy as np
np.random.seed(114286)
tau = 1
k = 10
gamma = 4.5
T=4
N=1000
networkchoice='E-R'
seed = 2012
A = nx.fast_gnp_random_graph(N,k/float(N-1.0))
model = fast_Gillespie(A, tau =tau, gamma =gamma, i0 =5)
model.run_sim() # Run the simulation.
fig= plt.figure(figsize=(3,2.6))
plt.subplots_adjust(left=0.2, bottom=0.2, right=0.94, top=0.95, wspace=0, hspace=0)
ax0 = fig.add_subplot(111)
ax0.step(model.time_grid,model.I/N, label=r"BD process")
ax0.set_xlim(0, T)
ax0.set_xlabel(r"Time", size=9)
ax0.set_ylim(0, 0.6)
ax0.set_xticklabels([r"$0$",r"$1$",r"$2$",r"$3$",r"$4$",r"$5$"], size=7)
ax0.set_yticklabels([r"$0.0$",r"$0.1$",r"$0.2$",r"$0.3$",r"$0.4$",r"$0.5$",r"$0.6$"], size=7)
sample_size = 30
sample_every = int(len(model.I)/sample_size)
data = model.I[::sample_every]
data = data[:30]
time = model.time_grid[::sample_every]
time=time[:30]
ax0.set_ylabel(r"Infected", size=9)
ax0.scatter(time,data/N, color="red", label=r'Data')
ax0.legend(loc="best")
Data = np.zeros((2,sample_size))
Data[0] = time
Data[1] = data
Data[1][0] = 5
Data = Data.T
plt.savefig("Data_ER.png",format='png', dpi=400)
np.savetxt("Dataset_ER.out", Data)
'''
for i in range(10):
model = fast_Gillespie(A, tau =1, gamma =5, i0 =10)
model.run_sim()
SI_threads += model.SI
tk_threads += model.tk
for i in range(10):
model = fast_Gillespie(A, tau =1, gamma =5, i0 =100)
model.run_sim()
SI_threads += model.SI
tk_threads += model.tk
#plt.plot(model.time_grid, model.I)
print(datetime.now()-startTime)
import matplotlib.pyplot as plt
tk_threads[np.where(tk_threads==0)]=1
plt.plot(np.arange(0,101), SI_threads/tk_threads)
#plt.plot(model.time_grid, model.I)
#plt.show()
#I = np.arange(1001)
#avg_SI= model.SI/model.tk
#plt.plot(I,avg_SI)
'''
|
#================================================================
#
# File name : utils.py
# Author : PyLessons
# Created date: 2020-09-27
# Website : https://pylessons.com/
# GitHub : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
# Description : additional yolov3 and yolov4 functions
#
#================================================================
from multiprocessing import Process, Queue, Pipe
import cv2
import time
import random
import colorsys
import numpy as np
import tensorflow as tf
from yolov3.configs import *
from yolov3.yolov4 import *
from tensorflow.python.saved_model import tag_constants
def load_yolo_weights(model, weights_file):
tf.keras.backend.clear_session() # used to reset layer names
# load Darknet original weights to TensorFlow model
if YOLO_TYPE == "yolov3":
range1 = 75 if not TRAIN_YOLO_TINY else 13
range2 = [58, 66, 74] if not TRAIN_YOLO_TINY else [9, 12]
if YOLO_TYPE == "yolov4":
range1 = 110 if not TRAIN_YOLO_TINY else 21
range2 = [93, 101, 109] if not TRAIN_YOLO_TINY else [17, 20]
with open(weights_file, 'rb') as wf:
major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
j = 0
for i in range(range1):
if i > 0:
conv_layer_name = 'conv2d_%d' %i
else:
conv_layer_name = 'conv2d'
if j > 0:
bn_layer_name = 'batch_normalization_%d' %j
else:
bn_layer_name = 'batch_normalization'
conv_layer = model.get_layer(conv_layer_name)
filters = conv_layer.filters
k_size = conv_layer.kernel_size[0]
in_dim = conv_layer.input_shape[-1]
if i not in range2:
# darknet weights: [beta, gamma, mean, variance]
bn_weights = np.fromfile(wf, dtype=np.float32, count=4 * filters)
# tf weights: [gamma, beta, mean, variance]
bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
bn_layer = model.get_layer(bn_layer_name)
j += 1
else:
conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)
# darknet shape (out_dim, in_dim, height, width)
conv_shape = (filters, in_dim, k_size, k_size)
            conv_weights = np.fromfile(wf, dtype=np.float32, count=np.prod(conv_shape))
# tf shape (height, width, in_dim, out_dim)
conv_weights = conv_weights.reshape(conv_shape).transpose([2, 3, 1, 0])
if i not in range2:
conv_layer.set_weights([conv_weights])
bn_layer.set_weights(bn_weights)
else:
conv_layer.set_weights([conv_weights, conv_bias])
assert len(wf.read()) == 0, 'failed to read all data'
def Load_Yolo_model():
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
print(f'GPUs {gpus}')
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: pass
if YOLO_FRAMEWORK == "tf": # TensorFlow detection
if YOLO_TYPE == "yolov4":
Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS
if YOLO_CUSTOM_WEIGHTS == False:
print("Loading Darknet_weights from:", Darknet_weights)
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
load_yolo_weights(yolo, Darknet_weights) # use Darknet weights
else:
print("Loading custom weights from:", YOLO_CUSTOM_WEIGHTS)
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
yolo.load_weights(f"./checkpoints/{TRAIN_MODEL_NAME}") # use custom weights
elif YOLO_FRAMEWORK == "trt": # TensorRT detection
saved_model_loaded = tf.saved_model.load(YOLO_CUSTOM_WEIGHTS, tags=[tag_constants.SERVING])
signature_keys = list(saved_model_loaded.signatures.keys())
yolo = saved_model_loaded.signatures['serving_default']
return yolo
def image_preprocess(image, target_size, gt_boxes=None):
ih, iw = target_size
h, w, _ = image.shape
scale = min(iw/w, ih/h)
nw, nh = int(scale * w), int(scale * h)
image_resized = cv2.resize(image, (nw, nh))
image_paded = np.full(shape=[ih, iw, 3], fill_value=128.0)
dw, dh = (iw - nw) // 2, (ih-nh) // 2
image_paded[dh:nh+dh, dw:nw+dw, :] = image_resized
image_paded = image_paded / 255.
if gt_boxes is None:
return image_paded
else:
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] * scale + dw
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] * scale + dh
return image_paded, gt_boxes
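# Example (sketch): letterbox-resize a single image exactly as detect_image()
# does before feeding it to the network.  "some_image.jpg" is a placeholder path.
# img = cv2.imread("some_image.jpg")
# blob = image_preprocess(np.copy(img), [YOLO_INPUT_SIZE, YOLO_INPUT_SIZE])  # (416, 416, 3), values scaled to [0, 1]
# blob = blob[np.newaxis, ...].astype(np.float32)                            # add batch dimension -> (1, 416, 416, 3)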
def draw_detections(image, bboxes, birds_eye=None, combinations=None, distances=None, centroids=None, centroids_transformed=None, dist_threshold = None):
image_h, image_w, _ = image.shape
# Set thickness of pen
bbox_thick = int(0.6 * (image_h + image_w)/1000)
if bbox_thick < 1: bbox_thick = 1
fontScale = 0.75 * bbox_thick
# Combinations wasn't supplied, just draw bboxes
if not combinations:
for i, bbox in enumerate(bboxes):
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
(x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])
# put object rectangle
cv2.rectangle(image, (x1, y1), (x2, y2), (255,255,0), bbox_thick*2)
# get text label
label = " {:.2f}".format(score)
# get text size
(text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, thickness=bbox_thick)
# put filled text rectangle
cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 - text_height - baseline), (255,255,0), thickness=cv2.FILLED)
# put text above rectangle
cv2.putText(image, label, (x1, y1-4), cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, (255,255,255), bbox_thick, lineType=cv2.LINE_AA)
return image
# Need to draw violation lines too
red_combinations = []
green_combinations = []
# Fetch red and green combinations (violating, not violating the distance threshold)
for dist_count, pair in enumerate(combinations):
if distances[dist_count] <= dist_threshold:
red_combinations.append(pair)
else:
green_combinations.append(pair)
# Fetch the unique box IDs in red and green
red_unique = list(set(list(map(lambda x: x[0], red_combinations))+list(map(lambda x: x[1], red_combinations))))
green_unique = list(set(list(map(lambda x: x[0], green_combinations))+list(map(lambda x: x[1], green_combinations))))
green_only = list(np.setdiff1d(green_unique, red_unique))
# Drawing green bounding boxes on OG Frame and Green dots in birds eye frame
for id in green_only:
bbox = bboxes[id]
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
(x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])
# put object rectangle and label
cv2.rectangle(image, (x1, y1), (x2, y2), (255,255,0), bbox_thick*2)
label = " {:.2f}".format(score)
(text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, thickness=bbox_thick)
# Put filled rect
cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 - text_height - baseline), (255,255,0), thickness=cv2.FILLED)
# Text above rect
cv2.putText(image, label, (x1, y1-4), cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, (255,255,255), bbox_thick, lineType=cv2.LINE_AA)
# Dots in birdseye
birds_eye = cv2.circle(birds_eye, centroids_transformed[id], 3, (255,255,0),-1)
# Drawing red bounding boxes,lines, red dots and lines in birds eye
drawn_boxes = np.zeros((len(bboxes)), dtype=np.uint8)
for id, pair in enumerate(red_combinations):
pairs = [pair[0], pair[1]]
# Draw red bounding box
for box_id in pairs:
if drawn_boxes[box_id] == 0:
drawn_boxes[box_id] = 1
bbox = bboxes[box_id]
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
(x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])
# put object rectangle and label
cv2.rectangle(image, (x1, y1), (x2, y2), (0,0,255), bbox_thick*2)
label = " {:.2f}".format(score)
(text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, thickness=bbox_thick)
# Put filled rect
cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 - text_height - baseline), (0,0,255), thickness=cv2.FILLED)
# Text above rect
cv2.putText(image, label, (x1, y1-4), cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, (255,255,255), bbox_thick, lineType=cv2.LINE_AA)
# Dots in birdseye
birds_eye = cv2.circle(birds_eye, centroids_transformed[box_id], 3, (0,0,255),-1)
# Draw lines OG frame, birds eye view
cv2.line(image, centroids[pair[0]], centroids[pair[1]], (0,0,255), bbox_thick)
cv2.line(birds_eye, centroids_transformed[pair[0]], centroids_transformed[pair[1]], (0,0,255), 1)
# Print number of violations
num_violations = len(red_unique)
label = "Number of violations: {}".format(num_violations)
# put filled text rectangle
start_x = int(image_w/30)
start_y = int(image_h/25)
fontScale = int(0.5 * min(image_h, image_w) / 1000)
if fontScale < 1: fontScale = 1
(text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, thickness=bbox_thick)
# OG Frame
cv2.rectangle(image, (start_x, start_y), (start_x + text_width, start_y - text_height - baseline), (0,0,255), thickness=cv2.FILLED)
cv2.putText(image, label, (start_x, start_y), cv2.FONT_HERSHEY_COMPLEX_SMALL, fontScale, (255,255,255), bbox_thick,lineType=cv2.LINE_AA)
# Birds eye frame
cv2.rectangle(birds_eye, (start_x, start_y), (start_x + text_width, start_y - text_height - baseline), (0,0,255), thickness=cv2.FILLED)
cv2.putText(birds_eye,label,(start_x, start_y),cv2.FONT_HERSHEY_COMPLEX_SMALL,fontScale,(255,255,255), bbox_thick,lineType=cv2.LINE_AA)
# Return
return image, birds_eye
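# --- Hedged sketch (not in the original file) --------------------------------
# draw_detections() expects pre-computed index pairs, distances and centroids.
# One plausible way to build them, assuming a 3x3 perspective matrix M mapping
# the camera view onto the birds-eye plane is available:
#
# from itertools import combinations as iter_combinations
# centroids = [(int((b[0] + b[2]) / 2), int(b[3])) for b in bboxes]   # bottom-centre of each box
# pts = np.float32(centroids).reshape(-1, 1, 2)
# centroids_transformed = [tuple(map(int, p[0])) for p in cv2.perspectiveTransform(pts, M)]
# combinations = list(iter_combinations(range(len(bboxes)), 2))
# distances = [np.linalg.norm(np.array(centroids_transformed[i]) - np.array(centroids_transformed[j]))
#              for i, j in combinations]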
def bboxes_iou(boxes1, boxes2):
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = np.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
ious = np.maximum(1.0 * inter_area / union_area, np.finfo(np.float32).eps)
return ious
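# Quick sanity check (sketch): two unit squares overlapping by half have an IoU
# of 1/3 (intersection 0.5, union 1.5).
# bboxes_iou(np.array([0, 0, 1, 1]), np.array([0.5, 0, 1.5, 1]))  # ~0.3333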
def nms(bboxes, iou_threshold, sigma=0.3, method='nms', limits = None):
"""
:param bboxes: (xmin, ymin, xmax, ymax, score, class)
Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
https://github.com/bharatsingh430/soft-nms
"""
    # `limits` is optional: when it is not supplied (as in the other call sites
    # in this file), accept boxes anywhere in the frame
    if limits is None:
        min_limits, max_limits = [-np.inf, -np.inf], [np.inf, np.inf]
    else:
        min_limits, max_limits = limits[0], limits[1]
classes_in_img = list(set(bboxes[:, 5]))
CLASSES = YOLO_COCO_CLASSES
NUM_CLASS = read_class_names(CLASSES)
best_bboxes = []
for cls in classes_in_img:
cls_mask = (bboxes[:, 5] == cls)
cls_bboxes = bboxes[cls_mask]
# Process 1: Determine whether the number of bounding boxes is greater than 0
while len(cls_bboxes) > 0:
# Process 2: Select the bounding box with the highest score according to socre order A
max_ind = np.argmax(cls_bboxes[:, 4])
best_bbox = cls_bboxes[max_ind]
if NUM_CLASS[int(best_bbox[5])] == "person":
if best_bbox[0] >= min_limits[0] and best_bbox[1] >= min_limits[1]:
if best_bbox[2] <= max_limits[0] and best_bbox[3] <= max_limits[1]:
best_bboxes.append(best_bbox)
cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
# Process 3: Calculate this bounding box A and
# Remain all iou of the bounding box and remove those bounding boxes whose iou value is higher than the threshold
iou = bboxes_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
weight = np.ones((len(iou),), dtype=np.float32)
assert method in ['nms', 'soft-nms']
if method == 'nms':
iou_mask = iou > iou_threshold
weight[iou_mask] = 0.0
if method == 'soft-nms':
weight = np.exp(-(1.0 * iou ** 2 / sigma))
cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
score_mask = cls_bboxes[:, 4] > 0.
cls_bboxes = cls_bboxes[score_mask]
return best_bboxes
def postprocess_boxes(pred_bbox, original_image, input_size, score_threshold):
valid_scale=[0, np.inf]
pred_bbox = np.array(pred_bbox)
pred_xywh = pred_bbox[:, 0:4]
pred_conf = pred_bbox[:, 4]
pred_prob = pred_bbox[:, 5:]
# 1. (x, y, w, h) --> (xmin, ymin, xmax, ymax)
pred_coor = np.concatenate([pred_xywh[:, :2] - pred_xywh[:, 2:] * 0.5,
pred_xywh[:, :2] + pred_xywh[:, 2:] * 0.5], axis=-1)
# 2. (xmin, ymin, xmax, ymax) -> (xmin_org, ymin_org, xmax_org, ymax_org)
org_h, org_w = original_image.shape[:2]
resize_ratio = min(input_size / org_w, input_size / org_h)
dw = (input_size - resize_ratio * org_w) / 2
dh = (input_size - resize_ratio * org_h) / 2
pred_coor[:, 0::2] = 1.0 * (pred_coor[:, 0::2] - dw) / resize_ratio
pred_coor[:, 1::2] = 1.0 * (pred_coor[:, 1::2] - dh) / resize_ratio
# 3. clip some boxes those are out of range
pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),
np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis=-1)
invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))
pred_coor[invalid_mask] = 0
# 4. discard some invalid boxes
bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1))
scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))
# 5. discard boxes with low scores
classes = np.argmax(pred_prob, axis=-1)
scores = pred_conf * pred_prob[np.arange(len(pred_coor)), classes]
score_mask = scores > score_threshold
mask = np.logical_and(scale_mask, score_mask)
coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]
return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis=-1)
def detect_image(Yolo, image_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
original_image = cv2.imread(image_path)
    original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    # note: the two successive BGR->RGB conversions cancel each other out, so the
    # array effectively stays in OpenCV's BGR channel order (kept as in the original code)
    original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
if output_path != '': cv2.imwrite(output_path, image)
if show:
# Show the image
cv2.imshow("predicted image", image)
# Load and hold the image
cv2.waitKey(0)
# To close the window after the required kill value was provided
cv2.destroyAllWindows()
return image
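# Example usage (sketch; the image paths are placeholders):
# yolo = Load_Yolo_model()
# detect_image(yolo, "IMAGES/street.jpg", "IMAGES/street_pred.jpg",
#              input_size=YOLO_INPUT_SIZE, show=True, score_threshold=0.3, iou_threshold=0.45)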
def Predict_bbox_mp(Frames_data, Predicted_data, Processing_times):
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: print("RuntimeError in tf.config.experimental.list_physical_devices('GPU')")
Yolo = Load_Yolo_model()
times = []
while True:
if Frames_data.qsize()>0:
image_data = Frames_data.get()
t1 = time.time()
Processing_times.put(time.time())
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
Predicted_data.put(pred_bbox)
def postprocess_mp(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime):
times = []
while True:
if Predicted_data.qsize()>0:
pred_bbox = Predicted_data.get()
if realtime:
while original_frames.qsize() > 1:
original_image = original_frames.get()
else:
original_image = original_frames.get()
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
times.append(time.time()-Processing_times.get())
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
#print("Time: {:.2f}ms, Final FPS: {:.1f}".format(ms, fps))
Processed_frames.put(image)
def Show_Image_mp(Processed_frames, show, Final_frames):
while True:
if Processed_frames.qsize()>0:
image = Processed_frames.get()
Final_frames.put(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
# detect from webcam
def detect_video_realtime_mp(video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors='', realtime=False):
if realtime:
vid = cv2.VideoCapture(0)
else:
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
no_of_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
original_frames = Queue()
Frames_data = Queue()
Predicted_data = Queue()
Processed_frames = Queue()
Processing_times = Queue()
Final_frames = Queue()
p1 = Process(target=Predict_bbox_mp, args=(Frames_data, Predicted_data, Processing_times))
p2 = Process(target=postprocess_mp, args=(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime))
p3 = Process(target=Show_Image_mp, args=(Processed_frames, show, Final_frames))
p1.start()
p2.start()
p3.start()
while True:
ret, img = vid.read()
if not ret:
break
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_frames.put(original_image)
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
Frames_data.put(image_data)
while True:
if original_frames.qsize() == 0 and Frames_data.qsize() == 0 and Predicted_data.qsize() == 0 and Processed_frames.qsize() == 0 and Processing_times.qsize() == 0 and Final_frames.qsize() == 0:
p1.terminate()
p2.terminate()
p3.terminate()
break
elif Final_frames.qsize()>0:
image = Final_frames.get()
if output_path != '': out.write(image)
cv2.destroyAllWindows()
def detect_video(Yolo, video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times, times_2 = [], []
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
while True:
_, img = vid.read()
try:
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
except:
break
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
t3 = time.time()
times.append(t2-t1)
times_2.append(t3-t1)
times = times[-20:]
times_2 = times_2[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
fps2 = 1000 / (sum(times_2)/len(times_2)*1000)
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
print("Time: {:.2f}ms, Detection FPS: {:.1f}, total FPS: {:.1f}".format(ms, fps, fps2))
if output_path != '': out.write(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
# detect from webcam
def detect_realtime(Yolo, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times = []
vid = cv2.VideoCapture(0)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
while True:
_, frame = vid.read()
try:
original_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
original_frame = cv2.cvtColor(original_frame, cv2.COLOR_BGR2RGB)
except:
break
image_data = image_preprocess(np.copy(original_frame), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_frame, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
times.append(t2-t1)
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
print("Time: {:.2f}ms, {:.1f} FPS".format(ms, fps))
frame = draw_bbox(original_frame, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_frame, bboxes, read_class_names(CLASSES))
image = cv2.putText(frame, "Time: {:.1f}FPS".format(fps), (0, 30),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
if output_path != '': out.write(frame)
if show:
cv2.imshow('output', frame)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
|
import info
class subinfo(info.infoclass):
def setDependencies(self):
self.runtimeDependencies["virtual/base"] = None
self.runtimeDependencies["libs/qt5/qtbase"] = None
self.runtimeDependencies["libs/exiv2"] = None
self.runtimeDependencies["libs/iconv"] = None
self.runtimeDependencies["libs/libbzip2"] = None
self.runtimeDependencies["libs/libxml2"] = None
self.runtimeDependencies["libs/zlib"] = None
def setTargets(self):
self.svnTargets['master'] = 'git://anongit.kde.org/strigi'
for ver in ['0.7.6', '0.7.7', '0.7.8']:
self.svnTargets[ver] = 'git://anongit.kde.org/strigi||v%s' % ver
self.svnTargets['komobranch'] = 'branches/work/komo/strigi'
for i in ['4.3.0', '4.3.1', '4.3.2', '4.3.3', '4.3.4', '4.3']:
self.svnTargets[i] = 'tags/kdesupport-for-4.3/kdesupport/strigi'
for i in ['4.4.0', '4.4.1', '4.4.2', '4.4.3', '4.4.4', '4.4']:
self.svnTargets[i] = 'tags/kdesupport-for-4.4/strigi'
for ver in ['0.7.2', '0.7.5']:
self.targets[ver] = 'http://www.vandenoever.info/software/strigi/strigi-' + ver + '.tar.bz2'
self.targetInstSrc[ver] = 'strigi-' + ver
self.patchToApply['0.7.2'] = ("strigi-0.7.2-20101223.diff", 1)
self.patchToApply['0.7.5'] = [("strigi-0.7.5-20120225.diff", 1),
("add-intel-compiler-to-strigi-plugin-macros.diff", 1),
("do-not-use-fpic-also-on-intel-compiler.diff", 1),
("isblank-false-positive-intel-compiler.diff", 1),
("intel-cmake-adaptations.diff", 1)]
self.patchToApply['0.7.8'] = [("strigi-0.7.8-20130906.diff", 1),
("add-intel-compiler-to-strigi-plugin-macros-0.7.8.diff", 1),
("do-not-use-fpic-also-on-intel-compiler-0.7.8.diff", 1),
("isblank-false-positive-intel-compiler.diff", 1),
("intel-cmake-adaptations-0.7.8.diff", 1)]
self.targetDigests['0.7.2'] = 'b4c1472ef068536acf9c5c4c8f033a97f9c69f9f'
self.description = "a desktop search engine and indexer"
self.defaultTarget = '0.7.8'
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
self.subinfo.options.fetch.checkoutSubmodules = True
self.subinfo.options.configure.args = ""
self.subinfo.options.configure.args += "-DENABLE_CLUCENE=OFF "
if self.buildTarget == "master":
            self.subinfo.options.configure.args += (  # append so the CLUCENE flag set above is preserved
" -DSTRIGI_SYNC_SUBMODULES=ON "
" -DGIT_EXECUTABLE=%s "
% os.path.join(self.rootdir, "dev-utils", "git", "bin",
"git.exe"))
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
plt.rc('font', family='malgun gothic')
plt.rcParams['axes.unicode_minus'] = False
R = 8314.472
T = 310
F = 96485.3415
RTF = R*T/F
FRT = F/(R*T)
FFRT = F*F/(R*T)
current_time = 0
next_time = 1000
dt = 0.001
V = -86.2
m = 0
h = 0.75
j = 0.75
r = 0
s = 1
Xs = 0
d = 0
f = 1
fca = 1
Nai = 11.6
Cai = 0.000045
Casr = 0.2
Ki = 137.18
xr1 = 0
xr2 = 1
g = 1
a12 = []
times = []
Cai2 = []
INa2 = []
Ito2 = []
IKr2 = []
IKs2 = []
ICaL2 = []
iK12 = []
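# Forward-Euler integration (scheme used throughout the loop below): every state
# variable y is advanced as y_next = y + dy/dt * dt with dt = 0.001 ms over
# 0..1000 ms; gating variables follow dy/dt = (y_inf - y) / tau_y.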
while current_time <= next_time:
GNa = 14.838
m1 = 1 / (np.power(1 + np.exp((-56.86 - V) / 9.03), 2))
# print('m1 : ', m1)
h1 = 1 / (np.power(1 + np.exp((V + 71.55) / 7.43), 2))
# print('h1 : ', h1)
j1 = 1 / (np.power(1 + np.exp((V + 71.55) / 7.43), 2))
am = 1 / (1 + np.exp((-60 - V) / 5))
bm = 0.1 / (1 + np.exp((V + 35) / 5)) + 0.1 / (1 + np.exp((V - 50) / 200))
Tm = am * bm
if V < -40:
ah = 0.057 * np.exp(-(V + 80) / 6.8)
else:
ah = 0
if V < -40:
Bh = 2.7 * np.exp(0.079 * V) + 310000 * np.exp(0.3485 * V)
else:
Bh = 0.77 / (0.13 * (1 + np.exp((V + 10.66) / -11.1)))
Th = 1 / (ah + Bh)
if V < -40:
aj = ((-25428 * np.exp(0.2444 * V) - 6.948e-6 * np.exp(-0.04391 * V)) * (V + 37.78)) / (1 + np.exp(0.311 * (V + 79.23)))
else:
aj = 0
if V < -40:
Bj = (0.02424 * np.exp(-0.01052 * V)) / (1 + np.exp(-0.1378 * (V + 40.14)))
else:
Bj = (0.6 * np.exp(0.057 * V)) / (1 + np.exp(-0.1 * (V + 32)))
Tj = 1 / (aj + Bj)
dm = (m1 - m) / Tm
dh = (h1 - h) / Th
dj = (j1 - j) / Tj
Nao = 140
ENa = RTF * np.log(Nao / Nai)
INa = GNa * np.power(m,3) * h * j * (V - ENa)
GK1 = 5.405
Ko = 5.4
Ek = RTF * np.log(Ko / Ki)
aK1 = 0.1 / (1 + np.exp(0.06 * (V - Ek - 200)))
bK1 = (3 * np.exp(0.0002 * (V - Ek + 100)) + np.exp(0.1 * (V - Ek - 10))) / ((1 + np.exp(-0.5 * (V - Ek))))
xK1 = aK1 / (aK1 + bK1)
iK1 = GK1 * xK1 * (V - Ek)
Gtoendo = 0.073
r1 = 1 / (1 + (np.exp((20 - V) / (6))))
Tr = 9.5 * np.exp(np.power((V + 40),2) / -1800) + 0.8
dr = (r1 - r) / Tr
s1 = 1 / ((1 + (np.exp((V + 28) / 5))))
Ts = (1000 * np.exp(np.power((V + 67),2) / -1000)) + 8
ds = (s1 - s) / Ts
Ko = 5.4
Ek = RTF * np.log(Ko / Ki)
Ito = Gtoendo * r * s * (V - Ek)
GKr = 0.096
Ko = 5.4
xr11 = 1 / (1 + (np.exp((-26 - V) / 7)))
xr21 = 1 / (1 + (np.exp((V + 88) / 24)))
axr1 = 450 / (1 + (np.exp((-45 - V) / 10)))
Bxr1 = 6 / (1 + (np.exp((V + 30) / 11.5)))
Txr1 = axr1 * Bxr1
axr2 = 3 / (1 + (np.exp((-60 - V) / 20)))
Bxr2 = 1.12 / (1 + (np.exp((V - 60) / 20)))
Txr2 = axr2 * Bxr2
dxr1 = (xr11 - xr1) / Txr1
dxr2 = (xr21 - xr2) / Txr2
Ek = RTF * np.log(Ko / Ki)
IKr = GKr * np.sqrt(Ko / 5.4) * xr1 * xr2 * (V - Ek)
Gksepiendo = 0.245
Xs1 = 1 / (1 + (np.exp((-5 - V) / (14))))
axs = 1100 / (np.sqrt(1 + (np.exp((-10 - V) / (6)))))
bxs = 1 / (1 + (np.exp((V - 60) / (20))))
Txs = axs * bxs
dXs = (Xs1 - Xs) / Txs
Ko = 5.4
pKNa = 0.03
Nao = 140
EKs = RTF * np.log((Ko + pKNa * Nao) / (Ki + pKNa * Nai))
IKs = Gksepiendo * np.power(Xs,2) * (V - EKs)
GCaL = 0.000175
Tfca = 2
d1 = 1 / (1 + np.exp((-5 - V) / 7.5))
f1 = 1 / (1 + np.exp((V + 20) / 7))
afca = 1 / (1 + np.power(Cai / 0.000325,8))
bfca = 0.1 / (1 + np.exp((Cai - 0.0005) / 0.0001))
rfca = 0.2 / (1 + np.exp((Cai - 0.00075) / 0.0008))
fca1 = (afca + bfca + rfca + 0.23) / 1.46
d_fca = (fca1 - fca) / Tfca
ad = 1.4 / (1 + np.exp((-35 - V) / 13)) + 0.25
Bd = 1.4 / (1 + np.exp((V + 5) / 5))
rd = 1 / (1 + np.exp((50 - V) / 20))
Td = ad * Bd + rd
Tf = 1125 * np.exp(-(np.power(V + 27,2)) / 240) + 80 + 165 / (1 + np.exp((25 - V) / 10))
dd = (d1 - d) / Td
df = (f1 - f) / Tf
if 0.01 * d_fca > 0 and V > -60:
dfca = 0
else:
dfca = d_fca
Cao = 2
ICaL = GCaL * d * f * fca * 4 * V * FFRT * (Cai * np.exp(2 * V * FRT) - 0.341 * Cao) / (np.exp(2 * V * FRT) - 1)
kNaCa = 1000
z = 0.35
y = 2.5
KmNai = 87.5
KmCa = 1.38
ksat = 0.1
Cao = 2
Nao = 140
up = np.exp(z * V * FRT) * (np.power(Nai,3)) * Cao - np.exp((z - 1) * V * FRT) * (np.power(Nao,3)) * Cai * y
down = (np.power(KmNai,3) + np.power(Nao,3)) * (KmCa + Cao) * (1 + ksat * np.exp((z - 1) * V * FRT))
INaCa = kNaCa * (up / down)
PNaK = 1.362
KmK = 1
KmNa = 40
Ko = 5.4
INaK = ((((PNaK * Ko) / (Ko + KmK)) * Nai) / (Nai + KmNa)) / (1 + 0.1245 * np.exp(-0.1 * V * FRT) + 0.0353 * np.exp(-V * FRT))
KpCa = 0.0005
GpCa = 0.825
IpCa = (GpCa * Cai) / (KpCa + Cai)
GpK = 0.0146
Ko = 5.4
Ek = RTF * np.log(Ko / Ki)
IpK = GpK * ((V - Ek) / (1 + (np.exp((25 - V) / (5.98)))))
GbCa = 0.000592
Cao = 2
ECa = 0.5 * RTF * np.log(Cao / Cai)
IbCa = GbCa * (V - ECa)
GbNa = 0.00029
Nao = 140
ENa = RTF * np.log(Nao / Nai)
IbNa = GbNa * (V - ENa)
Bufc = 0.15
Kbufc = 0.001
Bufsr = 10
Kbufsr = 0.3
Vc = 0.016404
Kup = 0.00025
brel = 0.25
Vsr = 0.001094
Vleak = 8e-5
Vmaxup = 0.000425
arel = 0.016464
crel = 0.008232
Cm = 0.185
if Cai < 0.00035:
gm = 1 / (1 + np.power(Cai / 0.00035,6))
else:
gm = 1 / (1 + np.power(Cai / 0.00035,16))
Tg = 2
d_g = (gm-g) / Tg
if 0.01*d_g > 0 and V > -60:
dg = 0
else:
dg = d_g
Ileak = Vleak * (Casr - Cai)
Iup = Vmaxup / (1 + (np.power(Kup,2)) / (np.power(Cai,2)))
Irel = ((arel * (np.power(Casr,2))) / (np.power(brel,2) + np.power(Casr,2)) + crel) * d * g
Caibufc = 1 / (1 + (Bufc * Kbufc) / (np.power(Cai + Kbufc,2)))
Casrbufsr = 1 / (1 + (Bufsr * Kbufsr) / (np.power(Casr + Kbufsr,2)))
dCai = Caibufc * (Ileak - Iup + Irel - (ICaL + IbCa + IpCa - (2 * INaCa)) / (2 * Vc * F) * Cm)
dCasr = Casrbufsr * Vc / Vsr * (Iup - (Irel + Ileak))
Iion = (INa
+ iK1
+ Ito
+ IKr
+ IKs
+ ICaL
+ INaCa
+ INaK
+ IpCa
+ IpK
+ IbCa
+ IbNa)
if current_time >= 100 and current_time <= 101:
i_stim = -52
dV = -(Iion + i_stim)
else:
i_stim = 0
dV = -(Iion + i_stim)
# All : dNai = (-(INa + IbNa + 3 * INaK + 3 * INaCa) / (Vc * F)) * Cm
# All : dKi = (-((iK1 + Ito + IKr + IKs + IpK + i_stim) - (2 * INaK)) /(Vc * F)) * Cm
# IKr : dKi = (-((IKr + i_stim) ) /(Vc * F)) * Cm
Cm = 0.185
Vc = 0.016404
dNai = (INa + IbNa + 3 * INaK + 3 * INaCa) / -(Vc * F) * Cm
dKi = ((iK1 + Ito + IKr + IKs + IpK + i_stim) - (2 * INaK)) /-(Vc * F) * Cm
# update time
current_time += dt
# integrate
V_Next = V + dV*dt
m_Next = m + dm*dt
h_Next = h + dh*dt
j_Next = j + dj*dt
r_Next = r + dr*dt
s_Next = s + ds*dt
Xs_Next = Xs + dXs*dt
d_Next = d + dd*dt
f_Next = f + df*dt
fca_Next = fca + dfca*dt
Nai_Next = Nai + dNai*dt
Cai_Next = Cai + dCai*dt
Casr_Next = Casr + dCasr*dt
g_Next = g + dg*dt
Ki_Next = Ki + dKi*dt
xr1_Next = xr1 + dxr1*dt
xr2_Next = xr2 + dxr2*dt
# update values
V = V_Next
m = m_Next
h = h_Next
j = j_Next
r = r_Next
s = s_Next
Xs = Xs_Next
d = d_Next
f = f_Next
fca = fca_Next
Nai = Nai_Next
Cai = Cai_Next
Casr = Casr_Next
g = g_Next
Ki = Ki_Next
xr1 = xr1_Next
xr2 = xr2_Next
times.append(current_time)
a12.append(V_Next)
Cai2.append(Cai_Next)
INa2.append(INa)
Ito2.append(Ito)
IKr2.append(IKr)
IKs2.append(IKs)
ICaL2.append(ICaL)
iK12.append(iK1)
# Plot the results of the Euler integration
plt.suptitle("Euler method in tentusscher")
plt.subplot(2, 4, 1)
plt.plot(times, a12)
plt.title('Action potential')
plt.subplot(2, 4, 2)
plt.plot(times, Cai2)
plt.title('Cai')
plt.subplot(2, 4, 3)
plt.plot(times, INa2)
plt.title('INa')
plt.subplot(2, 4, 4)
plt.plot(times, Ito2)
plt.title('Ito')
plt.subplot(2, 4, 5)
plt.plot(times, IKr2)
plt.title('IKr')
plt.subplot(2, 4, 6)
plt.plot(times, IKs2)
plt.title('IKs')
plt.subplot(2, 4, 7)
plt.plot(times, ICaL2)
plt.title('ICaL')
plt.subplot(2, 4, 8)
plt.plot(times, iK12)
plt.title('iK1')
plt.show()
|
from manimlib import *
class Music1(Scene):
def construct(self):
# manimpango.register_font("E:/Dropbox/manim/字体/阿里巴巴普惠体/阿里巴巴普惠体/Alibaba-PuHuiTi-Light.otf")
# print(manimpango.list_fonts())
text1 = Text(
"三个层次",
font="Source Han Sans CN",
weight='BOLD',
# size=1.68
)
self.play(FadeIn(text1,scale=0.5))
self.wait()
text2 = TexText(
"层次一 : 欣赏音乐",
"层次二 : 自娱自乐",
"层次三 : 玩音乐",
)\
.arrange(
DOWN,
aligned_edge=LEFT
)
self.remove(text1)
self.play(Write(text2[0]))
self.wait()
self.play(Write(text2[1]))
self.wait()
self.play(Write(text2[2]))
self.wait()
class Music2(Scene):
def construct(self):
# manimpango.register_font("E:/Dropbox/manim/字体/阿里巴巴普惠体/阿里巴巴普惠体/Alibaba-PuHuiTi-Light.otf")
# print(manimpango.list_fonts())
text1 = Text(
"旧琴练习",
font="Source Han Sans CN",
weight='BOLD',
size=500,
).shift(0.238*UP)
text2 = Text(
"新琴练习",
font="Source Han Sans CN",
weight='BOLD',
size=500,
).shift(0.238*UP)
self.play(Write(text1))
self.wait()
self.remove(text1)
self.play(FadeIn(text2,scale=0.5))
self.wait()
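# To render these scenes (assuming the 3b1b manim "manimgl" CLI is installed),
# something like the following should work, where music_scenes.py stands in for
# whatever this file is actually named:
#   manimgl music_scenes.py Music1
#   manimgl music_scenes.py Music2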
|
from datetime import date
from django.db import IntegrityError
from django.test import TestCase
from pytest import raises
from .models import Car, Person, Vehicle, VicePrincipal
class SingleModelTest(TestCase):
def test_it_should_hide_soft_deleted_objects(self):
Person.objects.create(name='bob').soft_delete()
assert Person.objects.count() == 0
assert Person.all_including_deleted.count() == 1
def test_it_should_raise_an_error_when_soft_deleting_twice(self):
bob = Person.objects.create(name='bob')
bob.soft_delete()
assert Person.objects.count() == 0
with raises(IntegrityError):
bob.soft_delete()
def test_it_should_allow_undeletion_of_objects(self):
bob = Person.objects.create(name='bob')
bob.soft_delete()
assert Person.objects.count() == 0
bob.soft_undelete()
assert Person.objects.count() == 1
def test_it_should_not_introduce_extra_sql_queries(self):
Person.objects.create(name='alice')
Person.objects.create(name='bob')
Person.objects.create(name='charlie')
Person.objects.create(name='dave').soft_delete()
with self.assertNumQueries(1):
assert Person.objects.count() == 3
class OneToManyRelationshipTest(TestCase):
"""
In a one-to-many relationship soft-delete has an impact in one direction but not the other.
"""
def setUp(self):
bob = Person.objects.create(name='bob')
Vehicle.objects.create(make='volvo', owner=bob)
Vehicle.objects.create(make='trek', owner=bob)
def test_it_should_hide_objects_on_the_many_side_of_the_relation(self):
Vehicle.objects.get(make='volvo').soft_delete()
bob = Person.objects.get(name='bob')
assert bob.vehicles.count() == 1
def test_it_cannot_hide_objects_on_the_one_side_of_the_relation(self):
"""
If you are able to grab a reference to an object that has a foreign key to a soft-deleted object,
that object stays accessible even though it was soft-deleted.
"""
bob = Person.objects.get(name='bob')
bob.soft_delete()
volvo = Vehicle.objects.get(make='volvo')
assert volvo.owner == bob
class OneToOneRelationshipTest(TestCase):
"""
When two objects are in a one-to-one relationship, soft-deleting one has no impact on the other.
"""
def setUp(self):
bob = Person.objects.create(name='bob')
volvo_vehicle = Vehicle.objects.create(make='volvo', owner=bob)
Car.objects.create(vehicle=volvo_vehicle, license_plate='123456')
def test_soft_deleting_the_car_doesnt_impact_the_vehicle(self):
Car.objects.get(license_plate='123456').soft_delete()
assert Car.objects.count() == 0
assert Vehicle.objects.count() == 1
assert Vehicle.objects.get(make='volvo').car.license_plate == '123456'
def test_soft_deleting_the_vehicle_doesnt_impact_the_car(self):
Vehicle.objects.get(make='volvo').soft_delete()
assert Vehicle.objects.count() == 0
assert Car.objects.count() == 1
assert Car.objects.get(license_plate='123456').vehicle.make == 'volvo'
class SubclassTest(TestCase):
def test_soft_deleting_as_subclass_does_not_affect_the_superclass(self):
neal = VicePrincipal.objects.create(name='neal', hire_date=date(2017, 9, 23))
neal.soft_delete()
assert VicePrincipal.objects.count() == 0
assert Person.objects.count() == 1
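# --- Hedged sketch (not part of the test suite) -------------------------------
# The tests above assume models roughly shaped like the mixin below: the default
# manager hides soft-deleted rows while `all_including_deleted` does not.  Field
# names and the double-delete guard are assumptions, not the project's actual code.
#
# from django.db import IntegrityError, models
# from django.utils import timezone
#
# class NotDeletedManager(models.Manager):
#     def get_queryset(self):
#         return super().get_queryset().filter(deleted_at__isnull=True)
#
# class SoftDeleteModel(models.Model):
#     deleted_at = models.DateTimeField(null=True, blank=True)
#     objects = NotDeletedManager()
#     all_including_deleted = models.Manager()
#
#     class Meta:
#         abstract = True
#
#     def soft_delete(self):
#         if self.deleted_at is not None:
#             raise IntegrityError("already soft-deleted")
#         self.deleted_at = timezone.now()
#         self.save()
#
#     def soft_undelete(self):
#         self.deleted_at = None
#         self.save()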
|
# header is ("4solr field", "darwin core field")
dwc_mapping = [
("accessionnumber_s", "catalogNumber"),
("alllocalities_ss", "tbd"),
("associatedtaxa_ss", "associatedTaxa"),
("blob_ss", "associatedMedia"),
("briefdescription_txt", "dynamicProperties"),
("collcountry_s", "country"),
("collcounty_s", "county"),
("collectioncode", "collectionCode"),
("collectiondate_s", "verbatimEventDate"),
("collector_ss", "recordedBy"),
("collectornumber_s", "recordNumber"),
("collectorverbatim_s", "tbd"),
("collstate_s", "stateProvince"),
("comments_ss", "occurrenceRemarks"),
("coordinatesource_s", "georeferenceSources"),
("coordinateuncertainty_f", "coordinateUncertaintyInMeters"),
#("coordinateuncertaintyunit_s", "coordinateUncertaintyInMeters"),
("createdat_dt", "tbd"),
("csid_s", "occurrenceID"),
("cultivated_s", "establishmentMeans"),
("datum_s", "geodeticDatum"),
("depth_s", "verbatimDepth"),
#("depthunit_s", "verbatimDepthUnit"),
("determination_s", "scientificName"),
("determinationdetails_s", "identificationRemarks"),
("determinationqualifier_s", "identificationQualifier"),
("earlycollectiondate_dt", "eventDate"),
("elevation_s", "verbatimElevation"),
#("elevationunit_s", "verbatimElevationUnit"),
("family_s", "family"),
("habitat_s", "habitat"),
("hastypeassertions_s", "tbd"),
("id", "tbd"),
("labelfooter_s", "tbd"),
("labelheader_s", "tbd"),
("latecollectiondate_dt", "eventDate"),
("latlong_p", "tbd"),
("loannumber_s", "tbd"),
("loanstatus_s", "tbd"),
("locality_s", "verbatimLocality"),
("localitynote_s", "localityRemarks"),
("localitysource_s", "tbd"),
("localitysourcedetail_s", "georeferenceRemarks"),
("localname_s", "vernacularName"),
("location_0_coordinate", "decimalLatitude"),
("location_1_coordinate", "decimalLongitude"),
("majorgroup_s", "tbd"),
("maxdepth_s", "tbd"),
("maxelevation_s", "tbd"),
("mindepth_s", "tbd"),
("minelevation_s", "tbd"),
("numberofobjects_s", "tbd"),
("objectcount_s", "tbd"),
("otherlocalities_ss", "tbd"),
("othernumber_ss", "otherCatalogNumbers"),
("phase_s", "reproductiveCondition"),
("posttopublic_s", "tbd"),
("previousdeterminations_ss", "previousIdentifications"),
("references_ss", "tbd"),
("sex_s", "sex"),
("sheet_s", "tbd"),
("taxonbasionym_s", "originalNameUsage"),
("termformatteddisplayname_s", "tbd"),
("trscoordinates_s", "tbd"),
("typeassertions_ss", "typeStatus"),
("ucbgaccessionnumber_s", "tbd"),
("updatedat_dt", "modified")
]
id_column = 0
csid_column = 1
accessionnumber_column = 2
determination_column = 3
termformatteddisplayname_column = 4
family_column = 5
taxonbasionym_column = 6
majorgroup_column = 7
collector_column = 8
collectornumber_column = 9
collectiondate_column = 10
earlycollectiondate_column = 11
latecollectiondate_column = 12
locality_column = 13
collcounty_column = 14
collstate_column = 15
collcountry_column = 16
elevation_column = 17
minelevation_column = 18
maxelevation_column = 19
elevationunit_column = 20
habitat_column = 21
location_0_coordinate_column = 22
location_1_coordinate_column = 23
latlong_column = 24
trscoordinates_column = 25
datum_column = 26
coordinatesource_column = 27
coordinateuncertainty_column = 28
coordinateuncertaintyunit_column = 29
localitynote_column = 30
localitysource_column = 31
localitysourcedetail_column = 32
updatedat_dt_column = 33
labelheader_column = 34
labelfooter_column = 35
previousdeterminations_column = 36
localname_column = 37
briefdescription_txt_column = 38
depth_column = 39
mindepth_column = 40
maxdepth_column = 41
depthunit_column = 42
associatedtaxa_column = 43
typeassertions_column = 44
cultivated_column = 45
sex_column = 46
phase_column = 47
othernumber_column = 48
ucbgaccessionnumber_column = 49
determinationdetails_column = 50
loanstatus_column = 51
loannumber_column = 52
collectorverbatim_column = 53
otherlocalities_column = 54
alllocalities_column = 55
hastypeassertions_column = 56
determinationqualifier_column = 57
comments_column = 58
numberofobjects_column = 59
objectcount_column = 60
sheet_column = 61
createdat_dt_column = 62
posttopublic_column = 63
references_column = 64
blob_column = 65
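# --- Hedged sketch (not in the original module) -------------------------------
# One plausible use of the structures above: convert a positional Solr export row
# into a Darwin Core dict.  The hand-picked fields below follow dwc_mapping and
# the *_column indices defined above; the row layout itself is an assumption.
def row_to_dwc(row):
    columns = {
        "catalogNumber": accessionnumber_column,
        "scientificName": determination_column,
        "family": family_column,
        "recordedBy": collector_column,
        "recordNumber": collectornumber_column,
        "country": collcountry_column,
        "stateProvince": collstate_column,
        "county": collcounty_column,
        "decimalLatitude": location_0_coordinate_column,
        "decimalLongitude": location_1_coordinate_column,
    }
    # keep only populated cells
    return {dwc_field: row[idx] for dwc_field, idx in columns.items() if row[idx]}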
|
# Author: Michael Lissner
# History:
# - 2013-06-03, mlr: Created
# - 2014-08-06, mlr: Updated for new website
# - 2015-07-30, mlr: Updated for changed website (failing xpaths)
from datetime import datetime
from juriscraper.OpinionSite import OpinionSite
from juriscraper.lib.string_utils import clean_if_py3
class Site(OpinionSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = 'http://nvcourts.gov/Supreme/Decisions/Advance_Opinions/'
self.xpath_adjustment = 0
self.table_number = 2
self.base_path = '(//table)[{table_number}]//td[{i}]'
self.date_path = self._make_date_path()
def _make_date_path(self):
"""Needed so that subclasses can make a date path as part of their
init process
"""
return '{base}//text()[normalize-space(.)]'.format(
base=self.base_path.format(
table_number=self.table_number,
i=4 + self.xpath_adjustment,
),
)
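    # For the default attributes (table_number=2, xpath_adjustment=0) this evaluates to:
    #   '(//table)[2]//td[4]//text()[normalize-space(.)]'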
def _get_download_urls(self):
path = '{base}//@href'.format(
base=self.base_path.format(
table_number=self.table_number,
i=4 + self.xpath_adjustment,
),
)
return list(self.html.xpath(path))
def _get_case_names(self):
path = '{base}//text()'.format(
base=self.base_path.format(
table_number=self.table_number,
i=3 + self.xpath_adjustment,
),
)
return list(self.html.xpath(path))
def _get_case_dates(self):
case_dates = []
for el in self.html.xpath(self.date_path):
date_string = clean_if_py3(str(el)).strip()
if date_string:
case_dates.append(datetime.strptime(date_string, '%b %d, %Y').date())
return case_dates
def _get_precedential_statuses(self):
return ['Published'] * len(self.case_names)
def _get_docket_numbers(self):
path = '{base}//text()[normalize-space(.)]'.format(
base=self.base_path.format(
table_number=self.table_number,
i=2 + self.xpath_adjustment,
),
)
docket_numbers = []
for el in self.html.xpath(path):
text = clean_if_py3(str(el)).strip()
if text:
docket_numbers.append(text)
return docket_numbers
def _get_neutral_citations(self):
neutral_path = '{base}//text()'.format(
base=self.base_path.format(
table_number=self.table_number,
i=1 + self.xpath_adjustment,
),
)
date_strings = []
for el in self.html.xpath(self.date_path):
date_string = clean_if_py3(str(el)).strip()
if date_string:
date_strings.append(date_string)
neutral_citations = []
for neutral_number, \
date_string in zip(
self.html.xpath(neutral_path),
date_strings):
year = datetime.strptime(date_string.strip(), '%b %d, %Y').year
neutral_citations.append('{year} NV {num}'.format(year=year, num=neutral_number))
return neutral_citations
|
#!/usr/bin/env python
ipv6_number = 'FE80:0000:0000:0000:0101:A3EF:EE1E:1719'
print(ipv6_number.split(":"))
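# Expected output:
# ['FE80', '0000', '0000', '0000', '0101', 'A3EF', 'EE1E', '1719']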
|
"""
module to assess the current environment variables
default values loaded here
"""
from os import environ
from .static import PROD, TESTING, LOGLEVEL_KEY, STAGE_KEY
#https://en.wikipedia.org/wiki/Syslog#Severity_levels
EMERGENCY = "EMERGENCY"
ALERT = "ALERT"
CRITICAL = "CREITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
NOTICE = "NOTICE"
INFORMATIONAL = "INFORMATIONAL"
DEBUG = "DEBUG"
TEST = "TEST"
LOGLEVELS = {EMERGENCY: 0,
ALERT: 1,
CRITICAL: 2,
ERROR: 3,
WARNING: 4,
NOTICE: 5,
INFORMATIONAL: 6,
DEBUG: 7,
TEST: 8}
def check_log_env():
"""
Special environment variable LOGLEVEL may be one of 9 keys in
<brain.environment.LOGLEVELS>
:return: <str> (defaults to 'TEST' / most verbose)
"""
return environ.get(LOGLEVEL_KEY, TEST)
def check_stage_env():
"""
Special environment variable STAGE may be one of 4 keys in
<brain.environment.STAGES>
:return: <str> ( defaults to 'TESTING' )
"""
return environ.get(STAGE_KEY, TESTING)
def log_env_gte(desired):
"""
Boolean check if the current environment LOGLEVEL is
at least as verbose as a desired LOGLEVEL
    :param desired: <str> one of 9 keys in <brain.environment.LOGLEVELS>
:return: <bool>
"""
    # fall back to the most verbose level if the environment holds an unknown value
    return LOGLEVELS.get(check_log_env(), LOGLEVELS[TEST]) >= LOGLEVELS.get(desired, LOGLEVELS[TEST])
def check_prod_env():
"""
    Boolean check if the environment is production
:return: <bool>
"""
return check_stage_env() == PROD
def check_dev_env():
"""
Boolean check if the environment is anything other than production
:return: <bool>
"""
return not check_prod_env()
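# Example usage (sketch; LOGLEVEL_KEY and STAGE_KEY name the environment
# variables defined in .static):
#   $ export LOGLEVEL=DEBUG
#   >>> check_log_env()     # 'DEBUG'
#   >>> log_env_gte(ERROR)  # True: DEBUG (7) is at least as verbose as ERROR (3)
#   >>> check_prod_env()    # False unless STAGE is set to the production value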
|