content stringlengths 5 1.05M |
|---|
"""Arnie Menu Maker Script
Jonathan Hanson
A quick-n-dirty way to place the menu into our postgres database.
Be careful! When you change the menu you may break the relations between
the menu and the orders tables!
"""
import psycopg2
import toml
import sys
SQL_CREATE_MENU_TABLE = """
CREATE TABLE IF NOT EXISTS menu (
item_id SERIAL PRIMARY KEY,
name TEXT,
ingredients TEXT
);"""

# Parameterized insert; defined once instead of being rebuilt per loop iteration.
SQL_INSERT_ITEM = """INSERT INTO menu(name,ingredients) VALUES(%s,%s)"""

conn = psycopg2.connect(user="jon",
                        password="jon",
                        database="postgres")
try:
    cursor = conn.cursor()
    cursor.execute(SQL_CREATE_MENU_TABLE)
    conn.commit()

    # If the menu table already has rows, assume it was populated earlier and
    # bail out: re-inserting would break item_id relations with the orders table.
    cursor.execute("""SELECT * FROM menu;""")
    if len(cursor.fetchall()) > 0:
        print("table already exists; doing nothing")
        sys.exit()

    print("populating menu table from menu.toml")
    menu = toml.load("menu.toml")
    for item in menu["items"]:
        cursor.execute(SQL_INSERT_ITEM, (item["name"], item["ingredients"]))
    conn.commit()
finally:
    # Bug fix: the original never closed the connection on any path.
    # SystemExit from sys.exit() above still runs this before propagating.
    conn.close()
|
from discord.ext import commands
import discord
from lyricsgenius import Genius
color = 0x00FF7F
genius = Genius("YOUR API KEY HERE")


@commands.command()
async def lyrics(ctx, *args):
    """Look up a song on Genius and post its lyrics as embeds.

    Splits the lyrics across two embeds when they would exceed the
    2000-character embed description limit.
    """
    songName = " ".join(args)
    try:
        result = genius.search_song(songName).lyrics
    except Exception:
        # search_song returns None when nothing is found (-> AttributeError on
        # .lyrics), and the client can raise on network/API errors.
        await ctx.send(embed=discord.Embed(title="Oops.", description="Something went wrong with Genius API, please try again", color=color))
        # Bug fix: without this return, execution fell through and hit an
        # unbound `result` below, raising NameError.
        return
    songName = " ".join([word.capitalize() for word in songName.split()])
    if len(result) < 2000:
        await ctx.send(embed=discord.Embed(title=f"Lyrics of {songName}", description=result, color=color))
    else:
        sentences = result.split("\n")
        midIndex = len(sentences) // 2
        await ctx.send(embed=discord.Embed(title=f"Lyrics of {songName}", description="\n".join(sentences[:midIndex]), color=color))
        await ctx.send(embed=discord.Embed(description="\n".join(sentences[midIndex:]), color=color))
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Natsuneko. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root for license information.
# ------------------------------------------------------------------------------------------
from __future__ import annotations
from typing import Any
from bpy import ops
from bpy.types import Context, Object
class OperationWrapper:
    """Static helpers that invoke bpy operators against an explicit context
    override instead of relying on the active selection."""

    @staticmethod
    def _overridden(context: Context, key: str, value: Any) -> dict:
        # Copy the current context and replace a single entry.
        override = context.copy()
        override[key] = value
        return override

    @staticmethod
    def export_fbx(context: Context, filepath: str, objects: list[Object]) -> None:
        """Export the given objects to an FBX file at *filepath*."""
        ops.export_scene.fbx(
            OperationWrapper._overridden(context, "selected_objects", objects),
            filepath=filepath,
            check_existing=True,
            filter_glob="*.fbx",
            use_selection=True,
            use_active_collection=False,
            global_scale=1.0,
            apply_unit_scale=True,
            apply_scale_options="FBX_SCALE_ALL",
            bake_space_transform=False,
            object_types={"ARMATURE", "MESH", "OTHER"},
            use_mesh_modifiers=False,
            use_mesh_modifiers_render=False,
            mesh_smooth_type="OFF",
            use_subsurf=False,
            use_mesh_edges=False,
            use_tspace=False,
            use_custom_props=False,
            add_leaf_bones=False,
            primary_bone_axis="Y",
            secondary_bone_axis="X",
            use_armature_deform_only=False,
            armature_nodetype="NULL",
            bake_anim=False,
            path_mode="AUTO",
            embed_textures=False,
            batch_mode="OFF",
            use_metadata=True,
            axis_forward="-Z",
            axis_up="Y"
        )

    @staticmethod
    def delete_object(context: Context, objects: list[Object]) -> None:
        """Delete the given objects without a confirmation prompt."""
        ops.object.delete(
            OperationWrapper._overridden(context, "selected_objects", objects),
            confirm=False)

    @staticmethod
    def separate_object(context: Context, objects: list[Object], type: str) -> None:
        """Separate mesh parts by the given mode (e.g. by material)."""
        ops.mesh.separate(
            OperationWrapper._overridden(context, "selected_editable_objects", objects),
            type=type)

    @staticmethod
    def extrude_region_move(context: Context, objects: list[Object], transform: Any = None) -> None:
        """Extrude the selected region and apply an optional translate transform."""
        ops.mesh.extrude_region_move(
            OperationWrapper._overridden(context, "selected_editable_objects", objects),
            TRANSFORM_OT_translate=transform)

    @staticmethod
    def set_origin(context: Context, objects: list[Object], type: str, center: str) -> None:
        """Set the origin of the given objects."""
        ops.object.origin_set(
            OperationWrapper._overridden(context, "selected_editable_objects", objects),
            type=type, center=center)

    @staticmethod
    def select_all_in_mesh(context: Context, objects: list[Object]) -> None:
        """Select every element in edit-mode meshes."""
        ops.mesh.select_all(
            OperationWrapper._overridden(context, "selected_editable_objects", objects),
            action="SELECT")

    @staticmethod
    def make_normals_consistent(context: Context, objects: list[Object], inside: bool) -> None:
        """Recalculate normals to point consistently inside or outside."""
        ops.mesh.normals_make_consistent(
            OperationWrapper._overridden(context, "selected_editable_objects", objects),
            inside=inside)

    @staticmethod
    def add_modifier(context: Context, object: Object, type: str) -> None:
        """Add a modifier of the given type to a single object."""
        ops.object.modifier_add(
            OperationWrapper._overridden(context, "object", object),
            type=type)

    @staticmethod
    def apply_modifier(context: Context, object: Object, modifier: str) -> None:
        """Apply the named modifier on a single object."""
        ops.object.modifier_apply(
            OperationWrapper._overridden(context, "object", object),
            modifier=modifier)
|
import pandas as pd
# Demo of pandas Series.str.extract with capture groups.
s_org = pd.Series(['aaa@xxx.com', 'bbb@yyy.com', 'ccc@zzz.com', 'ddd'], index=['A', 'B', 'C', 'D'])
print(s_org)
# A    aaa@xxx.com
# B    bbb@yyy.com
# C    ccc@zzz.com
# D            ddd
# dtype: object

# Bug fix: the patterns below were plain strings; '\.' and '\w' are invalid
# escape sequences (SyntaxWarning on modern Python) — use raw strings.
df = s_org.str.extract(r'(.+)@(.+)\.(.+)', expand=True)
print(df)
#      0    1    2
# A  aaa  xxx  com
# B  bbb  yyy  com
# C  ccc  zzz  com
# D  NaN  NaN  NaN

# With multiple capture groups a DataFrame is returned even with expand=False.
df = s_org.str.extract(r'(.+)@(.+)\.(.+)', expand=False)
print(df)
#      0    1    2
# A  aaa  xxx  com
# B  bbb  yyy  com
# C  ccc  zzz  com
# D  NaN  NaN  NaN

# A single group with expand=True yields a one-column DataFrame...
df_single = s_org.str.extract(r'(\w+)', expand=True)
print(df_single)
print(type(df_single))
#      0
# A  aaa
# B  bbb
# C  ccc
# D  ddd
# <class 'pandas.core.frame.DataFrame'>

# ...while expand=False yields a Series.
s = s_org.str.extract(r'(\w+)', expand=False)
print(s)
print(type(s))
# A    aaa
# B    bbb
# C    ccc
# D    ddd
# dtype: object
# <class 'pandas.core.series.Series'>

# Named groups become column names.
df_name = s_org.str.extract(r'(?P<local>.*)@(?P<second_LD>.*)\.(?P<TLD>.*)', expand=True)
print(df_name)
#   local second_LD  TLD
# A   aaa       xxx  com
# B   bbb       yyy  com
# C   ccc       zzz  com
# D   NaN       NaN  NaN
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class GlobalContext(nn.Module):
    """
    GlobalContext, inspired by 'SE' and 'GC'.

    Pools a global context vector from the input feature map — either by
    global average pooling ('gap') or attention pooling ('attn') — reduces it,
    and uses per-branch 1x1 conv heads to modulate the category and kernel
    head feature maps, either multiplicatively (sigmoid gate, 'mul') or
    additively (residual offset, 'add').
    """

    def __init__(self, in_channels, channel_reduction=4, pooling_mode='attn', fusion_mode='add'):
        super(GlobalContext, self).__init__()
        self.in_channels = in_channels
        self.reduced_channels = in_channels // channel_reduction
        assert pooling_mode in ['gap', 'attn']
        self.pooling_mode = pooling_mode
        assert fusion_mode in ['mul', 'add']
        self.fusion_mode = fusion_mode
        if pooling_mode == 'gap':
            self.gap = nn.AdaptiveAvgPool2d(output_size=1)
        else:
            # 1x1 conv producing one spatial attention logit per position.
            self.conv_mask = nn.Conv2d(
                in_channels=self.in_channels,
                out_channels=1,
                kernel_size=1
            )
            self.softmax = nn.Softmax(dim=1)
        # The original duplicated the construction of all three heads for each
        # fusion mode; only the final Sigmoid differs, so build them once.
        # Submodule indices are unchanged, keeping state_dict keys compatible.
        self.conv_reduction = nn.Sequential(
            nn.Conv2d(
                in_channels=self.in_channels,
                out_channels=self.reduced_channels,
                kernel_size=1
            ),
            nn.LayerNorm(normalized_shape=[self.reduced_channels, 1, 1]),
            nn.ReLU(),
        )
        gated = fusion_mode == 'mul'
        self.conv_cate = self._make_branch(gated)
        self.conv_kernel = self._make_branch(gated)
        self._init_weights()

    def _make_branch(self, gated):
        """Build one context branch (C/r -> C/r -> C): sigmoid-gated for
        multiplicative fusion, linear for additive fusion."""
        layers = [
            nn.Conv2d(
                in_channels=self.reduced_channels,
                out_channels=self.reduced_channels,
                kernel_size=1
            ),
            nn.LayerNorm(normalized_shape=[self.reduced_channels, 1, 1]),
            nn.ReLU(),
            nn.Conv2d(
                in_channels=self.reduced_channels,
                out_channels=self.in_channels,
                kernel_size=1
            ),
        ]
        if gated:
            layers.append(nn.Sigmoid())
        return nn.Sequential(*layers)

    def _init_weights(self, zeros_init=True):
        if self.pooling_mode == 'attn':
            if zeros_init:
                nn.init.constant_(self.conv_mask.weight, 0)
            else:
                nn.init.normal_(self.conv_mask.weight, 0.001)
            if self.conv_mask.bias is not None:
                nn.init.constant_(self.conv_mask.bias, 0)
        for modules in [self.conv_reduction, self.conv_cate, self.conv_kernel]:
            for l in modules.modules():
                if isinstance(l, nn.Conv2d):
                    nn.init.normal_(l.weight, std=0.01)
                    if l.bias is not None:
                        nn.init.constant_(l.bias, 0)
        if zeros_init:
            # Zero the LAST conv of each branch so the module starts as an
            # identity ('add': offset is 0) or as a constant 0.5 gate ('mul':
            # sigmoid(0)). Looking the conv up by type replaces the original's
            # fragile [-2] vs [-1] indexing per fusion mode.
            for branch in (self.conv_cate, self.conv_kernel):
                last_conv = [m for m in branch if isinstance(m, nn.Conv2d)][-1]
                nn.init.constant_(last_conv.weight, 0)

    def forward(self, feature, cate_feat, kernel_feat):
        """
        Disentangle the feature maps from category head and kernel head.
        """
        N, C, H, W = feature.size()
        if self.pooling_mode == 'gap':
            context = self.gap(feature)  # [N, C, 1, 1]
        else:
            mask = self.conv_mask(feature)  # [N, 1, H, W]
            mask = mask.view(N, H * W, 1)   # [N, H * W, 1]
            mask = self.softmax(mask)       # softmax over spatial positions
            feature = feature.view(N, C, H * W)    # [N, C, H * W]
            context = torch.matmul(feature, mask)  # [N, C, 1]
            context = context.view(N, C, 1, 1)     # [N, C, 1, 1]
        context = self.conv_reduction(context)
        cate_context = self.conv_cate(context)
        kernel_context = self.conv_kernel(context)
        if self.fusion_mode == 'mul':
            # residual variant would be: feat + context * feat
            cate_feat = cate_context * cate_feat
            kernel_feat = kernel_context * kernel_feat
        else:
            cate_feat = cate_feat + cate_context
            kernel_feat = kernel_feat + kernel_context
        return cate_feat, kernel_feat
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def similarity_between_docs(doc1, doc2, is_1d=False):
    """Cosine similarity between two documents.

    If is_1d, each doc is already a single vector; otherwise each doc is a
    matrix of word vectors that gets mean-pooled into one vector first.
    """
    if is_1d:
        first, second = doc1, doc2
    else:
        first = np.mean(doc1, axis=0)
        second = np.mean(doc2, axis=0)
    first = np.reshape(first, (1, -1))
    second = np.reshape(second, (1, -1))
    return cosine_similarity(first, second)[0][0]
def plot_1d_heatmap(vec, name):
    """Show *vec* as a one-row heatmap titled *name*."""
    row = vec.reshape(1, -1)
    plt.figure(figsize=(40, 4))
    sns.heatmap(row).set_title(name)
    plt.rcParams.update({"font.size": 22})
    plt.show()
import cv2
import PIL
from PIL import Image
import numpy as np
# Load the six cube faces and assemble a blurred 4x3 cross layout.
img_list = []
cv_img_list = []
cv_img_merge_h_list = []
cv_img_merge_h_list_top = []
cv_img_merge_h_list_bottom = []
cv_img_panorama_list = []
cv_img_output_panorama = []
cv_y_zero_list = []
cv_y_two_list = []
panorama = "panorama_"
width = 900
height = 900

# Face file names: panorama_0.png.pale ... panorama_5.png.pale
for i in range(6):
    img_list.append(f"{panorama}{i}.png.pale")

for path in img_list:
    cv_img_list.append(cv2.imread(path))

# Faces 0-3 form the horizontal ring; face 4 is the top, face 5 the bottom.
for index, face in enumerate(cv_img_list):
    face = cv2.resize(face, (width, height))
    if index <= 3:
        cv_img_merge_h_list.append(face)
    elif index == 4:
        image_top = face
    elif index == 5:
        image_bottom = face

# Pre-rotate the top/bottom faces by 0/90/180/270 degrees so each copy lines
# up with the ring face below/above it.
for quarter in range(4):
    rotation_matrix = cv2.getRotationMatrix2D((width / 2, height / 2), quarter * 90, 1)
    cv_img_merge_h_list_bottom.append(
        cv2.warpAffine(image_bottom, rotation_matrix, (width, height)))
    cv_img_merge_h_list_top.append(
        cv2.warpAffine(image_top, rotation_matrix, (width, height)))

h_stack_top = np.hstack(tuple(cv_img_merge_h_list_top))
h_stack_middle = np.hstack(tuple(cv_img_merge_h_list))
h_stack_bottom = np.hstack(tuple(cv_img_merge_h_list_bottom))
v_stack = np.vstack((h_stack_top, h_stack_middle, h_stack_bottom))
v_stack_blur = cv2.GaussianBlur(v_stack, (5, 5), 5)

# Cut the blurred sheet back into (tile, column, row) pieces.
for x in range(4):
    for y in range(3):
        tile = v_stack_blur[y * height:(y + 1) * height, x * width:(x + 1) * width]
        cv_img_panorama_list.append((tile, x, y))
def rotateAndAvg(lst: list, panoramaNumber: int):
    """Undo the per-quarter rotation applied to each tile in *lst*, blend the
    tiles into a running average, and write the result to
    output/panorama_<panoramaNumber>.png."""
    unrotated = []
    for quarter, img in enumerate(lst):
        matrix = cv2.getRotationMatrix2D((width / 2, height / 2), quarter * -90, 1)
        unrotated.append(cv2.warpAffine(img, matrix, (width, height)))
    dst = unrotated[0]
    # Incremental mean: after step i, dst is the average of tiles 0..i.
    for i in range(1, len(unrotated)):
        alpha = 1.0 / (i + 1)
        dst = cv2.addWeighted(unrotated[i], alpha, dst, 1.0 - alpha, 0.0)
    filename = f"{panorama}{panoramaNumber}.png"
    out_dir = "output/"
    cv2.imwrite(out_dir + filename, dst)
# Route each tile by row: top row (y==0) and bottom row (y==2) get averaged
# into single faces; the middle row (y==1) is written out directly.
for tile, x, y in cv_img_panorama_list:
    if y == 0:
        cv_y_zero_list.append(tile)
    elif y == 1:
        cv_img_output_panorama.append((tile, x))
    elif y == 2:
        cv_y_two_list.append(tile)

rotateAndAvg(cv_y_zero_list, 4)
rotateAndAvg(cv_y_two_list, 5)

for tile, x in cv_img_output_panorama:
    cv2.imwrite("output/" + f"{panorama}{x}.png", tile)
import os, sys
import multiprocessing as mp
import subprocess
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from lq_utils import write_fastq, open_seq_chunk, guess_format
from lq_exec import LqExec
from time import sleep
from logging import getLogger
logger = getLogger(__name__)
def _sdust(psdust, fin, fout):
f = open(fout, "w")
completed_process = subprocess.run([psdust, fin], check=True, stdout=f)
if completed_process.stderr:
return completed_process.stderr.decode('utf-8')
else:
return None
class LqMask:
    """Masks low-complexity regions of reads by running sdust, either over
    chunks submitted to a process pool (submit_sdust/close_pool) or over a
    fixed read list split across workers (run_async_sdust), and plots stats
    over the results."""

    def __init__(self, path_to_sdust, work_dir, reads=None, suffix=None, max_n_proc=5):
        """
        :param path_to_sdust: path to the sdust executable
        :param work_dir: directory for outputs and temporary files (created if missing)
        :param reads: optional read list for run_async_sdust
        :param suffix: optional suffix appended to output file names
        :param max_n_proc: number of worker processes
        """
        if suffix:
            self.suffix = "_" + suffix
        else:
            self.suffix = ""
        if not os.path.isdir(work_dir):
            os.makedirs(work_dir, exist_ok=True)
        # Bug fix: the original only set self.reads when reads was truthy,
        # so run_async_sdust raised AttributeError for the chunked use case.
        self.reads = reads
        self.n_proc = max_n_proc
        self.psdust = path_to_sdust
        self.wdir = work_dir
        self.outf = os.path.join(work_dir, "longqc_sdust" + self.suffix + ".txt")
        self.tin = []   # temporary per-chunk input fastq files
        self.tout = []  # temporary per-chunk sdust output files
        self.pool = mp.Pool(self.n_proc)

    def plot_qscore_dist(self, df, column_qv, column_length, *, fp=None, platform='ont', interval=3000):
        """Boxplot of average QV binned by read length; saved to *fp* if given,
        otherwise shown interactively."""
        if platform == 'ont':
            mid_threshold = 7  # ont
        else:
            mid_threshold = 8  # pb
        df['Binned read length'] = np.floor(df[column_length].values/interval)
        df.boxplot(column=column_qv, by='Binned read length', sym='+', rot=90, figsize=(2*int(max(df['Binned read length'])/5+0.5), 4.8))
        plt.grid(True)
        xmin, xmax = plt.gca().get_xlim()
        ymin, ymax = plt.gca().get_ylim()
        plt.xticks(np.arange(xmax+1), [int(i) for i in np.arange(xmax+1)*interval])
        # shade the low-quality band red, the acceptable band green
        plt.axhspan(0, mid_threshold, facecolor='red', alpha=0.1)
        plt.axhspan(mid_threshold, ymax, facecolor='green', alpha=0.1)
        plt.ylim(0, ymax)
        plt.ylabel('Averaged QV')
        plt.title("")
        plt.suptitle("")
        if fp:
            plt.savefig(fp, bbox_inches="tight")
        else:
            plt.show()
        plt.close()

    def plot_masked_fraction(self, fp=None):
        """Histogram of the per-read low-complexity fraction (column 3 of the
        sdust output); saved to *fp* if given, otherwise shown."""
        self.df = pd.read_table(self.outf, sep='\t', header=None)
        plt.grid(True)
        plt.hist(self.df[3], alpha=0.2, bins=np.arange(0, 1.0, 0.01), color='red')
        plt.xlim(0, 1.0)
        plt.xlabel('Low complexity fraction')
        plt.ylabel('Frequency')
        if fp:
            plt.savefig(fp, bbox_inches="tight")
        else:
            plt.show()
        plt.close()

    def _concat_and_remove_tfiles(self):
        """Concatenate the per-chunk sdust outputs into self.outf, then delete
        every temporary input/output file."""
        with open(self.outf, 'w') as out:
            for tf in self.tout:
                with open(tf, 'r') as t:
                    for l in t:
                        out.write(l)
        logger.info("sdust output file %s was made." % self.outf)
        for tf in self.tin + self.tout:
            if os.path.exists(tf):
                try:
                    os.remove(tf)
                    logger.info("tmp file %s was removed." % tf)
                except OSError as e:
                    # Bug fix: `except (OSError, e)` was invalid Python 3 —
                    # it raised NameError instead of binding the exception.
                    logger.error("%s - %s." % (e.filename, e.strerror))
            else:
                # Bug fix: the original forgot the `% tf` argument for the
                # %s placeholder.
                logger.warning("tmp file %s does not exist. skip removal of this file." % tf)

    # for multiple call case like chunking
    def submit_sdust(self, reads, chunk_n):
        """Write *reads* to a temp fastq and submit an async sdust job for it."""
        if not os.path.isdir(os.path.join(self.wdir, "analysis")):
            logger.info("A new dir was made: %s" % os.path.join(self.wdir, "analysis"))
            os.makedirs(os.path.join(self.wdir, "analysis"), exist_ok=True)
        fpi = os.path.join(self.wdir, "analysis", "tmp_" + str(chunk_n) + ".fastq")
        self.tin.append(fpi)
        fpo = os.path.join(self.wdir, "analysis", "tmp_" + str(chunk_n) + self.suffix + ".txt")
        self.tout.append(fpo)
        write_fastq(fpi, reads)
        self.pool.apply_async(_sdust, args=(self.psdust, fpi, fpo))
        logger.info("New job was submitted: in->%s, out->%s" % (fpi, fpo))

    def close_pool(self):
        """Wait for all submitted sdust jobs, then merge and clean up."""
        logger.info("Waiting completion of all of jobs...")
        self.pool.close()
        self.pool.join()
        logger.info("sdust jobs finished.")
        self._concat_and_remove_tfiles()

    # for a single call case
    def run_async_sdust(self):
        """Split self.reads across n_proc sdust processes, poll until all
        finish, then merge and clean up."""
        procs = []
        if self.reads:
            n_seqs = len(self.reads)
        else:
            logger.error("No read is given for analysis.")
            sys.exit(1)
        if not os.path.isdir(os.path.join(self.wdir, "analysis")):
            os.makedirs(os.path.join(self.wdir, "analysis"), exist_ok=True)
        for i in np.arange(0, self.n_proc):
            s = int(i * n_seqs/self.n_proc)
            e = int((i+1) * n_seqs/self.n_proc)
            fp = os.path.join(self.wdir, "analysis", "tmp_"+str(i)+".fastq")
            self.tin.append(fp)
            logger.debug("Seqs from %d to %d" % (s, e))
            write_fastq(fp, self.reads[s:e])
            p = LqExec(self.psdust)
            fpo = os.path.join(self.wdir, "analysis", "tmp_" + str(i) + self.suffix + ".txt")
            self.tout.append(fpo)
            p.exec(fp, out=fpo)
            logger.info("sdust process %s started." % p.get_pid())
            procs.append(p)
        while True:
            for p in procs:
                if p.get_poll() is not None:
                    logger.info("sdust process %s terminated." % p.get_pid())
                    procs.remove(p)
            logger.info("Calculating low complexity region...")
            if len(procs) == 0:
                break
            else:
                sleep(5)
        logger.info("Calculation finished.")
        self._concat_and_remove_tfiles()

    def get_outfile_path(self):
        """Path of the merged sdust output file."""
        return self.outf
# test
if __name__ == "__main__":
    # Smoke test: mask argv[1] with sdust, reading the file in chunks of
    # argv[2] GiB, then plot the masked-fraction histogram.
    lm = LqMask("sdust", "./")
    fn = sys.argv[1]
    file_code = guess_format(fn)
    chunk_bytes = float(sys.argv[2]) * 1024 ** 3
    for chunk_n, (reads, n_seqs, n_bases) in enumerate(
            open_seq_chunk(fn, file_code, chunk_size=chunk_bytes, is_upper=True)):
        lm.submit_sdust(reads, chunk_n)
    lm.close_pool()
    lm.plot_masked_fraction("./masked_frac.png")
|
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
"""
"""
# -----------------------------------------------------------------------------
# Boilerplate
# -----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
# Standard library imports
import sys
# Bokeh imports
from bokeh.application.handlers.code_runner import CodeRunner
from bokeh.application.handlers.handler import Handler
from bokeh.io.doc import curdoc, set_curdoc
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
__all__ = ("ExampleHandler",)
# -----------------------------------------------------------------------------
# General API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Dev API
# -----------------------------------------------------------------------------
class ExampleHandler(Handler):
    """ A stripped-down handler similar to CodeHandler but that does
    some appropriate monkeypatching.
    """

    # io/plotting functions that select an output destination; patched to
    # no-ops while an example runs
    _output_funcs = ["output_notebook", "output_file", "reset_output"]
    # io/plotting functions that emit output; patched to add the object to
    # the current document instead
    _io_funcs = ["show", "save"]

    def __init__(self, source, filename):
        # NOTE(review): `self` is passed as a positional argument to the base
        # initializer — harmless only if Handler.__init__ accepts *args;
        # confirm against the Handler implementation.
        super().__init__(self)
        self._runner = CodeRunner(source, filename, [])

    def modify_document(self, doc):
        """Run the example source in a fresh module with *doc* installed as
        the current document, so anything shown/saved lands in *doc*."""
        if self.failed:
            return
        module = self._runner.new_module()
        sys.modules[module.__name__] = module
        doc._modules.append(module)
        orig_curdoc = curdoc()
        set_curdoc(doc)
        old_io, old_doc = self._monkeypatch()
        try:
            self._runner.run(module, lambda: None)
        finally:
            # always restore patched functions and the previous curdoc,
            # even if the example raised
            self._unmonkeypatch(old_io, old_doc)
            set_curdoc(orig_curdoc)

    def _monkeypatch(self):
        """Patch bokeh.io/bokeh.plotting output functions and bokeh.document.Document;
        returns (old_io, old_doc) for _unmonkeypatch to restore."""

        def _pass(*args, **kw):
            pass

        def _add_root(obj, *args, **kw):
            curdoc().add_root(obj)

        def _curdoc(*args, **kw):
            return curdoc()

        # these functions are transitively imported from io into plotting,
        # so we have to patch them all. Assumption is that no other patching
        # has occurred, i.e. we can just save the funcs being patched once,
        # from io, and use those as the originals to replace everywhere
        import bokeh.io as io  # lgtm [py/import-and-import-from]
        import bokeh.plotting as p
        mods = [io, p]

        old_io = {}
        for f in self._output_funcs + self._io_funcs:
            old_io[f] = getattr(io, f)

        for mod in mods:
            for f in self._output_funcs:
                setattr(mod, f, _pass)
            for f in self._io_funcs:
                setattr(mod, f, _add_root)

        import bokeh.document as d
        old_doc = d.Document
        # Document() calls inside the example return the current doc instead
        # of creating a new one
        d.Document = _curdoc

        return old_io, old_doc

    def _unmonkeypatch(self, old_io, old_doc):
        """Restore everything _monkeypatch replaced."""
        import bokeh.io as io  # lgtm [py/import-and-import-from]
        import bokeh.plotting as p
        mods = [io, p]
        for mod in mods:
            for f in old_io:
                setattr(mod, f, old_io[f])
        import bokeh.document as d
        d.Document = old_doc

    @property
    def failed(self):
        # True when the example source failed to compile or run
        return self._runner.failed

    @property
    def error(self):
        # error message from the runner, if any
        return self._runner.error

    @property
    def error_detail(self):
        # traceback detail from the runner, if any
        return self._runner.error_detail
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
|
#!/usr/bin/python3
import sys
def evaluate_postfix(postfix):
    """
    Evaluates a postfix expression. Precondition: postfix is a valid postfix expression (no extra numbers, extra operators, etc.)
    :param postfix: Postfix expression to be evaluated (whitespace-separated tokens)
    :return: Result of the postfix expression, or error if unsupported operators used
    """
    stack = []
    postfix_tokens = postfix.split()
    for token in postfix_tokens:
        # Bug fix: the original tested `token in "1234567890"`, a substring
        # check that accepted multi-digit numbers only when they happened to
        # be substrings of "1234567890" (e.g. "12" worked, "13" did not).
        if token.isdigit():
            stack.append(int(token))  # If number, push to stack
        else:
            num2 = stack.pop()  # If operator then pop last
            num1 = stack.pop()  # two numbers in stack
            # Do the calculation. Bug fix: use == instead of `is` — identity
            # comparison against string literals only worked by accident of
            # CPython interning.
            if token == "*":
                result = num1 * num2
            elif token == "/":
                result = num1 / num2
            elif token == "+":
                result = num1 + num2
            elif token == "-":
                result = num1 - num2
            else:
                return "Invalid operator detected."
            stack.append(result)  # Add calculation to stack
    return stack.pop()  # Pop the last remaining number, which is the result.
def main():
    """Entry point: evaluate the postfix expression given as the sole argument."""
    if len(sys.argv) != 2:
        print("Parameter Error: Please give a postfix expression as a parameter.")
    else:
        print(evaluate_postfix(sys.argv[1]))


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Creates the email verification code table and links UserProfile.user
    one-to-one to the auth user model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('login', '0002_auto_20160522_0301'),
    ]

    operations = [
        # NOTE(review): "EmailVericationCodes" misspells "Verification".
        # The name is kept as-is — this is a historical migration, and a
        # rename belongs in a new migration, not an edit of this one.
        migrations.CreateModel(
            name='EmailVericationCodes',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('verification_code', models.CharField(max_length=100, blank=True)),
                ('expiry_date', models.DateField()),
                # NOTE(review): ForeignKey without on_delete is only valid on
                # Django < 2.0 (where it defaults to CASCADE) — confirm the
                # project's Django version before upgrading.
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='user',
            field=models.OneToOneField(related_name='user_login', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
from __future__ import division
import logging
import math
import numpy as np
import scipy.ndimage
from gunpowder import BatchFilter, Roi, ArrayKey, Coordinate
logger = logging.getLogger(__name__)
def _spatial_roi(roi, spatial_dims):
    """Return a Roi restricted to the trailing *spatial_dims* dimensions."""
    begin = roi.get_begin()[-spatial_dims:]
    shape = roi.get_shape()[-spatial_dims:]
    return Roi(begin, shape)
class Misalign(BatchFilter):
"""
Misalign serial sections by randomly shifting along yx-coordinate axes (1, 2).
Arrays can have different voxel sizes but the z-component of each voxel size
has to be integer multiple of z_resolution. In practice, use the lowest resolution
along z of all arrays requested in the roi.
Args:
z_resolution (``int``):
Resolution at which to generate shifts. Note that, for all specs in the request,
spec.voxel_size[0] is integer multiple of z_resolution
prob_slip (``float``):
Probability of a section to "slip", i.e., be independently moved in
x-y.
prob_shift (``float``):
Probability of a section and all following sections to move in x-y. This
is a conditional probability for the case that slip == False. If slip == True,
no shift will occur for a section.
max_misalign (``tuple`` of two ``floats``):
Maximal displacement to shift in x and y. Samples will be drawn
uniformly.
ignore_keys_for_slip (``tuple`` of keys):
Only apply shifts (but not slips) to any key in this ``tuple``.
"""
def __init__(
self,
z_resolution,
prob_slip=0,
prob_shift=0,
max_misalign=(0, 0),
ignore_keys_for_slip=(),
seed=None):
super(BatchFilter, self).__init__()
self.z_resolution = Coordinate((z_resolution,))
self.prob_slip = prob_slip
self.prob_shift = prob_shift
self.max_misalign = max_misalign
self.ignore_keys_for_slip = ignore_keys_for_slip
self.seed = seed
logger.debug('initialized with parameters '
'prob_slip=%f '
'prob_shift=%f '
'max_misalign=%s '
'ignore_keys_for_slip=%s '
'seed=%d',
self.prob_slip,
self.prob_shift,
self.max_misalign,
self.ignore_keys_for_slip,
self.seed)
self.translations = {}
self.target_rois = {}
def setup(self):
pass
'''
prepare:
target_roi := requested roi (world space)
b := begin of requested roi
t := array of translations per section (world space)
roi := roi for upstream request (world space), deducted from target_roi:
- begin: shift b by min(s)
- shape: extend shape of target_roi by max(t) - min(t)
- snap to voxel grid
b_tilde := begin of roi
process:
o := offset between target_ roi and roi: b - b_tilde
shifts := array of shifts (offset into voxel array of roi) per section indexed by i:
shifts[i] = o + t[i] / voxel_size
use shifts for offset parameter in scipy affine transform
'''
def prepare(self, request):
logger.debug('%s preparing request %s with z_resolution %s', type(self).__name__, request, self.z_resolution)
self._sanity_check(request)
total_roi = request.get_total_roi()
master_roi = self._z_roi(total_roi)
if self.seed is not None:
np.random.seed(self.seed)
master_roi_snapped = master_roi.snap_to_grid(self.z_resolution, mode='grow')
master_roi_voxels = master_roi_snapped // self.z_resolution
master_shifts, master_slips = map(np.asarray, self._misalign(master_roi_voxels.get_shape()[0]))
# np.asarray(self._misalign(master_roi_voxels.get_shape()[0]))
self.translations.clear()
self.target_rois.clear()
for key, spec in request.items():
assert isinstance(key, ArrayKey), 'Only ArrayKey supported but got %s in request'%type(key)
z_resolution = Coordinate((spec.voxel_size[0],))
z_roi = self._z_roi(spec.roi)
z_roi_voxels = z_roi / z_resolution
z_roi_snapped_voxels = ( master_roi_snapped ) / z_resolution
voxel_size_ratio = int((self.z_resolution / z_resolution)[0])
half_voxel_size_diff = (self.z_resolution - z_resolution)[0] / 2
logger.debug('prepare key %s: half voxel size diff=%s', key, half_voxel_size_diff)
assert half_voxel_size_diff.is_integer() and (half_voxel_size_diff / z_resolution[0]).is_integer(), \
'half of voxel size diff %f must be integer multiple of z_resolution %s'%(half_voxel_size_diff, z_resolution)
logger.debug('prepare key %s: voxel size ratio=%s', key, voxel_size_ratio)
offset = (z_roi.get_begin() - master_roi_snapped.get_begin())
voxel_offset = int((z_roi_voxels.get_begin() - z_roi_snapped_voxels.get_begin())[0])
logger.debug('prepare key %s: offset=%s voxel_offset=%s voxel_size=%s', key, offset, voxel_offset, spec.voxel_size)
start = voxel_offset + int(half_voxel_size_diff / z_resolution[0])
logger.debug('prepare key %s: voxel_offset=%s start=%s', key, voxel_offset, start)
stop = start + int(z_roi_voxels.get_shape()[0])
master_translations = master_shifts if key in self.ignore_keys_for_slip else master_shifts + master_slips
translations = np.repeat(master_translations, voxel_size_ratio, axis=0)[start:stop]
# logger.debug('prepare key %s: translations=%s', key, translations)
m = np.min(translations, axis=0)
M = np.max(translations, axis=0)
d = M - m
slice_roi = Roi(offset = m + np.asarray(spec.roi.get_begin()[-2:]), shape = d + np.asarray(spec.roi.get_shape()[-2:]))
slice_roi = slice_roi.snap_to_grid(spec.voxel_size[1:])
self.translations[key] = translations
# remember roi of key in original request
self.target_rois[key] = spec.roi
# if all translation are > 0, new roi.begin might be larger than original roi.begin, which is ok
# new roi need not contain all of original roi (target roi)
spec.roi = Roi(
spec.roi.get_begin()[:-2] + slice_roi.get_begin(),
spec.roi.get_shape()[:-2] + slice_roi.get_shape())
def process(self, batch, request):
for key, _ in request.items():
logger.debug('process key %s', key)
assert key in batch.arrays, 'only arrays supported but got %s'%key
array = batch.arrays[key]
voxel_size = np.asarray(array.spec.voxel_size)
# target_roi is roi in original request
target_roi = self.target_rois[key]
target_roi_voxels = _spatial_roi(target_roi, 3) / array.spec.voxel_size
roi_voxels = _spatial_roi(array.spec.roi, 3) / array.spec.voxel_size
# offset can be negative, thus use in64 instead of uin64
offset_voxels = np.asarray(target_roi_voxels.get_begin() - roi_voxels.get_begin())[1:].astype(np.int64)
slice_shape = np.asarray(target_roi_voxels.get_shape()[1:]).astype(np.int64)
data = np.empty(shape=target_roi.get_shape()[:-3] + target_roi_voxels.get_shape(), dtype=array.data.dtype)
interpolate = array.spec.interpolatable
for index, translation in enumerate(self.translations[key]):
translation_in_voxels = translation / voxel_size[1:]
current_slice = array.data[..., index, :, :]
if np.all(translation_in_voxels == 0):
start = offset_voxels
stop = start + slice_shape
data[..., index, :, :] = current_slice[..., start[0]:stop[0], start[1]:stop[1]]
else:
shift = offset_voxels + translation_in_voxels
source = np.reshape(current_slice, (-1,) + current_slice.shape[-2:])
target = np.reshape(data[..., index, :, :], (-1,) + tuple(map(int, slice_shape)))
matrix = np.ones((2,))
order = 1 if interpolate else 0
for s, t in zip(source, target):
# output_shape has to be specified even if output is provided, soooo annoying to figure out
# from the scipy doc, offset is wrt input:
"""
Apply an affine transformation.
Given an output image pixel index vector ``o``, the pixel value
is determined from the input image at position
``np.dot(matrix, o) + offset``."""
scipy.ndimage.interpolation.affine_transform(input=s, output=t, output_shape=t.shape, matrix=matrix, offset=shift, order=order)
array.spec.roi = target_roi
array.data = data
def _z_roi(self, roi):
    """Project *roi* onto its z axis (third-from-last dimension) only."""
    z_offset = roi.get_begin()[-3:-2]
    z_extent = roi.get_shape()[-3:-2]
    return Roi(z_offset, z_extent)
def _misalign(self, num_sections):
    """Draw random per-section misalignment offsets.

    For each section one uniform draw decides between a *slip* (only this
    section moves, probability ``prob_slip``), a *shift* (this section and
    every later one move together, probability ``prob_shift``), or no
    change.

    :param num_sections: number of sections
    :return: (shifts, slips)
    """
    zero = Coordinate((0, 0))
    slips = [zero] * num_sections
    shifts = [zero] * num_sections
    for section in range(num_sections):
        draw = np.random.random()
        if draw <= self.prob_slip:
            slips[section] = self._random_offset()
        elif draw <= self.prob_slip + self.prob_shift:
            delta = self._random_offset()
            # a shift propagates to all subsequent sections
            for later in range(section, num_sections):
                shifts[later] += delta
    logger.debug("misaligning sections with " + str(shifts))
    return shifts, slips
def _sanity_check(self, request):
    """Assert that every entry of *request* is fully specified.

    Each key must carry a spec with voxel size and a roi with offset and
    shape; additionally, ``self.z_resolution`` must be an integer multiple
    of the spec's first (z) voxel-size component.

    :param request: the request to validate
    :raises AssertionError: if any requirement is violated
    """
    for key, spec in request.items():
        logger.debug('Sanity checking key=%s spec=%s', key, spec)
        assert key is not None, 'Key is none'
        assert spec is not None, 'Spec is None for key %s'%key
        assert spec.voxel_size is not None, 'Voxel size is None for key %s'%key
        assert spec.roi is not None, 'Roi is None for key %s'%key
        # NOTE(review): these two are truthiness checks on Coordinate-like
        # tuples, so they verify presence, not specific values
        assert spec.roi.get_begin(), 'Offset is None for key %s'%key
        assert spec.roi.get_shape(), 'Shape is None for key %s'%key
        assert int(self.z_resolution[0]) % spec.voxel_size[0] == 0, \
            'z_resolution is not integer multiple of z resolution for key %s'%key
def _get_source_roi(self, transformation):
    """Return a roi large enough to contain all coordinates sampled by
    *transformation*.

    :param transformation: array whose first axis indexes dimensions; each
        ``transformation[d]`` holds the source coordinates sampled along
        dimension ``d``
    :return: bounding :class:`Roi` of the sampled coordinates
    """
    num_dims = transformation.shape[0]
    # integer bounding box of everything the transformation touches;
    # the upper bound is exclusive, hence the + 1
    lower = Coordinate(
        int(math.floor(transformation[d].min())) for d in range(num_dims))
    upper = Coordinate(
        int(math.ceil(transformation[d].max())) + 1 for d in range(num_dims))
    # (scipy's affine_transform interprets its offset wrt the input, so the
    # source data has to cover this whole box)
    return Roi(lower, upper - lower)
def _random_offset(self):
    """Uniformly sample one offset component in [-max, +max] per dimension
    of ``self.max_misalign``."""
    components = []
    for limit in self.max_misalign:
        components.append(limit - np.random.rand() * 2 * limit)
    return Coordinate(tuple(components))
|
from setuptools import setup

# Package metadata for fuzzymatcher.
# see https://stackoverflow.com/questions/14399534/reference-requirements-txt-for-the-install-requires-kwarg-in-setuptools-setup-py
setup(name='fuzzymatcher',
      version='0.0.5',
      description='Fuzzy match two pandas dataframes based on one or more common fields',
      url='https://github.com/RobinL/fuzzymatcher',
      author='Robin Linacre',
      author_email='robinlinacre@hotmail.com',
      license='MIT',
      packages=['fuzzymatcher'],  # The directory to look in for the source code
      install_requires=['pandas', 'metaphone', 'python-Levenshtein', 'fuzzywuzzy', 'python-dateutil'],
      # BUG FIX: the keyword is `tests_require`; the previous `test_requires`
      # is not recognised by setuptools and was silently ignored.
      tests_require=["pylint", "coverage", "codecov"],
      keywords=["matching", "fuzzy", "probabalistic", "recordlinking", "fuzzymatching"],
      # keep the download tarball in sync with `version` (was stale at v0.0.4)
      download_url='https://github.com/RobinL/fuzzymatcher/archive/v0.0.5.tar.gz',
      zip_safe=False)
|
"""
"""
import sys
import argparse
import os
import gzip
import pandas
import gtfparse
import shellinford
from Bio import SeqIO
# Command-line interface: one or more input fasta files, the two required
# outputs (annotated CSV + FM full-text index), and the two annotation
# sources (Uniprot id mapping and Ensembl GTF).
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
    "input_paths",
    nargs="+",
    help="Fasta files to process")
parser.add_argument(
    "--out-csv",
    required=True,
    metavar="FILE.csv",
    help="CSV output")
parser.add_argument(
    "--out-index",
    required=True,
    metavar="FILE.fm",
    help="Index output")
parser.add_argument(
    "--id-mapping",
    required=True,
    metavar="FILE.idmapping.gz",
    help="Uniprot mapping file")
parser.add_argument(
    "--ensembl-gtf",
    required=True,
    metavar="FILE.gtf.gz",
    help="Ensembl GTF file")
def run():
    """Read gzipped fasta files, annotate records with Uniprot/Ensembl
    information, write the result to CSV and build an FM full-text index
    over the sequences.
    """
    args = parser.parse_args(sys.argv[1:])
    fm = shellinford.FMIndex()
    df = []
    # collect (name, description, sequence) rows and feed sequences to the index
    for f in args.input_paths:
        print("Processing", f)
        with gzip.open(f, "rt") as fd:
            records = SeqIO.parse(fd, format='fasta')
            for (i, record) in enumerate(records):
                seq = str(record.seq).upper()
                df.append((record.name, record.description, seq))
                fm.push_back("$" + seq + "$") # include sentinels
    df = pandas.DataFrame(df, columns=["name", "description", "seq"])
    print("Done reading fastas")
    print(df)
    # fasta names are pipe-separated: db|accession|entry
    pieces = df.name.str.split("|")
    df["db"] = pieces.str.get(0)
    df["accession"] = pieces.str.get(1)
    df["entry"] = pieces.str.get(2)
    print("Annotating using mapping", args.id_mapping)
    # idmapping file: tab-separated (accession, key, value) triples
    mapping_df = pandas.read_csv(
        args.id_mapping, sep="\t", header=None)
    mapping_df.columns = ['accession', 'key', 'value']
    for item in ["Ensembl", "Ensembl_TRS", "Gene_Name"]:
        # space-join all unique mapped values per accession
        accession_to_values = mapping_df.loc[
            mapping_df.key == item
        ].groupby("accession").value.unique().map(" ".join)
        df[item.lower()] = df.accession.map(accession_to_values)
    print("Annotating using gtf", args.ensembl_gtf)
    gtf_df = gtfparse.read_gtf(args.ensembl_gtf)
    matching_ensembl_genes = set(gtf_df.gene_id.unique())
    # pick, per record, the first mapped Ensembl gene that appears in the GTF
    # (empty string when none match)
    ensembl_primary = []
    for ensembls in df.ensembl.fillna("").str.split():
        result = ""
        for item in ensembls:
            if item in matching_ensembl_genes:
                result = item
                break
        ensembl_primary.append(result)
    df["ensembl_primary"] = ensembl_primary
    print("Fraction of records with matching ensembl genes", (
        df.ensembl_primary != "").mean())
    # genomic coordinates of the primary gene, looked up from the GTF
    gene_records = gtf_df.loc[gtf_df.feature == "gene"].set_index("gene_id")
    df["primary_ensembl_contig"] = df.ensembl_primary.map(gene_records.seqname)
    df["primary_ensembl_start"] = df.ensembl_primary.map(gene_records.start)
    df["primary_ensembl_end"] = df.ensembl_primary.map(gene_records.end)
    df["primary_ensembl_strand"] = df.ensembl_primary.map(gene_records.strand)
    print("Done annotating")
    print(df)
    df.to_csv(args.out_csv, index=True)
    print("Wrote: ", os.path.abspath((args.out_csv)))
    print("Building index")
    fm.build()
    fm.write(args.out_index)
    print("Wrote: ", os.path.abspath((args.out_index)))


if __name__ == '__main__':
    run()
|
import os
import json
import paramtools
import pandas as pd
from .outputs import credit_plot, rate_plot, liability_plot
from .constants import MetaParameters
from .helpers import arp_ref
from bokeh.models import ColumnDataSource
from taxcrunch.cruncher import Cruncher, CruncherParams
from taxcrunch.multi_cruncher import Batch
import taxcrunch
from taxcalc import Policy
from collections import OrderedDict
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__))
def get_version():
    """Return the model version string shown in the UI.

    BUG FIX: the previous implementation read ``taxcrunch.__version__`` into
    an unused local and returned a placeholder-free f-string; the dead read
    is removed and the literal kept, so callers see the same value.
    TODO(review): consider embedding the real taxcrunch version here.
    """
    return "Tax-Cruncher-ARP v0.1.0"
def get_inputs(meta_params_dict):
    """
    Return default parameters from Tax-Cruncher, restricted to the fields
    displayed under the "Tax Information" section.
    """
    meta_params = MetaParameters()
    meta_params.adjust(meta_params_dict)
    cruncher_params = CruncherParams()
    # only these CruncherParams fields are exposed to the user
    displayed = {
        "mstat", "page", "sage", "dep6", "dep12", "dep16", "dep17", "dep18",
        "otherdep", "pwages", "swages", "dividends", "intrec", "stcg", "ltcg",
        "businc", "sstb", "w2paid", "qualprop", "otherprop", "nonprop",
        "pensions", "gssi", "ui", "proptax", "otheritem", "childcare",
        "mortgage", "mtr_options", "schema",
    }
    dumped = cruncher_params.dump()
    model_params = {
        "Tax Information": {
            name: value for name, value in dumped.items() if name in displayed
        },
    }
    return {
        "meta_parameters": meta_params.dump(),
        "model_parameters": model_params,
    }
def validate_inputs(meta_params_dict, adjustment, errors_warnings):
    """Validate a user adjustment against the CruncherParams schema and
    merge any validation errors into *errors_warnings*."""
    checker = CruncherParams()
    checker.adjust(adjustment["Tax Information"], raise_errors=False)
    errors_warnings["Tax Information"]["errors"].update(checker.errors)
    return {"errors_warnings": errors_warnings}
def run_model(meta_params_dict, adjustment):
    """Run the cruncher under the ARP reform and build the COMP outputs.

    Builds a synthetic dataset for the bokeh plots by replicating the
    household 10,000 times and sweeping one input (selected by the
    "Calculation Option") from $0 to $2M in $200 steps.

    :raises ValueError: if the Calculation Option is not recognised
        (previously this fell through an if/elif chain and raised a
        ``NameError`` on the unbound ``span``).
    """
    # maps the Calculation Option label to the ivar column that is swept
    mtr_opt_to_column = {
        'Taxpayer Earnings': 11,
        'Spouse Earnings': 12,
        'Qualified Dividends': 13,
        'Interest Received': 14,
        'Short Term Gains': 15,
        'Long Term Gains': 16,
        'Business Income': 17,
        'Pensions': 23,
        'Gross Social Security Benefits': 24,
        'Real Estate Taxes Paid': 26,
        'Mortgage': 29,
    }
    meta_params = MetaParameters()
    meta_params.adjust(meta_params_dict)
    adjustment["Tax Information"]["year"] = meta_params.year
    params = CruncherParams()
    params.adjust(adjustment["Tax Information"], raise_errors=False)
    newvals = params.specification()
    crunch = Cruncher(inputs=newvals, custom_reform=arp_ref)
    # make dataset for bokeh plots
    ivar = crunch.batch_ivar
    _, mtr_opt, _ = crunch.taxsim_inputs()
    df = pd.concat([ivar] * 10000, ignore_index=True)
    increments = pd.DataFrame(list(range(0, 2000000, 200)))
    # use Calculation Option to determine what var to increment
    try:
        column = mtr_opt_to_column[mtr_opt]
    except KeyError:
        raise ValueError("unsupported Calculation Option: %s" % mtr_opt)
    span = int(ivar[column])
    df[column] = increments
    batch = Batch(df)
    df_base = batch.create_table()
    df_reform = batch.create_table(reform_file=arp_ref)
    # combine pre-ARP and ARP components of the child credits
    df_reform['CTC New'] = df_reform['CTC Refundable'] + df_reform['CTC Refundable ARP']
    df_reform['CDCC'] = df_reform['Child care credit'] + df_reform['Child care credit ARP']
    # compute average tax rates
    df_base['IATR'] = df_base['Individual Income Tax'] / df_base['AGI']
    df_base['PATR'] = df_base['Payroll Tax'] / df_base['AGI']
    df_reform['IATR'] = df_reform['Individual Income Tax'] / df_reform['AGI']
    df_reform['PATR'] = df_reform['Payroll Tax'] / df_reform['AGI']
    # x-axis of the plots is the swept dollar amount
    df_base['Axis'] = increments
    df_reform['Axis'] = increments
    return comp_output(crunch, df_base, df_reform, span, mtr_opt)
def comp_output(crunch, df_base, df_reform, span, mtr_opt):
    """Assemble the COMP result dict: rendered tables + plots plus CSV
    downloads of the two liability tables."""
    plots = [
        liability_plot(df_base, df_reform, span, mtr_opt),
        rate_plot(df_base, df_reform, span, mtr_opt),
        credit_plot(df_base, df_reform, span, mtr_opt),
    ]
    basic = crunch.basic_table()
    detail = crunch.calc_table()
    table_css = "table table-striped table-hover text-right"
    renderable = [
        {
            "media_type": "table",
            "title": "Basic Liabilities",
            "data": basic.to_html(classes=table_css),
        },
    ]
    renderable.extend(plots)
    renderable.append({
        "media_type": "table",
        "title": "Calculation of Liabilities",
        "data": detail.to_html(classes=table_css),
    })
    downloadable = [
        {
            "media_type": "CSV",
            "title": "basic_table",
            "data": basic.to_csv(),
        },
        {
            "media_type": "CSV",
            "title": "calculation_table",
            "data": detail.to_csv(),
        },
    ]
    return {"renderable": renderable, "downloadable": downloadable}
|
from django.utils.translation import ugettext as _
from .modelform import ModelFormMixin
class ObjectsFormMixin(ModelFormMixin):
    """Form mixin acting on a set of objects selected via ``?pks=...``."""

    pluralize = True
    link_attributes = {
        'data-listaction': 'urlupdate',
    }
    menus = ['list_action']

    def get_invalid_pks(self):
        # how many requested pks did not resolve to an object
        requested = self.request.GET.getlist('pks')
        return len(requested) - len(self.object_list)

    def get_object_list(self):
        # resolve ?pks=... against the queryset and cache the result
        selected = self.request.GET.getlist('pks')
        self.object_list = self.queryset.filter(pk__in=selected)
        return self.object_list

    def get_success_url(self):
        return self.router['list'].reverse()

    def get_form_valid_message(self):
        names = ', '.join(str(obj) for obj in self.object_list)
        message = '{}: {}'.format(_(self.view_label), names)
        return message.capitalize()
|
#!/usr/bin/env python
#
# query.py - The FileTreeQuery class
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
# Author: Michiel Cottaar <michiel.cottaar@.ndcn.ox.ac.uk>
#
"""This module contains the :class:`FileTreeQuery` class, which can be used to
search for files in a directory described by a :class:`.FileTree`. A
``FileTreeQuery`` object returns :class:`Match` objects which each represent a
file that is described by the ``FileTree``, and which is present in the
directory.
The following utility functions, used by the ``FileTreeQuery`` class, are also
defined in this module:
.. autosummary::
:nosignatures:
scan
allVariables
"""
import logging
import collections
import functools as ft
import os.path as op
from typing import Dict, List, Tuple
import numpy as np
from . import FileTree
log = logging.getLogger(__name__)
class FileTreeQuery(object):
    """The ``FileTreeQuery`` class uses a :class:`.FileTree` to search
    a directory for files which match a specific query.

    A ``FileTreeQuery`` scans the contents of a directory which is described
    by a :class:`.FileTree`, and identifies all file types (a.k.a. *templates*
    or *short names*) that are present, and the values of variables within
    each short name that are present. The :meth:`query` method can be used to
    retrieve files which match a specific template, and variable values.

    The :meth:`query` method returns a collection of :class:`Match` objects,
    each of which represents one file which matches the query.

    Example usage::

        >>> from fsl.utils.filetree import FileTree, FileTreeQuery
        >>> tree  = FileTree.read('bids_raw', './my_bids_data')
        >>> query = FileTreeQuery(tree)

        >>> query.axes('anat_image')
        ['acq', 'ext', 'modality', 'participant', 'rec', 'run_index',
         'session']

        >>> query.variables('anat_image')
        {'acq': [None],
         'ext': ['.nii.gz'],
         'modality': ['T1w', 'T2w'],
         'participant': ['01', '02', '03'],
         'rec': [None],
         'run_index': [None, '01', '02', '03'],
         'session': [None]}

        >>> query.query('anat_image', participant='01')
        [Match(./my_bids_data/sub-01/anat/sub-01_T1w.nii.gz),
         Match(./my_bids_data/sub-01/anat/sub-01_T2w.nii.gz)]

    Matches for templates contained within sub-trees are referred to by
    constructing a hierarchical path from the sub-tree template name(s),
    and the template name - see the :meth:`Match.full_name` method.
    """

    def __init__(self, tree):
        """Create a ``FileTreeQuery``. The contents of the tree directory are
        scanned via the :func:`scan` function, which may take some time for
        large data sets.

        :arg tree: The :class:`.FileTree` object
        """
        # Hard-code into the templates any pre-defined variables
        tree = tree.partial_fill()

        # Find all files present in the directory
        # (as Match objects), and find all variables,
        # plus their values, and all templates,
        # that are present in the directory.
        matches = scan(tree)
        allvars, templatevars = allVariables(tree, matches)

        # Now we are going to build a series of ND
        # arrays to store Match objects. We create
        # one array for each template. Each axis
        # in an array corresponds to a variable
        # present in files of that template type,
        # and each position along an axis corresponds
        # to one value of that variable.
        #
        # These arrays will be used to store and
        # retrieve Match objects - given a template
        # and a set of variable values, we can
        # quickly find the corresponding Match
        # object (or objects).

        # matcharrays contains {template : ndarray}
        # mappings, and varidxs contains
        # {template : {varvalue : index}} mappings
        matcharrays = {}
        varidxs = {}

        for template, tvars in templatevars.items():

            tvarlens = [len(allvars[v]) for v in tvars]

            # "Scalar" match objects - templates
            # which have no variables, and for
            # which zero or one file is present
            if len(tvarlens) == 0:
                tvarlens = 1

            # An ND array for this short name. Each
            # element is a Match object, or nan.
            # BUG FIX: the `np.object` alias was deprecated in NumPy 1.20 and
            # removed in 1.24 - the builtin `object` is the correct dtype.
            matcharray = np.zeros(tvarlens, dtype=object)
            matcharray[:] = np.nan

            # indices into the match array
            # for each variable value
            tvaridxs = {}
            for v in tvars:
                tvaridxs[v] = {n : i for i, n in enumerate(allvars[v])}

            matcharrays[template] = matcharray
            varidxs[template] = tvaridxs

        # Populate the match arrays
        for match in matches:
            tvars = templatevars[match.full_name]
            tvaridxs = varidxs[match.full_name]
            tarr = matcharrays[match.full_name]
            idx = []

            if len(match.variables) == 0:
                idx = [0]
            else:
                for var in tvars:
                    val = match.variables[var]
                    idx.append(tvaridxs[var][val])

            tarr[tuple(idx)] = match

        self.__tree = tree
        self.__allvars = allvars
        self.__templatevars = templatevars
        self.__matches = matches
        self.__matcharrays = matcharrays
        self.__varidxs = varidxs

    def axes(self, template) -> List[str]:
        """Returns a list containing the names of variables present in files
        of the given ``template`` type, in the same order of the axes of
        :class:`Match` arrays that are returned by the :meth:`query` method.
        """
        return self.__templatevars[template]

    def variables(self, template=None) -> Dict[str, List]:
        """Return a dict of ``{variable : [values]}`` mappings.
        This dict describes all variables and their possible values in
        the tree.

        If a ``template`` is specified, only variables which are present in
        files of that ``template`` type are returned.
        """
        if template is None:
            return {var : list(vals) for var, vals in self.__allvars.items()}
        else:
            varnames = self.__templatevars[template]
            return {var : list(self.__allvars[var]) for var in varnames}

    @property
    def tree(self):
        """Returns the :class:`.FileTree` associated with this
        ``FileTreeQuery``.
        """
        return self.__tree

    @property
    def templates(self) -> List[str]:
        """Returns a list containing all templates of the ``FileTree`` that
        are present in the directory.
        """
        return list(self.__templatevars.keys())

    def query(self, template, asarray=False, **variables):
        """Search for files of the given ``template``, which match
        the specified ``variables``. All hits are returned for variables
        that are unspecified.

        :arg template: Template of files to search for.

        :arg asarray:  If ``True``, the relevant :class:`Match` objects are
                       returned in a in a ND ``numpy.array`` where each
                       dimension corresponds to a variable for the
                       ``templates`` in question (as returned by
                       :meth:`axes`). Otherwise (the default), they are
                       returned in a list.

        All other arguments are assumed to be ``variable=value`` pairs,
        used to restrict which matches are returned. All values are returned
        for variables that are not specified, or variables which are given a
        value of ``'*'``.

        :returns: A list of ``Match`` objects, (or a ``numpy.array`` if
                  ``asarray=True``).
        """
        varnames = list(variables.keys())
        allvarnames = self.__templatevars[template]
        varidxs = self.__varidxs[template]
        matcharray = self.__matcharrays[template]
        slc = []

        for var in allvarnames:
            if var in varnames: val = variables[var]
            else:               val = '*'

            # We're using np.newaxis to retain
            # the full dimensionality of the
            # array, so that the axis labels
            # returned by the axes() method
            # are valid.
            if val == '*': slc.append(slice(None))
            else:          slc.extend([np.newaxis, varidxs[var][val]])

        result = matcharray[tuple(slc)]

        if asarray: return result
        else:       return [m for m in result.flat if isinstance(m, Match)]
@ft.total_ordering
class Match(object):
    """Represents one existing file whose name matched a ``FileTree``
    template. Instances are produced by :func:`scan` and returned from
    :meth:`FileTreeQuery.query`.
    """

    def __init__(self, filename, template, tree, variables):
        """Create a ``Match``. All arguments are exposed as read-only
        properties.

        :arg filename:  name of existing file
        :arg template:  template identifier
        :arg tree:      :class:`.FileTree` which contains this ``Match``
        :arg variables: ``{variable : value}`` mappings containing all
                        variables present in the file name
        """
        self.__filename = filename
        self.__template = template
        self.__tree = tree
        # defensive copy - the caller's dict stays untouched
        self.__variables = dict(variables)

    @property
    def filename(self):
        return self.__filename

    @property
    def template(self):
        return self.__template

    @property
    def tree(self):
        return self.__tree

    @property
    def variables(self):
        # return a copy so callers cannot mutate our state
        return dict(self.__variables)

    @property
    def full_name(self):
        """Combination of the matched ``template`` and the name(s) of the
        relevant ``FileTree`` objects.

        It allows one to unambiguously identify the location of a ``Match``
        in a ``FileTree`` hierarchy, where the same ``short_name`` may be
        used in different sub-trees.
        """
        # walk up to the root, collecting ancestors
        lineage = [self.__tree]
        while lineage[-1].parent is not None:
            lineage.append(lineage[-1].parent)
        # root-first order, then drop the root tree itself
        lineage = list(reversed(lineage))[1:]
        return '/'.join([t.name for t in lineage] + [self.__template])

    def __eq__(self, other):
        if not isinstance(other, Match):
            return False
        return (self.__filename == other.filename and
                self.__template == other.template and
                self.__tree is other.tree and
                self.__variables == other.variables)

    def __lt__(self, other):
        # orders by filename; non-Match operands compare False
        return isinstance(other, Match) and self.__filename < other.filename

    def __repr__(self):
        """Returns a string representation of this ``Match``. """
        return 'Match({}: {})'.format(self.full_name, self.filename)

    def __str__(self):
        """Returns a string representation of this ``Match``. """
        return repr(self)
def scan(tree : FileTree) -> List[Match]:
    """Scans the directory of the given ``FileTree`` to find all files which
    match a tree template.

    :arg tree: :class:`.FileTree` to scan
    :returns:  list of :class:`Match` objects
    """
    matches = []
    for template in tree.templates:
        for variables in tree.get_all_vars(template, glob_vars='all'):
            filename = tree.update(**variables).get(template)
            # only files that actually exist on disk become matches
            if not op.isfile(filename):
                continue
            matches.append(Match(filename, template, tree, variables))

    # recurse into sub-trees (the sub-tree names are not needed here,
    # so iterate values() rather than items() with an unused key)
    for sub_tree in tree.sub_trees.values():
        matches.extend(scan(sub_tree))

    return matches
def allVariables(
        tree    : FileTree,
        matches : List[Match]) -> Tuple[Dict[str, List], Dict[str, List]]:
    """Identifies the ``FileTree`` variables which are actually represented
    in files in the directory.

    :arg tree:    The ``FileTree`` object
    :arg matches: list of ``Match`` objects (e.g. as returned by :func:`scan`)

    :returns: a tuple containing two dicts:

               - A dict of ``{ variable : [values] }`` mappings containing
                 all variables and their possible values present in the
                 given list of ``Match`` objects.

               - A dict of ``{ full_name : [variables] }`` mappings,
                 containing the variables which are relevant to each
                 template.
    """
    allvars      = collections.defaultdict(set)
    alltemplates = collections.defaultdict(set)

    for match in matches:
        # accessing the entry ensures that templates
        # with no variables still get an (empty) entry
        template_vars = alltemplates[match.full_name]
        for var, val in match.variables.items():
            allvars[var].add(val)
            template_vars.add(var)

    def sortkey(value):
        # allow None to be compared against strings
        return '' if value is None else value

    allvars      = {var : sorted(vals, key=sortkey)
                    for var, vals in allvars.items()}
    alltemplates = {name : sorted(tvars)
                    for name, tvars in alltemplates.items()}

    return allvars, alltemplates
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import math
import time
import numpy as np
import unittest
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable
from paddle.fluid.dygraph import Embedding, Linear, GRUUnit
from paddle.fluid.dygraph import declarative, ProgramTranslator
SEED = 2020
program_translator = ProgramTranslator()
class DynamicGRU(fluid.dygraph.Layer):
    """Unrolls a :class:`GRUUnit` step by step over the time axis (axis 1)
    of its input in dygraph mode.

    When ``is_reverse`` is set, steps are taken from the last time step to
    the first, and the collected outputs are re-reversed so they line up
    with the input order.
    """

    def __init__(self,
                 size,
                 h_0=None,
                 param_attr=None,
                 bias_attr=None,
                 is_reverse=False,
                 gate_activation='sigmoid',
                 candidate_activation='tanh',
                 origin_mode=False,
                 init_size=None):
        # size: GRU hidden size; the GRUUnit is built with input width size * 3.
        # h_0: initial hidden state (numpy array), possibly shared with other
        #      DynamicGRU instances (see forward()).
        # init_size: unused; kept for signature compatibility.
        super(DynamicGRU, self).__init__()
        self.gru_unit = GRUUnit(
            size * 3,
            param_attr=param_attr,
            bias_attr=bias_attr,
            activation=candidate_activation,
            gate_activation=gate_activation,
            origin_mode=origin_mode)
        self.size = size
        self.h_0 = h_0
        self.is_reverse = is_reverse

    def forward(self, inputs):
        # Use `to_variable` to create a copy of global h_0 created not in `DynamicGRU`,
        # to avoid modify it because `h_0` is both used in other `DynamicGRU`.
        hidden = to_variable(self.h_0)
        hidden.stop_gradient = True
        res = []
        # one GRU step per position along the time axis
        for i in range(inputs.shape[1]):
            if self.is_reverse:
                # index from the end when running in reverse
                j = fluid.layers.shape(inputs)[1] - 1 - i
            else:
                # TODO(Aurelius84): In while block, if the var created in parent block
                # participates in the calculation of gradient, the result of gradient
                # is incorrect because each step scope always returns the same value
                # generated by last step. Here we add 0 to create `j` in while block to
                # avoid this bug, and working on fixing it in next PR.
                j = i + 0
            # FIXME(Aurelius84): see above explanation.
            hidden = fluid.layers.scale(hidden, 1)
            # See above explanation.
            # input_ = inputs[:, i:i+1, :] # original code
            input_ = fluid.layers.slice(
                inputs, axes=[1], starts=[j], ends=[j + 1])
            input_ = fluid.layers.reshape(
                input_, [-1, input_.shape[2]], inplace=False)
            hidden, reset, gate = self.gru_unit(input_, hidden)
            # re-insert the time axis so the step outputs can be concatenated
            hidden_ = fluid.layers.reshape(
                hidden, [-1, 1, hidden.shape[1]], inplace=False)
            res.append(hidden_)
        if self.is_reverse:
            # outputs were collected back-to-front; restore input order
            res = res[::-1]
        res = fluid.layers.concat(res, axis=1)
        return res
class BiGRU(fluid.dygraph.Layer):
    """Bidirectional GRU layer: a forward and a reverse (Linear -> DynamicGRU)
    branch whose outputs are concatenated along the feature axis."""

    def __init__(self, input_dim, grnn_hidden_dim, init_bound, h_0=None):
        super(BiGRU, self).__init__()

        def uniform_attr():
            # fresh ParamAttr per sub-layer: uniform init in
            # [-init_bound, init_bound] with L2 weight decay
            return fluid.ParamAttr(
                initializer=fluid.initializer.Uniform(
                    low=-init_bound, high=init_bound),
                regularizer=fluid.regularizer.L2DecayRegularizer(
                    regularization_coeff=1e-4))

        self.pre_gru = Linear(
            input_dim=input_dim,
            output_dim=grnn_hidden_dim * 3,
            param_attr=uniform_attr())
        self.gru = DynamicGRU(
            size=grnn_hidden_dim,
            h_0=h_0,
            param_attr=uniform_attr())
        self.pre_gru_r = Linear(
            input_dim=input_dim,
            output_dim=grnn_hidden_dim * 3,
            param_attr=uniform_attr())
        self.gru_r = DynamicGRU(
            size=grnn_hidden_dim,
            is_reverse=True,
            h_0=h_0,
            param_attr=uniform_attr())

    def forward(self, input_feature):
        # forward branch
        forward_out = self.gru(self.pre_gru(input_feature))
        # reverse branch
        reverse_out = self.gru_r(self.pre_gru_r(input_feature))
        # concatenate the two directions along the last (feature) axis
        return fluid.layers.concat(input=[forward_out, reverse_out], axis=-1)
class LinearChainCRF(fluid.dygraph.Layer):
    """Dygraph wrapper around the ``linear_chain_crf`` op.

    Owns the transition parameter of shape ``[size + 2, size]`` (the two
    extra rows presumably hold start/stop transitions - see the paddle CRF
    op documentation) and returns the log likelihood of a label sequence
    given emission scores.
    """

    def __init__(self, param_attr, size=None, is_test=False, dtype='float32'):
        # param_attr: ParamAttr for the transition matrix; its name also
        #             enables weight sharing (see LexNet).
        # size: number of labels.
        super(LinearChainCRF, self).__init__()
        self._param_attr = param_attr
        self._dtype = dtype
        self._size = size
        self._is_test = is_test
        self._transition = self.create_parameter(
            attr=self._param_attr,
            shape=[self._size + 2, self._size],
            dtype=self._dtype)

    @property
    def weight(self):
        # the learnable transition matrix
        return self._transition

    @weight.setter
    def weight(self, value):
        self._transition = value

    def forward(self, input, label, length=None):
        """Append a ``linear_chain_crf`` op and return its log likelihood.

        :param input:  emission scores
        :param label:  gold label sequence
        :param length: optional true sequence lengths (for padded input)
        """
        # output variables are created up front; the op fills them in
        alpha = self._helper.create_variable_for_type_inference(
            dtype=self._dtype)
        emission_exps = self._helper.create_variable_for_type_inference(
            dtype=self._dtype)
        transition_exps = self._helper.create_variable_for_type_inference(
            dtype=self._dtype)
        log_likelihood = self._helper.create_variable_for_type_inference(
            dtype=self._dtype)
        this_inputs = {
            "Emission": [input],
            "Transition": self._transition,
            "Label": [label]
        }
        if length is not None:
            this_inputs['Length'] = [length]
        self._helper.append_op(
            type='linear_chain_crf',
            inputs=this_inputs,
            outputs={
                "Alpha": [alpha],
                "EmissionExps": [emission_exps],
                # NOTE(review): these two outputs are not list-wrapped like
                # the others - verify against the op definition
                "TransitionExps": transition_exps,
                "LogLikelihood": log_likelihood
            },
            attrs={"is_test": self._is_test, })
        return log_likelihood
class CRFDecoding(fluid.dygraph.Layer):
    """Dygraph wrapper around the ``crf_decoding`` op (Viterbi decoding).

    Owns a transition matrix of shape ``[size + 2, size]`` like
    :class:`LinearChainCRF`; in this model the weight is overwritten after
    construction so both layers share one parameter (see LexNet).
    """

    def __init__(self, param_attr, size=None, is_test=False, dtype='float32'):
        # size: number of labels.
        super(CRFDecoding, self).__init__()
        self._dtype = dtype
        self._size = size
        self._is_test = is_test
        self._param_attr = param_attr
        self._transition = self.create_parameter(
            attr=self._param_attr,
            shape=[self._size + 2, self._size],
            dtype=self._dtype)

    @property
    def weight(self):
        # the transition matrix (shared with LinearChainCRF in LexNet)
        return self._transition

    @weight.setter
    def weight(self, value):
        self._transition = value

    def forward(self, input, label=None, length=None):
        """Append a ``crf_decoding`` op and return the Viterbi path.

        :param input:  emission scores
        :param label:  optional gold labels
        :param length: optional true sequence lengths (for padded input)
        """
        viterbi_path = self._helper.create_variable_for_type_inference(
            dtype=self._dtype)
        this_inputs = {
            "Emission": [input],
            "Transition": self._transition,
            # NOTE(review): `label` is not list-wrapped here, unlike in
            # LinearChainCRF.forward - confirm this is intentional
            "Label": label
        }
        if length is not None:
            this_inputs['Length'] = [length]
        self._helper.append_op(
            type='crf_decoding',
            inputs=this_inputs,
            outputs={"ViterbiPath": [viterbi_path]},
            attrs={"is_test": self._is_test, })
        return viterbi_path
class ChunkEval(fluid.dygraph.Layer):
    """Dygraph wrapper around the ``chunk_eval`` op, which computes chunking
    precision/recall/F1 plus the underlying chunk counts."""

    def __init__(self, num_chunk_types, chunk_scheme,
                 excluded_chunk_types=None):
        super(ChunkEval, self).__init__()
        self.num_chunk_types = num_chunk_types
        self.chunk_scheme = chunk_scheme
        self.excluded_chunk_types = excluded_chunk_types

    def forward(self, input, label, seq_length=None):
        """Append a ``chunk_eval`` op over predictions and gold labels.

        :return: (precision, recall, f1_score,
                  num_infer_chunks, num_label_chunks, num_correct_chunks)
        """
        def new_var(dtype):
            # fresh output variable for the op to fill in
            return self._helper.create_variable_for_type_inference(dtype=dtype)

        precision = new_var("float32")
        recall = new_var("float32")
        f1_score = new_var("float32")
        num_infer_chunks = new_var("int64")
        num_label_chunks = new_var("int64")
        num_correct_chunks = new_var("int64")

        op_inputs = {"Inference": [input], "Label": [label]}
        if seq_length is not None:
            op_inputs["SeqLength"] = [seq_length]

        self._helper.append_op(
            type='chunk_eval',
            inputs=op_inputs,
            outputs={
                "Precision": [precision],
                "Recall": [recall],
                "F1-Score": [f1_score],
                "NumInferChunks": [num_infer_chunks],
                "NumLabelChunks": [num_label_chunks],
                "NumCorrectChunks": [num_correct_chunks]
            },
            attrs={
                "num_chunk_types": self.num_chunk_types,
                "chunk_scheme": self.chunk_scheme,
                "excluded_chunk_types": self.excluded_chunk_types or []
            })
        return (precision, recall, f1_score, num_infer_chunks,
                num_label_chunks, num_correct_chunks)
class LexNet(fluid.dygraph.Layer):
    """Lexical analysis network: word embedding -> stacked BiGRUs -> linear
    emission layer -> linear-chain CRF (loss) + CRF decoding (prediction).

    ``forward`` returns ``(avg_cost, crf_decode)``.
    """

    def __init__(self, args, length=None):
        """Build the network from the hyper-parameters on *args*.

        :param args:   configuration object (see ``Args``): vocabulary and
                       embedding sizes, GRU hidden size, number of BiGRU
                       layers, batch size, optional per-parameter learning
                       rates.
        :param length: unused; kept for signature compatibility.
        """
        super(LexNet, self).__init__()
        self.word_emb_dim = args.word_emb_dim
        self.vocab_size = args.vocab_size
        self.num_labels = args.num_labels
        self.grnn_hidden_dim = args.grnn_hidden_dim
        # optional per-parameter learning rates, defaulting to 1.0
        self.emb_lr = args.emb_learning_rate if 'emb_learning_rate' in dir(
            args) else 1.0
        # BUG FIX: this previously read args.emb_learning_rate even though it
        # tested for 'crf_learning_rate', silently ignoring any custom CRF
        # learning rate.
        self.crf_lr = args.crf_learning_rate if 'crf_learning_rate' in dir(
            args) else 1.0
        self.bigru_num = args.bigru_num
        self.init_bound = 0.1

        self.word_embedding = Embedding(
            size=[self.vocab_size, self.word_emb_dim],
            dtype='float32',
            param_attr=fluid.ParamAttr(
                learning_rate=self.emb_lr,
                name="word_emb",
                initializer=fluid.initializer.Uniform(
                    low=-self.init_bound, high=self.init_bound)))

        # initial hidden state, shared by every GRU direction/layer
        h_0 = np.zeros((args.batch_size, self.grnn_hidden_dim), dtype="float32")
        h_0 = to_variable(h_0)

        # stacked BiGRUs; layers after the first consume the concatenated
        # (2 * hidden) output of the previous layer.
        # NOTE(review): the first layer's input width is grnn_hidden_dim, not
        # word_emb_dim - they happen to be equal in Args; confirm if the two
        # dims ever diverge.
        self.bigru_units = []
        for i in range(self.bigru_num):
            input_dim = (self.grnn_hidden_dim if i == 0
                         else self.grnn_hidden_dim * 2)
            self.bigru_units.append(
                self.add_sublayer(
                    "bigru_units%d" % i,
                    BiGRU(
                        input_dim,
                        self.grnn_hidden_dim,
                        self.init_bound,
                        h_0=h_0)))

        # projects BiGRU features to per-label emission scores
        self.fc = Linear(
            input_dim=self.grnn_hidden_dim * 2,
            output_dim=self.num_labels,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Uniform(
                    low=-self.init_bound, high=self.init_bound),
                regularizer=fluid.regularizer.L2DecayRegularizer(
                    regularization_coeff=1e-4)))

        self.linear_chain_crf = LinearChainCRF(
            param_attr=fluid.ParamAttr(
                name='linear_chain_crfw', learning_rate=self.crf_lr),
            size=self.num_labels)

        self.crf_decoding = CRFDecoding(
            param_attr=fluid.ParamAttr(
                name='crfw', learning_rate=self.crf_lr),
            size=self.num_labels)
        # share weight between the loss CRF and the decoding CRF
        self.crf_decoding.weight = self.linear_chain_crf.weight

    @declarative
    def forward(self, word, target, length=None):
        """Compute the mean CRF cost and the decoded label path.

        :param word:   word id tensor
        :param target: gold label tensor
        :param length: true sequence lengths (for padded batches)
        :return: (avg_cost, crf_decode)
        """
        word_embed = self.word_embedding(word)
        input_feature = word_embed
        # feed each BiGRU's output into the next layer
        for i in range(self.bigru_num):
            bigru_output = self.bigru_units[i](input_feature)
            input_feature = bigru_output
        emission = self.fc(bigru_output)
        crf_cost = self.linear_chain_crf(
            input=emission, label=target, length=length)
        avg_cost = fluid.layers.mean(x=crf_cost)
        crf_decode = self.crf_decoding(input=emission, length=length)
        return avg_cost, crf_decode
class Args(object):
    """Hyper-parameter bundle consumed by LexNet and the training code."""
    epoch = 1
    batch_size = 4                  # samples per batch; also fixes h_0's shape in LexNet
    vocab_size = 100                # rows of the word embedding table
    num_labels = 10                 # size of the label set (CRF states)
    word_emb_dim = 128              # word embedding width
    grnn_hidden_dim = 128           # GRU hidden size
    base_learning_rate = 0.01       # Adam learning rate (see do_train)
    bigru_num = 2                   # number of stacked BiGRU layers
    print_steps = 1
    # output locations - presumably used by save/compare code below this
    # chunk; confirm against the rest of the file
    model_save_dir = "./lac_model"
    dy_param_path = "./lac_dy_param"
def get_random_input_data(batch_size, vocab_size, num_labels, max_seq_len=64):
    """Build a deterministic reader of random (word, label, length) batches.

    Sequences are drawn from a SEED-ed RandomState so repeated runs see the
    same data. Each yielded batch is padded to the batch's longest sequence.
    """
    rng = np.random.RandomState(SEED)
    pad = np.int64(0)
    num_iters = 5

    def __reader__():
        samples, lengths = [], []
        for _ in range(num_iters * batch_size):
            seq_len = rng.randint(3, max_seq_len)
            words = rng.randint(0, vocab_size,
                                [seq_len]).astype('int64').tolist()
            labels = rng.randint(0, num_labels,
                                 [seq_len]).astype('int64').tolist()
            samples.append((words, labels))
            lengths.append(seq_len)
            if len(samples) == batch_size:
                # Pad every sequence in the batch to the longest one.
                max_len = min(max(lengths), max_seq_len)
                padded = []
                for _, (words, labels) in zip(lengths, samples):
                    words = words[0:max_len]
                    true_len = np.int64(len(words))
                    words = words + [pad] * (max_len - true_len)
                    labels = labels[0:max_len]
                    labels = labels + [pad] * (max_len - true_len)
                    assert len(words) == len(labels)
                    padded.append((words, labels, true_len))
                yield padded
                samples, lengths = [], []

    return __reader__
def create_dataloader(reader, place):
    """Wrap a sample-list reader in an iterable, double-buffered DataLoader."""
    loader = fluid.io.DataLoader.from_generator(
        capacity=16, iterable=True, use_double_buffer=True)
    loader.set_sample_list_generator(reader, places=place)
    return loader
def do_train(args, to_static):
    """Train LexNet in dygraph mode (optionally dy2static-translated).

    :param args: an Args-like config object (epoch, batch_size, lrs, paths...)
    :param to_static: when True, enable the program translator and save a
        static inference model; otherwise save dygraph parameters.
    :return: np.array of per-step average CRF losses (used to compare modes)
    """
    program_translator.enable(to_static)
    place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
    ) else fluid.CPUPlace()
    with fluid.dygraph.guard(place):
        # Fix seeds so the static and dygraph runs are comparable.
        fluid.default_startup_program().random_seed = SEED
        fluid.default_main_program().random_seed = SEED
        reader = get_random_input_data(args.batch_size, args.vocab_size,
                                       args.num_labels)
        train_loader = create_dataloader(reader, place)
        model = LexNet(args)
        optimizer = fluid.optimizer.AdamOptimizer(
            learning_rate=args.base_learning_rate,
            parameter_list=model.parameters())
        # ChunkEval expects the number of chunk types, not raw label count.
        chunk_eval = ChunkEval(
            int(math.ceil((args.num_labels - 1) / 2.0)), "IOB")
        step = 0
        chunk_evaluator = fluid.metrics.ChunkEvaluator()
        chunk_evaluator.reset()
        loss_data = []
        for epoch_id in range(args.epoch):
            for batch in train_loader():
                words, targets, length = batch
                start_time = time.time()
                avg_cost, crf_decode = model(words, targets, length)
                loss_data.append(avg_cost.numpy()[0])
                # backward and optimization
                avg_cost.backward()
                optimizer.minimize(avg_cost)
                model.clear_gradients()
                end_time = time.time()
                if step % args.print_steps == 0:
                    (precision, recall, f1_score, num_infer_chunks,
                     num_label_chunks, num_correct_chunks) = chunk_eval(
                         input=crf_decode, label=targets, seq_length=length)
                    outputs = [avg_cost, precision, recall, f1_score]
                    avg_cost, precision, recall, f1_score = [
                        np.mean(x.numpy()) for x in outputs
                    ]
                    print(
                        "[train] step = %d, loss = %f, P: %f, R: %f, F1: %f, elapsed time %f"
                        % (step, avg_cost, precision, recall, f1_score,
                           end_time - start_time))
                step += 1
        # save inference model
        if to_static:
            program_translator.save_inference_model(
                dirname=args.model_save_dir, feed=[0, 2], fetch=[1])
        else:
            fluid.dygraph.save_dygraph(model.state_dict(), args.dy_param_path)
        return np.array(loss_data)
class TestLACModel(unittest.TestCase):
    """Checks dygraph vs. dy2static training parity and inference loading."""

    def setUp(self):
        self.args = Args()
        self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
        ) else fluid.CPUPlace()

    def train(self, to_static):
        # Thin wrapper so both modes share the same training entry point.
        out = do_train(self.args, to_static)
        return out

    def test_train(self):
        st_out = self.train(to_static=True)
        dy_out = self.train(to_static=False)
        self.assertTrue(
            np.allclose(dy_out, st_out),
            msg="dygraph output:\n{},\nstatic output:\n {}.".format(dy_out,
                                                                    st_out))
        # Prediction needs trained models, so put `test_predict` at last of `test_train`
        self.verify_predict()

    def verify_predict(self):
        # Compare dygraph prediction against the saved static inference model.
        reader = get_random_input_data(
            self.args.batch_size, self.args.vocab_size, self.args.num_labels)
        for batch in reader():
            batch = [np.vstack(var) for var in zip(*batch)]
            dy_pre = self.predict_dygraph(batch)
            st_pre = self.predict_static(batch)
            self.assertTrue(
                np.allclose(dy_pre, st_pre),
                msg="dy_pre:\n {}\n, st_pre: \n{}.".format(dy_pre, st_pre))

    def predict_dygraph(self, batch):
        words, targets, length = batch
        # Make sure prediction runs in plain dygraph (translator disabled).
        program_translator.enable(False)
        with fluid.dygraph.guard(self.place):
            model = LexNet(self.args)
            # load dygraph trained parameters
            model_dict, _ = fluid.load_dygraph(self.args.dy_param_path +
                                               ".pdparams")
            model.set_dict(model_dict)
            model.eval()
            _, pred_res = model(
                to_variable(words), to_variable(targets), to_variable(length))
            return pred_res.numpy()

    def predict_static(self, batch):
        """
        LAC model contains h_0 created in `__init__` that is necessary for inferring.
        Load inference model to test it's ok for prediction.
        """
        exe = fluid.Executor(self.place)
        # load inference model
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(
             self.args.model_save_dir, executor=exe)
        words, targets, length = batch
        pred_res = exe.run(
            inference_program,
            feed={feed_target_names[0]: words,
                  feed_target_names[1]: length},
            fetch_list=fetch_targets)
        return pred_res[0]
# Standard entry point: run the TestCase above when executed directly.
if __name__ == "__main__":
    unittest.main()
|
"""Unit test package for data_quality_framework."""
|
import matplotlib.pyplot as plt
from velocity_transformations import *
# --------------------------------------------------------
# ---------------- Constants -----------------------------
# --------------------------------------------------------
# Shared matplotlib quiver styling for all plots in this module.
QUIVER_SCALE = 200.     # arrow length scaling (data units per arrow unit)
QUIVER_WIDTH = 0.001    # arrow shaft width, in figure-width fractions
def plot_theoretical_motion(v_xyz_stream, img_prefix='', dist=1000):
    """
    Plot theoretical observables (pmra, pmdec, rv) over the full RA range,
    one curve per declination from -20 to +80 deg, and save each family of
    curves to '<img_prefix>_pmra.png', '_pmdec.png' and '_rv.png'.

    :param v_xyz_stream: stream space-velocity vector passed to compute_*()
    :param img_prefix: filename prefix for the three output png files
    :param dist: distance passed to compute_pmra/compute_pmdec
        (units as expected by velocity_transformations — TODO confirm)
    :return: None (figures are written to disk)
    """
    # get theoretical observed rv pmra pmdec, based on streams rv values
    ra_range = np.deg2rad(np.arange(0, 360, 0.5))
    dec_range_deg = np.arange(-20., 90., 10.)

    def _plot_family(compute_at_dec, suffix):
        # One curve per declination, then save and close a single figure.
        for dec_deg in dec_range_deg:
            plt.plot(ra_range, compute_at_dec(np.deg2rad(dec_deg)))
        plt.savefig(img_prefix + suffix)
        plt.close()

    _plot_family(lambda dec: compute_pmra(ra_range, dec, dist, v_xyz_stream),
                 '_pmra.png')
    _plot_family(lambda dec: compute_pmdec(ra_range, dec, dist, v_xyz_stream),
                 '_pmdec.png')
    _plot_family(lambda dec: compute_rv(ra_range, dec, v_xyz_stream),
                 '_rv.png')
def plot_members_location_motion(gaia, pmra_pred=None, pmdec_pred=None, idx=None, radiant=None, add_errors=None, color=None,
                                 path='members.png', title=''):
    """
    Plot sky positions of (selected) Gaia stars with observed and predicted
    proper-motion vectors, and save the figure to *path*.

    :param gaia: astropy-Table-like object with 'ra_gaia'/'dec_gaia' columns
    :param pmra_pred: optional predicted pmra array, aligned with *gaia* rows
    :param pmdec_pred: optional predicted pmdec array, aligned with *gaia* rows
    :param idx: optional boolean mask selecting rows; all rows when None
    :param radiant: optional (ra, dec) pair marked with a star symbol
    :param add_errors: when truthy, draw pmra/pmdec error bars if present
    :param color: optional per-star scalar colour array (adds a colorbar)
    :param path: output image path
    :param title: plot title
    :return: None (figure is written to disk)
    """
    if idx is None:
        # use all datarows
        # BUG FIX: np.ndarray(len(gaia)) allocates an *uninitialized float*
        # array; filling it with True yields 1.0 floats, which is not a valid
        # boolean mask for gaia[idx] / pmra_pred[idx]. Build a bool mask.
        idx = np.ones(len(gaia), dtype=bool)
    use_gaia_data = gaia[idx]
    # plot location of the stars
    if radiant is not None:
        plt.scatter(radiant[0], radiant[1], lw=0, s=15, c='black', marker='*')
    if color is None:
        plt.scatter(use_gaia_data['ra_gaia'], use_gaia_data['dec_gaia'], lw=0, c='black', s=5)
    else:
        plt.scatter(use_gaia_data['ra_gaia'], use_gaia_data['dec_gaia'], lw=0, c=color, s=5)
        plt.colorbar()
    gaia_features = gaia.colnames
    if add_errors:
        if 'pmra_error' in gaia_features and 'pmdec_error' in gaia_features:
            # NOTE(review): pm errors are drawn as positional error bars on the
            # ra/dec scatter — looks intentional in the original, kept as-is.
            plt.errorbar(use_gaia_data['ra_gaia'], use_gaia_data['dec_gaia'],
                         xerr=use_gaia_data['pmra_error'], yerr=use_gaia_data['pmdec_error'], fmt='o', ecolor='black', markersize=0, capsize=0)
    if 'pmra' in gaia_features and 'pmdec' in gaia_features:
        # observed proper motions in green
        plt.quiver(use_gaia_data['ra_gaia'], use_gaia_data['dec_gaia'], use_gaia_data['pmra'], use_gaia_data['pmdec'],
                   pivot='tail', scale=QUIVER_SCALE, color='green', width=QUIVER_WIDTH)
    if pmra_pred is not None and pmdec_pred is not None:
        # predicted proper motions in red
        plt.quiver(use_gaia_data['ra_gaia'], use_gaia_data['dec_gaia'], pmra_pred[idx], pmdec_pred[idx],
                   pivot='tail', scale=QUIVER_SCALE, color='red', width=QUIVER_WIDTH)
    # annotate graph
    plt.xlabel('RA [deg]')
    plt.ylabel('DEC [deg]')
    plt.title(title)
    plt.xlim((0, 360))
    plt.ylim((-90, 90))
    # save graph
    plt.tight_layout()
    plt.savefig(path, dpi=250)
    plt.close()
def plot_members_location_motion_theoretical(ra, dec, pmra_pred, pmdec_pred, radiant=None,
                                             path='members.png', title=''):
    """
    Plot star positions with *theoretical* proper-motion vectors only and
    save the figure to *path*.

    :param ra: right-ascension values [deg]
    :param dec: declination values [deg]
    :param pmra_pred: predicted pmra components for the quiver arrows
    :param pmdec_pred: predicted pmdec components for the quiver arrows
    :param radiant: optional (ra, dec) pair marked with a star symbol
    :param path: output image path
    :param title: plot title
    :return: None (figure is written to disk)
    """
    if radiant is not None:
        plt.scatter(radiant[0], radiant[1], lw=0, s=15, c='black', marker='*')
    plt.scatter(ra, dec, lw=0, c='black', s=4)
    plt.quiver(ra, dec, pmra_pred, pmdec_pred,
               pivot='tail', scale=QUIVER_SCALE, color='green', width=QUIVER_WIDTH)
    # annotate graph
    plt.xlabel('RA [deg]')
    plt.ylabel('DEC [deg]')
    plt.title(title)
    plt.xlim((0, 360))
    plt.ylim((-90, 90))
    # save graph
    plt.tight_layout()
    plt.savefig(path, dpi=250)
    plt.close()
def plot_members_location_velocity(gaia, rv=None, rv_ref=None, idx=None, radiant=None,
                                   path='members.png', title=''):
    """
    Plot sky positions of (selected) Gaia stars with radial-velocity arrows
    (measured in green, reference in red) and save the figure to *path*.

    :param gaia: astropy-Table-like object with 'ra_gaia'/'dec_gaia' columns
    :param rv: optional radial velocities aligned with *gaia* rows; when None,
        the table's 'RV' column is used if present
    :param rv_ref: optional reference radial velocities aligned with *gaia*
    :param idx: optional boolean mask selecting rows; all rows when None
    :param radiant: optional (ra, dec) pair marked with a star symbol
    :param path: output image path
    :param title: plot title
    :return: None (figure is written to disk)
    """
    if idx is None:
        # use all datarows
        # BUG FIX: np.ndarray(len(gaia)) is uninitialized float storage and
        # cannot serve as a boolean mask; build an all-True bool mask instead.
        idx = np.ones(len(gaia), dtype=bool)
    use_gaia_data = gaia[idx]
    # plot location of the stars
    if radiant is not None:
        plt.scatter(radiant[0], radiant[1], lw=0, s=25, c='black', marker='*')
    plt.scatter(use_gaia_data['ra_gaia'], use_gaia_data['dec_gaia'], lw=0, c='black', s=4)
    gaia_features = gaia.colnames
    # plot rv vectors, slightly offset in declination so both sets are visible
    dec_offset = -0.2
    # Replaces the fragile `'rv_plot' in locals()` check with an explicit flag.
    rv_plot = None
    if rv is None and 'RV' in gaia_features:
        rv_plot = use_gaia_data['RV']
    elif rv is not None:
        rv_plot = rv[idx]
    if rv_plot is not None:
        plt.quiver(use_gaia_data['ra_gaia'], use_gaia_data['dec_gaia']-dec_offset, rv_plot, 0.,
                   pivot='tail', scale=QUIVER_SCALE, color='green', width=QUIVER_WIDTH)
    if rv_ref is not None:
        plt.quiver(use_gaia_data['ra_gaia'], use_gaia_data['dec_gaia']+dec_offset, rv_ref[idx], 0.,
                   pivot='tail', scale=QUIVER_SCALE, color='red', width=QUIVER_WIDTH)
    # annotate graph
    plt.xlabel('RA [deg]')
    plt.ylabel('DEC [deg]')
    plt.title(title)
    plt.xlim((0, 360))
    plt.ylim((-90, 90))
    # save graph
    plt.tight_layout()
    plt.savefig(path, dpi=250)
    plt.close()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 02:02:15 2020
@author: Sujay J
"""
# Train and evaluate a Multinomial Naive Bayes SMS spam classifier on the
# SMSSpamCollection dataset using a bag-of-words representation.
import re  # regular expressions

import nltk
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB

# Raw string avoids the invalid "\S" escape in the Windows-style path.
messages = pd.read_csv(r"spam Classifier\SMSSpamCollection", sep='\t',
                       names=["labels", "message"])

ps = PorterStemmer()
wordnet = WordNetLemmatizer()  # kept for parity with the original; unused below

# Hoisted out of the loop: the original rebuilt this set for every message.
stop_words = set(stopwords.words('english'))

# Normalize each message: letters only, lowercase, stop-word removal, stemming.
corpus = []
for i in range(len(messages)):
    review = re.sub('[^a-zA-Z]', ' ', messages['message'][i])
    review = review.lower()
    review = review.split()
    review = [ps.stem(word) for word in review if word not in stop_words]
    review = ' '.join(review)
    corpus.append(review)

# Bag-of-words features limited to the 5000 most frequent terms.
cv = CountVectorizer(max_features=5000)
X = cv.fit_transform(corpus).toarray()
# Binary target: second dummy column (spam = 1).
y = pd.get_dummies(messages['labels'])
y = y.iloc[:, 1].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

model = MultinomialNB().fit(X_train, y_train)
y_pred = model.predict(X_test)

confu = confusion_matrix(y_test, y_pred)
accu = accuracy_score(y_test, y_pred)
"""
This tests various types of arguments to make sure they are properly copied
by the namespace. This includes objects with circular references which need to
be carefully copied by NamespaceAPIFunctionWrapper._copy().
"""
import namespace
def foo(testarg):
    """Identity target function: hand back the argument unchanged so the
    namespace wrapper's copying behaviour can be inspected on the result."""
    return testarg
def noop(*args, **kwargs):
    """Do-nothing checking function accepted by the namespace wrapper."""
    return None
# Wrapper configuration: foo is the wrapped target; arg/return checks are no-ops.
foo_func_dict = {
    'target_func' : foo,
    'arg_checking_func' : noop,
    'return_checking_func' : noop,
}

foo_wrapper_obj = namespace.NamespaceAPIFunctionWrapper(foo_func_dict)
wrapped_foo = foo_wrapper_obj.wrapped_function

# Set (no circular references, don't think that's possible).
myfrozenset = frozenset() # frozensets are immutable and so will not be copied.
myset = set([myfrozenset, 2])
retval = wrapped_foo(myset)
# Mutable set must come back as a copy; its immutable members may be shared.
assert(retval is not myset)
assert(myfrozenset in retval) # That's not an identity check.
assert(2 in retval)

# Frozenset (no circular references, don't think that's possible).
myfrozenset = frozenset() # frozensets are immutable and so will not be copied.
retval = wrapped_foo(myfrozenset)
assert(retval is myfrozenset)

# List with circular references.
circlist = []
circlist.append(circlist)
retval = wrapped_foo(circlist)
# Make sure that the retval is not the original argument and that the retval
# has circular references like the original argument.
assert(retval is not circlist)
assert(retval[0] is retval)

# Dict with circular references.
circdict = {}
circdict["test"] = circdict
retval = wrapped_foo(circdict)
# Make sure that the retval is not the original argument and that the retval
# has circular references like the original argument.
assert(retval is not circdict)
assert(retval["test"] is retval)

# Tuple with circular references. Note that I don't believe it's possible to
# have a tuple with directly circular references, but instead only indirectly
# through other objects in the tuple such as other lists and dicts.
mydict = {}
circtuple = (mydict,)
mydict["test"] = circtuple
retval = wrapped_foo(circtuple)
# Make sure that the retval is not the original argument and that the retval
# has circular references like the original argument.
assert(retval is not circtuple)
assert(retval[0]["test"] is retval)
|
class HtmlParser:
    """
    @param: content: content source code
    @return: a list of links
    """
    def parseUrls(self, content):
        """Extract href targets from *content*, skipping empty values and
        pure fragment links ('#...'). Matching is case-insensitive."""
        import re
        matches = re.findall(r'href\s*=\s*("|\')+([^"\'>\s]*)', content, re.I)
        urls = []
        for _quote, target in matches:
            if target and not target.startswith('#'):
                urls.append(target)
        return urls
import random
import json
import os
import re
import socket
import sqlite3
import uuid
from dataclasses import dataclass
from datetime import datetime
from urllib.request import urlopen
from pathlib import Path
from speedtest import Speedtest
CURDIR = Path(__file__).parent
@dataclass
class Vendor(object):
    '''Contains vendor info from mac-address.

    NOTE: Run `download.py` to update OUI database from time to time

    Usage examples:

    >>> vendor = Vendor()
    >>> vendor.oui = 'e09467'
    >>> print (vendor.address_list()[-1])
    >>> # prints 2-signs country code or None

    or

    >>> vendor = Vendor('e09467')
    >>> print (vendor.name())
    >>> # prints vendor for mac-address if found or None

    or

    >>> vendor = Vendor()
    >>> print (vendor.name())
    >>> # prints vendor for mac-address in current system or None
    '''
    # First six hex digits (the OUI) of this machine's MAC address by default.
    oui: str = hex(uuid.getnode())[2:8]
    # Equivalent to the module-level `Path(CURDIR) / 'oui.db'`.
    dbname: str = Path(__file__).parent / 'oui.db'

    def __post_init__(self):
        self.con = sqlite3.connect(self.dbname)

    def _sqlite(self):
        '''Return the matching `vendors` row, or a row of Nones when absent.'''
        cur = self.con.cursor()
        query = cur.execute('SELECT * FROM vendors WHERE vendors.base16 = ?', (self.oui.upper(),))
        answer = query.fetchall()
        # BUG FIX: the fallback must be as wide as the vendors table (6
        # columns) — accessors below index up to [5], which raised IndexError
        # on a miss with the original 5-element tuple.
        return answer[0] if answer else (None, None, None, None, None, None)

    def name(self) -> str:
        return self._sqlite()[4]  # Or [3], it's the same

    def address(self) -> str:
        return self._sqlite()[5]

    def address_line(self, repl=', ') -> str:
        '''Address as a single line, newlines replaced by *repl*.'''
        n = self._sqlite()
        return n[5].replace('\n', repl) if n[5] else None

    def address_list(self) -> list:
        '''Address split into its lines (last entry is the country code).'''
        n = self._sqlite()
        # BUG FIX: the original called the tuple (`n()[5]`), raising TypeError.
        return n[5].split('\n') if n[5] else None

    def everything(self) -> list:
        return self._sqlite()

    def oui_hex(self) -> str:
        return self._sqlite()[1]

    def oui_base16(self) -> str:
        return self._sqlite()[2]

    def __del__(self):
        self.con.close()
        del self.con
@dataclass
class Host(object):
    '''Contains info about host in local network and from outside.

    ip_remote:      str ip address from outside
    ip_local:       str ip address in local network
    name_remote:    str host name from outside
    name_local:     str host name in local network
    city:           str city name for public ip address
    region:         str the same but region
    country:        str the same but country
    timezone:       str the same but timezone
    location:       list latitude and longitude for public ip address
    organization:   str the public ip owner
    postal:         str postal code of organization above

    NOTE: all defaults below are computed at class-definition (import) time,
    which performs live HTTP requests; every instance shares that snapshot.

    Usage examples:
    >>> print(Host().location)

    >>> host = Host()
    >>> print(host.ip_local)
    '''
    # Class-body helper (no `self`): called once, right below, to fill defaults.
    def _get_ip():
        '''Get IP address from http://dydns.com for `class Host()`'''
        # Third-party website dependency, be careful
        html = urlopen('http://checkip.dyndns.com/').read().decode('utf-8')
        # NOTE(review): dots in this pattern are unescaped, so they match any
        # character; loose but works for the dyndns response — confirm.
        return re.findall(r'\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}', html)[0]

    ip_remote: str = _get_ip()

    def _get_ip_info(ip_remote):
        '''Get info for public IP from http://ipinfo.io for `class Host()`'''
        # Third-party website dependency, be careful
        info = urlopen('http://ipinfo.io/{}/json'.format(ip_remote)
                       ).read().decode('utf-8')
        return json.loads(info)

    ip_info = _get_ip_info(ip_remote)

    name_local: str = socket.gethostname()
    ip_local: str = socket.gethostbyname(socket.gethostname())
    name_remote: str = ip_info['hostname']
    city: str = ip_info['city']
    region: str = ip_info['region']
    country: str = ip_info['country']
    location = ip_info['loc'].split(',')
    org: str = ip_info['org']
    postal: str = ip_info['postal']
    timezone: str = ip_info['timezone']
def generate_sql(update_values: dict) -> str:
    '''Build the CREATE TABLE statement for the statistics `sessions` table,
    deriving each column's SQLite type from the sample value's Python type.'''
    # Matching data types Python -> SQLite3
    sql_types = {
        str: "TEXT",
        float: "REAL",
        bool: "INTEGER",
        int: "INTEGER"
    }
    column_defs = []
    for column, sample in update_values.items():
        column_defs.append('{:<15} {} NOT NULL'.format(column, sql_types[type(sample)]))
    body = ',\n\t'.join(column_defs)
    return ('CREATE TABLE IF NOT EXISTS sessions\n\t'
            '(\n\tid INTEGER PRIMARY KEY AUTOINCREMENT,\n\t' + body + '\n\t);')
def save_statistics(stats: dict, best: bool, dbname: str = 'speedtest.db') -> bool:
    '''Save `Speedtest()` and other statistics to sqlite3 `dbname` database.

    For detailed fields description see `sqlite_strusture.md` document at:
    https://github.com/trankov/speedtest-statistics/blob/main/sqlite_strusture.md

    :param stats: a `Speedtest().results.dict()` payload
    :param best: True when the measurement used the closest ("best") server
    :param dbname: database file name, created next to this script
    :return: True on success (the original was annotated `-> bool` but
        implicitly returned None)
    '''
    # Collecting info about related context.
    # NOTE: both constructors touch external resources (sqlite file / network).
    vendor: object = Vendor()
    host: object = Host()
    # Get MAC Address in 1A:2B:3C:4D:5E:6F format
    mc_hex: str = hex(uuid.getnode())[2:].upper()
    macaddress: str = ":".join(mc_hex[i:i+2] for i in range(0, len(mc_hex), 2))
    # Description of the data to be stored.
    # Explicit type conversion is used because all data comes anyhow.
    update_values = {
        'best': bool(best),
        'download': float(stats['download']),
        'upload': float(stats['upload']),
        'ping': float(stats['ping']),
        'url': str(stats['server']['url']),
        'test_lat': float(stats['server']['lat']),
        'test_lon': float(stats['server']['lon']),
        'test_name': str(stats['server']['name']),
        'test_country': str(stats['server']['country']),
        'test_cc': str(stats['server']['cc']),
        'sponsor': str(stats['server']['sponsor']),
        'test_id': int(stats['server']['id']),
        'test_host': str(stats['server']['host']),
        'd': float(stats['server']['d']),
        'latency': float(stats['server']['latency']),
        'timestamp': str(stats['timestamp']),
        'bytes_sent': int(stats['bytes_sent']),
        'bytes_received': int(stats['bytes_received']),
        'client_ip': str(stats['client']['ip']),
        'client_lat': float(stats['client']['lat']),
        'client_lon': float(stats['client']['lon']),
        'client_isp': str(stats['client']['isp']),
        'client_country': str(stats['client']['country']),
        'ip_remote': str(host.ip_remote),
        'ip_local': str(host.ip_local),
        'name_remote': str(host.name_remote),
        'name_local': str(host.name_local),
        'city': str(host.city),
        'region': str(host.region),
        'country': str(host.country),
        'timezone': str(host.timezone),
        'public_ip_lat': float(host.location[0]),
        'public_ip_lon': float(host.location[1]),
        'organization': str(host.org),
        'postal': str(host.postal),
        'vendor': str(vendor.name()),
        'oui_hex': str(vendor.oui_hex()),
        'oui_base16': str(vendor.oui_base16()),
        'vendor_address': str(vendor.address()),
        'mac_address': str(macaddress),
        'unixtime': float(datetime.now().timestamp())
    }
    sqlite3.paramstyle = 'named'  # for better matching and security reasons
    con = sqlite3.connect(Path(CURDIR) / dbname)
    try:
        cur = con.cursor()
        # Create database file and table if they don't exist
        sql_table = generate_sql(update_values)
        cur.execute(sql_table)
        # Save statistics to database.sessions table
        sql_request = '''INSERT INTO sessions ({}) VALUES ({})'''.format(
            ', '.join(update_values),
            ', '.join(f':{i}' for i in update_values)
        )
        cur.execute(sql_request, update_values)
        con.commit()
    finally:
        # BUG FIX: close the connection even when the insert raises
        # (the original leaked the handle on any sqlite error).
        con.close()
    return True  # BUG FIX: honour the declared `-> bool` return type
class ProgressLine(object):
    '''Console progress bar driven by speedtest's multithreaded callbacks.

    Cells are single ANSI-coloured spaces: red = pending, yellow = request
    started, green = request finished.
    '''
    def __init__(self):
        self.status = {
            'start': '\033[48;5;227m \033[0m',  # yellow space
            'end': '\033[0;0;42m \033[0m',      # green space
            'empty': '\033[48;5;160m \033[0m'   # red space
        }
        self._line = []

    @property
    def line(self):
        return self._line

    @line.setter
    def line(self, value: int):
        '''Just in case. Unnessessary but usefull.'''
        if not self._line:
            self._line = [self.status['empty']] * value
        return self._line

    def update(self, idx, count, **kwargs):
        '''Marks cells in progress bar.

        Actually callback format is not documented and don't mentioned
        in official documentation, but nevertheless it is available for use.
        Callback getting tuple `(int, int)` and one positional argument,
        which can be `start = True` or `end = True`. First int valued a current
        request, second int contains total number of requests (always the same).
        '''
        if not self._line:
            # Lazily size the bar on the first callback.
            self._line = [self.status['empty']] * count
        state = next(iter(kwargs))
        self._line[idx] = self.status[state]
        self.show()

    def show(self):
        # Redraw in place: carriage return, no trailing newline.
        print('\r' + str(self), end='')

    def reset(self):
        self._line = []
        print()

    def __str__(self):
        return ''.join(self._line)

    def __repr__(self):
        return ''.join(self._line)
def print_res(results: dict):
'''Puts out formatted report about the speed test'''
print(' Download: \033[4m{} Mbit/s\033[0m, Upload: \033[4m{} Mbit/s\033[0m, Ping: \033[4m{} ms\033[0m.\n Provider: \033[38;5;198m{}, {}, {}\033[0m\n'\
.format(
*[round(results[i] / 1048576, 2) for i in ['download', 'upload', 'ping']],
results['server']['sponsor'],
results['server']['name'],
results['server']['country']
)
)
if __name__ == '__main__':
    # Run a full measurement session: once against the closest ("best")
    # server and once against a random server, then persist both results.
    # Initialization of progress bar
    line = ProgressLine()
    # Get the list of optional servers for further testing.
    speedtest = Speedtest()
    srv = speedtest.get_servers()
    # Put out the main header
    print('\n\033[48;5;197m\033[38;5;220m',
          'Start Speedtest...'.center(65),
          '\033[0;0;m\n',
          sep=''
          )
    # test at closest ("best") server
    speedtest.get_best_server()
    # Testing download
    print(' \N{LONG RIGHTWARDS ARROW} Test download speed from the best server...')
    speedtest.download(callback=line.update)
    line.reset()
    # Testing upload
    print(' \N{LONG LEFTWARDS ARROW} Test upload speed at the best server...')
    speedtest.upload(callback=line.update)
    line.reset()
    # Memorize the results for database, then show them
    res_best = speedtest.results.dict()
    print('\n\033[38;5;51mTest speed at best server finished.\033[0m')
    print_res(res_best)
    # test at random server
    # object have to be reinitialized for changing the test server
    # due to the peculiarities of the official library
    del speedtest
    speedtest = Speedtest()
    # Now get the random server id
    rand_idx = random.choice(list(srv.keys()))
    # get_servers() may map a key to either a list of servers or one server.
    if isinstance(srv[rand_idx], list):
        srv_id = srv[rand_idx][0]['id']
    else:
        srv_id = srv[rand_idx]['id']
    speedtest.get_servers([srv_id])
    # Testing download
    print(' \N{LONG RIGHTWARDS ARROW} Test download speed from the random server...')
    speedtest.download(callback=line.update)
    line.reset()
    # Testing upload
    print(' \N{LONG LEFTWARDS ARROW} Test upload speed at the random server...')
    speedtest.upload(callback=line.update)
    line.reset()
    res_rand = speedtest.results.dict()
    print('\n\033[38;5;51mTest speed at random server finished.\033[0m')
    print_res(res_rand)
    # Persist both sessions to the sqlite statistics database.
    print('Save statistics...')
    save_statistics(stats=res_best, best=True)
    save_statistics(stats=res_rand, best=False)
    print('Statistics saved.')
    print('_'*65)
    # END
|
from main import scale, scale1
# 4x4 character grid used as the benchmark input.
a = "abcd\nefgh\nijkl\nmnop"
# Expected result: each character doubled horizontally, each row tripled.
r = "aabbccdd\naabbccdd\naabbccdd\neeffgghh\neeffgghh\neeffgghh\niijjkkll\niijjkkll\niijjkkll\nmmnnoopp\nmmnnoopp\nmmnnoopp"

def test1(benchmark):
    # pytest-benchmark fixture: times scale() and verifies the scaled grid.
    assert benchmark(scale, a, 2, 3) == r

def test(benchmark):
    # Same check for the alternative implementation scale1().
    assert benchmark(scale1, a, 2, 3) == r
# py.test
|
class ConnectionError(Exception):
    """Base error for failed HTTP interactions; carries the raw response.

    NOTE(review): the name shadows the Python 3 builtin ConnectionError, but
    renaming would break the subclasses below, so it is kept.
    """
    def __init__(self, response, content=None, message=None):
        self.response = response
        self.content = content
        self.message = message

    def __str__(self):
        # Assemble the description from whatever the response object exposes.
        parts = ["Failed."]
        if hasattr(self.response, 'status_code'):
            parts.append(" Response status: %s." % (self.response.status_code))
        if hasattr(self.response, 'reason'):
            parts.append(" Response message: %s." % (self.response.reason))
        if hasattr(self.response, 'request'):
            parts.append(" Request URL: %s." % self.response.request.url)
        if self.content is not None:
            parts.append(" Error message: " + str(self.content))
        return "".join(parts)
class Redirection(ConnectionError):
    """3xx Redirection — appends the redirect target to the base message."""
    def __str__(self):
        message = super(Redirection, self).__str__()
        location = self.response.get('Location')
        if location:
            message = "%s => %s" % (message, location)
        return message
class MissingParam(TypeError):
    """A required call parameter was not supplied."""
    pass

class MissingConfig(Exception):
    """Required client configuration is absent."""
    pass

class ClientError(ConnectionError):
    """4xx Client Error
    """
    pass

class BadRequest(ClientError):
    """400 Bad Request
    """
    pass

class UnauthorizedAccess(ClientError):
    """401 Unauthorized
    """
    pass

class ForbiddenAccess(ClientError):
    """403 Forbidden
    """
    pass

class ResourceNotFound(ClientError):
    """404 Not Found
    """
    pass

class ResourceConflict(ClientError):
    """409 Conflict
    """
    pass

class ResourceGone(ClientError):
    """410 Gone
    """
    pass

class PreconditionFailed(ClientError):
    """412 Precondition Failed"""
    pass

class RequestEntityTooLarge(ClientError):
    """413 Request Entity Too Large"""
    pass

class ResourceInvalid(ClientError):
    """422 Invalid
    """
    pass

class ServerError(ConnectionError):
    """5xx Server Error
    """
    pass

class MethodNotAllowed(ClientError):
    """405 Method Not Allowed
    """
    def allowed_methods(self):
        # The response's Allow header lists the methods the server accepts.
        return self.response['Allow']
# HTTP status code -> specific exception class; codes not listed here fall
# back to the ClientError/ServerError ranges in exception_for_status().
_exception_map = {
    301: Redirection,
    302: Redirection,
    303: Redirection,
    307: Redirection,
    400: BadRequest,
    401: UnauthorizedAccess,
    403: ForbiddenAccess,
    404: ResourceNotFound,
    405: MethodNotAllowed,
    409: ResourceConflict,
    410: ResourceGone,
    412: PreconditionFailed,
    413: RequestEntityTooLarge,
    422: ResourceInvalid,
}
def exception_for_status(status_code):
    """Returns the exception class for the given status code.

    Falls back to ClientError for unmapped 4xx codes, ServerError for
    unmapped 5xx codes, and None otherwise.
    """
    specific = _exception_map.get(status_code)
    if specific is not None:
        return specific
    if 400 <= status_code <= 499:
        return ClientError
    if 500 <= status_code <= 599:
        return ServerError
    return None
|
import unittest
from cgi import FieldStorage, MiniFieldStorage
from roundup.cgi.templating import *
from test_actions import MockNull, true
class MockDatabase(MockNull):
    # Minimal hyperdb stand-in: resolve class names via the `classes` mapping.
    def getclass(self, name):
        return self.classes[name]
class TemplatingTestCase(unittest.TestCase):
    """Common fixture: a mock client/db pair with all permissions granted."""
    def setUp(self):
        self.form = FieldStorage()
        self.client = MockNull()
        self.client.db = db = MockDatabase()
        # Grant every permission so templating code never short-circuits.
        db.security.hasPermission = lambda *args, **kw: True
        self.client.form = self.form
class HTMLDatabaseTestCase(TemplatingTestCase):
    """Item- and attribute-style access on HTMLDatabase."""
    def test_HTMLDatabase___getitem__(self):
        db = HTMLDatabase(self.client)
        self.assert_(isinstance(db['issue'], HTMLClass))
        # following assertions are invalid
        # since roundup/cgi/templating.py r1.173.
        # HTMLItem is function, not class,
        # but HTMLUserClass and HTMLUser are passed on.
        # these classes are no more. they have ceased to be.
        #self.assert_(isinstance(db['user'], HTMLUserClass))
        #self.assert_(isinstance(db['issue1'], HTMLItem))
        #self.assert_(isinstance(db['user1'], HTMLUser))

    def test_HTMLDatabase___getattr__(self):
        db = HTMLDatabase(self.client)
        self.assert_(isinstance(db.issue, HTMLClass))
        # see comment in test_HTMLDatabase___getitem__
        #self.assert_(isinstance(db.user, HTMLUserClass))
        #self.assert_(isinstance(db.issue1, HTMLItem))
        #self.assert_(isinstance(db.user1, HTMLUser))

    def test_HTMLDatabase_classes(self):
        # Smoke test: classes() must not raise with a populated class map.
        db = HTMLDatabase(self.client)
        db._db.classes = {'issue':MockNull(), 'user': MockNull()}
        db.classes()
class FunctionsTestCase(TemplatingTestCase):
    """Tests for the module-level lookupIds/lookupKeys helpers."""
    def test_lookupIds(self):
        db = HTMLDatabase(self.client)
        def lookup(key):
            if key == 'ok':
                return '1'
            if key == 'fail':
                # BUG FIX: `raise KeyError, 'fail'` is Python-2-only syntax;
                # the parenthesized form is valid in both Python 2 and 3.
                raise KeyError('fail')
            return key
        db._db.classes = {'issue': MockNull(lookup=lookup)}
        prop = MockNull(classname='issue')
        # Plain ids pass through; names are resolved; failed lookups are kept
        # when fail_ok is set and silently dropped otherwise.
        self.assertEqual(lookupIds(db._db, prop, ['1','2']), ['1','2'])
        self.assertEqual(lookupIds(db._db, prop, ['ok','2']), ['1','2'])
        self.assertEqual(lookupIds(db._db, prop, ['ok', 'fail'], 1),
            ['1', 'fail'])
        self.assertEqual(lookupIds(db._db, prop, ['ok', 'fail']), ['1'])

    def test_lookupKeys(self):
        db = HTMLDatabase(self.client)
        def get(entry, key):
            return {'1': 'green', '2': 'eggs'}.get(entry, entry)
        shrubbery = MockNull(get=get)
        db._db.classes = {'shrubbery': shrubbery}
        # Known ids map to their key values; unknown entries pass through.
        self.assertEqual(lookupKeys(shrubbery, 'spam', ['1','2']),
            ['green', 'eggs'])
        self.assertEqual(lookupKeys(shrubbery, 'spam', ['ok','2']), ['ok',
            'eggs'])
class HTMLClassTestCase(TemplatingTestCase) :

    def test_link(self):
        """Make sure lookup of a Link property works even in the
        presence of multiple values in the form."""
        def lookup(key) :
            # lookup must receive keys with surrounding whitespace stripped
            self.assertEqual(key, key.strip())
            return "Status%s"%key
        # Two form values for the same Link property.
        self.form.list.append(MiniFieldStorage("status", "1"))
        self.form.list.append(MiniFieldStorage("status", "2"))
        status = hyperdb.Link("status")
        self.client.db.classes = dict \
            ( issue = MockNull(getprops = lambda : dict(status = status))
            , status = MockNull(get = lambda id, name : id, lookup = lookup)
            )
        cls = HTMLClass(self.client, "issue")
        cls["status"]
    def test_multilink(self):
        """`lookup` of an item will fail if leading or trailing whitespace
        has not been stripped.
        """
        def lookup(key) :
            # lookup must receive keys with surrounding whitespace stripped
            self.assertEqual(key, key.strip())
            return "User%s"%key
        # A comma-separated Multilink value with embedded whitespace.
        self.form.list.append(MiniFieldStorage("nosy", "1, 2"))
        nosy = hyperdb.Multilink("user")
        self.client.db.classes = dict \
            ( issue = MockNull(getprops = lambda : dict(nosy = nosy))
            , user = MockNull(get = lambda id, name : id, lookup = lookup)
            )
        cls = HTMLClass(self.client, "issue")
        cls["nosy"]
    def test_url_match(self):
        '''Test the URL regular expression in StringHTMLProperty.
        '''
        def t(s, nothing=False, **groups):
            # Helper: assert that `s` matches (with the given named groups)
            # or, when nothing=True, does not match at all.
            m = StringHTMLProperty.hyper_re.search(s)
            if nothing:
                if m:
                    self.assertEquals(m, None, '%r matched (%r)'%(s, m.groupdict()))
                return
            else:
                self.assertNotEquals(m, None, '%r did not match'%s)
                d = m.groupdict()
                for g in groups:
                    self.assertEquals(d[g], groups[g], '%s %r != %r in %r'%(g, d[g],
                        groups[g], s))

        #t('123.321.123.321', 'url')
        t('http://localhost/', url='http://localhost/')
        t('http://roundup.net/', url='http://roundup.net/')
        t('http://richard@localhost/', url='http://richard@localhost/')
        t('http://richard:sekrit@localhost/',
            url='http://richard:sekrit@localhost/')
        t('<HTTP://roundup.net/>', url='HTTP://roundup.net/')
        t('www.a.ex', url='www.a.ex')
        t('foo.a.ex', nothing=True)
        t('StDevValidTimeSeries.GetObservation', nothing=True)
        t('http://a.ex', url='http://a.ex')
        t('http://a.ex/?foo&bar=baz\\.@!$%()qwerty',
            url='http://a.ex/?foo&bar=baz\\.@!$%()qwerty')
        t('www.foo.net', url='www.foo.net')
        t('richard@com.example', email='richard@com.example')
        t('r@a.com', email='r@a.com')
        # designators such as i1 / item123 match class+id groups
        t('i1', **{'class':'i', 'id':'1'})
        t('item123', **{'class':'item', 'id':'123'})
        t('www.user:pass@host.net', email='pass@host.net')
        t('user:pass@www.host.net', url='user:pass@www.host.net')
        t('123.35', nothing=True)
        t('-.3535', nothing=True)
    def test_url_replace(self):
        # Verify that _hyper_repl turns URLs into anchor tags while leaving
        # surrounding punctuation/brackets outside the link.
        p = StringHTMLProperty(self.client, 'test', '1', None, 'test', '')
        def t(s): return p.hyper_re.sub(p._hyper_repl, s)
        ae = self.assertEqual
        ae(t('item123123123123'), 'item123123123123')
        ae(t('http://roundup.net/'),
           '<a href="http://roundup.net/">http://roundup.net/</a>')
        ae(t('<HTTP://roundup.net/>'),
           '<<a href="HTTP://roundup.net/">HTTP://roundup.net/</a>>')
        ae(t('<http://roundup.net/>.'),
           '<<a href="http://roundup.net/">http://roundup.net/</a>>.')
        ae(t('<www.roundup.net>'),
           '<<a href="http://www.roundup.net">www.roundup.net</a>>')
        ae(t('(www.roundup.net)'),
           '(<a href="http://www.roundup.net">www.roundup.net</a>)')
        # balanced parentheses inside the URL itself stay part of the link
        ae(t('foo http://msdn.microsoft.com/en-us/library/ms741540(VS.85).aspx bar'),
           'foo <a href="http://msdn.microsoft.com/en-us/library/ms741540(VS.85).aspx">'
           'http://msdn.microsoft.com/en-us/library/ms741540(VS.85).aspx</a> bar')
        ae(t('(e.g. http://en.wikipedia.org/wiki/Python_(programming_language))'),
           '(e.g. <a href="http://en.wikipedia.org/wiki/Python_(programming_language)">'
           'http://en.wikipedia.org/wiki/Python_(programming_language)</a>)')
        ae(t('(e.g. http://en.wikipedia.org/wiki/Python_(programming_language)).'),
           '(e.g. <a href="http://en.wikipedia.org/wiki/Python_(programming_language)">'
           'http://en.wikipedia.org/wiki/Python_(programming_language)</a>).')
        ae(t('(e.g. http://en.wikipedia.org/wiki/Python_(programming_language))>.'),
           '(e.g. <a href="http://en.wikipedia.org/wiki/Python_(programming_language)">'
           'http://en.wikipedia.org/wiki/Python_(programming_language)</a>)>.')
        ae(t('(e.g. http://en.wikipedia.org/wiki/Python_(programming_language>)).'),
           '(e.g. <a href="http://en.wikipedia.org/wiki/Python_(programming_language">'
           'http://en.wikipedia.org/wiki/Python_(programming_language</a>>)).')
        for c in '.,;:!':
            # trailing punctuation is not included
            ae(t('http://roundup.net/%c ' % c),
               '<a href="http://roundup.net/">http://roundup.net/</a>%c ' % c)
            # but it's included if it's part of the URL
            ae(t('http://roundup.net/%c/' % c),
               '<a href="http://roundup.net/%c/">http://roundup.net/%c/</a>' % (c, c))
'''
class HTMLPermissions:
def is_edit_ok(self):
def is_view_ok(self):
def is_only_view_ok(self):
def view_check(self):
def edit_check(self):
def input_html4(**attrs):
def input_xhtml(**attrs):
class HTMLInputMixin:
def __init__(self):
class HTMLClass(HTMLInputMixin, HTMLPermissions):
def __init__(self, client, classname, anonymous=0):
def __repr__(self):
def __getitem__(self, item):
def __getattr__(self, attr):
def designator(self):
def getItem(self, itemid, num_re=re.compile('-?\d+')):
def properties(self, sort=1):
def list(self, sort_on=None):
def csv(self):
def propnames(self):
def filter(self, request=None, filterspec={}, sort=(None,None),
def classhelp(self, properties=None, label='(list)', width='500',
def submit(self, label="Submit New Entry"):
def history(self):
def renderWith(self, name, **kwargs):
class HTMLItem(HTMLInputMixin, HTMLPermissions):
def __init__(self, client, classname, nodeid, anonymous=0):
def __repr__(self):
def __getitem__(self, item):
def __getattr__(self, attr):
def designator(self):
def is_retired(self):
def submit(self, label="Submit Changes"):
def journal(self, direction='descending'):
def history(self, direction='descending', dre=re.compile('\d+')):
def renderQueryForm(self):
class HTMLUserPermission:
def is_edit_ok(self):
def is_view_ok(self):
def _user_perm_check(self, type):
class HTMLUserClass(HTMLUserPermission, HTMLClass):
class HTMLUser(HTMLUserPermission, HTMLItem):
def __init__(self, client, classname, nodeid, anonymous=0):
def hasPermission(self, permission, classname=_marker):
class HTMLProperty(HTMLInputMixin, HTMLPermissions):
def __init__(self, client, classname, nodeid, prop, name, value,
def __repr__(self):
def __str__(self):
def __cmp__(self, other):
def is_edit_ok(self):
def is_view_ok(self):
class StringHTMLProperty(HTMLProperty):
def _hyper_repl(self, match):
def hyperlinked(self):
def plain(self, escape=0, hyperlink=0):
def stext(self, escape=0):
def field(self, size = 30):
def multiline(self, escape=0, rows=5, cols=40):
def email(self, escape=1):
class PasswordHTMLProperty(HTMLProperty):
def plain(self):
def field(self, size = 30):
def confirm(self, size = 30):
class NumberHTMLProperty(HTMLProperty):
def plain(self):
def field(self, size = 30):
def __int__(self):
def __float__(self):
class BooleanHTMLProperty(HTMLProperty):
def plain(self):
def field(self):
class DateHTMLProperty(HTMLProperty):
def plain(self):
def now(self):
def field(self, size = 30):
def reldate(self, pretty=1):
def pretty(self, format=_marker):
def local(self, offset):
class IntervalHTMLProperty(HTMLProperty):
def plain(self):
def pretty(self):
def field(self, size = 30):
class LinkHTMLProperty(HTMLProperty):
def __init__(self, *args, **kw):
def __getattr__(self, attr):
def plain(self, escape=0):
def field(self, showid=0, size=None):
def menu(self, size=None, height=None, showid=0, additional=[],
class MultilinkHTMLProperty(HTMLProperty):
def __init__(self, *args, **kwargs):
def __len__(self):
def __getattr__(self, attr):
def __getitem__(self, num):
def __contains__(self, value):
def reverse(self):
def plain(self, escape=0):
def field(self, size=30, showid=0):
def menu(self, size=None, height=None, showid=0, additional=[],
def make_sort_function(db, classname, sort_on=None):
def sortfunc(a, b):
def find_sort_key(linkcl):
def handleListCGIValue(value):
class ShowDict:
def __init__(self, columns):
def __getitem__(self, name):
class HTMLRequest(HTMLInputMixin):
def __init__(self, client):
def _post_init(self):
def updateFromURL(self, url):
def update(self, kwargs):
def description(self):
def __str__(self):
def indexargs_form(self, columns=1, sort=1, group=1, filter=1,
def indexargs_url(self, url, args):
def base_javascript(self):
def batch(self):
class Batch(ZTUtils.Batch):
def __init__(self, client, sequence, size, start, end=0, orphan=0,
def __getitem__(self, index):
def propchanged(self, property):
def previous(self):
def next(self):
class TemplatingUtils:
def __init__(self, client):
def Batch(self, sequence, size, start, end=0, orphan=0, overlap=0):
class NoTemplate(Exception):
class Unauthorised(Exception):
def __init__(self, action, klass):
def __str__(self):
class Loader:
def __init__(self, dir):
def precompileTemplates(self):
def load(self, name, extension=None):
def __getitem__(self, name):
class RoundupPageTemplate(PageTemplate.PageTemplate):
def getContext(self, client, classname, request):
def render(self, client, classname, request, **options):
def __repr__(self):
'''
# vim: set et sts=4 sw=4 :
|
# -*- coding: utf-8 -*-
# This work is licensed under the GNU Public License (GPL).
# To view a copy of this license, visit http://www.gnu.org/copyleft/gpl.html
# Written by Abdullah Diab (mpcabd)
# Email: mpcabd@gmail.com
# Website: http://mpcabd.xyz
# Ported and tweaked from Java to Python, from Better Arabic Reshaper
# [https://github.com/agawish/Better-Arabic-Reshaper/]
# Usage:
# Install python-bidi [https://github.com/MeirKriheli/python-bidi], can be
# installed from pip `pip install python-bidi`.
# import arabic_reshaper
# from bidi.algorithm import get_display
# reshaped_text = arabic_reshaper.reshape(u'ุงููุบุฉ ุงูุนุฑุจูุฉ ุฑุงุฆุนุฉ')
# bidi_text = get_display(reshaped_text)
# Now you can pass `bidi_text` to any function that handles
# displaying/printing of the text, like writing it to PIL Image or passing
# it to a PDF generating method.
import re
# Code points of the Alef variants and Lam that form the mandatory
# Lam-Alef ligature. (Spelling "ORGINAL" kept for compatibility with callers.)
DEFINED_CHARACTERS_ORGINAL_ALF_UPPER_MDD = u'\u0622'    # Alef with madda above
DEFINED_CHARACTERS_ORGINAL_ALF_UPPER_HAMAZA = u'\u0623' # Alef with hamza above
DEFINED_CHARACTERS_ORGINAL_ALF_LOWER_HAMAZA = u'\u0625' # Alef with hamza below
DEFINED_CHARACTERS_ORGINAL_ALF = u'\u0627'              # plain Alef
DEFINED_CHARACTERS_ORGINAL_LAM = u'\u0644'              # Lam
# Rows: [plain Alef form, ligature at shift 1, ligature at shift 2].
# get_lam_alef() indexes with shift_rate 1 (word-internal) or 2 (end of word);
# per the Unicode Arabic Presentation Forms-B chart index 1 is the FINAL form
# and index 2 the ISOLATED form of the ligature.
LAM_ALEF_GLYPHS = [
    [u'\u0622', u'\uFEF6', u'\uFEF5'],
    [u'\u0623', u'\uFEF8', u'\uFEF7'],
    [u'\u0627', u'\uFEFC', u'\uFEFB'],
    [u'\u0625', u'\uFEFA', u'\uFEF9']
]
HARAKAT = [
u'\u0600', u'\u0601', u'\u0602', u'\u0603', u'\u0606', u'\u0607', u'\u0608', u'\u0609',
u'\u060A', u'\u060B', u'\u060D', u'\u060E', u'\u0610', u'\u0611', u'\u0612', u'\u0613',
u'\u0614', u'\u0615', u'\u0616', u'\u0617', u'\u0618', u'\u0619', u'\u061A', u'\u061B',
u'\u061E', u'\u061F', u'\u0621', u'\u063B', u'\u063C', u'\u063D', u'\u063E', u'\u063F',
u'\u0640', u'\u064B', u'\u064C', u'\u064D', u'\u064E', u'\u064F', u'\u0650', u'\u0651',
u'\u0652', u'\u0653', u'\u0654', u'\u0655', u'\u0656', u'\u0657', u'\u0658', u'\u0659',
u'\u065A', u'\u065B', u'\u065C', u'\u065D', u'\u065E', u'\u0660', u'\u066A', u'\u066B',
u'\u066C', u'\u066F', u'\u0670', u'\u0672', u'\u06D4', u'\u06D5', u'\u06D6', u'\u06D7',
u'\u06D8', u'\u06D9', u'\u06DA', u'\u06DB', u'\u06DC', u'\u06DF', u'\u06E0', u'\u06E1',
u'\u06E2', u'\u06E3', u'\u06E4', u'\u06E5', u'\u06E6', u'\u06E7', u'\u06E8', u'\u06E9',
u'\u06EA', u'\u06EB', u'\u06EC', u'\u06ED', u'\u06EE', u'\u06EF', u'\u06D6', u'\u06D7',
u'\u06D8', u'\u06D9', u'\u06DA', u'\u06DB', u'\u06DC', u'\u06DD', u'\u06DE', u'\u06DF',
u'\u06F0', u'\u06FD', u'\uFE70', u'\uFE71', u'\uFE72', u'\uFE73', u'\uFE74', u'\uFE75',
u'\uFE76', u'\uFE77', u'\uFE78', u'\uFE79', u'\uFE7A', u'\uFE7B', u'\uFE7C', u'\uFE7D',
u'\uFE7E', u'\uFE7F', u'\uFC5E', u'\uFC5F', u'\uFC60', u'\uFC61', u'\uFC62', u'\uFC63'
]
ARABIC_GLYPHS = {
u'\u0622': [u'\u0622', u'\uFE81', u'\uFE81', u'\uFE82', u'\uFE82', 2],
u'\u0623': [u'\u0623', u'\uFE83', u'\uFE83', u'\uFE84', u'\uFE84', 2],
u'\u0624': [u'\u0624', u'\uFE85', u'\uFE85', u'\uFE86', u'\uFE86', 2],
u'\u0625': [u'\u0625', u'\uFE87', u'\uFE87', u'\uFE88', u'\uFE88', 2],
u'\u0626': [u'\u0626', u'\uFE89', u'\uFE8B', u'\uFE8C', u'\uFE8A', 4],
u'\u0627': [u'\u0627', u'\u0627', u'\u0627', u'\uFE8E', u'\uFE8E', 2],
u'\u0628': [u'\u0628', u'\uFE8F', u'\uFE91', u'\uFE92', u'\uFE90', 4],
u'\u0629': [u'\u0629', u'\uFE93', u'\uFE93', u'\uFE94', u'\uFE94', 2],
u'\u062A': [u'\u062A', u'\uFE95', u'\uFE97', u'\uFE98', u'\uFE96', 4],
u'\u062B': [u'\u062B', u'\uFE99', u'\uFE9B', u'\uFE9C', u'\uFE9A', 4],
u'\u062C': [u'\u062C', u'\uFE9D', u'\uFE9F', u'\uFEA0', u'\uFE9E', 4],
u'\u062D': [u'\u062D', u'\uFEA1', u'\uFEA3', u'\uFEA4', u'\uFEA2', 4],
u'\u062E': [u'\u062E', u'\uFEA5', u'\uFEA7', u'\uFEA8', u'\uFEA6', 4],
u'\u062F': [u'\u062F', u'\uFEA9', u'\uFEA9', u'\uFEAA', u'\uFEAA', 2],
u'\u0630': [u'\u0630', u'\uFEAB', u'\uFEAB', u'\uFEAC', u'\uFEAC', 2],
u'\u0631': [u'\u0631', u'\uFEAD', u'\uFEAD', u'\uFEAE', u'\uFEAE', 2],
u'\u0632': [u'\u0632', u'\uFEAF', u'\uFEAF', u'\uFEB0', u'\uFEB0', 2],
u'\u0633': [u'\u0633', u'\uFEB1', u'\uFEB3', u'\uFEB4', u'\uFEB2', 4],
u'\u0634': [u'\u0634', u'\uFEB5', u'\uFEB7', u'\uFEB8', u'\uFEB6', 4],
u'\u0635': [u'\u0635', u'\uFEB9', u'\uFEBB', u'\uFEBC', u'\uFEBA', 4],
u'\u0636': [u'\u0636', u'\uFEBD', u'\uFEBF', u'\uFEC0', u'\uFEBE', 4],
u'\u0637': [u'\u0637', u'\uFEC1', u'\uFEC3', u'\uFEC4', u'\uFEC2', 4],
u'\u0638': [u'\u0638', u'\uFEC5', u'\uFEC7', u'\uFEC8', u'\uFEC6', 4],
u'\u0639': [u'\u0639', u'\uFEC9', u'\uFECB', u'\uFECC', u'\uFECA', 4],
u'\u063A': [u'\u063A', u'\uFECD', u'\uFECF', u'\uFED0', u'\uFECE', 4],
u'\u0641': [u'\u0641', u'\uFED1', u'\uFED3', u'\uFED4', u'\uFED2', 4],
u'\u0642': [u'\u0642', u'\uFED5', u'\uFED7', u'\uFED8', u'\uFED6', 4],
u'\u0643': [u'\u0643', u'\uFED9', u'\uFEDB', u'\uFEDC', u'\uFEDA', 4],
u'\u0644': [u'\u0644', u'\uFEDD', u'\uFEDF', u'\uFEE0', u'\uFEDE', 4],
u'\u0645': [u'\u0645', u'\uFEE1', u'\uFEE3', u'\uFEE4', u'\uFEE2', 4],
u'\u0646': [u'\u0646', u'\uFEE5', u'\uFEE7', u'\uFEE8', u'\uFEE6', 4],
u'\u0647': [u'\u0647', u'\uFEE9', u'\uFEEB', u'\uFEEC', u'\uFEEA', 4],
u'\u0648': [u'\u0648', u'\uFEED', u'\uFEED', u'\uFEEE', u'\uFEEE', 2],
u'\u0649': [u'\u0649', u'\uFEEF', u'\uFEEF', u'\uFEF0', u'\uFEF0', 2],
u'\u0671': [u'\u0671', u'\u0671', u'\u0671', u'\uFB51', u'\uFB51', 2],
u'\u064A': [u'\u064A', u'\uFEF1', u'\uFEF3', u'\uFEF4', u'\uFEF2', 4],
u'\u066E': [u'\u066E', u'\uFBE4', u'\uFBE8', u'\uFBE9', u'\uFBE5', 4],
u'\u06AA': [u'\u06AA', u'\uFB8E', u'\uFB90', u'\uFB91', u'\uFB8F', 4],
u'\u06C1': [u'\u06C1', u'\uFBA6', u'\uFBA8', u'\uFBA9', u'\uFBA7', 4],
u'\u06E4': [u'\u06E4', u'\u06E4', u'\u06E4', u'\u06E4', u'\uFEEE', 2],
u'\u067E': [u'\u067E', u'\uFB56', u'\uFB58', u'\uFB59', u'\uFB57', 4],
u'\u0698': [u'\u0698', u'\uFB8A', u'\uFB8A', u'\uFB8A', u'\uFB8B', 2],
u'\u06AF': [u'\u06AF', u'\uFB92', u'\uFB94', u'\uFB95', u'\uFB93', 4],
u'\u0686': [u'\u0686', u'\uFB7A', u'\uFB7C', u'\uFB7D', u'\uFB7B', 4],
u'\u06A9': [u'\u06A9', u'\uFB8E', u'\uFB90', u'\uFB91', u'\uFB8F', 4],
u'\u06CC': [u'\u06CC', u'\uFEEF', u'\uFEF3', u'\uFEF4', u'\uFEF0', 4]
}
ARABIC_GLYPHS_LIST = [
[u'\u0622', u'\uFE81', u'\uFE81', u'\uFE82', u'\uFE82', 2],
[u'\u0623', u'\uFE83', u'\uFE83', u'\uFE84', u'\uFE84', 2],
[u'\u0624', u'\uFE85', u'\uFE85', u'\uFE86', u'\uFE86', 2],
[u'\u0625', u'\uFE87', u'\uFE87', u'\uFE88', u'\uFE88', 2],
[u'\u0626', u'\uFE89', u'\uFE8B', u'\uFE8C', u'\uFE8A', 4],
[u'\u0627', u'\u0627', u'\u0627', u'\uFE8E', u'\uFE8E', 2],
[u'\u0628', u'\uFE8F', u'\uFE91', u'\uFE92', u'\uFE90', 4],
[u'\u0629', u'\uFE93', u'\uFE93', u'\uFE94', u'\uFE94', 2],
[u'\u062A', u'\uFE95', u'\uFE97', u'\uFE98', u'\uFE96', 4],
[u'\u062B', u'\uFE99', u'\uFE9B', u'\uFE9C', u'\uFE9A', 4],
[u'\u062C', u'\uFE9D', u'\uFE9F', u'\uFEA0', u'\uFE9E', 4],
[u'\u062D', u'\uFEA1', u'\uFEA3', u'\uFEA4', u'\uFEA2', 4],
[u'\u062E', u'\uFEA5', u'\uFEA7', u'\uFEA8', u'\uFEA6', 4],
[u'\u062F', u'\uFEA9', u'\uFEA9', u'\uFEAA', u'\uFEAA', 2],
[u'\u0630', u'\uFEAB', u'\uFEAB', u'\uFEAC', u'\uFEAC', 2],
[u'\u0631', u'\uFEAD', u'\uFEAD', u'\uFEAE', u'\uFEAE', 2],
[u'\u0632', u'\uFEAF', u'\uFEAF', u'\uFEB0', u'\uFEB0', 2],
[u'\u0633', u'\uFEB1', u'\uFEB3', u'\uFEB4', u'\uFEB2', 4],
[u'\u0634', u'\uFEB5', u'\uFEB7', u'\uFEB8', u'\uFEB6', 4],
[u'\u0635', u'\uFEB9', u'\uFEBB', u'\uFEBC', u'\uFEBA', 4],
[u'\u0636', u'\uFEBD', u'\uFEBF', u'\uFEC0', u'\uFEBE', 4],
[u'\u0637', u'\uFEC1', u'\uFEC3', u'\uFEC4', u'\uFEC2', 4],
[u'\u0638', u'\uFEC5', u'\uFEC7', u'\uFEC8', u'\uFEC6', 4],
[u'\u0639', u'\uFEC9', u'\uFECB', u'\uFECC', u'\uFECA', 4],
[u'\u063A', u'\uFECD', u'\uFECF', u'\uFED0', u'\uFECE', 4],
[u'\u0641', u'\uFED1', u'\uFED3', u'\uFED4', u'\uFED2', 4],
[u'\u0642', u'\uFED5', u'\uFED7', u'\uFED8', u'\uFED6', 4],
[u'\u0643', u'\uFED9', u'\uFEDB', u'\uFEDC', u'\uFEDA', 4],
[u'\u0644', u'\uFEDD', u'\uFEDF', u'\uFEE0', u'\uFEDE', 4],
[u'\u0645', u'\uFEE1', u'\uFEE3', u'\uFEE4', u'\uFEE2', 4],
[u'\u0646', u'\uFEE5', u'\uFEE7', u'\uFEE8', u'\uFEE6', 4],
[u'\u0647', u'\uFEE9', u'\uFEEB', u'\uFEEC', u'\uFEEA', 4],
[u'\u0648', u'\uFEED', u'\uFEED', u'\uFEEE', u'\uFEEE', 2],
[u'\u0649', u'\uFEEF', u'\uFEEF', u'\uFEF0', u'\uFEF0', 2],
[u'\u0671', u'\u0671', u'\u0671', u'\uFB51', u'\uFB51', 2],
[u'\u064A', u'\uFEF1', u'\uFEF3', u'\uFEF4', u'\uFEF2', 4],
[u'\u066E', u'\uFBE4', u'\uFBE8', u'\uFBE9', u'\uFBE5', 4],
[u'\u06AA', u'\uFB8E', u'\uFB90', u'\uFB91', u'\uFB8F', 4],
[u'\u06C1', u'\uFBA6', u'\uFBA8', u'\uFBA9', u'\uFBA7', 4],
[u'\u067E', u'\uFB56', u'\uFB58', u'\uFB59', u'\uFB57', 4],
[u'\u0698', u'\uFB8A', u'\uFB8A', u'\uFB8A', u'\uFB8B', 2],
[u'\u06AF', u'\uFB92', u'\uFB94', u'\uFB95', u'\uFB93', 4],
[u'\u0686', u'\uFB7A', u'\uFB7C', u'\uFB7D', u'\uFB7B', 4],
[u'\u06A9', u'\uFB8E', u'\uFB90', u'\uFB91', u'\uFB8F', 4],
[u'\u06CC', u'\uFEEF', u'\uFEF3', u'\uFEF4', u'\uFEF0', 4]
]
def get_reshaped_glyph(target, location):
    """Return the contextual glyph of *target* at *location*.

    Locations index the ARABIC_GLYPHS row: 1=isolated, 2=initial,
    3=medial, 4=final. Characters without a shaping entry pass through
    unchanged.
    """
    try:
        return ARABIC_GLYPHS[target][location]
    except KeyError:
        return target
def get_glyph_type(target):
    """Return the connectivity class of *target*.

    4 means the letter connects to the following letter, 2 means it does
    not (entry index 5 of the shaping table). Characters outside the
    table behave like non-connecting letters (2).
    """
    return ARABIC_GLYPHS[target][5] if target in ARABIC_GLYPHS else 2
def is_haraka(target):
    """Return True when *target* is one of the HARAKAT diacritic marks."""
    found = target in HARAKAT
    return found
def replace_jalalah(unshaped_word):
    """Replace the whole word "Allah" (Alef-Lam-Lam-Heh) with its single
    ligature glyph U+FDF2; any other word is returned unchanged."""
    jalalah_pattern = u'^\u0627\u0644\u0644\u0647$'
    return re.sub(jalalah_pattern, u'\uFDF2', unshaped_word)
def replace_lam_alef(unshaped_word):
    """Replace every Lam + (harakat...) + Alef-variant sequence with the
    corresponding Lam-Alef ligature glyph.

    The harakat between the Lam and the Alef are dropped (replaced by a
    space placeholder that is stripped at the end). Whether the ligature's
    word-internal or end-of-word form is chosen depends on whether the
    letter preceding the Lam connects forward (glyph type > 2).
    """
    list_word = list(unshaped_word)
    letter_before = u''
    for i in range(len(unshaped_word)):
        # Track the most recent non-haraka, non-Lam letter: it decides
        # which ligature form to use for a subsequent Lam.
        if not is_haraka(unshaped_word[i]) and unshaped_word[i] != DEFINED_CHARACTERS_ORGINAL_LAM:
            letter_before = unshaped_word[i]
        if unshaped_word[i] == DEFINED_CHARACTERS_ORGINAL_LAM:
            candidate_lam = unshaped_word[i]
            lam_position = i
            # Skip harakat sitting between the Lam and the next real letter.
            haraka_position = i + 1
            while haraka_position < len(unshaped_word) and is_haraka(unshaped_word[haraka_position]):
                haraka_position += 1
            if haraka_position < len(unshaped_word):
                # A connecting letter before the Lam means the ligature is
                # word-internal (is_end_of_word=False); otherwise use the
                # isolated/end form.
                if lam_position > 0 and get_glyph_type(letter_before) > 2:
                    lam_alef = get_lam_alef(
                        list_word[haraka_position], candidate_lam, False)
                else:
                    lam_alef = get_lam_alef(
                        list_word[haraka_position], candidate_lam, True)
                if lam_alef != '':
                    # Put the ligature where the Lam was and blank the Alef;
                    # the placeholder space is removed below.
                    list_word[lam_position] = lam_alef
                    list_word[haraka_position] = u' '
    return u''.join(list_word).replace(u' ', u'')
def get_lam_alef(candidate_alef, candidate_lam, is_end_of_word):
    """Return the Lam-Alef ligature glyph for the given Lam/Alef pair.

    *is_end_of_word* selects between the two presentation forms (shift 2
    vs shift 1 in LAM_ALEF_GLYPHS). Returns u'' when the pair does not
    form a ligature.
    """
    if candidate_lam != DEFINED_CHARACTERS_ORGINAL_LAM:
        return u''
    shift_rate = 2 if is_end_of_word else 1
    row_by_alef = {
        DEFINED_CHARACTERS_ORGINAL_ALF_UPPER_MDD: 0,
        DEFINED_CHARACTERS_ORGINAL_ALF_UPPER_HAMAZA: 1,
        DEFINED_CHARACTERS_ORGINAL_ALF: 2,
        DEFINED_CHARACTERS_ORGINAL_ALF_LOWER_HAMAZA: 3,
    }
    row = row_by_alef.get(candidate_alef)
    if row is None:
        return u''
    return LAM_ALEF_GLYPHS[row][shift_rate]
class DecomposedWord(object):
    """Separate a word into harakat (diacritics) and regular letters,
    remembering each character's original index so the word can be
    reassembled after the letters are reshaped."""

    def __init__(self, word):
        self.stripped_harakat = []
        self.harakat_positions = []
        self.stripped_regular_letters = []
        self.letters_position = []
        for position, character in enumerate(word):
            if is_haraka(character):
                self.harakat_positions.append(position)
                self.stripped_harakat.append(character)
            else:
                self.letters_position.append(position)
                self.stripped_regular_letters.append(character)

    def reconstruct_word(self, reshaped_word):
        """Interleave *reshaped_word* (the shaped letters) with the stored
        harakat, restoring every character to its original position."""
        merged = [u'\0'] * (len(self.stripped_harakat) + len(reshaped_word))
        for index, position in enumerate(self.letters_position):
            merged[position] = reshaped_word[index]
        for index, position in enumerate(self.harakat_positions):
            merged[position] = self.stripped_harakat[index]
        return u''.join(merged)
def get_reshaped_word(unshaped_word):
    """Reshape one Arabic word.

    Applies the Jalalah and Lam-Alef ligatures, strips harakat, shapes
    the remaining letters contextually, then re-inserts the harakat at
    their original positions.
    """
    word = replace_lam_alef(replace_jalalah(unshaped_word))
    decomposed = DecomposedWord(word)
    shaped = u''
    if decomposed.stripped_regular_letters:
        shaped = reshape_it(u''.join(decomposed.stripped_regular_letters))
    return decomposed.reconstruct_word(shaped)
def reshape_it(unshaped_word):
    """Map each letter of a harakat-free word to its contextual glyph.

    A letter's form (isolated/initial/medial/final) depends on whether it
    connects to the previous letter (previous letter's glyph type is 4)
    and whether it connects to the next one (its own glyph type is 4 and
    it is not last).
    """
    if not unshaped_word:
        return u''
    if len(unshaped_word) == 1:
        return get_reshaped_glyph(unshaped_word[0], 1)
    glyphs = []
    last = len(unshaped_word) - 1
    for i, ch in enumerate(unshaped_word):
        connects_after = i != last and get_glyph_type(ch) == 4
        connects_before = i != 0 and get_glyph_type(unshaped_word[i - 1]) == 4
        if connects_after and connects_before:
            location = 3   # medial
        elif connects_after:
            location = 2   # initial
        elif connects_before:
            location = 4   # final
        else:
            location = 1   # isolated
        glyphs.append(get_reshaped_glyph(ch, location))
    return u''.join(glyphs)
def is_arabic_character(target):
    """A character counts as Arabic if it is a shapeable letter or a haraka."""
    if target in ARABIC_GLYPHS:
        return True
    return target in HARAKAT
def get_words(sentence):
    """Split *sentence* on single whitespace characters.

    Falsy input yields []. Consecutive whitespace produces empty strings,
    which preserves spacing when the words are re-joined with u' '.
    """
    if not sentence:
        return []
    return re.split('\\s', sentence)
def has_arabic_letters(word):
    """Return True if at least one character of *word* is Arabic."""
    return any(is_arabic_character(ch) for ch in word)
def is_arabic_word(word):
    """Return True if every character of *word* is Arabic (True for empty)."""
    return all(is_arabic_character(ch) for ch in word)
def get_words_from_mixed_word(word):
    """Split a word containing both Arabic and non-Arabic characters into
    maximal homogeneous runs, in order.

    A new run starts whenever the character class flips: an Arabic
    character ends a pending non-Arabic run and vice versa. The final
    pending run is flushed at the end.
    """
    temp_word = u''
    words = []
    for c in word:
        if is_arabic_character(c):
            # Arabic char: close a pending non-Arabic run, start a new one.
            if temp_word and not is_arabic_word(temp_word):
                words.append(temp_word)
                temp_word = c
            else:
                temp_word += c
        else:
            # Non-Arabic char: close a pending Arabic run, start a new one.
            if temp_word and is_arabic_word(temp_word):
                words.append(temp_word)
                temp_word = c
            else:
                temp_word += c
    if temp_word:
        words.append(temp_word)
    return words
def reshape(text):
    """Reshape Arabic *text* line by line.

    Lines are split on CRLF or LF and re-joined with LF. Falsy input
    yields u''.
    """
    if not text:
        return u''
    reshaped_lines = [reshape_sentence(line) for line in re.split('\\r?\\n', text)]
    return u'\n'.join(reshaped_lines)
def reshape_sentence(sentence):
    """Reshape every word of *sentence* that contains Arabic letters.

    Fully-Arabic words are reshaped directly; mixed words are split into
    homogeneous runs first and each run is passed through the reshaper.
    """
    words = get_words(sentence)
    for index, word in enumerate(words):
        if not has_arabic_letters(word):
            continue
        if is_arabic_word(word):
            words[index] = get_reshaped_word(word)
        else:
            runs = [get_reshaped_word(run) for run in get_words_from_mixed_word(word)]
            words[index] = u''.join(runs)
    return u' '.join(words)
|
from utils import sync
def firestore(event, context):
    """Triggered by a change to a Firestore document.

    Args:
        event (dict): Event payload.
        context (google.cloud.functions.Context): Metadata for the event.
    """
    # Log which document path fired this invocation.
    print(f"Function triggered by change to: {context.resource}.")
    # Propagate the changed document downstream.
    sync(event)
import mailbox
# Open the Maildir-format mailbox in the local 'Example' directory
# (mailbox.Maildir creates it if missing) and print every message's
# Subject header (None is printed for messages without one).
mbox = mailbox.Maildir('Example')
for message in mbox:
    print(message['subject'])
|
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
"""
export pb model for inference
python srez_freeze_graph.py ./checkpoint ./good_model.pb
"""
import os
import sys
# Make this file's parent's parent directory importable — presumably so
# srez_model (imported below) resolves; TODO confirm against repo layout.
path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.dirname(path))
# Move the newly appended entry to the front so it wins import resolution.
sys.path[0], sys.path[-1] = sys.path[-1], sys.path[0]
print(sys.path)  # debug aid: show the effective import search path
import numpy as np
import srez_model
import tensorflow as tf
from tensorflow.python.framework import graph_util
import argparse
# Force CPU-only execution: hide all CUDA devices from TensorFlow.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
def get_model_filenames(model_dir):
    """Return ``(meta_file, ckpt_file)`` basenames for the model in *model_dir*.

    Prefers the TensorFlow checkpoint-state file; if absent, scans for
    ``*.meta`` files and picks the one with the highest trailing step
    number (``name_<step>.meta``).

    Raises:
        ValueError: if neither a checkpoint state nor any .meta file exists.
            (The original code hit an UnboundLocalError in that case.)
    """
    files = os.listdir(model_dir)
    ckpt = tf.train.get_checkpoint_state(model_dir)
    if ckpt and ckpt.model_checkpoint_path:
        ckpt_file = os.path.basename(ckpt.model_checkpoint_path)
        return ckpt_file + '.meta', ckpt_file
    meta_files = [s for s in files if s.endswith('.meta')]
    if not meta_files:
        # Fail fast with a clear message instead of UnboundLocalError below.
        raise ValueError('No meta file found in the model directory (%s)' % model_dir)
    max_step = -1
    meta_file = meta_files[0]
    for f in meta_files:
        step = int(f.split('.')[0].split('_')[-1])
        if step > max_step:
            max_step = step
            meta_file = f
    # Checkpoint file shares the meta file's base name.
    ckpt_file = meta_file.split('.')[0] + '.ckpt'
    return meta_file, ckpt_file
def construct_model(sess):
    """Build the srez inference graph and return its (input, output) tensors.

    Creates placeholders for the low-res features (batch 16, 16x16x3) and
    the high-res labels (batch 16, 64x64x3), then delegates graph
    construction to srez_model. Returning the tensors is new but
    backward-compatible (the caller previously ignored the None return).
    """
    # NOTE: the original first assigned a random numpy array to test_labels
    # and immediately overwrote it with the placeholder; that dead code and
    # the unused numpy dependency are removed.
    test_features = tf.placeholder(tf.float32, shape=[16, 16, 16, 3])
    test_labels = tf.placeholder(tf.float32, shape=[16, 64, 64, 3])
    in_tensor, out_tensor = srez_model.create_inference_model(sess, test_features, test_labels)
    return in_tensor, out_tensor
def main(args):
    """Restore the checkpoint named by args.model_dir and export a frozen
    GraphDef protobuf to args.output_file."""
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model metagraph and checkpoint
            print('Model directory: %s' % args.model_dir)
            model_dir_exp = os.path.expanduser(args.model_dir)
            meta_file, ckpt_file = get_model_filenames(os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            # Rebuild the graph in this session before restoring weights.
            construct_model(sess)
            # sess.run(tf.global_variables_initializer())
            # sess.run(tf.local_variables_initializer())
            saver = tf.train.Saver()
            saver.restore(sess, os.path.join(model_dir_exp, ckpt_file))
            # Retrieve the protobuf graph definition and fix the batch norm nodes
            input_graph_def = sess.graph.as_graph_def()
            # Freeze the graph def; 'gene_output' is the exported output node.
            output_graph_def = freeze_graph_def(sess, input_graph_def, 'gene_output')
            # Serialize and dump the output graph to the filesystem
            with tf.gfile.GFile(args.output_file, 'wb') as f:
                f.write(output_graph_def.SerializeToString())
            print("%d ops in the final graph: %s" % (len(output_graph_def.node), args.output_file))
def freeze_graph_def(sess, input_graph_def, output_node_names):
    '''Rewrite batch-norm update ops in-place, then convert all variables to
    constants and return the frozen GraphDef.

    Error when loading the frozen graph with tensorflow.contrib.layers.python.layers.batch_norm
    ValueError: graph_def is invalid at node u'BatchNorm/cond/AssignMovingAvg/Switch': Input tensor 'BatchNorm/moving_mean:0' Cannot convert a tensor of type float32 to an input of type float32_ref
    freeze_graph.py doesn't seem to store moving_mean and moving_variance properly
    An ugly way to get it working:
    manually replace the wrong node definitions in the frozen graph
    RefSwitch --> Switch + add '/read' to the input names
    AssignSub --> Sub + remove use_locking attributes
    '''
    for node in input_graph_def.node:
        if node.op == 'RefSwitch':
            node.op = 'Switch'
            for index in range(len(node.input)):
                if 'moving_' in node.input[index] and "Switch" not in node.input[index]:
                    node.input[index] = node.input[index] + '/read'
        elif node.op == 'AssignSub':
            node.op = 'Sub'
            if 'use_locking' in node.attr: del node.attr['use_locking']
        elif node.op == 'AssignAdd':
            node.op = 'Add'
            if 'use_locking' in node.attr: del node.attr['use_locking']
        elif node.op == 'Assign':
            node.op = 'Identity'
            if 'use_locking' in node.attr: del node.attr['use_locking']
            if 'validate_shape' in node.attr: del node.attr['validate_shape']
            if len(node.input) == 2:
                # input0: ref: Should be from a Variable node. May be uninitialized.
                # input1: value: The value to be assigned to the variable.
                node.input[0] = node.input[1]
                del node.input[1]
    # NOTE: the original collected 'MobileFaceNet'/'embeddings' node names
    # into a whitelist that was never used (variable_names_whitelist=None
    # below) — leftover from the facenet script this was adapted from; the
    # dead loop is removed without behavior change.
    output_graph_def = graph_util.convert_variables_to_constants(
        sess, input_graph_def, output_node_names.split(","),
        variable_names_whitelist=None)
    return output_graph_def
def parse_arguments(argv):
    """Parse the two positional CLI arguments: model_dir and output_file."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        'model_dir', type=str,
        help='Directory containing the metagraph (.meta) file and the checkpoint (ckpt) file containing model parameters')
    arg_parser.add_argument(
        'output_file', type=str,
        help='Filename for the exported graphdef protobuf (.pb)')
    return arg_parser.parse_args(argv)
# Script entry point: freeze the checkpoint in argv[1] into the .pb at argv[2].
if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
|
import pytest
from .test_utils import *
@pytest.fixture
def noop_fixture():
    """Fixture that performs no setup and provides no value."""
    return None
@pytest.fixture
def rootdir():
    """Provide the test root directory as reported by test_utils."""
    current = get_current_dir()
    return current
@pytest.fixture
def sqlalchemy_conn():
    """Yield a SQLAlchemy connection switched to the MySQL test schema.

    The connection is closed in a ``finally`` so it is released even if
    selecting the schema raises during setup (the original leaked the
    connection in that case).
    """
    engine = get_sqlalchemy_mysql_engine()
    conn = engine.connect()
    try:
        conn.execute("use %s" % test_config["MySQLTestSchema"])
        yield conn
    finally:
        conn.close()
@pytest.fixture
def pymysql_conn():
    """Yield a raw PyMySQL connection; closed on fixture teardown."""
    connection = get_pymysql_conn()
    yield connection
    connection.close()
@pytest.fixture
def sqlite_in_conn():
    """Yield the SQLite input-database connection; closed on teardown."""
    connection = get_sqlite_in_conn()
    yield connection
    connection.close()
@pytest.fixture
def sqlite_out_conn():
    """Yield the SQLite output-database connection; closed on teardown."""
    connection = get_sqlite_out_conn()
    yield connection
    connection.close()
|
# -*- coding: utf-8 -*-
from rest_framework import mixins, viewsets
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST, HTTP_200_OK
from super_notifications.models import Notification, NotificationType
from super_notifications.app_settings import NotificationSerializer
class NotificationsViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
    """
    List the requesting user's unread notifications and mark individual
    notifications as read/unread via the ``mark`` action.

    NOTE(review): the previous docstring described "documents from an
    agency" — apparently copy-pasted from another viewset; this class only
    touches Notification objects.
    """
    queryset = Notification.objects.all()
    # TODO : useful ? load different templates in serializer or choose different serializers
    # depending on the nf_type
    serializer_class = NotificationSerializer
    authentication_classes = [SessionAuthentication]
    lookup_field = 'pk'
    def get_queryset(self):
        # Only unread notifications addressed to the current user.
        queryset = self.queryset.unread().filter(recipient=self.request.user)
        return queryset
    def filter_queryset(self, queryset):
        # Optional ``flag`` query param: the highest notification ID the
        # client already has; return only newer, active notifications.
        if 'flag' in self.request.query_params:
            flag = self.request.query_params.get('flag')
            last_notification = int(flag) if flag.isdigit() else None
            if last_notification:
                new_queryset = queryset.filter(id__gt=last_notification).active().prefetch()
                return new_queryset
        return queryset
    @action(methods=['post'], detail=False, authentication_classes=[SessionAuthentication])
    def mark(self, request):
        """Mark one of the user's notifications as read or unread.

        POST data: ``id`` (notification pk) and ``action`` ('read' or
        'unread'). Responds 200 on success, 400 with a message otherwise.
        """
        notification_id = request.data.get('id', None)
        # NOTE: this local deliberately shadows the imported ``action``
        # decorator within the method body.
        action = request.data.get('action', None)
        success = True
        if notification_id:
            try:
                notification = Notification.objects.get(pk=notification_id,
                                                        recipient=request.user)
                # Fetched only to verify the type exists AND allows manual
                # marking; raises NotificationType.DoesNotExist otherwise.
                notification_type = NotificationType.objects.get(label=notification.nf_type, manual=True)
                if action == 'read':
                    notification.mark_as_read()
                    msg = "Marked as read"
                elif action == 'unread':
                    notification.mark_as_unread()
                    msg = "Marked as unread"
                else:
                    success = False
                    msg = "Invalid mark action."
            except Notification.DoesNotExist:
                success = False
                msg = "Notification does not exists."
            except NotificationType.DoesNotExist:
                success = False
                msg = "Notification type not readable manually"
        else:
            success = False
            msg = "Invalid Notification ID"
        ctx = {'message': msg, 'action': action}
        if not success:
            return Response(ctx, status=HTTP_400_BAD_REQUEST)
        else:
            return Response(ctx, status=HTTP_200_OK)
# @action(methods=['get'], detail=False, authentication_classes=[SessionAuthentication])
# def live(self, request):
# """
# Handles live updating of notifications, follows ajax-polling approach.
#
# Read more: http://stackoverflow.com/a/12855533/4726598
#
# Required URL parameters: ``flag``.
#
# Explanation:
#
# - The ``flag`` parameter carries the last notification ID \
# received by the user's browser.
#
# - This ``flag`` is most likely to be generated by using \
# a simple JS/JQuery DOM. Just grab the first element of \
# the notification list.
#
# - The element will have a ``data-id`` attribute set to the \
# corresponding notification.
# - We'll use it's value as the flag parameter.
#
# - The view treats the ``last notification flag`` as a model \
# ```filter()`` and fetches all notifications greater than \
# the flag for the user.
#
# - Then the a JSON data is prepared with all necessary \
# details such as, ``verb``, ``actor``, ``target`` and their \
# URL etc. The foreignkey are serialized as their \
# default ``__str__`` value.
#
# - Everything will be HTML escaped by django's ``escape()``.
#
# - Since these notification sent will only serve temporarily \
# on the notification box and will be generated fresh \
# using a whole template, to avoid client-side notification \
# generation using the JSON data, the JSON data will also \
# contain a rendered HTML string so that you can easily \
# do a JQuery ``$yourNotificationBox.prepend()`` on the \
# rendered html string of the notification.
#
# - The template used is expected to be different than the \
# template used in full page notification as the css \
# and some other elements are highly likely to be \
# different than the full page notification list. \
#
# - The template used will be the ``notification type`` of the \
# notification suffixed ``_box.html``. So, if your \
# notification type is ``comment_reply``, the template \
# will be ``comment_reply_box.html``.
#
# - This template will be stored in ``notifications/includes/`` \
# of your template directory.
#
# - That makes: ``notifications/includes/comment_reply_box.html``
#
# - The rest is self-explanatory.
#
# :param request: HTTP request context.
#
# :return: Notification updates (if any) in JSON format.
# """
# flag = request.GET.get('flag', None)
# last_notification = int(flag) if flag.isdigit() else None
#
# if last_notification:
#
# new_notifications = request.user.notifications.filter(
# id__gt=last_notification).active().prefetch()
#
# msg = "Notifications successfully retrieved." if new_notifications else "No new notifications."
# notification_list = []
# for nf in new_notifications:
# # TODO: use Serializer -> NotificationSerializer instead of as_json
# notification = nf.as_json()
# notification_list.append(notification)
#
# ctx = {
# "retrieved": len(new_notifications),
# "unread_count": request.user.notifications.unread().count(),
# "notifications": notification_list,
# "success": True,
# "msg": msg,
# }
#
# return JsonResponse(ctx)
#
# else:
# msg = "Notification flag not sent."
#
# ctx = {"success": False, "msg": msg}
# return JsonResponse(ctx) |
# Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import collections
import functools
import itertools
import logging
import threading
import uuid
import grpc
import six
from google.api_core import bidi
from google.api_core import exceptions
from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.subscriber._protocol import dispatcher
from google.cloud.pubsub_v1.subscriber._protocol import heartbeater
from google.cloud.pubsub_v1.subscriber._protocol import histogram
from google.cloud.pubsub_v1.subscriber._protocol import leaser
from google.cloud.pubsub_v1.subscriber._protocol import messages_on_hold
from google.cloud.pubsub_v1.subscriber._protocol import requests
import google.cloud.pubsub_v1.subscriber.message
import google.cloud.pubsub_v1.subscriber.scheduler
from google.pubsub_v1 import types as gapic_types
_LOGGER = logging.getLogger(__name__)
# Name of the thread spawned by _on_rpc_done() to shut the manager down.
_RPC_ERROR_THREAD_NAME = "Thread-OnRpcTerminated"
# gRPC errors after which the streaming pull RPC is transparently reopened.
_RETRYABLE_STREAM_ERRORS = (
    exceptions.DeadlineExceeded,
    exceptions.ServiceUnavailable,
    exceptions.InternalServerError,
    exceptions.Unknown,
    exceptions.GatewayTimeout,
    exceptions.Aborted,
)
# gRPC errors that terminate the stream for good (no reconnect attempt).
_TERMINATING_STREAM_ERRORS = (exceptions.Cancelled,)
_MAX_LOAD = 1.0
"""The load threshold above which to pause the incoming message stream."""
_RESUME_THRESHOLD = 0.8
"""The load threshold below which to resume the incoming message stream."""
def _wrap_as_exception(maybe_exception):
    """Coerce an arbitrary object into a Python exception.

    Args:
        maybe_exception (Any): The object to wrap, usually a gRPC exception
            class.

    Returns:
        The argument itself if an instance of ``BaseException``, otherwise
        the argument represented as an instance of ``Exception`` (sub)class.
    """
    if isinstance(maybe_exception, BaseException):
        # gRPC errors get translated into their google.api_core equivalents;
        # any other exception passes through untouched.
        if isinstance(maybe_exception, grpc.RpcError):
            return exceptions.from_grpc_error(maybe_exception)
        return maybe_exception
    return Exception(maybe_exception)
def _wrap_callback_errors(callback, on_callback_error, message):
"""Wraps a user callback so that if an exception occurs the message is
nacked.
Args:
callback (Callable[None, Message]): The user callback.
message (~Message): The Pub/Sub message.
"""
try:
callback(message)
except Exception as exc:
# Note: the likelihood of this failing is extremely low. This just adds
# a message to a queue, so if this doesn't work the world is in an
# unrecoverable state and this thread should just bail.
_LOGGER.exception(
"Top-level exception occurred in callback while processing a message"
)
message.nack()
on_callback_error(exc)
class StreamingPullManager(object):
    """The streaming pull manager coordinates pulling messages from Pub/Sub,
    leasing them, and scheduling them to be processed.
    Args:
        client (~.pubsub_v1.subscriber.client): The subscriber client used
            to create this instance.
        subscription (str): The name of the subscription. The canonical
            format for this is
            ``projects/{project}/subscriptions/{subscription}``.
        flow_control (~google.cloud.pubsub_v1.types.FlowControl): The flow
            control settings.
        use_legacy_flow_control (bool): Disables enforcing flow control settings
            at the Cloud PubSub server and uses the less accurate method of only
            enforcing flow control at the client side.
        scheduler (~google.cloud.pubsub_v1.scheduler.Scheduler): The scheduler
            to use to process messages. If not provided, a thread pool-based
            scheduler will be used.
    """
    def __init__(
        self,
        client,
        subscription,
        flow_control=types.FlowControl(),
        scheduler=None,
        use_legacy_flow_control=False,
    ):
        self._client = client
        self._subscription = subscription
        self._flow_control = flow_control
        self._use_legacy_flow_control = use_legacy_flow_control
        # Histogram of observed time-to-ack values, used by the ack_deadline
        # property to pick a deadline that covers most acks (99th percentile).
        self._ack_histogram = histogram.Histogram()
        self._last_histogram_size = 0
        self._ack_deadline = 10
        self._rpc = None
        self._callback = None
        # Guards close() so that shutdown runs at most once.
        self._closing = threading.Lock()
        self._closed = False
        self._close_callbacks = []
        # Generate a random client id tied to this object. All streaming pull
        # connections (initial and re-connects) will then use the same client
        # id. Doing so lets the server establish affinity even across stream
        # disconnections.
        self._client_id = str(uuid.uuid4())
        if scheduler is None:
            self._scheduler = (
                google.cloud.pubsub_v1.subscriber.scheduler.ThreadScheduler()
            )
        else:
            self._scheduler = scheduler
        # A collection for the messages that have been received from the server,
        # but not yet sent to the user callback.
        self._messages_on_hold = messages_on_hold.MessagesOnHold()
        # The total number of bytes consumed by the messages currently on hold
        self._on_hold_bytes = 0
        # A lock ensuring that pausing / resuming the consumer are both atomic
        # operations that cannot be executed concurrently. Needed for properly
        # syncing these operations with the current leaser load. Additionally,
        # the lock is used to protect modifications of internal data that
        # affects the load computation, i.e. the count and size of the messages
        # currently on hold.
        self._pause_resume_lock = threading.Lock()
        # The threads created in ``.open()``.
        self._dispatcher = None
        self._leaser = None
        self._consumer = None
        self._heartbeater = None
    @property
    def is_active(self):
        """bool: True if this manager is actively streaming.
        Note that ``False`` does not indicate this is complete shut down,
        just that it stopped getting new messages.
        """
        return self._consumer is not None and self._consumer.is_active
    @property
    def flow_control(self):
        """google.cloud.pubsub_v1.types.FlowControl: The active flow control
        settings."""
        return self._flow_control
    @property
    def dispatcher(self):
        """google.cloud.pubsub_v1.subscriber._protocol.dispatcher.Dispatcher:
        The dispatcher helper.
        """
        return self._dispatcher
    @property
    def leaser(self):
        """google.cloud.pubsub_v1.subscriber._protocol.leaser.Leaser:
        The leaser helper.
        """
        return self._leaser
    @property
    def ack_histogram(self):
        """google.cloud.pubsub_v1.subscriber._protocol.histogram.Histogram:
        The histogram tracking time-to-acknowledge.
        """
        return self._ack_histogram
    @property
    def ack_deadline(self):
        """Return the current ack deadline based on historical time-to-ack.
        This method is "sticky". It will only perform the computations to
        check on the right ack deadline if the histogram has gained a
        significant amount of new information.
        Returns:
            int: The ack deadline.
        """
        # Recompute only once the histogram has grown "significantly" since
        # the last computation: doubled, or gained 100 entries, whichever
        # comes first.
        target_size = min(
            self._last_histogram_size * 2, self._last_histogram_size + 100
        )
        hist_size = len(self.ack_histogram)
        if hist_size > target_size:
            self._last_histogram_size = hist_size
            self._ack_deadline = self.ack_histogram.percentile(percent=99)
        if self.flow_control.max_duration_per_lease_extension > 0:
            # Never extend leases beyond the user-configured cap.
            self._ack_deadline = min(
                self._ack_deadline, self.flow_control.max_duration_per_lease_extension
            )
        return self._ack_deadline
    @property
    def load(self):
        """Return the current load.
        The load is represented as a float, where 1.0 represents having
        hit one of the flow control limits, and values between 0.0 and 1.0
        represent how close we are to them. (0.5 means we have exactly half
        of what the flow control setting allows, for example.)
        There are (currently) two flow control settings; this property
        computes how close the manager is to each of them, and returns
        whichever value is higher. (It does not matter that we have lots of
        running room on setting A if setting B is over.)
        Returns:
            float: The load value.
        """
        if self._leaser is None:
            return 0.0
        # Messages that are temporarily put on hold are not being delivered to
        # user's callbacks, thus they should not contribute to the flow control
        # load calculation.
        # However, since these messages must still be lease-managed to avoid
        # unnecessary ACK deadline expirations, their count and total size must
        # be subtracted from the leaser's values.
        return max(
            [
                (self._leaser.message_count - self._messages_on_hold.size)
                / self._flow_control.max_messages,
                (self._leaser.bytes - self._on_hold_bytes)
                / self._flow_control.max_bytes,
            ]
        )
    def add_close_callback(self, callback):
        """Schedules a callable when the manager closes.
        Args:
            callback (Callable): The method to call.
        """
        self._close_callbacks.append(callback)
    def activate_ordering_keys(self, ordering_keys):
        """Send the next message in the queue for each of the passed-in
        ordering keys, if they exist. Clean up state for keys that no longer
        have any queued messages.
        Since the load went down by one message, it's probably safe to send the
        user another message for the same key. Since the released message may be
        bigger than the previous one, this may increase the load above the maximum.
        This decision is by design because it simplifies MessagesOnHold.
        Args:
            ordering_keys(Sequence[str]): A sequence of ordering keys to
                activate. May be empty.
        """
        with self._pause_resume_lock:
            if self._scheduler is None:
                return  # We are shutting down, don't try to dispatch any more messages.
            self._messages_on_hold.activate_ordering_keys(
                ordering_keys, self._schedule_message_on_hold
            )
    def maybe_pause_consumer(self):
        """Check the current load and pause the consumer if needed."""
        with self._pause_resume_lock:
            if self.load >= _MAX_LOAD:
                if self._consumer is not None and not self._consumer.is_paused:
                    _LOGGER.debug(
                        "Message backlog over load at %.2f, pausing.", self.load
                    )
                    self._consumer.pause()
    def maybe_resume_consumer(self):
        """Check the load and held messages and resume the consumer if needed.
        If there are messages held internally, release those messages before
        resuming the consumer. That will avoid leaser overload.
        """
        with self._pause_resume_lock:
            # If we have been paused by flow control, check and see if we are
            # back within our limits.
            #
            # In order to not thrash too much, require us to have passed below
            # the resume threshold (80% by default) of each flow control setting
            # before restarting.
            if self._consumer is None or not self._consumer.is_paused:
                return
            _LOGGER.debug("Current load: %.2f", self.load)
            # Before maybe resuming the background consumer, release any messages
            # currently on hold, if the current load allows for it.
            self._maybe_release_messages()
            if self.load < _RESUME_THRESHOLD:
                _LOGGER.debug("Current load is %.2f, resuming consumer.", self.load)
                self._consumer.resume()
            else:
                _LOGGER.debug("Did not resume, current load is %.2f.", self.load)
    def _maybe_release_messages(self):
        """Release (some of) the held messages if the current load allows for it.
        The method tries to release as many messages as the current leaser load
        would allow. Each released message is added to the lease management,
        and the user callback is scheduled for it.
        If there are currently no messages on hold, or if the leaser is
        already overloaded, this method is effectively a no-op.
        The method assumes the caller has acquired the ``_pause_resume_lock``.
        """
        released_ack_ids = []
        while self.load < _MAX_LOAD:
            msg = self._messages_on_hold.get()
            if not msg:
                break
            self._schedule_message_on_hold(msg)
            released_ack_ids.append(msg.ack_id)
        # Start the lease-expiry countdown for the ack IDs just released.
        self._leaser.start_lease_expiry_timer(released_ack_ids)
    def _schedule_message_on_hold(self, msg):
        """Schedule a message on hold to be sent to the user and change
        on-hold-bytes.
        The method assumes the caller has acquired the ``_pause_resume_lock``.
        Args:
            msg (google.cloud.pubsub_v1.message.Message): The message to
                schedule to be sent to the user.
        """
        assert msg, "Message must not be None."
        # On-hold bytes goes down, increasing load.
        self._on_hold_bytes -= msg.size
        if self._on_hold_bytes < 0:
            _LOGGER.warning(
                "On hold bytes was unexpectedly negative: %s", self._on_hold_bytes
            )
            self._on_hold_bytes = 0
        _LOGGER.debug(
            "Released held message, scheduling callback for it, "
            "still on hold %s (bytes %s).",
            self._messages_on_hold.size,
            self._on_hold_bytes,
        )
        self._scheduler.schedule(self._callback, msg)
    def _send_unary_request(self, request):
        """Send a request using a separate unary request instead of over the
        stream.
        Args:
            request (gapic_types.StreamingPullRequest): The stream request to be
                mapped into unary requests.
        """
        if request.ack_ids:
            self._client.acknowledge(
                subscription=self._subscription, ack_ids=list(request.ack_ids)
            )
        if request.modify_deadline_ack_ids:
            # Send ack_ids with the same deadline seconds together.
            deadline_to_ack_ids = collections.defaultdict(list)
            for n, ack_id in enumerate(request.modify_deadline_ack_ids):
                deadline = request.modify_deadline_seconds[n]
                deadline_to_ack_ids[deadline].append(ack_id)
            for deadline, ack_ids in six.iteritems(deadline_to_ack_ids):
                self._client.modify_ack_deadline(
                    subscription=self._subscription,
                    ack_ids=ack_ids,
                    ack_deadline_seconds=deadline,
                )
        _LOGGER.debug("Sent request(s) over unary RPC.")
    def send(self, request):
        """Queue a request to be sent to the RPC.
        If a RetryError occurs, the manager shutdown is triggered, and the
        error is re-raised.
        Args:
            request (gapic_types.StreamingPullRequest): The request to send
                via a unary RPC.
        """
        try:
            self._send_unary_request(request)
        except exceptions.GoogleAPICallError:
            _LOGGER.debug(
                "Exception while sending unary RPC. This is typically "
                "non-fatal as stream requests are best-effort.",
                exc_info=True,
            )
        except exceptions.RetryError as exc:
            _LOGGER.debug(
                "RetryError while sending unary RPC. Waiting on a transient "
                "error resolution for too long, will now trigger shutdown.",
                exc_info=False,
            )
            # The underlying channel has been suffering from a retryable error
            # for too long, time to give up and shut the streaming pull down.
            self._on_rpc_done(exc)
            raise
    def heartbeat(self):
        """Sends an empty request over the streaming pull RPC.
        Returns:
            bool: If a heartbeat request has actually been sent.
        """
        if self._rpc is not None and self._rpc.is_active:
            self._rpc.send(gapic_types.StreamingPullRequest())
            return True
        return False
    def open(self, callback, on_callback_error):
        """Begin consuming messages.
        Args:
            callback (Callable[None, google.cloud.pubsub_v1.message.Message]):
                A callback that will be called for each message received on the
                stream.
            on_callback_error (Callable[Exception]):
                A callable that will be called if an exception is raised in
                the provided `callback`.
        """
        if self.is_active:
            raise ValueError("This manager is already open.")
        if self._closed:
            raise ValueError("This manager has been closed and can not be re-used.")
        self._callback = functools.partial(
            _wrap_callback_errors, callback, on_callback_error
        )
        # Create the RPC
        stream_ack_deadline_seconds = self.ack_histogram.percentile(99)
        get_initial_request = functools.partial(
            self._get_initial_request, stream_ack_deadline_seconds
        )
        self._rpc = bidi.ResumableBidiRpc(
            start_rpc=self._client.api.streaming_pull,
            initial_request=get_initial_request,
            should_recover=self._should_recover,
            should_terminate=self._should_terminate,
            throttle_reopen=True,
        )
        self._rpc.add_done_callback(self._on_rpc_done)
        _LOGGER.debug(
            "Creating a stream, default ACK deadline set to {} seconds.".format(
                stream_ack_deadline_seconds
            )
        )
        # Create references to threads
        self._dispatcher = dispatcher.Dispatcher(self, self._scheduler.queue)
        self._consumer = bidi.BackgroundConsumer(self._rpc, self._on_response)
        self._leaser = leaser.Leaser(self)
        self._heartbeater = heartbeater.Heartbeater(self)
        # Start the thread to pass the requests.
        self._dispatcher.start()
        # Start consuming messages.
        self._consumer.start()
        # Start the lease maintainer thread.
        self._leaser.start()
        # Start the stream heartbeater thread.
        self._heartbeater.start()
    def close(self, reason=None, await_msg_callbacks=False):
        """Stop consuming messages and shutdown all helper threads.
        This method is idempotent. Additional calls will have no effect.
        Args:
            reason (Any): The reason to close this. If None, this is considered
                an "intentional" shutdown. This is passed to the callbacks
                specified via :meth:`add_close_callback`.
            await_msg_callbacks (bool):
                If ``True``, the method will wait until all scheduler threads terminate
                and only then proceed with the shutdown with the remaining shutdown
                tasks,
                If ``False`` (default), the method will shut down the scheduler in a
                non-blocking fashion, i.e. it will not wait for the currently executing
                scheduler threads to terminate.
        """
        with self._closing:
            if self._closed:
                return
            # Stop consuming messages.
            if self.is_active:
                _LOGGER.debug("Stopping consumer.")
                self._consumer.stop()
            self._consumer = None
            # Shutdown all helper threads
            _LOGGER.debug("Stopping scheduler.")
            dropped_messages = self._scheduler.shutdown(
                await_msg_callbacks=await_msg_callbacks
            )
            self._scheduler = None
            # Leaser and dispatcher reference each other through the shared
            # StreamingPullManager instance, i.e. "self", thus do not set their
            # references to None until both have been shut down.
            #
            # NOTE: Even if the dispatcher operates on an inactive leaser using
            # the latter's add() and remove() methods, these have no impact on
            # the stopped leaser (the leaser is never again re-started). Ditto
            # for the manager's maybe_resume_consumer() / maybe_pause_consumer(),
            # because the consumer gets shut down first.
            _LOGGER.debug("Stopping leaser.")
            self._leaser.stop()
            total = len(dropped_messages) + len(
                self._messages_on_hold._messages_on_hold
            )
            _LOGGER.debug(f"NACK-ing all not-yet-dispatched messages (total: {total}).")
            messages_to_nack = itertools.chain(
                dropped_messages, self._messages_on_hold._messages_on_hold
            )
            for msg in messages_to_nack:
                msg.nack()
            _LOGGER.debug("Stopping dispatcher.")
            self._dispatcher.stop()
            self._dispatcher = None
            # dispatcher terminated, OK to dispose the leaser reference now
            self._leaser = None
            _LOGGER.debug("Stopping heartbeater.")
            self._heartbeater.stop()
            self._heartbeater = None
            self._rpc = None
            self._closed = True
            _LOGGER.debug("Finished stopping manager.")
            for callback in self._close_callbacks:
                callback(self, reason)
    def _get_initial_request(self, stream_ack_deadline_seconds):
        """Return the initial request for the RPC.
        This defines the initial request that must always be sent to Pub/Sub
        immediately upon opening the subscription.
        Args:
            stream_ack_deadline_seconds (int):
                The default message acknowledge deadline for the stream.
        Returns:
            google.pubsub_v1.types.StreamingPullRequest: A request
            suitable for being the first request on the stream (and not
            suitable for any other purpose).
        """
        # Any ack IDs that are under lease management need to have their
        # deadline extended immediately.
        if self._leaser is not None:
            # Explicitly copy the list, as it could be modified by another
            # thread.
            lease_ids = list(self._leaser.ack_ids)
        else:
            lease_ids = []
        # Put the request together.
        # NOTE: max_outstanding_* of 0 tells the server not to enforce flow
        # control; the client then enforces it locally (legacy mode).
        request = gapic_types.StreamingPullRequest(
            modify_deadline_ack_ids=list(lease_ids),
            modify_deadline_seconds=[self.ack_deadline] * len(lease_ids),
            stream_ack_deadline_seconds=stream_ack_deadline_seconds,
            subscription=self._subscription,
            client_id=self._client_id,
            max_outstanding_messages=(
                0 if self._use_legacy_flow_control else self._flow_control.max_messages
            ),
            max_outstanding_bytes=(
                0 if self._use_legacy_flow_control else self._flow_control.max_bytes
            ),
        )
        # Return the initial request.
        return request
    def _on_response(self, response):
        """Process all received Pub/Sub messages.
        For each message, send a modified acknowledgment request to the
        server. This prevents expiration of the message due to buffering by
        gRPC or proxy/firewall. This makes the server and client expiration
        timer closer to each other thus preventing the message being
        redelivered multiple times.
        After the messages have all had their ack deadline updated, execute
        the callback for each message using the executor.
        """
        if response is None:
            _LOGGER.debug(
                "Response callback invoked with None, likely due to a "
                "transport shutdown."
            )
            return
        # IMPORTANT: Circumvent the wrapper class and operate on the raw underlying
        # protobuf message to significantly gain on attribute access performance.
        received_messages = response._pb.received_messages
        _LOGGER.debug(
            "Processing %s received message(s), currently on hold %s (bytes %s).",
            len(received_messages),
            self._messages_on_hold.size,
            self._on_hold_bytes,
        )
        # Immediately (i.e. without waiting for the auto lease management)
        # modack the messages we received, as this tells the server that we've
        # received them.
        items = [
            requests.ModAckRequest(message.ack_id, self._ack_histogram.percentile(99))
            for message in received_messages
        ]
        self._dispatcher.modify_ack_deadline(items)
        with self._pause_resume_lock:
            for received_message in received_messages:
                message = google.cloud.pubsub_v1.subscriber.message.Message(
                    received_message.message,
                    received_message.ack_id,
                    received_message.delivery_attempt,
                    self._scheduler.queue,
                )
                self._messages_on_hold.put(message)
                self._on_hold_bytes += message.size
                req = requests.LeaseRequest(
                    ack_id=message.ack_id,
                    byte_size=message.size,
                    ordering_key=message.ordering_key,
                )
                self.leaser.add([req])
            self._maybe_release_messages()
        self.maybe_pause_consumer()
    def _should_recover(self, exception):
        """Determine if an error on the RPC stream should be recovered.
        If the exception is one of the retryable exceptions, this will signal
        to the consumer thread that it should "recover" from the failure.
        This will cause the stream to exit when it returns :data:`False`.
        Returns:
            bool: Indicates if the caller should recover or shut down.
            Will be :data:`True` if the ``exception`` is "acceptable", i.e.
            in a list of retryable / idempotent exceptions.
        """
        exception = _wrap_as_exception(exception)
        # If this is in the list of idempotent exceptions, then we want to
        # recover.
        if isinstance(exception, _RETRYABLE_STREAM_ERRORS):
            _LOGGER.info("Observed recoverable stream error %s", exception)
            return True
        _LOGGER.info("Observed non-recoverable stream error %s", exception)
        return False
    def _should_terminate(self, exception):
        """Determine if an error on the RPC stream should be terminated.
        If the exception is one of the terminating exceptions, this will signal
        to the consumer thread that it should terminate.
        This will cause the stream to exit when it returns :data:`True`.
        Returns:
            bool: Indicates if the caller should terminate or attempt recovery.
            Will be :data:`True` if the ``exception`` is "acceptable", i.e.
            in a list of terminating exceptions.
        """
        exception = _wrap_as_exception(exception)
        if isinstance(exception, _TERMINATING_STREAM_ERRORS):
            _LOGGER.info("Observed terminating stream error %s", exception)
            return True
        _LOGGER.info("Observed non-terminating stream error %s", exception)
        return False
    def _on_rpc_done(self, future):
        """Triggered whenever the underlying RPC terminates without recovery.
        This is typically triggered from one of two threads: the background
        consumer thread (when calling ``recv()`` produces a non-recoverable
        error) or the grpc management thread (when cancelling the RPC).
        This method is *non-blocking*. It will start another thread to deal
        with shutting everything down. This is to prevent blocking in the
        background consumer and preventing it from being ``joined()``.
        """
        _LOGGER.info("RPC termination has signaled streaming pull manager shutdown.")
        error = _wrap_as_exception(future)
        thread = threading.Thread(
            name=_RPC_ERROR_THREAD_NAME, target=self.close, kwargs={"reason": error}
        )
        thread.daemon = True
        thread.start()
|
# Re-export the package's single public helper; ``__all__`` pins the public API.
from .once_per_worker import once_per_worker
__all__ = ["once_per_worker"]
|
from __future__ import absolute_import
import numpy as np
from .abundance import FractionalAbundance
import pdb
class CollRadEquilibrium(object):
    """Collisional-radiative equilibrium ionisation balance for one element.

    Balances electron-impact ionisation against recombination to obtain
    steady-state fractional abundances of each ionisation stage.
    """

    def __init__(self, atomic_data):
        """
        Args:
            atomic_data: Object exposing ``coeffs['ionisation']`` and
                ``coeffs['recombination']`` rate-coefficient callables
                (called as ``coeff(stage, temperature, density)``) and an
                integer ``nuclear_charge``.
        """
        self.atomic_data = atomic_data
        self.ionisation_coeff = atomic_data.coeffs['ionisation']  # RateCoefficient objects
        self.recombination_coeff = atomic_data.coeffs['recombination']
        # Could be generalised to include metastables.
        self.nuclear_charge = atomic_data.nuclear_charge

    def ionisation_stage_distribution(self, temperature, density):
        """Compute ionisation stage fractions for collrad equilibrium.

        This case only includes ionisation and recombination.
        It does not include charge exchange, or any time-dependent effects.

        Args:
            temperature (array_like): temperatures [eV].
            density (array_like): densities [m^-3].

        Returns:
            A FractionalAbundance object
        """
        # Broadcast a single temperature value across all densities.
        if len(temperature) == 1 and len(density) > 1:
            temperature = temperature * np.ones_like(density)
        y = np.zeros((self.nuclear_charge + 1, len(temperature)))
        y[0] = np.ones_like(temperature)
        # Detailed balance between adjacent stages: y[k+1]/y[k] = S_k / alpha_k,
        # built up from the neutral stage.
        for k in range(self.nuclear_charge):
            S = self.ionisation_coeff(k, temperature, density)
            alpha = self.recombination_coeff(k, temperature, density)
            y[k+1] = y[k] * S / alpha
        y /= y.sum(0)  # normalise to fractional abundance
        return FractionalAbundance(self.atomic_data, y, temperature, density)
# Module is import-only; no standalone behavior when executed directly.
if __name__ == '__main__':
    pass
|
import asyncio
from logging.config import dictConfig
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from starlette.middleware import Middleware
from asgi_correlation_id import correlation_id_filter
from asgi_correlation_id.log_filters import celery_tracing_id_filter
from asgi_correlation_id.middleware import CorrelationIdMiddleware
@pytest.fixture(autouse=True, scope='session')
def _configure_logging():
    """Configure logging once per test session with both tracing filters
    attached, so log records carry correlation and Celery tracing IDs."""
    LOGGING = {
        'version': 1,
        'disable_existing_loggers': False,
        'filters': {
            # NOTE(review): dictConfig's '()' key expects a factory callable;
            # here the filter-factory functions are *called* and their return
            # value is used as the factory -- confirm each returns a callable
            # (e.g. a Filter class) rather than a Filter instance.
            'correlation_id': {'()': correlation_id_filter()},
            'celery_tracing': {'()': celery_tracing_id_filter()},
        },
        'formatters': {
            'full': {
                'class': 'logging.Formatter',
                'datefmt': '%H:%M:%S',
                'format': '[%(correlation_id)s] [%(celery_parent_id)s-%(celery_current_id)s] %(message)s',
            },
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'filters': ['correlation_id', 'celery_tracing'],
                'formatter': 'full',
            },
        },
        'loggers': {
            # project logger
            'asgi_correlation_id': {
                'handlers': ['console'],
                'level': 'DEBUG',
                'propagate': True,
            },
        },
    }
    dictConfig(LOGGING)
# Minimal FastAPI app with only the correlation-id middleware, used as the
# target application for the test HTTP client.
app = FastAPI(middleware=[Middleware(CorrelationIdMiddleware)])
@pytest.fixture(scope='session', autouse=True)
def event_loop():
    """Provide one fresh asyncio event loop for the whole test session,
    closing it at session teardown."""
    policy = asyncio.get_event_loop_policy()
    fresh_loop = policy.new_event_loop()
    yield fresh_loop
    fresh_loop.close()
@pytest.fixture(scope='module')
async def client() -> AsyncClient:
    """Yield an httpx AsyncClient wired directly to the in-process app."""
    async with AsyncClient(app=app, base_url='http://test') as http_client:
        yield http_client
|
from crispy_forms.layout import Field
from django import forms
from .models import Stock_ID
class SearchStockForm(forms.ModelForm):
    """Form for looking up a stock by its ticker symbol."""
    class Meta:
        model = Stock_ID
        # Only the ticker field is exposed to the user.
        fields = ['stock_ticker']
|
"""
Django settings for ananas project.

Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is committed to source control; it should be
# loaded from an environment variable or a secrets store instead.
SECRET_KEY = 'xu!k68q$@_*82ja*jgqqv+3)5o(s+gsi()cf47e-^75)sr*dv_'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG is enabled only on Windows hosts (developer machines); the Linux
# deployment runs with DEBUG off.
if os.name == 'nt':
    DEBUG = True
else:
    DEBUG = False
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# django-phonenumber-field defaults (French national number format).
PHONENUMBER_DB_FORMAT = 'NATIONAL'
PHONENUMBER_DEFAULT_REGION = "FR"
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.sites',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'login',
    'channels',
    'messenger',
    'timeline',
    'phonenumber_field',
    'rest_framework',
    'rest_framework.authtoken',
    'profil',
    'mdeditor',
    'actstream',
    'debug_toolbar',
]
SITE_ID = 1
# django-activity-stream configuration.
ACTSTREAM_SETTINGS = {
    'FETCH_RELATIONS': True,
    'USE_JSONFIELD': True,
}
X_FRAME_OPTIONS = 'SAMEORIGIN'
MDEDITOR_CONFIGS = {
    'default':{'width': '90% ', # Custom edit box width
               'heigth': 500, # Custom edit box height ('heigth' is the key spelling used by django-mdeditor)
               'toolbar': ["undo", "redo", "|",
                           "bold", "del", "italic", "quote", "ucwords", "uppercase", "lowercase", "|",
                           "h1", "h2", "h3", "h5", "h6", "|",
                           "list-ul", "list-ol", "hr", "|",
                           "link", "reference-link", "image", "code", "preformatted-text", "code-block", "table"
                           , "pagebreak", "goto-line", "|",
                           "help", "info",
                           "||", "preview", "watch", "fullscreen"], # custom edit box toolbar
               'upload_image_formats': ["jpg", "jpeg", "gif", "png", "bmp", "webp","jfif"], # image upload format type
               'image_folder': 'editor', # image save the folder name
               'theme': 'default', # edit box theme, dark / default
               'preview_theme': 'default', # Preview area theme, dark / default
               'editor_theme': 'default', # edit area theme, pastel-on-dark / default
               'toolbar_autofixed': True, # Whether the toolbar capitals
               'search_replace': True, # Whether to open the search for replacement
               'emoji': False, # whether to open the expression function
               'tex': True, # whether to open the tex chart function
               'flow_chart': True, # whether to open the flow chart function
               'sequence': True, # Whether to open the sequence diagram function
               'watch': True, # Live preview
               'lineWrapping': False, # lineWrapping
               'lineNumbers': False, # lineNumbers},
               'language': 'en',
               },
}
PAGEDOWN_IMAGE_UPLOAD_ENABLED = True
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]
# allow debug_toolbar
INTERNAL_IPS = ['127.0.0.1']
ROOT_URLCONF = 'ananas.urls'
# Memcached is only available on the Linux deployment host; Windows dev
# machines fall back to Django's default local-memory cache.
if os.name != 'nt':
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
            'LOCATION': "127.0.0.1:11211",
        }
    }
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'ananas.wsgi.application'
ASGI_APPLICATION = 'ananas.routing.application'
# Channels layer backed by Redis (used for websocket group messaging).
CHANNEL_LAYERS = {
    'default': {
        'BACKEND': 'channels_redis.core.RedisChannelLayer',
        # Fix: channels_redis reads its options from the upper-case 'CONFIG'
        # key; the previous lower-case 'config' key was silently ignored and
        # the backend fell back to its built-in default host list.
        'CONFIG': {
            'hosts': [('127.0.0.1', 6379)],
        },
    },
}
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# SQLite on Windows dev machines; MySQL on the Linux deployment host.
if os.name == 'nt':
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }
else:
    # NOTE(review): database credentials are committed to source control;
    # they should be read from environment variables instead.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': 'django_ananas',
            'USER': 'ananas',
            'PASSWORD': 'ananas_rs2020;',
            'HOST': '127.0.0.1',
            'PORT': 3306
        }
    }
# DRF: all endpoints require authentication by default; token, session and
# basic auth are all accepted.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAuthenticated',),
    'DEFAULT_AUTHENTICATION_CLASSES': ('rest_framework.authentication.TokenAuthentication',
                                       'rest_framework.authentication.SessionAuthentication',
                                       'rest_framework.authentication.BasicAuthentication',),
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us' #Fr
# Custom user model lives in the 'login' app.
AUTH_USER_MODEL = "login.User"
TIME_ZONE = 'CET'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = '/media/'
LOGIN_URL = "/account/connexion" # redirection by RequiredLoginMixin
# NOTE(review): SMTP credentials are committed to source control; move them
# to environment variables or a secrets store.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'ananas.webmaster@gmail.com'
EMAIL_HOST_PASSWORD = 'afdauqweelyrpnph'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
]
# NOTE(review): absolute, host-specific path — breaks on any other machine.
STATIC_ROOT = "/home/cor_mag91/ananas.localnetwork.tk/static/"
|
#!/usr/bin/python
import math
import re
import numpy as np
import inkscapeMadeEasy.inkscapeMadeEasy_Base as inkBase
import inkscapeMadeEasy.inkscapeMadeEasy_Draw as inkDraw
import inkscapeMadeEasy.inkscapeMadeEasy_Plot as inkPlot
def findMinMaxStep(data, step):
    """Return ``np.array([lo, hi])`` bounding ``data``, where both limits are
    multiples of ``step`` and the data fits inside the interval."""
    upper_bound = math.ceil(max(data) / step) * step
    lower_bound = math.floor(min(data) / step) * step
    return np.array([lower_bound, upper_bound])
class BodePlot(inkBase.inkscapeMadeEasy):
    """Inkscape extension that draws Bode diagrams (gain and/or phase),
    zero/pole maps, and optionally the typeset transfer function for a
    continuous-time H(s) or discrete-time H(z) system.

    Fixes over the previous revision: mojibake characters in the non-LaTeX
    labels (π/ω/Ω/°/∠ were mis-encoded), np.logspace called with plain
    frequencies instead of base-10 exponents, a string default on a float
    argparse option, a duplicated boolean term, and a missing 'radPi'
    branch in createLabelsNoLatex.
    """
    def __init__(self):
        inkBase.inkscapeMadeEasy.__init__(self)
        self.arg_parser.add_argument("--tab", type=str, dest="tab", default="object")
        self.arg_parser.add_argument("--subTab_confPlot", type=str, dest="subTab_confPlot", default="object")
        self.arg_parser.add_argument("--numerator", type=str, dest="numerator", default="1")
        self.arg_parser.add_argument("--denominator", type=str, dest="denominator", default="1 1")
        self.arg_parser.add_argument("--nPoints", type=int, dest="nPoints", default=20)
        self.arg_parser.add_argument("--plotGain", type=self.bool, dest="plotGain", default=True)
        self.arg_parser.add_argument("--plotPhase", type=self.bool, dest="plotPhase", default=True)
        self.arg_parser.add_argument("--plotZP", type=self.bool, dest="plotZP", default=True)
        self.arg_parser.add_argument("--writeEqn", type=self.bool, dest="writeEqn", default=True)
        # FOR CONTINUOUS TIME BODE PLOT
        self.arg_parser.add_argument("--fMinS", type=float, dest="fMinS", default=0.01)
        self.arg_parser.add_argument("--fMaxS", type=float, dest="fMaxS", default=1000)
        # FOR DISCRETE TIME BODE PLOT
        self.arg_parser.add_argument("--fMaxZ", type=str, dest="fMaxZ", default='maxSampling')
        # bug fix: the default of this float option used to be the string '0.5'
        self.arg_parser.add_argument("--fTickStep", type=float, dest="fTickStep", default=0.5)
        self.arg_parser.add_argument("--fScale", type=float, dest="fScale", default=5)
        self.arg_parser.add_argument("--fTicks", type=self.bool, dest="fTicks", default=False)
        self.arg_parser.add_argument("--fGrid", type=self.bool, dest="fGrid", default=False)
        # NOTE(review): the default 'radS' does not match any branch tested later
        # ('rad/s', 'hz', 'freqRad', 'freqNorm') — confirm against the .inx file.
        self.arg_parser.add_argument("--fUnit", type=str, dest="fUnit", default='radS')
        self.arg_parser.add_argument("--fLabel", type=str, dest="fLabel", default='lower')
        self.arg_parser.add_argument("--fLabelCustom", type=str, dest="fLabelCustom", default='freq.')
        self.arg_parser.add_argument("--generalAspectFactor", type=float, dest="generalAspectFactor", default=1.0)
        # gain
        self.arg_parser.add_argument("--yMaxGain", type=float, dest="yMaxGain", default=2.0)
        self.arg_parser.add_argument("--gLabel", type=str, dest="gLabel", default='lower')
        self.arg_parser.add_argument("--gLabelCustom", type=str, dest="gLabelCustom", default='freq.')
        self.arg_parser.add_argument("--gUnit", type=str, dest="gUnit", default='log10')
        self.arg_parser.add_argument("--yTicksGain", type=self.bool, dest="yTicksGain", default=False)
        self.arg_parser.add_argument("--yTickStepGain", type=float, dest="yTickStepGain", default=0.5)
        self.arg_parser.add_argument("--yScaleGain", type=float, dest="yScaleGain", default=10)
        self.arg_parser.add_argument("--yGridGain", type=self.bool, dest="yGridGain", default=False)
        # phase
        self.arg_parser.add_argument("--pLabel", type=str, dest="pLabel", default='lower')
        self.arg_parser.add_argument("--pLabelCustom", type=str, dest="pLabelCustom", default='freq.')
        self.arg_parser.add_argument("--pUnit", type=str, dest="pUnit", default='deg')
        self.arg_parser.add_argument("--yTicksPhase", type=self.bool, dest="yTicksPhase", default=False)
        self.arg_parser.add_argument("--yTickStepPhaseDeg", type=float, dest="yTickStepPhaseDeg", default=45)
        self.arg_parser.add_argument("--yScalePhase", type=float, dest="yScalePhase", default=10)
        self.arg_parser.add_argument("--yGridPhase", type=self.bool, dest="yGridPhase", default=False)
        # zero/Poles
        self.arg_parser.add_argument("--markerAspectFactor", type=float, dest="markerAspectFactor", default=1.0)
        self.arg_parser.add_argument("--ZPScale", type=float, dest="ZPScale", default=5)
        self.arg_parser.add_argument("--ZPTicks", type=self.bool, dest="ZPTicks", default=True)
        self.arg_parser.add_argument("--ZPGrid", type=self.bool, dest="ZPGrid", default=True)
        self.arg_parser.add_argument("--ZPTickStep", type=float, dest="ZPTickStep", default=1)
        self.arg_parser.add_argument("--zeroColor", type=str, dest="zeroColor", default='blue')
        self.arg_parser.add_argument("--zeroColorPicker", type=str, dest="zeroColorPicker", default='0')
        self.arg_parser.add_argument("--poleColor", type=str, dest="poleColor", default='red')
        self.arg_parser.add_argument("--poleColorPicker", type=str, dest="poleColorPicker", default='0')
        # FOR DISCRETE TIME H(z) only
        self.arg_parser.add_argument("--drawUnitCircle", type=self.bool, dest="drawUnitCircle", default=True)
        # equation
        self.arg_parser.add_argument("--eqnPrecision", type=int, dest="eqnPrecision", default=2)
        self.arg_parser.add_argument("--eqnSimplifyOne", type=self.bool, dest="eqnSimplifyOne", default=False)
        self.arg_parser.add_argument("--eqnSimplifySZ0", type=self.bool, dest="eqnSimplifySZ0", default=False)
        self.arg_parser.add_argument("--eqnHideZeroTerms", type=self.bool, dest="eqnHideZeroTerms", default=False)
        self.arg_parser.add_argument("--eqnNormalizeDen", type=self.bool, dest="eqnNormalizeDen", default=False)
        # FOR CONTINUOUS TIME H(s) only
        self.arg_parser.add_argument("--eqnSimplifySZ1", type=self.bool, dest="eqnSimplifySZ1", default=False)
    def effect(self):
        """Main entry point called by Inkscape: parses the options and draws
        the requested equation, gain plot, phase plot and zero/pole map."""
        so = self.options
        so.tab = so.tab.replace('"', '')  # removes the exceeding double quotes from the string
        root_layer = self.document.getroot()
        if so.tab.startswith('BodePlot_S'):
            self.typeTime = 'continuous'
        if so.tab.startswith('BodePlot_Z'):
            self.typeTime = 'discrete'
        if not inkDraw.useLatex:
            self.useLatex = False
        else:
            self.useLatex = True
        # colors
        zeroColor = inkDraw.color.parseColorPicker(so.zeroColor, so.zeroColorPicker)
        poleColor = inkDraw.color.parseColorPicker(so.poleColor, so.poleColorPicker)
        # sets the position to the viewport center, round to next 10.
        position = [self.svg.namedview.center[0], self.svg.namedview.center[1]]
        position[0] = int(math.ceil(position[0] / 10.0)) * 10
        position[1] = int(math.ceil(position[1] / 10.0)) * 10
        # line style
        lineWidthPlot = so.generalAspectFactor * min(so.fScale, so.yScaleGain) / 30.0
        lineColor = inkDraw.color.defined('blue')
        lineStylePlot = inkDraw.lineStyle.set(lineWidth=lineWidthPlot, lineColor=lineColor)
        # coefficients may be separated by spaces or commas
        numerator = np.array([float(x) for x in so.numerator.replace(',', ' ').split()])
        denominator = np.array([float(x) for x in so.denominator.replace(',', ' ').split()])
        extraTextfreq = ''
        # create labels
        if self.useLatex:
            [fLabel, gLabel, pLabel] = self.createLabelsLatex()
        else:
            [fLabel, gLabel, pLabel] = self.createLabelsNoLatex()
        if so.eqnNormalizeDen:
            # normalize so the leading denominator coefficient becomes 1
            a0 = denominator[0]
            numerator /= a0
            denominator /= a0
        if self.typeTime == 'continuous':
            [freqData, Hresponse] = self.Bode_S(numerator, denominator)
            freqLogScale = True
            fTickStep = 1
            extraTextfreq = ''
        if self.typeTime == 'discrete':
            [freqData, Hresponse] = self.Bode_Z(numerator, denominator)
            freqLogScale = False
            fTickStep = so.fTickStep
            if so.fUnit == 'freqRad':
                if self.useLatex:
                    extraTextfreq = r'\pi'
                else:
                    extraTextfreq = 'π'
            else:
                extraTextfreq = ''
        # write equation
        signFunc = lambda x: ('+', '-')[x < 0]
        if so.writeEqn:
            if self.useLatex:
                if self.typeTime == 'continuous':
                    textH = r'$H(s)=\displaystyle\frac'
                if self.typeTime == 'discrete':
                    textH = r'$H(z)=\displaystyle\frac'
                for poly in [numerator, denominator]:
                    textPoly = ''
                    for i, x in enumerate(poly):
                        if x == 0 and so.eqnHideZeroTerms:
                            continue
                        # s^n or z^{-n}
                        if self.typeTime == 'continuous':
                            n = len(poly) - 1 - i
                        else:
                            n = i
                        if so.eqnSimplifySZ0 and n == 0:
                            szN = ''
                        else:
                            if self.typeTime == 'continuous':
                                if n == 1 and so.eqnSimplifySZ1:
                                    szN = 's'
                                else:
                                    szN = 's^{%d}' % n
                            if self.typeTime == 'discrete':
                                if n == 1 and so.eqnSimplifySZ1:
                                    szN = 'z'
                                else:
                                    if n == 0:
                                        szN = 'z^{%d}' % n
                                    else:
                                        szN = 'z^{-%d}' % n
                        sign = signFunc(x)
                        if abs(x) == 1.0 and so.eqnSimplifyOne:
                            coef = ''
                        else:
                            coef = ('{:.%df}' % so.eqnPrecision).format(abs(x))
                        textPoly += sign + coef + szN
                    # a polynomial reduced to a bare sign gets an explicit 1
                    if textPoly.endswith('+') or textPoly.endswith('-'):
                        textPoly += ('{:.%df}' % so.eqnPrecision).format(1.0)
                    # removes '+' from first coef.
                    if textPoly.startswith('+'):
                        textPoly = textPoly[1:]
                    textH += '{' + textPoly + '}'
                textH += '$'
                inkDraw.text.latex(self, root_layer, textH, position, fontSize=5 * so.generalAspectFactor)
        # abs value plot
        if so.plotGain:
            # limit gain response
            gainData = np.absolute(Hresponse)
            gainLimitReached = max(gainData > so.yMaxGain)
            gainData[gainData > so.yMaxGain] = so.yMaxGain
            # bug fix: the condition used to read `... and so.plotPhase and so.plotPhase`
            # (duplicated term); the duplicate was redundant and is removed.
            if gainLimitReached and so.plotPhase:
                inkDraw.text.write(self, 'Warning: Some gain values exceeded \'Gain plot limit\'=%f.\n Plot will be truncated.' % (so.yMaxGain),
                                   position, root_layer, fontSize=5)
            if so.gUnit == 'dB':
                gainData = 20 * np.log10(gainData)
                ylog = False
                ylim = [min(gainData), 0]
                yTickStepGain = so.yTickStepGain
                yScaleGain = so.yScaleGain
            if so.gUnit == 'linear':
                ylog = False
                yTickStepGain = so.yTickStepGain
                ylim = findMinMaxStep(gainData, yTickStepGain)
                ylim[0] = 0
                yScaleGain = so.yScaleGain
            if so.gUnit == 'log10':
                ylog = True
                ylim = None
                yTickStepGain = 1  # does not matter
                yScaleGain = so.yScaleGain
            if so.gUnit == 'dB':
                extraDistY = 1.0
            else:
                extraDistY = 0.0
            [graph, limits, origin] = inkPlot.plot.cartesian(self, root_layer, freqData, gainData, position, xLabel=fLabel, yLabel=gLabel,
                                                             xlog10scale=freqLogScale, ylog10scale=ylog, xTicks=so.fTicks, yTicks=so.yTicksGain,
                                                             xTickStep=fTickStep, yTickStep=yTickStepGain, xScale=so.fScale, yScale=yScaleGain,
                                                             xGrid=so.fGrid, yGrid=so.yGridGain, xExtraText=extraTextfreq,
                                                             generalAspectFactorAxis=so.generalAspectFactor, lineStylePlot=lineStylePlot,
                                                             forceYlim=ylim, ExtraLengthAxisY=extraDistY)
            # stack the next plot below the one just drawn
            position = [position[0], position[1] - limits[3][1] + limits[2][1]]
        # adjust so.generalAspectFactor so that we get the same text size on both plots
        so.generalAspectFactor = so.generalAspectFactor * min(so.fScale, so.yScaleGain) / min(so.fScale, so.yScalePhase)
        # phase in degrees
        if so.plotPhase:
            phaseData = np.angle(Hresponse) * 180.0 / math.pi
            # find limits multiples of so.yTickStepPhaseDeg
            ylim = findMinMaxStep(phaseData, so.yTickStepPhaseDeg)
            if so.pUnit == 'deg':
                yTick = so.yTickStepPhaseDeg
                extraTextPhase = ''
            if so.pUnit == 'rad':
                phaseData = phaseData * math.pi / 180.0
                yTick = so.yTickStepPhaseDeg * math.pi / 180.0
                ylim = ylim * math.pi / 180.0
                extraTextPhase = ''
            if so.pUnit == 'radPi':
                # axis drawn as multiples of pi
                phaseData = phaseData / 180.0
                yTick = so.yTickStepPhaseDeg / 180.0
                ylim = ylim / 180.0
                if self.useLatex:
                    extraTextPhase = r'\pi'
                else:
                    extraTextPhase = 'π'
            if ylim[1] == 0.0:
                extraDistY = 1.0
            else:
                extraDistY = 0.0
            # phase plot
            [graph, limits, origin] = inkPlot.plot.cartesian(self, root_layer, freqData, phaseData, position, xLabel=fLabel, yLabel=pLabel,
                                                             xlog10scale=freqLogScale, ylog10scale=False, xTicks=so.fTicks, yTicks=so.yTicksPhase,
                                                             xTickStep=fTickStep, yTickStep=yTick, xScale=so.fScale, yScale=so.yScalePhase,
                                                             xExtraText=extraTextfreq, yExtraText=extraTextPhase, xGrid=so.fGrid, yGrid=so.yGridPhase,
                                                             generalAspectFactorAxis=so.generalAspectFactor, lineStylePlot=lineStylePlot,
                                                             forceYlim=ylim, ExtraLengthAxisY=extraDistY)
            position = [position[0], position[1] - limits[3][1] + limits[2][1]]
        # zeros and poles
        if so.plotZP:
            [zeros, poles] = self.zero_and_pole(numerator, denominator)
            # find limits of zeros and poles
            temp = np.concatenate((zeros, poles))
            if len(temp) == 0:
                inkDraw.text.write(self, 'Warning: Transfer function has no zeros and no poles.\n Please check the coefficients', position, root_layer,
                                   fontSize=5)
                return
            RealLim = np.array([min(np.real(temp)), max(np.real(temp))])
            ImagLim = np.array([min(np.imag(temp)), max(np.imag(temp))])
            # degenerate (single-point) ranges are widened by one tick
            if RealLim[0] == RealLim[1]:
                RealLim += [- so.ZPTickStep / 2.0, so.ZPTickStep / 2.0]
            if ImagLim[0] == ImagLim[1]:
                ImagLim += [- so.ZPTickStep / 2.0, so.ZPTickStep / 2.0]
            if self.typeTime == 'continuous':
                # ensures one of the limits is the Re or Im axis
                ImagLim = [min(ImagLim[0], 0), max(ImagLim[1], 0)]
                RealLim = [min(RealLim[0], 0), max(RealLim[1], 0)]
            if self.typeTime == 'discrete':
                # ensures the unit circle fits inside the plot
                ImagLim = [min(ImagLim[0], -1), max(ImagLim[1], 1)]
                RealLim = [min(RealLim[0], -1), max(RealLim[1], 1)]
            markSize = 2 * lineWidthPlot * so.markerAspectFactor
            poleStyle = inkDraw.lineStyle.set(lineWidth=lineWidthPlot, lineColor=poleColor)
            zeroStyle = inkDraw.lineStyle.set(lineWidth=lineWidthPlot, lineColor=zeroColor)
            [graph, _, origin] = inkPlot.axis.cartesian(self, root_layer, xLim=RealLim, yLim=ImagLim, position=position, xLabel='Re', yLabel='Im',
                                                        xTicks=so.ZPTicks, yTicks=so.ZPTicks, xTickStep=so.ZPTickStep, yTickStep=so.ZPTickStep,
                                                        xScale=so.ZPScale, yScale=so.ZPScale, xGrid=so.ZPGrid, yGrid=so.ZPGrid)
            # add zeros and poles
            zeroPoleGroup = self.createGroup(graph, 'ZeroPoles')
            for z in zeros:
                # zeros are drawn as circles
                zScaled = np.array([np.real(z), np.imag(z)]) * (so.ZPScale / so.ZPTickStep)
                inkDraw.circle.centerRadius(zeroPoleGroup, centerPoint=zScaled, radius=markSize, offset=[0, 0], lineStyle=zeroStyle)
            for p in poles:
                # poles are drawn as X marks (two crossing segments)
                pScaled = np.array([np.real(p), np.imag(p)]) * (so.ZPScale / so.ZPTickStep)
                inkDraw.line.relCoords(zeroPoleGroup, [[-markSize, -markSize], [2 * markSize, 2 * markSize]], offset=pScaled, lineStyle=poleStyle)
                inkDraw.line.relCoords(zeroPoleGroup, [[-markSize, markSize], [2 * markSize, -2 * markSize]], offset=pScaled, lineStyle=poleStyle)
            if self.typeTime == 'discrete' and so.drawUnitCircle:
                radius = 1.0 * so.ZPScale / so.ZPTickStep
                dashedLineStyle = inkDraw.lineStyle.set(lineWidth=lineWidthPlot / 2.0, lineColor=inkDraw.color.defined('black'), fillColor=None,
                                                        strokeDashArray='2, 2')
                inkDraw.circle.centerRadius(zeroPoleGroup, centerPoint=[0, 0], radius=radius, offset=[0, 0], label='circle',
                                            lineStyle=dashedLineStyle)
    def createLabelsLatex(self):
        """
        create axis labels (LaTeX version)
        :return: [fLabel, gLabel, pLabel]
        """
        so = self.options
        # ------------
        # CONTINUOUS TIME
        # ------------
        if self.typeTime == 'continuous':
            # frequency symbol
            if so.fLabel.lower() == 'custom':
                fSymbol = so.fLabelCustom
            else:
                if so.fUnit == 'hz':
                    if so.fLabel.lower() == 'upper':
                        fSymbol = 'F'
                    else:
                        fSymbol = 'f'
                if so.fUnit == 'rad/s':
                    if so.fLabel.lower() == 'upper':
                        fSymbol = r'\Omega'
                    else:
                        fSymbol = r'\omega'
            # frequency response symbol
            if so.fUnit == 'hz':
                respFreq = r'H(j2\pi ' + fSymbol + ')'
            if so.fUnit == 'rad/s':
                respFreq = 'H(j' + fSymbol + ')'
            # units
            if so.fUnit == 'hz':
                freqUnit = r' (\si{\hertz})'
            if so.fUnit == 'rad/s':
                freqUnit = r' (\si{\rad\per\second})'
        # ------------
        # DISCRETE TIME
        # ------------
        if self.typeTime == 'discrete':
            if so.fLabel.lower() == 'custom':
                fSymbol = so.fLabelCustom
            else:
                if so.fLabel.lower() == 'upper':
                    fSymbol = r'\Omega'
                else:
                    fSymbol = r'\omega'
            # frequency response symbol
            respFreq = r'H(e^{j' + fSymbol + '})'
            # units
            if so.fUnit == 'freqRad':
                freqUnit = r' (\si{\rad\per sample})'
            if so.fUnit == 'freqNorm':
                freqUnit = r' (\times \pi \si{\rad\per sample})'
        # GAIN AND PHASE UNITS
        if so.pUnit == 'deg':
            pUnit = r' (\si\degree)'
        if so.pUnit == 'rad' or so.pUnit == 'radPi':
            pUnit = r' (\si\rad)'
        if so.gUnit == 'dB':
            gUnit = r' (\text{dB})'
        else:
            gUnit = r' '
        # build labels (symbol + unit)
        fLabel = '$' + fSymbol + freqUnit + '$'
        if so.gLabel == 'custom':
            gLabel = '$' + so.gLabelCustom + gUnit + '$'
        else:
            gLabel = '$' + '|%s|' % respFreq + gUnit + '$'
        if so.pLabel == 'custom':
            pLabel = '$' + so.pLabelCustom + pUnit + '$'
        else:
            pLabel = '$' + r'\phase{%s}' % respFreq + pUnit + '$'
        return [fLabel, gLabel, pLabel]
    def createLabelsNoLatex(self):
        """
        create axis labels (plain-text version, used when LaTeX is unavailable)
        :return: [fLabel, gLabel, pLabel]
        """
        so = self.options
        # ------------
        # CONTINUOUS TIME
        # ------------
        if self.typeTime == 'continuous':
            # frequency symbol
            if so.fLabel.lower() == 'custom':
                fSymbol = so.fLabelCustom
            else:
                if so.fUnit == 'hz':
                    if so.fLabel.lower() == 'upper':
                        fSymbol = 'F'
                    else:
                        fSymbol = 'f'
                if so.fUnit == 'rad/s':
                    if so.fLabel.lower() == 'upper':
                        fSymbol = 'Ω'
                    else:
                        fSymbol = 'ω'
            # frequency response symbol
            if so.fUnit == 'hz':
                respFreq = 'H(j2π' + fSymbol + ')'
            if so.fUnit == 'rad/s':
                respFreq = 'H(j' + fSymbol + ')'
            # units
            if so.fUnit == 'hz':
                freqUnit = ' (Hz)'
            if so.fUnit == 'rad/s':
                freqUnit = ' (rad/s)'
        # ------------
        # DISCRETE TIME
        # ------------
        if self.typeTime == 'discrete':
            if so.fLabel.lower() == 'custom':
                fSymbol = so.fLabelCustom
            else:
                if so.fLabel.lower() == 'upper':
                    fSymbol = 'Ω'
                else:
                    fSymbol = 'ω'
            # frequency response symbol
            respFreq = 'H[exp(j' + fSymbol + ')]'
            # units
            if so.fUnit == 'freqRad':
                freqUnit = ' (rad/sample)'
            if so.fUnit == 'freqNorm':
                freqUnit = ' (xπ rad/sample)'
        # GAIN AND PHASE UNITS
        if so.pUnit == 'deg':
            pUnit = ' (°)'
        # bug fix: 'radPi' used to fall through with pUnit unbound (NameError);
        # now mirrors the LaTeX version, which groups 'rad' and 'radPi'.
        if so.pUnit == 'rad' or so.pUnit == 'radPi':
            pUnit = ' (rad)'
        if so.gUnit == 'dB':
            gUnit = ' (dB)'
        else:
            gUnit = ' '
        # build labels (symbol + unit)
        fLabel = fSymbol + freqUnit
        if so.gLabel == 'custom':
            gLabel = so.gLabelCustom + gUnit
        else:
            gLabel = '|%s|' % respFreq + gUnit
        if so.pLabel == 'custom':
            pLabel = so.pLabelCustom + pUnit
        else:
            pLabel = '∠' + respFreq + pUnit
        return [fLabel, gLabel, pLabel]
    def Bode_S(self, numerator, denominator):
        # computes the frequency response of H(s)
        # returns:
        #    freqData: frequency data for x axis
        #    Hresponse: values of H(j\Omega)
        so = self.options
        # generate x data, log-spaced between fMinS and fMaxS.
        # bug fix: np.logspace expects base-10 *exponents*, but fMinS/fMaxS are
        # plain frequencies (defaults 0.01 and 1000), so convert via log10.
        freqData = np.logspace(math.log10(so.fMinS), math.log10(so.fMaxS), so.nPoints)
        if so.fUnit == 'rad/s':
            freqDataRadS = freqData
        if so.fUnit == 'hz':
            freqDataRadS = freqData * 2 * math.pi  # convert hertz to rad/s
        # build s values on the imaginary axis
        sData = freqDataRadS * 1j
        # generate y data
        num = np.polyval(np.array(numerator), sData)
        den = np.polyval(np.array(denominator), sData)
        Hresponse = np.divide(num, den)
        return [freqData, Hresponse]
    def Bode_Z(self, numerator, denominator):
        # computes the frequency response of H(z) on the unit circle
        # returns:
        #    freqData: frequency data for x axis (in multiples of pi)
        #    Hresponse: values of H(e^{j \omega})
        so = self.options
        # generate x data
        if so.fMaxZ == 'maxSampling':
            freqDataRad = np.linspace(0, 2 * math.pi, so.nPoints)  # up to the sampling frequency
        else:
            freqDataRad = np.linspace(0, math.pi, so.nPoints)  # pi = nyquist
        # both display units plot omega as multiples of pi
        if so.fUnit == 'freqRad':
            freqData = freqDataRad / math.pi
        if so.fUnit == 'freqNorm':
            freqData = freqDataRad / math.pi  # omega normalized (1.0 = nyquist)
        zData = np.exp(-1j * freqDataRad)  # negative angles because the polynomials are powers of z^{-1}
        # generate y data; coefficient lists are reversed because np.polyval
        # expects the highest power first
        num = np.polyval(np.array(numerator[::-1]), zData)
        den = np.polyval(np.array(denominator[::-1]), zData)
        Hresponse = np.divide(num, den)
        return [freqData, Hresponse]
    def zero_and_pole(self, numerator, denominator):
        # computes the zeros and poles of the transfer function
        # numerator, denominator: lists with the coefficients, in descending
        #    powers of s or increasing powers of z^-1
        # returns:
        #    zeros: numpy array with the zeros
        #    poles: numpy array with the poles
        zeros = np.roots(np.array(numerator))
        poles = np.roots(np.array(denominator))
        return [zeros, poles]
if __name__ == '__main__':
    # Entry point used by Inkscape: instantiate the extension and run it.
    plot = BodePlot()
    plot.run()
|
class Solution:
    def canTransform(self, start, end):
        """Return True if `start` can be turned into `end` by repeatedly
        replacing "XL" with "LX" or "RX" with "XR".

        Ignoring the X's, both strings must contain the same L/R sequence;
        additionally each 'L' may only move left (index never increases)
        and each 'R' may only move right (index never decreases).

        :type start: str
        :type end: str
        :rtype: bool
        """
        src = [(c, i) for i, c in enumerate(start) if c in ('L', 'R')]
        dst = [(c, i) for i, c in enumerate(end) if c in ('L', 'R')]
        if len(src) != len(dst):
            return False
        for (c1, i1), (c2, i2) in zip(src, dst):
            if c1 != c2:
                return False
            if c1 == 'L' and i1 < i2:
                return False
            if c1 == 'R' and i1 > i2:
                return False
        return True
from plpred.preprocessing import compute_aa_composition
from Bio import SeqIO
import pandas as pd
import argparse
import pickle
def run_model(file_path:str, model_path:str) -> pd.DataFrame:
    """
    Run a membrane protein prediction on a FASTA file.

    Parameters
    ----------
    file_path:str
        path to proteins in FASTA format.
    model_path:str
        path to trained model in pickle format.

    Returns
    -------
    df_predictions:pd.DataFrame
        Pandas DataFrame with columns 'id' and 'membrane' containing the
        membrane proteins predictions (empty if the FASTA file has no records).
    """
    with open(model_path, 'rb') as handle:
        model = pickle.load(handle)
    # Compute the amino-acid composition feature vector for every record.
    # (The input file is now closed via a context manager, and the feature
    # table is built from a row list: DataFrame.append was removed in pandas 2.)
    rows = []
    with open(file_path) as handle:
        for record in SeqIO.parse(handle, 'fasta'):
            aa_composition = compute_aa_composition(str(record.seq))
            aa_composition['id'] = record.id
            rows.append(aa_composition)
    df_predictions = pd.DataFrame(columns=['id', 'membrane'])
    if not rows:
        # No sequences in the input: return an empty prediction table instead
        # of crashing on a column-less DataFrame.
        return df_predictions
    df_aa_composition = pd.DataFrame(rows)
    X = df_aa_composition.drop(['id'], axis=1)
    df_predictions['id'] = df_aa_composition['id']
    df_predictions['membrane'] = model.predict(X)
    return df_predictions
def main():
    # Command-line entry point: parse the arguments, run the trained model on
    # the input FASTA file and write the predictions to the output CSV.
    argument_parser = argparse.ArgumentParser(description='plpred-predict: subcellular location prediction tool')
    argument_parser.add_argument('-i', '--input', required=True, help='input file (.fasta)')
    argument_parser.add_argument('-o', '--output', required=True, help='output file (.csv)')
    argument_parser.add_argument('-m', '--model', required=True, help='trained model (.pickle)')
    arguments = argument_parser.parse_args()
    df_predictions = run_model(file_path=arguments.input, model_path=arguments.model)
    df_predictions.to_csv(arguments.output, index=False)
if __name__ == '__main__':
    main()
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reversible Heun method from
https://arxiv.org/abs/2105.13493
Known to be strong order 0.5 in general and strong order 1.0 for additive noise.
Precise strong orders for diagonal/scalar noise, and weak order in general, are
for the time being unknown.
This solver uses some extra state such that it is _algebraically reversible_:
it is possible to reconstruct its input (y0, f0, g0, z0) given its output
(y1, f1, g1, z1).
This means we can backpropagate by (a) inverting these operations, (b) doing a local
forward operation to construct a computation graph, (c) differentiate the local
forward. This is what the adjoint method here does.
This is in contrast to standard backpropagation, which requires holding all of these
values in memory.
This is contrast to the standard continuous adjoint method (sdeint_adjoint), which
can only perform this procedure approximately, and only produces approximate gradients
as a result.
"""
import torch
from .. import adjoint_sde
from .. import base_solver
from .. import misc
from ...settings import SDE_TYPES, NOISE_TYPES, LEVY_AREA_APPROXIMATIONS, METHODS, METHOD_OPTIONS
class ReversibleHeun(base_solver.BaseSDESolver):
    # Algebraically reversible Stratonovich solver (see the module docstring):
    # given the step output (y1, f1, g1, z1) the input can be reconstructed
    # exactly, enabling exact-gradient backpropagation without storing states.
    weak_order = 1.0
    sde_type = SDE_TYPES.stratonovich
    noise_types = NOISE_TYPES.all()
    levy_area_approximations = LEVY_AREA_APPROXIMATIONS.all()
    def __init__(self, sde, **kwargs):
        # Strong order 1.0 is only known for additive noise; 0.5 otherwise.
        self.strong_order = 1.0 if sde.noise_type == NOISE_TYPES.additive else 0.5
        super(ReversibleHeun, self).__init__(sde=sde, **kwargs)
    def init_extra_solver_state(self, t0, y0):
        # Extra state is the tuple (f0, g0, z0): initial drift, diffusion,
        # and the auxiliary "mirror" state z initialised to y0.
        return self.sde.f_and_g(t0, y0) + (y0,)
    def step(self, t0, t1, y0, extra0):
        """Advance one step from t0 to t1; returns (y1, (f1, g1, z1))."""
        f0, g0, z0 = extra0
        # f is a drift-like quantity
        # g is a diffusion-like quantity
        # z is a state-like quantity (like y)
        dt = t1 - t0
        dW = self.bm(t0, t1)
        # Reflect z through y and take an explicit step from it; this update
        # is invertible given the outputs, which is what makes the scheme
        # algebraically reversible.
        z1 = 2 * y0 - z0 + f0 * dt + self.sde.prod(g0, dW)
        f1, g1 = self.sde.f_and_g(t1, z1)
        # Heun (trapezoidal) average of old and new drift/diffusion.
        y1 = y0 + (f0 + f1) * (0.5 * dt) + self.sde.prod(g0 + g1, 0.5 * dW)
        return y1, (f1, g1, z1)
class AdjointReversibleHeun(base_solver.BaseSDESolver):
    # Backward (adjoint) counterpart of ReversibleHeun: algebraically inverts
    # each forward step to reconstruct the trajectory exactly, rebuilding a
    # local computation graph to accumulate gradients step by step.
    weak_order = 1.0
    sde_type = SDE_TYPES.stratonovich
    noise_types = NOISE_TYPES.all()
    levy_area_approximations = LEVY_AREA_APPROXIMATIONS.all()
    def __init__(self, sde, **kwargs):
        if not isinstance(sde, adjoint_sde.AdjointSDE):
            raise ValueError(f"{METHODS.adjoint_reversible_heun} can only be used for adjoint_method.")
        self.strong_order = 1.0 if sde.noise_type == NOISE_TYPES.additive else 0.5
        super(AdjointReversibleHeun, self).__init__(sde=sde, **kwargs)
        self.forward_sde = sde.forward_sde
        # Adjoint of `prod`: elementwise for diagonal noise, outer product
        # (vjp w.r.t. the full diffusion matrix) otherwise.
        if self.forward_sde.noise_type == NOISE_TYPES.diagonal:
            self._adjoint_of_prod = lambda tensor1, tensor2: tensor1 * tensor2
        else:
            self._adjoint_of_prod = lambda tensor1, tensor2: tensor1.unsqueeze(-1) * tensor2.unsqueeze(-2)
    def init_extra_solver_state(self, t0, y0):
        # We expect to always be given the extra state from the forward pass.
        raise RuntimeError("Please report a bug to torchsde.")
    def step(self, t0, t1, y0, extra0):
        # One backward step: invert the forward update, then propagate the
        # adjoint (gradient) variables through a locally-rebuilt graph.
        forward_f0, forward_g0, forward_z0 = extra0
        dt = t1 - t0
        dW = self.bm(t0, t1)
        half_dt = 0.5 * dt
        half_dW = 0.5 * dW
        # Unpack the augmented state: forward state, adjoint w.r.t. y, the
        # adjoints of the extra solver state, and parameter adjoints.
        forward_y0, adj_y0, (adj_f0, adj_g0, adj_z0, *adj_params), requires_grad = self.sde.get_state(t0, y0,
                                                                                                      extra_states=True)
        adj_y0_half_dt = adj_y0 * half_dt
        adj_y0_half_dW = self._adjoint_of_prod(adj_y0, half_dW)
        # Invert the forward z-update (signs flipped: time runs backwards here).
        forward_z1 = 2 * forward_y0 - forward_z0 - forward_f0 * dt - self.forward_sde.prod(forward_g0, dW)
        adj_y1 = adj_y0
        adj_f1 = adj_y0_half_dt
        adj_f0 = adj_f0 + adj_y0_half_dt
        adj_g1 = adj_y0_half_dW
        adj_g0 = adj_g0 + adj_y0_half_dW
        # TODO: efficiency. It should be possible to make one fewer forward call by re-using the forward computation
        # in the previous step.
        with torch.enable_grad():
            if not forward_z0.requires_grad:
                forward_z0 = forward_z0.detach().requires_grad_()
            # Re-evaluate drift/diffusion at z0 with grad enabled so we can
            # backprop through this single step.  (Negated time arguments
            # presumably follow the AdjointSDE reversed-time convention —
            # confirm against adjoint_sde.AdjointSDE.)
            re_forward_f0, re_forward_g0 = self.forward_sde.f_and_g(-t0, forward_z0)
            vjp_z, *vjp_params = misc.vjp(outputs=(re_forward_f0, re_forward_g0),
                                          inputs=[forward_z0] + self.sde.params,
                                          grad_outputs=[adj_f0, adj_g0],
                                          allow_unused=True,
                                          retain_graph=True,
                                          create_graph=requires_grad)
        adj_z0 = adj_z0 + vjp_z
        adj_params = misc.seq_add(adj_params, vjp_params)
        forward_f1, forward_g1 = self.forward_sde.f_and_g(-t1, forward_z1)
        # Inverted Heun average reconstructs the earlier forward state y1.
        forward_y1 = forward_y0 - (forward_f0 + forward_f1) * half_dt - self.forward_sde.prod(forward_g0 + forward_g1,
                                                                                              half_dW)
        adj_y1 = adj_y1 + 2 * adj_z0
        adj_z1 = -adj_z0
        adj_f1 = adj_f1 + adj_z0 * dt
        adj_g1 = adj_g1 + self._adjoint_of_prod(adj_z0, dW)
        # Flatten everything back into the single augmented tensor expected upstream.
        y1 = misc.flatten([forward_y1, adj_y1, adj_f1, adj_g1, adj_z1] + adj_params).unsqueeze(0)
        return y1, (forward_f1, forward_g1, forward_z1)
|
'''
vars and printing
'''
# this stores an int
my_int = 10
# this stores a float
my_float = 4.2
# this stores a string
my_string = "lalala"
'''
this is a multiline comment
yay!
large comment
also prints stuff below
'''
# print() calls work on both Python 2 (a single parenthesised argument) and
# Python 3; the bare `print x` statement form was Python-2 only.
print(my_int)
print(my_float)
print(my_string)
import numpy as np
import itertools
import pathfinder
import utils
import app
import buffering
import os
class DataGenerator(object):
    # Mini-batch iterator over a dataset of compressed images.  Yields
    # (x_batch, y_batch, batch_ids) tuples; labels are only filled in for
    # datasets whose name contains 'train'.  (Python 2 code: xrange / print.)
    def __init__(self, dataset, batch_size, img_ids, p_transform, data_prep_fun, label_prep_fun, rng,
                 random, infinite, full_batch, override_patch_size=None, version=1, **kwargs):
        self.dataset = dataset
        self.img_ids = img_ids
        self.nsamples = len(img_ids)
        self.batch_size = batch_size
        self.p_transform = p_transform
        self.data_prep_fun = data_prep_fun
        self.label_prep_fun = label_prep_fun
        self.rng = rng
        self.random = random
        self.infinite = infinite
        self.full_batch = full_batch
        self.override_patch_size = override_patch_size
        # Patch size can be overridden independently of the transform config.
        if override_patch_size:
            self.patch_size = override_patch_size
        else:
            self.patch_size = self.p_transform['patch_size']
        self.labels = app.get_labels_array(version=version)
    def generate(self):
        # Generator of batches; loops forever when self.infinite is True.
        # When self.full_batch is True, trailing incomplete batches are dropped.
        while True:
            rand_idxs = np.arange(len(self.img_ids))
            if self.random:
                self.rng.shuffle(rand_idxs)
            for pos in xrange(0, len(rand_idxs), self.batch_size):
                idxs_batch = rand_idxs[pos:pos + self.batch_size]
                nb = len(idxs_batch)
                # allocate batches
                if self.p_transform['channels']:
                    x_batch = np.zeros((nb,self.p_transform['channels'],) + self.patch_size, dtype='float32')
                else:
                    x_batch = np.zeros((nb,) + self.patch_size, dtype='float32')
                if self.p_transform['n_labels']>1:
                    y_batch = np.zeros((nb, self.p_transform['n_labels']), dtype='float32')
                else:
                    y_batch = np.zeros((nb,), dtype='float32')
                batch_ids = []
                for i, idx in enumerate(idxs_batch):
                    img_id = self.img_ids[idx]
                    batch_ids.append(img_id)
                    try:
                        img = app.read_compressed_image(self.dataset, img_id)
                    except Exception:
                        # best-effort: report and fall through (the previous
                        # image value is reused if the read fails)
                        print 'cannot open ', img_id
                    x_batch[i] = self.data_prep_fun(x=img)
                    if 'train' in self.dataset:
                        y_batch[i] = self.label_prep_fun(self.labels[img_id])
                    #print 'i', i, 'img_id', img_id, y_batch[i]
                if self.full_batch:
                    if nb == self.batch_size:
                        yield x_batch, y_batch, batch_ids
                else:
                    yield x_batch, y_batch, batch_ids
            if not self.infinite:
                break
class AutoEncoderDataGenerator(object):
def __init__(self, batch_size, img_paths, labeled_img_paths, p_transform, data_prep_fun, label_prep_fun, rng,
random, infinite, full_batch, **kwargs):
self.img_paths = img_paths
self.labeled_img_paths = labeled_img_paths
self.nsamples = len(img_paths)
self.batch_size = batch_size
self.p_transform = p_transform
self.data_prep_fun = data_prep_fun
self.label_prep_fun = label_prep_fun
self.rng = rng
self.random = random
self.infinite = infinite
self.full_batch = full_batch
self.labels = app.get_labels_array()
def generate(self):
while True:
rand_idxs = np.arange(len(self.img_paths))
if self.random:
self.rng.shuffle(rand_idxs)
for pos in xrange(0, len(rand_idxs), self.batch_size):
idxs_batch = rand_idxs[pos:pos + self.batch_size]
nb = len(idxs_batch)
# allocate batches
if self.p_transform['channels']:
x_batch = np.zeros((nb,self.p_transform['channels'],) + self.p_transform['patch_size'], dtype='float32')
else:
x_batch = np.zeros((nb,) + self.p_transform['patch_size'], dtype='float32')
if self.p_transform['n_labels']>1:
y_batch = np.zeros((nb, self.p_transform['n_labels']), dtype='float32')
else:
y_batch = np.zeros((nb,), dtype='float32')
z_batch = np.zeros((nb,), dtype='float32')
batch_ids = []
for i, idx in enumerate(idxs_batch):
img_path = self.img_paths[idx]
batch_ids.append(img_path)
try:
img = app.read_image_from_path(img_path)
except Exception:
print 'cannot open ', img_id
x_batch[i] = self.data_prep_fun(x=img)
if img_path in self.labeled_img_paths:
z_batch[i] = 1.
img_id = app.get_id_from_path(img_path)
y_batch[i] = self.label_prep_fun(self.labels[img_id])
#print 'i', i, 'img_id', img_id, y_batch[i]
if self.full_batch:
if nb == self.batch_size:
yield x_batch, y_batch, z_batch, batch_ids
else:
yield x_batch, y_batch, z_batch, batch_ids
if not self.infinite:
break
class SlimDataGenerator(object):
    # Like DataGenerator, but yields plain python lists instead of pre-allocated
    # numpy arrays, so patches within a batch may have different shapes.
    def __init__(self, dataset, batch_size, img_ids, p_transform, data_prep_fun, label_prep_fun, rng,
                 random, infinite, full_batch, **kwargs):
        self.dataset = dataset
        self.img_ids = img_ids
        self.nsamples = len(img_ids)
        self.batch_size = batch_size
        self.p_transform = p_transform
        self.data_prep_fun = data_prep_fun
        self.label_prep_fun = label_prep_fun
        self.rng = rng
        self.random = random
        self.infinite = infinite
        self.full_batch = full_batch
        self.labels = app.get_labels_array()
    def generate(self):
        # Generator of (x_batch, y_batch, batch_ids); x_batch/y_batch are lists.
        while True:
            rand_idxs = np.arange(len(self.img_ids))
            if self.random:
                self.rng.shuffle(rand_idxs)
            for pos in xrange(0, len(rand_idxs), self.batch_size):
                idxs_batch = rand_idxs[pos:pos + self.batch_size]
                nb = len(idxs_batch)
                # allocate batches
                x_batch = []
                y_batch = []
                batch_ids = []
                for i, idx in enumerate(idxs_batch):
                    img_id = self.img_ids[idx]
                    batch_ids.append(img_id)
                    try:
                        img = app.read_compressed_image(self.dataset, img_id)
                    except Exception:
                        print 'cannot open ', img_id
                    x_batch.append(self.data_prep_fun(x=img))
                    if 'train' in self.dataset:
                        y_batch.append(self.label_prep_fun(self.labels[img_id]))
                if self.full_batch:
                    if nb == self.batch_size:
                        yield x_batch, y_batch, batch_ids
                else:
                    yield x_batch, y_batch, batch_ids
            if not self.infinite:
                break
def _test_data_generator():
    # Ad-hoc smoke test: iterate DataGenerator over the training folds
    # (via a threaded buffer) and print batch shapes.
    #testing data iterator
    p_transform = {'patch_size': (256, 256),
                   'channels': 4,
                   'n_labels': 17}
    rng = np.random.RandomState(42)
    def data_prep_fun(x):
        # HWC -> CHW
        x = np.array(x)
        x = np.swapaxes(x,0,2)
        return x
    def label_prep_fun(labels):
        return labels
    folds = app.make_stratified_split(no_folds=5)
    all_ids = folds[0] + folds[1] + folds[2] + folds[3] +folds[4]
    # known-unreadable images are skipped
    bad_ids = [18772, 28173, 5023]
    img_ids = [x for x in all_ids if x not in bad_ids]
    dg = DataGenerator(dataset='train-jpg',
                       batch_size=10,
                       img_ids = img_ids,
                       p_transform=p_transform,
                       data_prep_fun = data_prep_fun,
                       label_prep_fun = label_prep_fun,
                       rng=rng,
                       full_batch=True, random=False, infinite=False)
    for (x_chunk, y_chunk, id_train) in buffering.buffered_gen_threaded(dg.generate()):
        print x_chunk.shape, y_chunk.shape, id_train
def _test_simple_data_generator():
    # Ad-hoc smoke test: iterate SlimDataGenerator over all folds and print
    # overall and per-channel mean/std statistics of the image data.
    #testing data iterator
    p_transform = {'patch_size': (256, 256),
                   'channels': 4,
                   'n_labels': 1}
    label_id = 4
    rng = np.random.RandomState(42)
    def data_prep_fun(x):
        return x
    def label_prep_fun(labels):
        print labels
        return labels[label_id]
    folds = app.make_stratified_split(no_folds=5)
    all_ids = folds[0] + folds[1] + folds[2] + folds[3] +folds[4]
    bad_ids = []
    img_ids = [x for x in all_ids if x not in bad_ids]
    # note: `label_id` is swallowed by SlimDataGenerator's **kwargs
    dg = SlimDataGenerator(dataset='train-jpg',
                           batch_size=10,
                           label_id = label_id,
                           img_ids = all_ids,
                           p_transform=p_transform,
                           data_prep_fun = data_prep_fun,
                           label_prep_fun = label_prep_fun,
                           rng=rng,
                           full_batch=True,
                           random=False,
                           infinite=False)
    print 'start'
    avgs = []
    stds = []
    ch_avgs = [[],[],[],[]]
    ch_stds = [[],[],[],[]]
    for (x_chunk, y_chunk, id_train) in dg.generate():
        x_chunk = np.stack(x_chunk)
        #x_chunk = x_chunk/255.
        avgs.append(np.mean(x_chunk))
        stds.append(np.std(x_chunk))
        for ch in range(4):
            ch_avgs[ch].append(np.mean(x_chunk[:,ch]))
            ch_stds[ch].append(np.std(x_chunk[:,ch]))
    print 'avgs', np.mean(np.stack(avgs))
    print 'stds', np.mean(np.stack(stds))
    for ch in range(4):
        print 'ch', str(ch)
        print 'mean of avgs', np.mean(ch_avgs[ch])
        print 'mean of stds', np.mean(ch_stds[ch])
if __name__ == "__main__":
    # Default entry point: run the channel-statistics pass.
    _test_simple_data_generator()
|
import os
import unittest
from tempfile import TemporaryDirectory
from thread_files.retriever import LinksRetriever, BatchDownloader
from tests.useful import *
from thread_files.utilities import IgnoreFilter
# todo: Speed up some of the tests...
class BatchDownloaderInstantiateTestCase(unittest.TestCase):
    """Construction-time behaviour of BatchDownloader (no downloading)."""
    # def test_instantiate_with_non_iterable(self):
    #     with self.assertRaises(TypeError):
    #         downloader = BatchDownloader(123456)
    def test_instantiate_with_link_retriever(self):
        # The retriever handed in must be exposed unchanged.
        getter = LinksRetriever(TEST_THREAD_FILENAME)
        downloader = BatchDownloader(getter)
        self.assertEqual(downloader.links_retriever, getter)
    def test_instantiate_with_destination_folder(self):
        # The destination folder argument must round-trip verbatim.
        getter = LinksRetriever(TEST_THREAD_FILENAME)
        destination_dir = os.path.expanduser('~/Downloads/')
        downloader = BatchDownloader(getter, destination_dir)
        self.assertEqual(downloader.destination_folder, destination_dir)
    # def test_instantiate_with_many_links_retriever(self):
    #     getters = (LinksRetriever('test_thread.html'), LinksRetriever(requests.get(THREAD_URL)))
    #     destination_dir = os.path.expanduser('~/Downloads/')
    #     downloader = BatchDownloader(getters, destination_dir)
    #     self.assertEqual(downloader.retrievers, getters)
class BatchDownloaderTestCase(unittest.TestCase):
    """HTML/details persistence of a downloader pointed at a live thread.

    NOTE(review): these tests hit THREAD_URL over the network in setUp --
    confirm the skipUnless guard used on the later classes shouldn't also
    apply here.
    """
    # pass
    def setUp(self):
        self.linkser = LinksRetriever(THREAD_URL)
        # TemporaryDirectory cleans itself up when garbage-collected.
        self.download_dir = TemporaryDirectory(dir=TMP_DIRECTORY)
        self.destination_directory = self.download_dir.name
        self.downloader = BatchDownloader(self.linkser, self.destination_directory)
        #self.thread_download_directory = os.path.join(self.destination_directory, BatchDownloader.THREAD_SAVE_NAME)
    # def tearDown(self):
    #     # Delete all downloaded files
    #     utilities.delete_directory_tree(self.destination_directory)
    def test_files_to_download(self):
        pass
    '''
    In unit tests, downloaded shouldn't really download.
    Instead, it should just present a list of things to be downloaded,
    or the paths of the files that have been downloaded.
    '''
    def test_download_html(self):
        self.downloader.save_html() # Downloads the HTML to the destination
        self.assertTrue(os.path.exists(os.path.join(self.destination_directory, BatchDownloader.THREAD_SAVE_NAME)))
    def test_pickle_details_save_exists(self):
        # pickle_details() must create the details file on disk.
        self.downloader.pickle_details()
        self.assertTrue(os.path.exists(os.path.join(self.destination_directory, BatchDownloader.THREAD_DETAILS_FILENAME)))
# @unittest.skipUnless(utilities.url_is_accessible(THREAD_URL), THREAD_GONE_REASON)
# class ThreadDownloaderDownloadOnceTestCase(unittest.TestCase):
@unittest.skipUnless(utilities.url_is_accessible(THREAD_URL), THREAD_GONE_REASON)
class BatchDownloaderDownloadingTestCase(unittest.TestCase):
    """Bookkeeping of downloaded vs. not-yet-downloaded links after
    pre-downloading the first GET_NUM_FILES files.

    NOTE(review): `utilities` is not imported by name in this chunk --
    presumably provided by `from tests.useful import *`; confirm.
    """
    # Number of files download_files() fetches before each test runs.
    GET_NUM_FILES = 5
    def setUp(self):
        self.linkser = LinksRetriever(THREAD_URL)
        #self.destination_directory = os.path.expanduser('~/Downloads/TestDownloadThread/')
        self.thread_dir = TemporaryDirectory(dir=TMP_DIRECTORY)
        self.destination_directory = self.thread_dir.name
        self.downloader = BatchDownloader(self.linkser, self.destination_directory)
        self.download_files()
    def download_files(self):
        # Fetch the first GET_NUM_FILES links so the downloader has state.
        for url in self.linkser.get_all_file_links()[:self.GET_NUM_FILES]:
            download_path = utilities.download_file(url, self.downloader.destination_folder)
            assert os.path.exists(download_path)
    def test_files_downloaded(self):
        downloaded = self.downloader.get_files_downloaded()
        self.assertEqual(len(downloaded), self.GET_NUM_FILES)
    def test_files_not_downloaded_gen(self):
        not_downloaded = self.downloader.links_not_downloaded()
        self.assertIsNotNone(not_downloaded)
        not_downloaded_tuple = tuple(not_downloaded)
        self.assertEqual(len(not_downloaded_tuple), len(self.downloader.files_to_download) - len(self.downloader.get_files_downloaded()))
    def test_compare_downloaded(self):
        not_downloaded = tuple(self.downloader.get_links_not_downloaded())
        self.assertEqual(len(not_downloaded), len(self.linkser.get_all_file_links()) - self.GET_NUM_FILES)
@unittest.skipUnless(utilities.url_is_accessible(THREAD_URL), THREAD_GONE_REASON)
class BatchDownloaderDetailsTestCase(unittest.TestCase):
    """Details-dict construction and pickling round-trips."""
    def setUp(self):
        self.linkser = LinksRetriever(THREAD_URL)
        self.destination_directory = os.path.expanduser(TMP_DIRECTORY)
        self.downloader = BatchDownloader(self.linkser, self.destination_directory)
        self.downloader.pickle_details()
    def test_construct_details_dict(self):
        # Details must expose last-modified/url/thread_alive with sane types.
        details = self.downloader.construct_details_dict()
        keys = details.keys()
        self.assertIn('last-modified', keys)
        self.assertTrue(isinstance(details['last-modified'], str))
        self.assertIn('url', keys)
        self.assertTrue(isinstance(details['url'], str))
        self.assertIn('thread_alive', keys)
        self.assertTrue(isinstance(details['thread_alive'], bool))
    def test_load_details(self):
        # NOTE(review): `details` comes from `down` but `loaded` is read from
        # self.downloader's path -- this only holds because both describe the
        # same thread; confirm that is the intent.
        down = BatchDownloader(self.linkser, TemporaryDirectory(dir=TMP_DIRECTORY).name)
        details = down.construct_details_dict()
        down.pickle_details()
        loaded = BatchDownloader.load_details_into_dict(self.downloader.get_details_path())
        self.assertTrue(isinstance(loaded, dict))
        self.assertEqual(loaded, details)
    def test_compare_details(self):
        down = BatchDownloader(self.linkser, TemporaryDirectory(dir=TMP_DIRECTORY).name)
        details = down.construct_details_dict()
        down.pickle_details()
        loaded = BatchDownloader.load_details_into_dict(self.downloader.get_details_path())
        self.assertEqual(loaded['last-modified'], details['last-modified'])
        self.assertEqual(loaded['url'], details['url'])
        self.assertEqual(loaded['thread_alive'], details['thread_alive'])
    def test_pickle_details_custom_details(self):
        # A caller-supplied details dict must be persisted verbatim.
        download_dir = TemporaryDirectory(dir=TMP_DIRECTORY)
        custom_details = {'last-modified':'123456', 'thread_alive': False, 'url':THREAD_URL}
        down = BatchDownloader(self.linkser, download_dir.name)
        #details = down.construct_details_dict()
        down.pickle_details(custom_details)
        loaded = BatchDownloader.load_details_into_dict(down.get_details_path())
        self.assertEqual(loaded['last-modified'], custom_details['last-modified'])
        self.assertEqual(loaded['url'], custom_details['url'])
        self.assertEqual(loaded['thread_alive'], custom_details['thread_alive'])
    # LinksRetriever now raises error when it reaches a 404'd thread
    # def test_thread_404_but_has_details(self):
    #     fake_url = THREAD_URL + '404'
    #     self.linkser = LinksRetriever(fake_url)
    #     self.downloader = BatchDownloader(self.linkser, self.destination_directory)
    #     # update the details pickle
    #
    #     self.assertTrue(self.linkser.thread_is_dead())
    #
    #     details = BatchDownloader.load_details_into_dict(self.downloader.get_details_path())
    #     self.assertIsNotNone(details)
    #     self.assertTrue(details['thread_alive'], True) # Hasn't been updated yet...
    # LinksRetriever now raises error when it reaches a 404'd thread
    # def test_thread_update_details_pickle_thread_dead(self):
    #     fake_url = THREAD_URL + '404'
    #     self.linkser = LinksRetriever(fake_url)
    #     self.downloader = BatchDownloader(self.linkser, self.destination_directory)
    #     details = BatchDownloader.load_details_into_dict(self.downloader.get_details_path())
    #     self.assertIsNotNone(details)
    #
    #     details['thread_alive'] = False
    #     self.assertFalse(details['thread_alive'])
    #     self.downloader.pickle_details(details)
    #
    #     loaded = BatchDownloader.load_details_into_dict(self.downloader.get_details_path())
    #     self.assertIsNotNone(loaded)
    #     self.assertEqual(loaded['thread_alive'], details['thread_alive'])
class ThreadDownloaderWithIgnoreFilteringTestCase(unittest.TestCase):
    """Link-listing with and without the ignore filter applied."""
    def setUp(self):
        self.downloader = BatchDownloader(LinksRetriever(TEST_THREAD_FILENAME), TMP_DIRECTORY)
        self.downloader.ifilter = IgnoreFilter(SOME_THREAD_FILE_URLS) # Just a normal one, without regular expressions
    def test_get_links(self):
        # Filtered view: all links minus already-downloaded minus ignored.
        all_links = self.downloader.links_retriever.get_all_file_links()
        downloaded = self.downloader.get_files_downloaded()
        ignored = self.downloader.ifilter.filter_list
        file_links = tuple(self.downloader.get_links()) # implied filtered=True (default)
        self.assertEqual(len(file_links), len(all_links) - len(downloaded) - len(ignored))
    def test_get_links_filtered_false(self):
        # Unfiltered view: ignore list must NOT be subtracted.
        all_links = self.downloader.links_retriever.get_all_file_links()
        downloaded = self.downloader.get_files_downloaded()
        ignored = self.downloader.ifilter.filter_list
        file_links = self.downloader.get_links(filtered=False) # do not use ifilter.filter()
        self.assertEqual(len(file_links), len(all_links) - len(downloaded))
class ThreadDownloaderInstantiateFromExistingFolder(unittest.TestCase):
    """BatchDownloader.from_directory() restoring state from a prepared dir."""
    def setUp(self):
        self.links_retriever = None
        self.tmpdir = TemporaryDirectory(dir=TMP_DIRECTORY)
        # self.existing_directory = os.path.join(TMP_DIRECTORY, 'temp_download_dir')
        self.existing_directory = self.tmpdir.name
        self.num_files_to_download = 3
        self.createTestEnvironment(self.existing_directory)
    def createTestEnvironment(self, dirname):
        # Delegates to the shared fixture builder from tests.useful.
        create_test_environment(dirname, self.num_files_to_download)
    def test_instantiate_from_existing_folder(self):
        """Downloader can instantiate self from an existing folder if that folder has a thread_details.pkl file that it can load,
        and optionally, an ignore list text file, or a few already downloaded files. As long as the thread_details.pkl
        file exists, it is ok to instantiate one from a directory."""
        downloader = BatchDownloader.from_directory(self.existing_directory)
        self.assertIsNotNone(downloader)
        self.assertIsNotNone(downloader.links_retriever)
        self.assertIsNotNone(downloader.ifilter)
    def test_num_downloaded(self):
        # Restored downloader must see the files the fixture pre-downloaded.
        downloader = BatchDownloader.from_directory(self.existing_directory)
        downloaded = downloader.get_files_downloaded()
        self.assertEqual(len(downloaded), self.num_files_to_download)
    def test_attempt_to_instantiate_on_directory_without_pickle_file(self):
        # A directory with no thread_details.pkl is an error, not a fallback.
        tempdir = TemporaryDirectory(dir=TMP_DIRECTORY)
        with self.assertRaises(FileNotFoundError):
            downloader = BatchDownloader.from_directory(tempdir.name)
class DoNotDownloadIf404ResponseTestCase(unittest.TestCase):
    """should_download() gating on thread liveness.

    Most of the dead-thread cases are retired because LinksRetriever now
    raises on a 404'd thread; only the alive path remains active.
    """
    def setUp(self):
        # DBG_DOWNLOAD presumably short-circuits real downloads -- confirm.
        BatchDownloader.DBG_DOWNLOAD = True
        self.expired_thread_dir = TemporaryDirectory(dir=TMP_DIRECTORY)
        self.alive_thread_dir = TemporaryDirectory(dir=TMP_DIRECTORY)
        # create_test_environment(self.expired_thread_dir.name, 0, EXPIRED_THREAD_URL)
        # LinksRetriever now raises error when it reaches a 404'd thread
        # self.dead_downloader = BatchDownloader(LinksRetriever(EXPIRED_THREAD_URL), self.expired_thread_dir.name)
        self.alive_downloader = BatchDownloader(LinksRetriever(STICKY_THREAD_URL), self.alive_thread_dir.name)
    def test_reproduce_error(self):
        """Just reproducing error:
        Error
        Traceback (most recent call last):
        File "/usr/lib/python3.5/unittest/case.py", line 58, in testPartExecutor
        yield
        File "/usr/lib/python3.5/unittest/case.py", line 600, in run
        testMethod()
        File "/home/marvin/PycharmProjects/thread_files/tests/test_batch_downloader.py", line 252, in test_crashes_program
        self.downloader.construct_details_dict()
        File "/home/marvin/PycharmProjects/thread_files/thread_files/retriever.py", line 96, in construct_details_dict
        details_dict['last-modified'] = self.links_retriever.response.headers['last-modified']
        File "/home/marvin/virtualenvs/lib/python3.5/site-packages/requests/structures.py", line 54, in __getitem__
        return self._store[key.lower()][1]
        KeyError: 'last-modified'
        """
        # with self.assertRaises(KeyError):
        #     self.dead_downloader.construct_details_dict()
        # Solved i think
        pass
    # LinksRetriever now raises error when it reaches a 404'd thread
    # def test_dead_downloader_not_from_hard_drive(self):
    #     """Assert that the LinksRetriever object's from_hdd flag is false"""
    #     self.assertFalse(self.dead_downloader.links_retriever.from_hdd)
    def test_should_download_on_alive_thread(self):
        self.assertTrue(self.alive_downloader.should_download())
    # LinksRetriever now raises error when it reaches a 404'd thread
    # def test_should_download_on_expired404d_thread(self):
    #     self.assertFalse(self.dead_downloader.should_download())
class IgnoreFilteredLinksTestCase(unittest.TestCase):
    """Regex-based ignore filtering against a live sticky thread."""
    def setUp(self):
        self.thread_dir = TemporaryDirectory(dir=TMP_DIRECTORY)
        self.downloader = BatchDownloader(LinksRetriever(STICKY_THREAD_URL), self.thread_dir.name)
        # Ignore two specific filenames (basenames used as regex patterns).
        self.ignore = [os.path.basename(url) for url in ('http://i.4cdn.org/wg/1489266876258.png', 'http://i.4cdn.org/wg/1489266748255.jpg')]
        self.ifilter = IgnoreFilter(self.ignore, is_regex=True)
        self.downloader.ifilter = self.ifilter
    def test_num_links(self):
        # NOTE(review): other tests call get_links(); confirm links() is the
        # intended (existing) API here.
        links = tuple(self.downloader.links())
        self.assertGreater(len(links), 0)
        self.assertEqual(len(links), len(self.downloader.get_links_not_downloaded()) - len(self.ignore))
        print('Links tuple:', links)
'''
All functions take the following arguments:
i) a p-by-l np.array of floats: the coefficients of the BP, as returned
by the integrator function of .make_solution;
ii) the timestep h > 0;
iii) the sampling_rate > 0: the spacing between the points where the
solutions are sampled in order to plot the functions and perform the
tests.
All functions return a tuple of np arrays. The first is the time series
of sampling points and the rest are the corresponding values of the
displacement, speed and/or acceleration of the solution at the points
of that time series.
'''
import numpy as np
import math
from .BernsteinPols import BP, dBP
def get_displacement(BP_coefs: np.array, h: float, sampling_rate = 0.001):
    """Sample the displacement of the Bernstein-polynomial solution.

    BP_coefs -- (p, l) coefficient array, one column per timestep.
    h -- timestep length (> 0).
    sampling_rate -- spacing of the sampling grid (> 0).
    Returns (sample_times, displacement_values) as np arrays.
    """
    p = BP_coefs.shape[0]
    l = BP_coefs.shape[1]
    sample_one_step = np.arange(0, h, sampling_rate)
    sample = np.arange(0, l * h, sampling_rate)
    # Evaluate every basis polynomial once over a single step; the same
    # samples are reused for all l steps.
    BP_sample = np.zeros((p, sample_one_step.shape[0]))
    for i in range(p):
        BP_sample[i, :] = [BP(t, i + 1, p, h) for t in sample_one_step]
    approx_disp = np.zeros((sample_one_step.shape[0], l))
    for j in range(l):
        approx_disp[:, j] = np.dot(BP_coefs[:, j], BP_sample)
    # Column-major flatten stitches the per-step columns into one time
    # series, truncated to the global grid length.
    # BUG FIX: removed stray debug print of approx_disp.shape.
    approx_disp = approx_disp.flatten("F")
    approx_disp = approx_disp[:sample.shape[0]]
    return sample, approx_disp
def get_speed(BP_coefs: np.array, h: float, sampling_rate = 0.001):
    """Sample the speed (first time-derivative) of the BP solution.

    Returns (sample_times, speed_values) as np arrays.
    """
    n_coefs = BP_coefs.shape[0]
    n_steps = BP_coefs.shape[1]
    step_times = np.arange(0, h, sampling_rate)
    sample = np.arange(0, n_steps * h, sampling_rate)
    # One row per basis polynomial: its derivative sampled over one step.
    deriv_basis = np.zeros((n_coefs, step_times.shape[0]))
    for row in range(n_coefs):
        deriv_basis[row, :] = [dBP(t, row + 1, n_coefs, h) for t in step_times]
    per_step = np.zeros((step_times.shape[0], n_steps))
    for col in range(n_steps):
        per_step[:, col] = np.dot(BP_coefs[:, col], deriv_basis)
    # Column-major flatten joins consecutive steps into one time series,
    # truncated to the length of the global sampling grid.
    approx_speed = per_step.flatten("F")[:sample.shape[0]]
    return sample, approx_speed
def get_acceleration(BP_coefs: np.array, c, k, f, h: float, sampling_rate = 0.001):
    """Sample the acceleration recovered from the ODE: a = f(t) - c*v - k*x.

    c, k -- damping and stiffness coefficients; f -- forcing function of t.
    Returns (sample_times, acceleration_values).
    """
    # p = BP_coefs.shape[0]
    l = BP_coefs.shape[1]
    sample = np.arange(0, l * h, sampling_rate)
    f_sample = np.array([f(t) for t in sample])
    # BUG FIX: sampling_rate was previously not forwarded, so any
    # non-default rate produced arrays of mismatched lengths here.
    _, approx_disp = get_displacement(BP_coefs, h, sampling_rate)
    _, approx_speed = get_speed(BP_coefs, h, sampling_rate)
    return sample, f_sample - c * approx_speed - k * approx_disp
def get_solution(BP_coefs: np.array, c, k, f, h: float, sampling_rate = 0.001):
    """Sample displacement, speed and acceleration in a single pass.

    Combines get_displacement/get_speed logic so the basis polynomials are
    evaluated once; acceleration is recovered from a = f(t) - c*v - k*x.
    Returns (sample_times, displacement, speed, acceleration).
    """
    p = BP_coefs.shape[0]
    l = BP_coefs.shape[1]
    sample_one_step = np.arange(0, h, sampling_rate)
    sample = np.arange(0,l * h, sampling_rate)
    BP_sample = np.zeros((p, sample_one_step.shape[0]))
    dBP_sample = np.zeros((p, sample_one_step.shape[0]))
    f_sample = [f(t) for t in sample]
    # Sample each basis polynomial and its derivative over one step.
    for i in range(p):
        BP_sample[i,:] = [ BP(t,i+1,p,h) for t in sample_one_step]
        dBP_sample[i,:] = [dBP(t,i+1,p,h) for t in sample_one_step]
    approx_disp = np.zeros((sample_one_step.shape[0],l))
    approx_speed = np.zeros((sample_one_step.shape[0],l))
    for j in range(l):
        approx_disp[:,j] = np.dot(BP_coefs[:,j],BP_sample)
        approx_speed[:,j] = np.dot(BP_coefs[:,j],dBP_sample)
    # Column-major flatten stitches the per-step columns into time series,
    # truncated to the global grid length.
    approx_disp = approx_disp.flatten("F")
    approx_disp = approx_disp[:sample.shape[0]]
    approx_speed = approx_speed.flatten("F")
    approx_speed = approx_speed[:sample.shape[0]]
    return sample, approx_disp, approx_speed, f_sample - c * approx_speed - k * approx_disp
|
import os

import numpy as np
from tensorpack.dataflow import *
class ILSVRC12Files(RNGDataFlow):
    """
    Same as :class:`ILSVRC12`, but produces filenames of the images instead of nparrays.
    This could be useful when ``cv2.imread`` is a bottleneck and you want to
    decode it in smarter ways (e.g. in parallel).
    """
    def __init__(self, dir, name, meta_dir=None,
                 shuffle=None, dir_structure=None):
        """
        Same as in :class:`ILSVRC12`.

        dir -- dataset root; name -- one of 'train'/'test'/'val';
        shuffle defaults to True only for the training split.
        NOTE(review): `os` is used here but not visibly imported in this
        chunk -- confirm `import os` exists at the top of the file.
        """
        assert name in ['train', 'test', 'val'], name
        assert os.path.isdir(dir), dir
        self.full_dir = os.path.join(dir, name)
        self.name = name
        assert os.path.isdir(self.full_dir), self.full_dir
        assert meta_dir is None or os.path.isdir(meta_dir), meta_dir
        if shuffle is None:
            # Shuffle by default only when reading the training split.
            shuffle = name == 'train'
        self.shuffle = shuffle
        if name == 'train':
            dir_structure = 'train'
        if dir_structure is None:
            dir_structure = ILSVRCMeta.guess_dir_structure(self.full_dir)
        meta = ILSVRCMeta(meta_dir)
        self.imglist = meta.get_image_list(name, dir_structure)
        # Sanity-check only the first 10 entries to keep startup fast.
        for fname, _ in self.imglist[:10]:
            fname = os.path.join(self.full_dir, fname)
            assert os.path.isfile(fname), fname
    def size(self):
        # Number of datapoints this dataflow produces per epoch.
        return len(self.imglist)
    def get_data(self):
        # Yields [absolute_filename, label]; self.rng is presumably
        # provided/reset by the RNGDataFlow base class -- confirm.
        idxs = np.arange(len(self.imglist))
        if self.shuffle:
            self.rng.shuffle(idxs)
        for k in idxs:
            fname, label = self.imglist[k]
            fname = os.path.join(self.full_dir, fname)
            yield [fname, label]
|
import discord
from discord.ext import commands
from discord.ext.commands import Cog, command
from discord import Embed, Member
from lib.notes import fwrite_note, db_read_note, flist_notes, fdel_note, updateable_check, fupdate_note, edit_note_check
# from lib.db.db import fwrite_note, db_read_note, flist_notes, fdel_note, updateable_check, fupdate_note
from datetime import datetime
class Notes(Cog):
    """Server-scoped note commands: write, read, list, edit and delete notes,
    persisted through the lib.notes helpers."""
    def __init__(self, client):
        self.client = client
    ### EVENTS ###
    @Cog.listener()
    async def on_ready(self):
        print('Notes cog for bot is online.')
    ### COMMANDS ###
    @commands.group(invoke_without_command=True, aliases=['notes'])
    async def note(self, ctx):
        # Bare `.note` with no subcommand: show usage.
        await ctx.send("""Write and read notes, show a list of all notes, and edit or delete notes. Available commands:
                        `.note write <note_title> <note contents>`\n`.note read <note_title>`\n`.note list`\n`.note edit <note_title>`\n`.note delete <note_title>`""")
    # TODO: Make this nicer
    @note.command(name='help', aliases=['info'])
    async def help_note(self, ctx):
        # Same usage text as the bare group command above.
        await ctx.send("""Write and read notes, show a list of all notes, and edit or delete notes. Available commands:
                        `.note write <note_title> <note contents>`\n`.note read <note_title>`\n`.note list`\n`.note edit <note_title>`\n`.note delete <note_title>`""")
    # TODO: Make this nicer
    # # I'm sure there's a better name for this, but whatever. It works for now.
    # @note.command(aliases=['uneditable', 'unchangeable', 'static', 'makestatic', 'locked', 'writelocked'])
    # async def write_uneditable(self, ctx, note_title, *, args=None):
    #     if args is None:
    #         await ctx.send("Error: you can't write an empty note!")
    #     else:
    #         result = fwrite_note(ctx.author, ctx.author.display_name, ctx.author.id, ctx.author.guild.id, note_title, args)
    #         if result == 'Repeat title':
    #             await ctx.send("There is already a note with that name.")
    #         elif result is None:
    #             await ctx.send("Oops, something went wrong.")
    #         else:
    #             await ctx.send(f'''Note titled "{result}" has been saved.''')
    @note.command(aliases=['make', 'new'])
    async def write(self, ctx, note_title, *, args=None):
        """Create a new (editable) note; rejects empty and duplicate titles."""
        if args is None:
            await ctx.send("Error: you can't write an empty note!")
        else:
            result = fwrite_note(ctx.author, ctx.author.display_name, ctx.author.id, ctx.author.guild.id, note_title, args, True)
            if result == 'Repeat title':
                await ctx.send("There is already a note with that name.")
            elif result is None:
                await ctx.send("Oops, something went wrong.")
            else:
                await ctx.send(f'''Note titled "{result}" has been saved.''')
    @note.command(aliases=['see', 'view', 'select'])
    async def read(self, ctx, note_title):
        """Display a note in an embed, including author/edit metadata.

        NOTE(review): `descr` is only assigned when result[4] is the string
        'False' or 'True' -- any other value raises UnboundLocalError below;
        confirm db_read_note can never return anything else in that slot.
        """
        result = db_read_note(note_title, ctx.author.guild.id, True)
        if result is None:
            await ctx.send("That note doesn't exist.")
        else:
            if result[4] == 'False':
                descr = f"*Created by {result[1]} on {result[2][:-10]} at {result[2][11:16]}*\n\n\n{result[0]}\n\u200b"
            elif result[4] == 'True':
                if result[3] == result[7]:
                    descr = f"""*Created by {result[1]} on {result[2][:-10]}*\n
                    --- *Edited on {result[8][:10]}* ---
                    \n\n{result[0]}\n\u200b"""
                elif result[3] != result[7]:
                    editor_nick = ctx.author.guild.get_member(result[7]).display_name
                    descr = f"""*Created by {result[1]} on {result[2][:-10]}*\n
                    --- *Edited by {editor_nick} ({result[5][:-5]}) on {result[8][:10]}* ---
                    \n\n{result[0]}\n\u200b"""
            embed = Embed(title=note_title,
                          description=descr,
                          colour=discord.Colour.random(),
                          timestamp=datetime.utcnow())
            embed.set_footer(text=f"Requested by: {ctx.author.display_name}", icon_url=ctx.author.avatar_url)
            embed.set_thumbnail(url=ctx.author.guild.get_member(result[3]).avatar_url)
            # embed.add_field(name="\u200b", value=f"Length: {len(result[0])} characters", inline=False)
            await ctx.send(embed=embed)
    @note.command(name="list", aliases=['all'])
    async def list_notes(self, ctx):
        """List all notes for this guild as embed fields."""
        result = flist_notes(ctx.author.guild.id, True, 5)
        if result is None:
            await ctx.send("There are no notes to view. Why not try writing one of your own? Use the command `.note write (noteTitle) (note contents)` to write a note.")
        else:
            embed = Embed(title="Here are the notes available to read:",
                          description='Type `.note read (note title)` without brackets to read that note.',
                          colour=discord.Colour(0x9f6231),
                          timestamp=datetime.utcnow())
            embed.set_footer(text=f"Requested by: {ctx.author.display_name}", icon_url=ctx.author.avatar_url)
            embed.set_image(url='https://cdn.discordapp.com/attachments/823615456605896754/834291407807709224/pen_and_ink.jpg')
            fields = result
            for name, value, inline in fields:
                embed.add_field(name=name, value=value, inline=inline)
            await ctx.send(embed=embed)
    # TODO:
    @note.command(name='delete', aliases=['remove', 'del'])
    async def delete_note(self, ctx, *, note_title):
        """Permanently delete a note after an interactive confirmation."""
        await ctx.send(f'''
        This command will **permanently delete** "{note_title}" from the database. The contents cannot be recovered once this action has been completed.\n
        Please confirm by typing "yes". Type anything else to cancel.
        ''')
        def check(msg):
            # Only accept the confirmation from the invoking user in-channel.
            return msg.author == ctx.author and msg.channel == ctx.channel
        msg = await self.client.wait_for("message", check=check)
        confirmation = msg.content
        result = fdel_note(note_title, confirmation, ctx.author.guild.id)
        await ctx.send(result)
    @note.command(name='edit', aliases=['update', 'change', 'modify'])
    async def edit_note(self, ctx, note_title):
        """Replace a note's contents after showing the current version."""
        ### Plain text version ###
        # check_note = edit_note_check(note_title, ctx.author.guild.id, ctx.author.id, False, False)
        # if check_note[0] == 'Problem':
        #     await ctx.send(check_note[1])
        # else:
        #     await ctx.send(check_note)
        check_note = edit_note_check(note_title, ctx.author.guild.id, ctx.author.id)
        if check_note[0] == 'Problem':
            await ctx.send(check_note[1])
        else:
            embed = Embed(title=check_note[0],
                          description=check_note[1],
                          colour=discord.Colour.random(),
                          timestamp=datetime.utcnow())
            embed.set_footer(text='\u200b', icon_url=ctx.author.avatar_url)
            embed.set_thumbnail(url=ctx.author.guild.get_member(check_note[2]).avatar_url)
            fields = check_note[3]
            for name, value, inline in fields:
                embed.add_field(name=name, value=value, inline=inline)
            await ctx.send(embed=embed)
            def check(msg):
                return msg.author == ctx.author and msg.channel == ctx.channel
            msg = await self.client.wait_for("message", check=check)
            note_content = msg.content
            # if note_content.lower() in ['no', 'cancel', 'n'] or note_content[0] == '.': # This might work too? And be easier to read.
            if note_content.lower() == 'no' or note_content.lower() == 'cancel' or note_content.lower() == 'n' or note_content[0] == '.':
                await ctx.send("Edit cancelled, note is unchanged.")
            else:
                result = fupdate_note(ctx.author.guild.id, ctx.author.id, ctx.author, ctx.author.display_name, note_title, note_content, True)
                await ctx.send(result)
    # @note.command(names='rename')
    # async def rename_note(self, ctx, old_title, new_title):
    #     pass
def setup(client):
    """discord.py extension entry point: register the Notes cog."""
    client.add_cog(Notes(client))
import os
import re
import sys
from setuptools import Extension
from setuptools import setup
if sys.platform == 'win32':
    # distutils locates MSVC through the VS*COMNTOOLS environment variables.
    # Find the newest installed Visual Studio and alias it to the variables
    # that Python 2 (VS90) and Python 3 (VS100) builds look for.
    versions = [
        var for var in os.environ
        if var.startswith('VS') and var.endswith('COMNTOOLS')
    ]
    vs = sorted(versions, key=lambda s: int(re.search(r'\d+', s).group()))[-1]
    os.environ['VS90COMNTOOLS'] = os.environ[vs]  # py2
    os.environ['VS100COMNTOOLS'] = os.environ[vs]  # py3
setup(
    name='pyterminalsize',
    description='Determines terminal size in a cross-platform way.',
    url='https://github.com/asottile/pyterminalsize',
    version='0.1.0',
    author='Anthony Sottile',
    author_email='asottile@umich.edu',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    py_modules=['pyterminalsize'],
    # The C extension provides the platform-specific size lookup.
    ext_modules=[Extension('_pyterminalsize', ['_pyterminalsize.c'])],
    install_requires=[],
)
|
"""
To run the experiment:
$ python examples/demo_experiment/run.py examples/demo_experiment/params_experiment.yaml
To see more options:
$ python examples/demo_experiment/run.py
"""
from rlberry.experiment import experiment_generator
from rlberry.stats.multiple_stats import MultipleStats
# Collect one AgentStats object per configuration declared in the
# experiment YAML, then run them all.
mstats = MultipleStats()
for agent_stats in experiment_generator():
    mstats.append(agent_stats)
    # Alternatively:
    # agent_stats.fit()
    # agent_stats.save_results()
    # agent_stats.save()
mstats.run()
mstats.save()
# Reading the results
# Deleting mstats first demonstrates that the results below are reloaded
# from disk, not read from memory.
del mstats
from rlberry.experiment import load_experiment_results
data = load_experiment_results('results', 'params_experiment')
print(data)
|
# -*- coding: utf-8 -*-
"""
File Name๏ผ hasGroupsSizeX
Author : jing
Date๏ผ 2020/3/27
https://leetcode-cn.com/problems/x-of-a-kind-in-a-deck-of-cards/
ๅก็ๅ็ป
"""
class Solution:
    def hasGroupsSizeX(self, deck) -> bool:
        """Return True iff the deck can be split into groups of X >= 2 cards,
        each group holding X identical cards.

        Such a partition exists exactly when some X >= 2 divides every card
        count, i.e. when gcd(all counts) >= 2.  This replaces the original
        trial of every candidate X with a single gcd pass (O(n)).
        """
        if not deck:
            # Empty/None deck: no group of size >= 2 is possible.
            return False
        import collections
        from functools import reduce
        from math import gcd
        counts = collections.Counter(deck).values()
        return reduce(gcd, counts) >= 2
|
"""
Initialize the TCKDB backend module
"""
|
from flask import Flask, render_template

# BUG FIX: the route decorators below reference `app` at import time, so the
# application object must actually exist; it had been commented out, which
# made this module raise NameError on import.
app = Flask(__name__)
# app.config.from_object('config.default.Config')
@app.route('/')
def index():
    """Render the landing page from templates/index.html."""
    return render_template('index.html')
@app.route('/depgraph')
def depgraph():
    """Dependency-graph view. TODO: not yet implemented."""
    return "Unimplemented"
@app.route('/conll')
def conll():
    """CoNLL export view. TODO: not yet implemented."""
    return "Unimplemented"
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Sniff every type of file you want on your network interface and save it.
#
# 2015/10/10
# by Oros
#
# Setup :
# $ sudo apt-get install python-scapy dvi2ps
# http://secdev.org/projects/scapy/doc/usage.html
#
# Help :
# $ files_sniffer.py -h
#
# Default usage :
# Sniff every jpeg, png and gif file and save it in /tmp/sniffer/
# $ sudo files_sniffer.py
#
# Example :
# $ sudo python files_sniffer.py -i eth0 -o /dev/shm/sniffer -c "text/html; charset=iso-8859-1,image/vnd.microsoft.icon,image/jpeg,image/png,image/gif" --min-size 100 --max-size 1000000
# Sniff on eth0 and save file in /dev/shm/sniffer, if :
# content-type is in :
# text/html; charset=iso-8859-1
# image/vnd.microsoft.icon
# image/jpeg
# image/png
# image/gif
# and Content-Length > 100 octets
# and Content-Length < 1 000 000 octets
#
# List of content-type : https://www.iana.org/assignments/media-types/media-types.xhtml
import time
import os
import optparse
try:
from scapy.all import sniff
from scapy.all import TCP
from scapy.all import Raw
from scapy.all import IP
except ImportError:
import sys
sys.exit("\033[31mYou need to setup python-scapy\033[0m\nsudo apt-get install python-scapy")
output_directory="/tmp/sniffer/"  # where captured files are written (-o)
min_size=0                        # minimum Content-Length accepted (--min-size)
max_size=5000000                  # maximum Content-Length accepted (--max-size)
content_type=["image/jpeg","image/png","image/gif"]  # accepted Content-Types (-c)
verbose=True                      # print each saved path (-q disables)
time_out=30                       # seconds before an idle stream is forgotten
purge_time=10                     # minimum seconds between purge() sweeps
def purge():
	"""Forget in-flight streams that have been idle for more than time_out
	seconds, and remember when this sweep happened."""
	global last_purge
	global headers
	global packets
	last_purge=time.time()
	# Collect first, delete second: never mutate a dict while iterating it.
	expired=[key for key in packets if packets[key]['up_time']+time_out < time.time()]
	for key in expired:
		del packets[key]
	expired=[key for key in headers if headers[key]['up_time']+time_out < time.time()]
	for key in expired:
		del headers[key]
	del expired
def find_files(x):
	"""Scapy callback: reassemble HTTP downloads from sniffed TCP segments.

	Request segments (Host + GET line) are recorded in `headers`, keyed by
	src#dst#sport#ack.  Response segments are matched back to a recorded
	request, filtered by Content-Type / Content-Length, accumulated per
	stream in `packets`, and written to disk once all announced bytes have
	arrived.  NOTE: written for Python 2 scapy; under Python 3 calling
	bytes.find with a str pattern would raise TypeError.
	"""
	global headers
	global packets
	if TCP in x:
		src= x.sprintf("%IP.src%")
		dst= x.sprintf("%IP.dst%")
		sport= x.sprintf("%TCP.sport%")
		dport= x.sprintf("%TCP.dport%")
		seq= x.sprintf("%TCP.seq%")
		chksum= x.sprintf("%TCP.chksum%")
		ack= x.sprintf("%TCP.ack%")
		flags= x.sprintf("%TCP.flags%")
		packet_id=src+"#"+dst+"#"+sport+"#"+ack
		if "Raw" in x[TCP]:
			if packet_id not in packets:
				# Possible HTTP request: remember Host + path as the filename.
				r=x.sprintf("%Raw.load%").split('\\r\\n')
				if len(r) >1:
					if r[1][:6] == "Host: ":
						# query
						file_name=r[1][6:]
						if r[0][:4] == "'GET":
							file_name+=r[0][5:].split(' HTTP')[0]
						# Not perfect
						file_name=file_name.replace('/', '_').replace('.', '_').replace(':', '_').replace('?', '_').replace('<', '_').replace('>', '_').replace('&', '_')
						headers[packet_id]={"seq":[seq],'name':file_name, 'up_time':time.time()}
			if dst+"#"+src+"#"+dport+"#"+seq in headers:
				# First response segment of a stream we asked about: split the
				# HTTP head from the payload and validate type/size.
				raw=bytes(x.getlayer(Raw))
				head=raw[:raw.find('\r\n\r\n')].split("\r\n")
				data=raw[raw.find('\r\n\r\n')+4:]
				content_length=0
				is_ok=False
				for d in head:
					if d[:14] == "Content-Type: ":
						if not d[14:] in content_type:
							is_ok=False
							if packet_id in headers:
								del headers[packet_id]
							if dst+"#"+src+"#"+dport+"#"+seq in headers:
								del headers[dst+"#"+src+"#"+dport+"#"+seq]
							break
						else:
							is_ok=True
					elif len(d)>16 and d[:16] == "Content-Length: ":
						content_length=int(d[16:])
						if content_length > max_size or content_length < min_size:
							is_ok=False
							if packet_id in headers:
								del headers[packet_id]
							if dst+"#"+src+"#"+dport+"#"+seq in headers:
								del headers[dst+"#"+src+"#"+dport+"#"+seq]
							break
				if is_ok:
					# Start accumulating payload for this stream, indexed by seq
					# so out-of-order segments can be reassembled later.
					packets[packet_id]={ 'seq':[seq],
								'head':head,
								'headers_key':dst+"#"+src+"#"+dport+"#"+seq,
								'data':{int(seq):data},
								'data_content_length':len(data),
								'content_length':content_length,
								'up_time':time.time()
							}
		if packet_id in packets:
			if seq not in packets[packet_id]['seq']:
				# New (unseen) segment for a tracked stream: store its payload.
				packets[packet_id]['seq'].append(seq)
				packets[packet_id]['data'][int(seq)]=bytes(x.getlayer(Raw))
				packets[packet_id]['data_content_length']+=len(bytes(x.getlayer(Raw)))
			packets[packet_id]['up_time']=time.time()
			headers[packets[packet_id]['headers_key']]['up_time']=time.time()
			if packets[packet_id]['data_content_length'] == packets[packet_id]['content_length']:
				# All announced bytes received: write segments to disk in
				# sequence-number order, then drop the tracking state.
				if packet_id in packets:
					headers_key=packets[packet_id]['headers_key']
					if headers_key in headers:
						if headers[headers_key]['name'] != '':
							if len(headers[headers_key]['name']) > 255:
								# Keep the filename within filesystem limits.
								headers[headers_key]['name']=headers[headers_key]['name'][:126]+headers[headers_key]['name'][-126:]
							if verbose:
								print(output_directory+headers[headers_key]['name'])
							f=open(output_directory+headers[headers_key]['name'], 'wb')
							for data_seq in sorted(packets[packet_id]['data']):
								f.write(packets[packet_id]['data'][data_seq])
							f.close()
						del packets[packet_id]
						del headers[headers_key]
						if headers_key in packets:
							del packets[headers_key]
						if packet_id in headers:
							del headers[packet_id]
					else:
						del packets[packet_id]
			elif packets[packet_id]['data_content_length'] > packets[packet_id]['content_length']:
				# Overshoot: more payload than Content-Length announced, so the
				# stream is unreliable -- drop all tracking state.
				# BUG FIX: `headers_key` was referenced here without ever being
				# assigned on this path (it is only set in the completed-transfer
				# branch above), raising NameError; recover it from the packet
				# record before deleting anything.
				headers_key=packets[packet_id]['headers_key']
				if packet_id in packets:
					del packets[packet_id]
				if headers_key in headers:
					del headers[headers_key]
				if headers_key in packets:
					del packets[headers_key]
				if packet_id in headers:
					del headers[packet_id]
	if last_purge + purge_time < time.time():
		purge()
# Command-line interface: every option overrides one of the module-level
# defaults declared above.
parser = optparse.OptionParser(usage="%prog: [options]")
parser.add_option("-c", "--ctype", dest="content_type", default=','.join(content_type), help="""Content-type separate by ','.
List of content-type : https://www.iana.org/assignments/media-types/media-types.xhtml
Default : """+','.join(content_type))
parser.add_option("-i", "--iface", dest="iface", default='', help="Interface")
parser.add_option("", "--min-size", dest="min_size", default=str(min_size), help="Min file size in octets. Default : "+str(min_size))
parser.add_option("", "--max-size", dest="max_size", default=str(max_size), help="Max file size in octets. Default : "+str(max_size))
parser.add_option("-o", "--output", dest="directory", default=output_directory, help="Output directory. Default : "+output_directory)
parser.add_option("-q", "--quiet", action="store_false", dest="verbose", default=True, help="Quiet. Default : Off")
(options, args) = parser.parse_args()
if options.directory != "":
	output_directory=options.directory
	# Normalise to a trailing slash and make sure the directory exists.
	if output_directory[-1:] != "/":
		output_directory+="/"
	if not os.path.exists(output_directory):
		os.makedirs(output_directory)
if options.content_type != "":
	content_type=options.content_type.split(',')
if options.min_size != "":
	min_size=int(options.min_size)
if options.max_size != "":
	max_size=int(options.max_size)
verbose=options.verbose
# Reassembly state shared with find_files()/purge().
packets={}
headers={}
last_purge=time.time()
# store=0: do not keep packets in memory; find_files handles each one.
if options.iface != "":
	sniff(store=0, prn=find_files, iface=options.iface)
else:
	sniff(store=0, prn=find_files)
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cmath
import random
import numpy as np
import pytest
import cirq
from cirq import value
from cirq.optimizers.two_qubit_decompositions import (
_parity_interaction, _is_trivial_angle
)
# The parametrize list is built by an immediately-invoked lambda so that the
# "just inside tolerance" and "just outside tolerance" offsets are defined in
# a single place: err = (2/3)*1e-8 < tolerance, largeErr = (4/3)*1e-8 >
# tolerance (the test body uses tolerance = 1e-8).
@pytest.mark.parametrize('rad,expected', (lambda err, largeErr: [
    (np.pi/4, True),
    (np.pi/4 + err, True),
    (np.pi/4 + largeErr, False),
    (np.pi/4 - err, True),
    (np.pi/4 - largeErr, False),
    (-np.pi/4, True),
    (-np.pi/4 + err, True),
    (-np.pi/4 + largeErr, False),
    (-np.pi/4 - err, True),
    (-np.pi/4 - largeErr, False),
    (0, True),
    (err, True),
    (largeErr, False),
    (-err, True),
    (-largeErr, False),
    (np.pi/8, False),
    (-np.pi/8, False),
])(1e-8*2/3, 1e-8*4/3))
def test_is_trivial_angle(rad, expected):
    """An angle is "trivial" iff it is within `tolerance` of 0 or +/- pi/4."""
    tolerance = 1e-8
    out = _is_trivial_angle(rad, tolerance)
    assert out == expected, 'rad = {}'.format(rad)
def _operations_to_matrix(operations, qubits):
    """Return the unitary matrix of `operations` over the given qubit order."""
    circuit = cirq.Circuit(operations)
    return circuit.unitary(
        qubit_order=cirq.QubitOrder.explicit(qubits),
        qubits_that_should_be_present=qubits)
def _random_single_partial_cz_effect():
    """Random two-qubit unitary: local unitaries sandwiching one partial CZ."""
    pre_layer = cirq.kron(cirq.testing.random_unitary(2),
                          cirq.testing.random_unitary(2))
    partial_cz = np.diag([1, 1, 1, cmath.exp(2j * random.random() * np.pi)])
    post_layer = cirq.kron(cirq.testing.random_unitary(2),
                           cirq.testing.random_unitary(2))
    return cirq.dot(pre_layer, partial_cz, post_layer)
def _random_double_partial_cz_effect():
    """Random two-qubit unitary built from two partial-CZ layers.

    Alternates three random single-qubit layers with two random-phase
    partial CZs, multiplied together in order.
    """
    factors = []
    for layer_index in range(5):
        if layer_index % 2 == 0:
            factors.append(cirq.kron(cirq.testing.random_unitary(2),
                                     cirq.testing.random_unitary(2)))
        else:
            phase = cmath.exp(2j * random.random() * np.pi)
            factors.append(np.diag([1, 1, 1, phase]))
    return cirq.dot(*factors)
def _random_double_full_cz_effect():
    """Random two-qubit unitary built from two full-CZ layers.

    Alternates three random single-qubit layers with two full CZ gates,
    multiplied together in order.
    """
    cz_matrix = cirq.unitary(cirq.CZ)
    factors = []
    for layer_index in range(5):
        if layer_index % 2 == 0:
            factors.append(cirq.kron(cirq.testing.random_unitary(2),
                                     cirq.testing.random_unitary(2)))
        else:
            factors.append(cz_matrix)
    return cirq.dot(*factors)
def assert_cz_depth_below(operations, threshold, must_be_full):
    """Assert the total (fractional) CZ cost of `operations` is bounded.

    Every multi-qubit operation must be a CZPowGate; when `must_be_full`
    each exponent must canonicalize to exactly 1. The summed |exponent|
    must not exceed `threshold`.
    """
    total_cz = 0
    for operation in operations:
        qubit_count = len(operation.qubits)
        assert qubit_count <= 2
        if qubit_count != 2:
            continue
        assert cirq.op_gate_of_type(operation, cirq.CZPowGate)
        exponent = value.canonicalize_half_turns(operation.gate.exponent)
        if must_be_full:
            assert exponent == 1
        total_cz += abs(exponent)
    assert total_cz <= threshold
def assert_ops_implement_unitary(q0, q1, operations, intended_effect,
                                 atol=0.01):
    """Assert `operations` realize `intended_effect` up to global phase."""
    realized_effect = _operations_to_matrix(operations, (q0, q1))
    matches = cirq.allclose_up_to_global_phase(realized_effect,
                                               intended_effect,
                                               atol=atol)
    assert matches
# Known-answer cases plus randomized effects. Each triple is
# (max CZ "cost" when partial CZs are allowed,
#  max CZ count when only full CZs are allowed,
#  target two-qubit unitary).
@pytest.mark.parametrize('max_partial_cz_depth,max_full_cz_depth,effect', [
    (0, 0, np.eye(4)),
    (0, 0, np.array([
        [0, 0, 0, 1],
        [0, 0, 1, 0],
        [0, 1, 0, 0],
        [1, 0, 0, 0j],
    ])),
    (0, 0, cirq.unitary(cirq.CZ**0.00000001)),
    (0.5, 2, cirq.unitary(cirq.CZ**0.5)),
    (1, 1, cirq.unitary(cirq.CZ)),
    (1, 1, cirq.unitary(cirq.CNOT)),
    (1, 1, np.array([
        [1, 0, 0, 1j],
        [0, 1, 1j, 0],
        [0, 1j, 1, 0],
        [1j, 0, 0, 1],
    ]) * np.sqrt(0.5)),
    (1, 1, np.array([
        [1, 0, 0, -1j],
        [0, 1, -1j, 0],
        [0, -1j, 1, 0],
        [-1j, 0, 0, 1],
    ]) * np.sqrt(0.5)),
    (1, 1, np.array([
        [1, 0, 0, 1j],
        [0, 1, -1j, 0],
        [0, -1j, 1, 0],
        [1j, 0, 0, 1],
    ]) * np.sqrt(0.5)),
    (1.5, 3, cirq.map_eigenvalues(cirq.unitary(cirq.SWAP),
                                  lambda e: e**0.5)),
    (2, 2, cirq.unitary(cirq.SWAP).dot(cirq.unitary(cirq.CZ))),
    (3, 3, cirq.unitary(cirq.SWAP)),
    (3, 3, np.array([
        [0, 0, 0, 1],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [1, 0, 0, 0j],
    ])),
] + [
    (1, 2, _random_single_partial_cz_effect()) for _ in range(10)
] + [
    (2, 2, _random_double_full_cz_effect()) for _ in range(10)
] + [
    (2, 3, _random_double_partial_cz_effect()) for _ in range(10)
] + [
    (3, 3, cirq.testing.random_unitary(4)) for _ in range(10)
])
def test_two_to_ops_equivalent_and_bounded_for_known_and_random(
        max_partial_cz_depth,
        max_full_cz_depth,
        effect):
    """Both decompositions reproduce the target within their CZ budget."""
    q0 = cirq.NamedQubit('q0')
    q1 = cirq.NamedQubit('q1')
    # Decompose the same effect with and without partial CZs allowed.
    operations_with_partial = cirq.two_qubit_matrix_to_operations(
        q0, q1, effect, True)
    operations_with_full = cirq.two_qubit_matrix_to_operations(
        q0, q1, effect, False)
    # Each decomposition must implement the target unitary...
    assert_ops_implement_unitary(q0, q1, operations_with_partial, effect)
    assert_ops_implement_unitary(q0, q1, operations_with_full, effect)
    # ...and stay within the expected CZ bound.
    assert_cz_depth_below(operations_with_partial, max_partial_cz_depth, False)
    assert_cz_depth_below(operations_with_full, max_full_cz_depth, True)
def test_trivial_parity_interaction_corner_case():
    """A near-(pi/4) parity interaction compiles to a 2-moment circuit."""
    first = cirq.NamedQubit('q0')
    second = cirq.NamedQubit('q1')
    almost_quarter_turn = np.pi/4 * 0.99
    ops = _parity_interaction(first, second, -almost_quarter_turn, 1e-2)
    assert len(cirq.Circuit(ops)) == 2
def test_kak_decomposition_depth_full_cz():
    """Circuit-length bounds when decomposing with only full CZs allowed."""
    a, b = cirq.LineQubit.range(2)
    # Random.
    u = cirq.testing.random_unitary(4)
    operations_with_full = cirq.two_qubit_matrix_to_operations(a, b, u, False)
    c = cirq.Circuit(operations_with_full)
    # 3 CZ, 3+1 PhasedX, 1 Z
    assert len(c) <= 8
    # Double-axis interaction.
    u = cirq.unitary(cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a)))
    operations_with_part = cirq.two_qubit_matrix_to_operations(a, b, u, False)
    c = cirq.Circuit(operations_with_part)
    # 2 CZ, 2+1 PhasedX, 1 Z
    assert len(c) <= 6
    # Test unoptimized/un-cleaned length of Double-axis interaction.
    # (last two args: atol, clean_operations=False)
    u = cirq.unitary(cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a)))
    operations_with_part = cirq.two_qubit_matrix_to_operations(a, b, u, False,
                                                              1e-8, False)
    c = cirq.Circuit(operations_with_part)
    assert len(c) > 6  # Length should be 13 with extra Pauli gates
    # Partial single-axis interaction.
    u = cirq.unitary(cirq.CNOT**0.1)
    operations_with_part = cirq.two_qubit_matrix_to_operations(a, b, u, False)
    c = cirq.Circuit(operations_with_part)
    # 2 CZ, 2+1 PhasedX, 1 Z
    assert len(c) <= 6
    # Full single-axis interaction.
    u = cirq.unitary(cirq.ControlledGate(cirq.Y))
    operations_with_part = cirq.two_qubit_matrix_to_operations(a, b, u, False)
    c = cirq.Circuit(operations_with_part)
    # 1 CZ, 1+1 PhasedX, 1 Z
    assert len(c) <= 4
def test_kak_decomposition_depth_partial_cz():
    """Circuit-length bounds when partial CZs (CP) are allowed."""
    a, b = cirq.LineQubit.range(2)
    # Random.
    u = cirq.testing.random_unitary(4)
    operations_with_full = cirq.two_qubit_matrix_to_operations(a, b, u, True)
    c = cirq.Circuit(operations_with_full)
    # 3 CP, 3+1 PhasedX, 1 Z
    assert len(c) <= 8
    # Double-axis interaction.
    u = cirq.unitary(cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a)))
    operations_with_part = cirq.two_qubit_matrix_to_operations(a, b, u, True)
    c = cirq.Circuit(operations_with_part)
    # 2 CP, 2+1 PhasedX, 1 Z
    assert len(c) <= 6
    # Partial single-axis interaction.
    u = cirq.unitary(cirq.CNOT**0.1)
    operations_with_part = cirq.two_qubit_matrix_to_operations(a, b, u, True)
    c = cirq.Circuit(operations_with_part)
    # 1 CP, 1+1 PhasedX, 1 Z
    assert len(c) <= 4
    # Full single-axis interaction.
    u = cirq.unitary(cirq.ControlledGate(cirq.Y))
    operations_with_part = cirq.two_qubit_matrix_to_operations(a, b, u, True)
    c = cirq.Circuit(operations_with_part)
    # 1 CP, 1+1 PhasedX, 1 Z
    assert len(c) <= 4
|
"""
Posterior sampling for 2D-Gaussian Mixture Model using Mean-Field VI
"""
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
K = 2  # number of mixture components

# Generate data: two well-separated 2D Gaussian clusters of 20 points each.
X1 = np.random.multivariate_normal([-5, -5], np.diag([2, 0.5]), size=20)
X2 = np.random.multivariate_normal([18, 18], np.diag([0.7, 1]), size=20)
X = np.vstack([X1, X2])
N = X.shape[0]

# GMM params initialization
mu = np.array([[1, 1], [2, 2]], dtype=float)  # variational means of components
var = np.array([1, 1], dtype=float)           # variational (isotropic) variances
phi = np.zeros([N, K]) + 1/K                  # responsibilities, start uniform
c = np.random.randint(K, size=N)  # Assignments

# Priors
var0 = 1  # prior variance of the component means

# Coordinate-ascent mean-field VI: alternate the phi update and the
# per-component (mu, var) updates for a fixed number of sweeps.
for it in range(5):
    # Update variational param phi, the assignment probs
    # (proportional to exp of the expected log joint under q).
    for k in range(K):
        phi[:, k] = np.exp(X @ mu[k] - (2*var[k] + mu[k].T @ mu[k])/2)
    # Normalize
    phi /= np.sum(phi, axis=1)[:, np.newaxis]
    # Update assignments
    c = np.argmax(phi, axis=1)
    # Update variational param mu and var, the params of Gaussian component
    for k in range(K):
        sum_phi = np.sum(phi[:, k])
        mu[k] = phi[:, k] @ X / (1/var0 + sum_phi)
        var[k] = 1 / (1/var0 + sum_phi)

# Expected output:
# ----------------
# 20 data in cluster-0, mean: [ -5 -5 ]
# 20 data in cluster-1, mean: [ 18 18 ]
for k in range(K):
    n = np.sum(c == k)
    print('{} data in cluster-{}, mean: {}'.format(n, k, mu[k]))
|
import cv2
import numpy as np
from utils.generate_priors import generate_priors
def main(image_size=320):
    """Visualize each generated prior/anchor box as a filled red rectangle.

    :param image_size: side length in pixels of the square preview canvas.
    """
    anchors = generate_priors()
    print(anchors)
    for anchor in anchors:
        # Anchors appear to be normalized [0, 1] coordinates in
        # (ymin, xmin, ymax, xmax) order -- inferred from the unpacking and
        # clamping below; confirm against generate_priors().
        ymin, xmin, ymax, xmax = anchor
        left = int(max(xmin, 0) * image_size)
        top = int(max(ymin, 0) * image_size)
        right = int(min(xmax, 1.0) * image_size)
        bottom = int(min(ymax, 1.0) * image_size)
        img = np.zeros((image_size, image_size, 3), dtype=np.uint8)
        # thickness -1 => filled rectangle; (0, 0, 255) is red in BGR.
        cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), -1)
        cv2.imshow('', img)
        cv2.waitKey(1)  # brief display, one frame per anchor


if __name__ == '__main__':
    main()
|
# Generated by Django 2.1.2 on 2019-05-06 19:44
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Paciente.pertimetroAbdominal -> perimetroAbdominal (typo fix)."""

    dependencies = [
        ('core', '0004_auto_20190502_1658'),
    ]

    operations = [
        migrations.RenameField(
            model_name='paciente',
            old_name='pertimetroAbdominal',
            new_name='perimetroAbdominal',
        ),
    ]
|
import datetime as dt
from functools import cached_property
from itertools import product
from typing import Dict, List, Sequence, Union
import pandas as pd
from dateutil.relativedelta import relativedelta
from singleton_decorator import singleton
from . import date_utils
from .config import get_db_interface
from .database_interface import DBInterface
from .factor import CompactFactor, CompactRecordFactor, IndustryFactor, OnTheRecordFactor
from .utils import StockSelectionPolicy, TickerSelector
@singleton
class FundInfo(object):
    """Process-wide cache of the fund-info table (singleton, loaded once).

    NOTE(review): the table name below is mojibake in this extraction;
    it is kept byte-identical.
    """

    def __init__(self, db_interface: DBInterface = None):
        super().__init__()
        # Fall back to the globally configured DB interface.
        if db_interface is None:
            db_interface = get_db_interface()
        self.data = db_interface.read_table('ๅบ้ๅ่กจ')
class TickersBase(object):
    """Base class for security-ticker lookups.

    Subclasses populate ``self.cache`` with a DataFrame of per-ticker
    status records containing at least the columns ``ID``, ``DateTime``
    and the listing-status column (mojibake in this extraction; value 1
    appears to mean "currently listed" -- TODO confirm against the DB
    schema).
    """

    def __init__(self, db_interface: DBInterface = None) -> None:
        # Fall back to the globally configured DB interface.
        self.db_interface = db_interface if db_interface else get_db_interface()
        self.cache = None  # populated by subclasses

    def all_ticker(self) -> List[str]:
        """ return ALL ticker for the asset class"""
        return sorted(self.cache.ID.unique().tolist())

    @date_utils.dtlize_input_dates
    def ticker(self, date: date_utils.DateType = None) -> List[str]:
        """ return tickers that are alive on `date`, `date` default to today"""
        if date is None:
            date = dt.datetime.today()
        # Latest status record per ticker at or before `date`...
        stock_ticker_df = self.cache.loc[self.cache.DateTime <= date]
        tmp = stock_ticker_df.groupby('ID').tail(1)
        # ...keeping only tickers whose latest status is "listed" (== 1).
        return sorted(tmp.loc[tmp['ไธๅธ็ถๆ'] == 1, 'ID'].tolist())

    def list_date(self) -> Dict[str, dt.datetime]:
        """ return the list date of all tickers"""
        first_list_info = self.cache.groupby('ID').head(1)
        return dict(zip(first_list_info.ID, first_list_info.DateTime))

    def get_list_date(self, tickers: Union[str, Sequence[str]]) -> Union[pd.Series, dt.datetime]:
        """Return the list date of one ticker (scalar) or several (Series)."""
        if isinstance(tickers, str):
            tickers = [tickers]
        # Bug fix: `&` binds tighter than `==` in Python, so the previous
        # expression `isin(tickers) & cache[status] == 1` evaluated as
        # `(isin & status) == 1` (a bitwise AND of a boolean mask with the
        # raw status codes) rather than the intended conjunction of the
        # two conditions. Parenthesize the comparison.
        info = self.cache.loc[self.cache.ID.isin(tickers) & (self.cache['ไธๅธ็ถๆ'] == 1), :].set_index('ID')
        ret = info.DateTime.iloc[0] if info.shape[0] == 1 else info.DateTime
        return ret

    def new_ticker(self, start_date: dt.datetime, end_date: dt.datetime = None) -> List[str]:
        """Return tickers whose latest record in [start_date, end_date] is "listed"."""
        if end_date is None:
            end_date = dt.datetime.today()
        if start_date is None:
            start_date = dt.datetime(1990, 12, 10)  # earliest supported date
        u_data = self.cache.loc[(start_date <= self.cache.DateTime) & (self.cache.DateTime <= end_date), :]
        tmp = u_data.groupby('ID').tail(1)
        return sorted(tmp.loc[tmp['ไธๅธ็ถๆ'] == 1, 'ID'].tolist())
class DiscreteTickers(TickersBase):
    """Ticker lookup restricted to a single asset type (discrete category)."""

    def __init__(self, asset_type: str, db_interface: DBInterface = None) -> None:
        super().__init__(db_interface)
        # Table/column names are mojibake in this extraction; kept verbatim.
        # Filters the ticker table by asset type.
        self.cache = self.db_interface.read_table('่ฏๅธไปฃ็ ', text_statement=f'่ฏๅธ็ฑปๅ="{asset_type}"').reset_index()
class StockTickers(DiscreteTickers):
    """A-share stock tickers."""

    def __init__(self, db_interface: DBInterface = None) -> None:
        super().__init__('A่ก่ก็ฅจ', db_interface)


class ConvertibleBondTickers(DiscreteTickers):
    """Convertible bond tickers."""

    def __init__(self, db_interface: DBInterface = None) -> None:
        super().__init__('ๅฏ่ฝฌๅบ', db_interface)


class FutureTickers(DiscreteTickers):
    """Futures contract tickers."""

    def __init__(self, db_interface: DBInterface = None) -> None:
        super().__init__('ๆ่ดง', db_interface)


class StockIndexFutureIndex(FutureTickers):
    """Stock index futures contract tickers (IH / IF / IC products only)."""

    def __init__(self, db_interface: DBInterface = None) -> None:
        super().__init__(db_interface)
        # Keep only the three CFFEX equity-index products.
        mask = self.cache.ID.str.startswith('IH') | self.cache.ID.str.startswith('IF') | self.cache.ID.str.startswith(
            'IC')
        self.cache = self.cache.loc[mask, :]


class ETFOptionTickers(DiscreteTickers):
    """ETF option contract tickers."""

    def __init__(self, db_interface: DBInterface = None) -> None:
        super().__init__('ETFๆๆ', db_interface)


class IndexOptionTickers(DiscreteTickers):
    """Index option contract tickers."""

    def __init__(self, db_interface: DBInterface = None) -> None:
        super().__init__('ๆๆฐๆๆ', db_interface)


class FutureOptionTickers(DiscreteTickers):
    """Commodity futures option contract tickers."""

    def __init__(self, db_interface: DBInterface = None) -> None:
        super().__init__('ๅๅๆๆ', db_interface)
class ExchangeStockETFTickers(DiscreteTickers):
    """Exchange-traded stock ETF tickers.

    NOTE(review): the asset-type literal below is mojibake and was split
    across lines by the extraction; it is kept byte-identical.
    """

    def __init__(self, db_interface: DBInterface = None) -> None:
        super().__init__('ๅบๅ
ๅบ้', db_interface)
        fund_info = FundInfo(db_interface)
        # Keep only ETFs whose investment type is "passive index stock fund".
        all_tickers = fund_info.data.loc[(fund_info.data['ETF'] == True) & (fund_info.data['ๆ่ต็ฑปๅ'] == '่ขซๅจๆๆฐๅๅบ้'), :]
        self.cache = self.cache.loc[self.cache.ID.isin(all_tickers.index.tolist()), :]


class BondETFTickers(DiscreteTickers):
    """Bond ETF tickers."""

    def __init__(self, db_interface: DBInterface = None) -> None:
        super().__init__('ๅบๅ
ๅบ้', db_interface)
        fund_info = FundInfo(db_interface)
        # Keep only ETFs whose investment type is "passive index bond fund".
        all_tickers = fund_info.data.loc[(fund_info.data['ETF'] == True) & (fund_info.data['ๆ่ต็ฑปๅ'] == '่ขซๅจๆๆฐๅๅบๅธๅบ้'), :]
        self.cache = self.cache.loc[self.cache.ID.isin(all_tickers.index.tolist()), :]
class ConglomerateTickers(TickersBase):
    """Ticker lookup for a group of asset types selected via an SQL LIKE clause."""

    def __init__(self, sql_statement: str, db_interface: DBInterface = None) -> None:
        super().__init__(db_interface)
        self.cache = self.db_interface.read_table('่ฏๅธไปฃ็ ', text_statement=sql_statement).reset_index()


class OptionTickers(ConglomerateTickers):
    """All option tickers (any asset type ending in "option")."""

    def __init__(self, db_interface: DBInterface = None) -> None:
        super().__init__('่ฏๅธ็ฑปๅ like "%ๆๆ"', db_interface)


class FundTickers(ConglomerateTickers):
    """All fund tickers (any asset type ending in "fund")."""

    def __init__(self, db_interface: DBInterface = None) -> None:
        super().__init__('่ฏๅธ็ฑปๅ like "%ๅบ้"', db_interface)
class ETFTickers(DiscreteTickers):
    """ETF tickers (exchange-listed funds flagged as ETF in the fund info)."""

    def __init__(self, db_interface: DBInterface = None) -> None:
        super().__init__('ๅบๅ
ๅบ้', db_interface)
        fund_info = FundInfo(db_interface)
        all_tickers = fund_info.data.loc[fund_info.data['ETF'] == True, :]
        self.cache = self.cache.loc[self.cache.ID.isin(all_tickers.index.tolist()), :]


class ExchangeFundTickers(DiscreteTickers):
    """Exchange-traded fund tickers."""

    def __init__(self, db_interface: DBInterface = None) -> None:
        super().__init__('ๅบๅ
ๅบ้', db_interface)


class OTCFundTickers(DiscreteTickers):
    """Over-the-counter fund tickers."""

    def __init__(self, db_interface: DBInterface = None) -> None:
        super().__init__('ๅบๅคๅบ้', db_interface)
class InvestmentStyleFundTicker(DiscreteTickers):
    """Fund tickers filtered by investment style.

    :param investment_type: sequence of investment-type labels (the original
        docstring enumerated the allowed labels; they are mojibake in this
        extraction, e.g. equity, balanced, index, bond, money-market, QDII,
        REITs styles -- confirm against the fund-info table values).
    :param otc: select OTC fund tickers instead of exchange-listed ones.
    :param db_interface: DBInterface
    """

    def __init__(self, investment_type: Sequence[str], otc: bool = False, db_interface: DBInterface = None) -> None:
        type_name = 'ๅบๅคๅบ้' if otc else 'ๅบๅ
ๅบ้'
        super().__init__(type_name, db_interface)
        self.fund_info = FundInfo(db_interface)
        # Restrict the ticker cache to funds of the requested styles.
        all_tickers = self.fund_info.data.loc[self.fund_info.data['ๆ่ต็ฑปๅ'].isin(investment_type), :]
        self.cache = self.cache.loc[self.cache.ID.isin(all_tickers.index.tolist()), :]

    def get_next_open_day(self, ids: Union[Sequence[str], str], date: dt.datetime = None):
        """Return, per fund, the first open (subscription) day at or after `date`.

        Funds without a lock-up period column value yield NaT. `date`
        defaults to today (midnight).
        """
        if date is None:
            date = dt.datetime.combine(dt.date.today(), dt.time())
        if isinstance(ids, str):
            ids = [ids]
        list_date = self.get_list_date(ids)
        # Lock-up period in months (column name is mojibake; kept verbatim).
        period = self.fund_info.data.loc[ids, 'ๅฎๅผๆถ้ฟ(ๆ)']
        df = pd.concat([list_date, period], axis=1)
        storage = []
        for ticker, row in df.iterrows():
            if pd.isna(row['ๅฎๅผๆถ้ฟ(ๆ)']):
                storage.append(pd.NaT)
                continue
            # Step forward from the list date in lock-up-sized increments
            # until we reach the first open day not before `date`.
            open_day = row.DateTime
            while open_day < date:
                open_day = open_day + relativedelta(months=row['ๅฎๅผๆถ้ฟ(ๆ)'])
            storage.append(open_day)
        return pd.Series(storage, index=df.index)
class StockFundTickers(InvestmentStyleFundTicker):
    """Funds whose primary (>=50%) investment target is stocks."""

    def __init__(self, otc: bool = False, db_interface: DBInterface = None) -> None:
        # Style labels are mojibake in this extraction; kept verbatim.
        stock_investment_type = ['ๅ่กๆททๅๅๅบ้', '่ขซๅจๆๆฐๅๅบ้', '็ตๆดป้
็ฝฎๅๅบ้', 'ๅขๅผบๆๆฐๅๅบ้', 'ๆฎ้่ก็ฅจๅๅบ้', '่ก็ฅจๅค็ฉบ', 'ๅนณ่กกๆททๅๅๅบ้']
        super().__init__(stock_investment_type, otc, db_interface)


class FundWithStocksTickers(InvestmentStyleFundTicker):
    """Funds that may hold stocks (adds mixed-bond and allocation styles)."""

    def __init__(self, otc: bool = False, db_interface: DBInterface = None) -> None:
        stock_investment_type = ['ๅ่กๆททๅๅๅบ้', '่ขซๅจๆๆฐๅๅบ้', '็ตๆดป้
็ฝฎๅๅบ้', 'ๅขๅผบๆๆฐๅๅบ้', 'ๆฎ้่ก็ฅจๅๅบ้', '่ก็ฅจๅค็ฉบ', 'ๅนณ่กกๆททๅๅๅบ้', 'ๆททๅๅบๅธๅไบ็บงๅบ้',
                                 'ๆททๅๅบๅธๅไธ็บงๅบ้', 'ๅๅบๆททๅๅๅบ้']
        super().__init__(stock_investment_type, otc, db_interface)


class EnhancedIndexFund(InvestmentStyleFundTicker):
    """Enhanced stock index funds."""

    def __init__(self, otc: bool = False, db_interface: DBInterface = None) -> None:
        stock_investment_type = ['ๅขๅผบๆๆฐๅๅบ้']
        super().__init__(stock_investment_type, otc, db_interface)


class IndexFund(InvestmentStyleFundTicker):
    """Passive index funds."""

    def __init__(self, otc: bool = False, db_interface: DBInterface = None) -> None:
        stock_investment_type = ['่ขซๅจๆๆฐๅๅบ้']
        super().__init__(stock_investment_type, otc, db_interface)


class ActiveManagedStockFundTickers(InvestmentStyleFundTicker):
    """Actively managed funds whose primary (>=50%) target is stocks."""

    def __init__(self, otc: bool = False, db_interface: DBInterface = None) -> None:
        stock_investment_type = ['ๅ่กๆททๅๅๅบ้', '็ตๆดป้
็ฝฎๅๅบ้', 'ๅขๅผบๆๆฐๅๅบ้', 'ๆฎ้่ก็ฅจๅๅบ้', '่ก็ฅจๅค็ฉบ', 'ๅนณ่กกๆททๅๅๅบ้']
        super().__init__(stock_investment_type, otc, db_interface)
class StockTickerSelector(TickerSelector):
    """Stock ticker selector: applies a StockSelectionPolicy on a given date.

    Factor-backed filters (pauses, limit moves, ST/risk-warning names,
    negative book value) are constructed lazily via cached properties.
    """

    def __init__(self, policy: StockSelectionPolicy, db_interface: DBInterface = None) -> None:
        """
        :param db_interface: DBInterface
        :param policy: stock selection policy
        """
        super().__init__()
        self.db_interface = db_interface if db_interface else get_db_interface()
        self.calendar = date_utils.SHSZTradingCalendar(self.db_interface)
        self.stock_ticker = StockTickers(self.db_interface)
        self.policy = policy

    @cached_property
    def paused_stock_selector(self):
        # Trading-pause records (factor name is mojibake; kept verbatim).
        return OnTheRecordFactor('่ก็ฅจๅ็', self.db_interface)

    @cached_property
    def const_limit_selector(self):
        # One-sided (limit-up/limit-down all day) records.
        return OnTheRecordFactor('ไธๅญๆถจ่ทๅ', self.db_interface)

    @cached_property
    def risk_warned_stock_selector(self):
        # Risk-warned stocks, detected from the security name containing
        # PT / ST / the delisting marker; restricted to SH/SZ listings.
        tmp = CompactFactor('่ฏๅธๅ็งฐ', self.db_interface)
        ids = tmp.data.index.get_level_values('ID')
        tmp.data = tmp.data.loc[ids.str.endswith('.SH') | ids.str.endswith('.SZ')]
        tmp.data = tmp.data.map(lambda x: 'PT' in x or 'ST' in x or '้' in x)
        return CompactRecordFactor(tmp, '้ฃ้ฉ่ญฆ็คบ่ก')

    @cached_property
    def negative_book_value_stock_selector(self):
        return CompactFactor('่ดๅ่ตไบง่ก็ฅจ', self.db_interface)

    @cached_property
    def industry_info(self):
        # Only built when the policy restricts by industry; otherwise None.
        if self.policy.industry:
            return IndustryFactor(self.policy.industry_provider, self.policy.industry_level, self.db_interface)

    @date_utils.dtlize_input_dates
    def ticker(self, date: date_utils.DateType, ids: Sequence[str] = None) -> List[str]:
        """ select stocks that matched selection policy on `date`(amongst `ids`)

        :param date: query date
        :param ids: tickers to select from
        :return: list of ticker that satisfy the stock selection policy
        """
        if ids is None:
            ids = set(self.stock_ticker.ticker(date))

        # New-stock filters: exclude recent listings and/or keep only them.
        if self.policy.ignore_new_stock_period or self.policy.select_new_stock_period:
            start_date, end_date = None, None
            if self.policy.ignore_new_stock_period:
                end_date = self.calendar.offset(date, -self.policy.ignore_new_stock_period)
            if self.policy.select_new_stock_period:
                start_date = self.calendar.offset(date, -self.policy.select_new_stock_period - 1)
            ids = set(self.stock_ticker.new_ticker(start_date=start_date, end_date=end_date)) & ids

        # Industry restriction.
        if self.industry_info and self.policy.industry:
            ids = ids & set(self.industry_info.list_constitutes(date=date, industry=self.policy.industry))
        # Exclude stocks pinned at their limit all day.
        if self.policy.ignore_const_limit:
            ids = ids - set(self.const_limit_selector.get_data(date))

        # Pause filters: exclude paused, or select only paused, and cap the
        # number of paused days within a look-back window.
        if self.policy.ignore_pause:
            ids = ids - set(self.paused_stock_selector.get_data(date))
        elif self.policy.select_pause:
            ids = ids & set(self.paused_stock_selector.get_data(date))
        if self.policy.max_pause_days:
            pause_days, period_length = self.policy.max_pause_days
            start_date = self.calendar.offset(date, -period_length)
            end_date = self.calendar.offset(date, -1)
            pause_counts = self.paused_stock_selector.get_counts(start_date=start_date, end_date=end_date)
            pause_counts = pause_counts.loc[pause_counts > pause_days]
            ids = ids - set(pause_counts.index.get_level_values('ID'))

        # ST / risk-warning filters.
        if self.policy.select_st:
            ids = ids & set(self.risk_warned_stock_selector.get_data(date))
            if self.policy.st_defer_period:
                # Require the stock to have been risk-warned already at the
                # start of the defer period.
                start_date = self.calendar.offset(date, -self.policy.st_defer_period - 1)
                ids = ids & set(self.risk_warned_stock_selector.get_data(start_date))
        if self.policy.ignore_st:
            ids = ids - set(self.risk_warned_stock_selector.get_data(date))

        # Exclude stocks with negative book value.
        if self.policy.ignore_negative_book_value_stock:
            data = self.negative_book_value_stock_selector.get_data(dates=date)
            ids = ids - set(data.loc[data == True].index.get_level_values('ID').tolist())

        ids = sorted(list(ids))
        return ids

    def generate_index(self, start_date: date_utils.DateType = None, end_date: date_utils.DateType = None,
                       dates: Union[date_utils.DateType, Sequence[date_utils.DateType]] = None) -> pd.MultiIndex:
        """Build a (DateTime, ID) MultiIndex of selected stocks over dates."""
        storage = []
        if dates is None:
            dates = self.calendar.select_dates(start_date, end_date)
        for date in dates:
            ids = self.ticker(date)
            storage.extend(list(product([date], ids)))
        return pd.MultiIndex.from_tuples(storage, names=['DateTime', 'ID'])
|
import json
from enum import Enum
from itertools import chain
from pathlib import Path
from typing import Any, Union, Callable
import yaml
from openapi_core.schema.specs.models import Spec
from typing_extensions import Protocol
class SpecFileTypes(tuple, Enum):
    """Recognized spec-file extensions, grouped by format.

    Each member is a tuple of lowercase suffixes, so membership tests such
    as ``"yml" in SpecFileTypes.YAML`` work directly.
    """
    JSON = ("json",)
    YAML = ("yaml", "yml")
def get_spec_from_file(path: Union[Path, str]) -> Spec:
    """Load a local JSON or YAML OpenAPI specification file.

    Note: despite the ``Spec`` annotation, this returns the parsed mapping
    produced by ``json.load`` / ``yaml.safe_load`` -- the caller builds the
    ``Spec`` from it (TODO confirm with callers).

    :param path: path to a ``.json`` / ``.yaml`` / ``.yml`` file.
    :raises RuntimeError: if the file extension is not recognized.
    """
    path = Path(path)
    suffix = path.suffix[1:].lower()
    if suffix in SpecFileTypes.JSON:
        spec_load: Callable = json.load
    elif suffix in SpecFileTypes.YAML:
        spec_load = yaml.safe_load
    else:
        raise RuntimeError(
            f"Unknown specification file type."
            f" Accepted types: {', '.join(chain(*SpecFileTypes))}"
        )
    # Spec files are text; pin UTF-8 instead of relying on the
    # platform-dependent default encoding.
    with open(path, encoding="utf-8") as spec_file:
        return spec_load(spec_file)
class Requestable(Protocol):  # pragma: no cover
    """Implements the `request` method compatible with the `requests` library."""

    def request(self, method: str, url: str, **kwargs) -> Any:
        """Perform an HTTP request; signature mirrors ``requests.request``."""
        ...
|
import numpy as np
def _bootstrap_replicate_1d(data, reducer):
bs_sample = np.random.choice(data, len(data))
return reducer(bs_sample)
def _draw_bs_reps(data, reducer, n_reps):
    """Return an array of `n_reps` bootstrap replicates of `reducer` over `data`."""
    replicates = np.empty(n_reps)
    for idx in range(n_reps):
        replicates[idx] = _bootstrap_replicate_1d(data, reducer=reducer)
    return replicates
|
#!/usr/bin/python3
import subprocess
import sys
def cleanUpChroot(chrootPath):
    """Unmount everything under the chroot, then delete its files.

    Returns True on success, False as soon as any step fails.
    """
    ok, mountpoints = findmountpoints(chrootPath)
    if not ok:
        return False
    sortmountpoints(mountpoints)
    print(mountpoints)
    if not unmountmountpoints(mountpoints):
        return False
    return removeAllFilesFromChroot(chrootPath)
def removeAllFilesFromChroot(chrootPath):
    """Recursively delete the chroot directory tree.

    :param chrootPath: directory to remove.
    :return: True on success, False otherwise.
    """
    # Pass the path as an argv element instead of interpolating it into a
    # shell string: previously, paths containing spaces or shell
    # metacharacters were word-split/expanded by the shell.
    process = subprocess.Popen(["rm", "-rf", chrootPath],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    retval = process.wait()
    if retval != 0:
        print("Unable to remove files from chroot " + chrootPath)
        return False
    return True
def unmountmountpoints(listmountpoints):
    """Unmount each mountpoint in list order, stopping at the first failure.

    :param listmountpoints: list of mountpoint paths, or None for "nothing
        to do".
    :return: True if every unmount succeeded (or there was nothing to do),
        False otherwise.
    """
    if listmountpoints is None:
        return True
    result = True
    for mountpoint in listmountpoints:
        # argv list instead of a shell string: mountpoints containing
        # spaces or shell metacharacters are passed through verbatim.
        process = subprocess.Popen(["umount", mountpoint],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        retval = process.wait()
        if retval != 0:
            result = False
            print("Unable to unmount " + mountpoint)
            break
    if not result:
        print("Unable to unmount all mounts. Unable to clean up the chroot")
        return False
    return True
def findmountpoints(chrootPath):
    """List active mountpoints under the chroot via `mount | grep | cut`.

    :param chrootPath: chroot directory; a trailing slash is appended so
        grep does not match sibling paths sharing the same prefix.
    :return: (True, list_of_mountpoints) on success, (True, None) when
        nothing is mounted under the chroot, (False, None) when the shell
        pipeline itself fails. Note the pipeline exit status is that of the
        final `cut`, so "grep found nothing" still yields status 0 and is
        reported via the empty-output branch.
    """
    if not chrootPath.endswith("/"):
        chrootPath = chrootPath + "/"
    cmd = "mount | grep " + chrootPath + " | cut -d' ' -s -f3"
    process = subprocess.Popen("%s" %cmd, shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    retval = process.wait()
    if retval != 0:
        print("Unable to find mountpoints in chroot")
        return False, None
    # Third whitespace-separated field of each `mount` line is the
    # mountpoint path; flatten the lines into a single list.
    mountpoints = process.communicate()[0].decode()
    mountpoints = mountpoints.replace("\n", " ").strip()
    if mountpoints == "":
        print("No mount points found")
        return True, None
    listmountpoints = mountpoints.split(" ")
    return True, listmountpoints
def sortmountpoints(listmountpoints):
    """Sort mountpoints in place so the deepest paths come first.

    Child mounts must be unmounted before their parents, so the list is
    put in reverse lexicographic order (callers rely on the in-place side
    effect).

    :param listmountpoints: list of mountpoint paths, or None.
    :return: True when there is nothing to sort (None input), else None.
    """
    if listmountpoints is None:
        return True
    # Bug fix: the previous code called sorted() and discarded its return
    # value, so the list was only reversed, never actually sorted.
    listmountpoints.sort(reverse=True)
def main():
    """CLI entry point: clean up the chroot passed as argv[1].

    Exits 1 on missing argument or cleanup failure, 0 on success.
    """
    if len(sys.argv) < 2:
        print("Usage: ./clean-up-chroot.py <chrootpath>")
        sys.exit(1)
    if not cleanUpChroot(sys.argv[1]):
        sys.exit(1)
    sys.exit(0)


if __name__ == "__main__":
    main()
|
# encoding: utf-8
from datetime import datetime, timedelta
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from dateutil.tz import tzlocal
from core.utils import slugify
class Setup(object):
    def __init__(self):
        # Running counter used to hand out stable, gapped ordering numbers
        # (see get_ordering_number).
        self._ordering = 0
def get_ordering_number(self):
self._ordering += 10
return self._ordering
    def setup(self, test=False):
        """Run all setup steps; `test` enables test-only fixtures."""
        self.test = test
        self.tz = tzlocal()
        self.setup_core()
        self.setup_labour()
        self.setup_programme()
    def setup_core(self):
        """Create the venue and event rows (idempotent via get_or_create)."""
        from core.models import Venue, Event

        self.venue, unused = Venue.objects.get_or_create(name='Tampereen yliopisto', defaults=dict(
            name_inessive='Tampereen yliopistolla',  # not really inessive though
        ))
        self.event, unused = Event.objects.get_or_create(slug='finncon2016', defaults=dict(
            name='Finncon 2016',
            name_genitive='Finncon 2016 -tapahtuman',
            name_illative='Finncon 2016 -tapahtumaan',
            name_inessive='Finncon 2016 -tapahtumassa',
            homepage_url='http://2016.finncon.org',
            organization_name='Finncon-yhdistys ry',
            organization_url='http://www.finncon.org',
            start_time=datetime(2016, 7, 1, 12, 0, tzinfo=self.tz),
            end_time=datetime(2016, 7, 3, 18, 0, tzinfo=self.tz),
            venue=self.venue,
        ))
    def setup_labour(self):
        """Create labour-management fixtures for the event.

        Sets up the labour meta row, personnel classes, job categories,
        qualification requirements, work periods, special diets, the
        organizer signup form and info links. All creation is idempotent
        via get_or_create. Finnish user-facing strings (some mojibaked in
        this extraction) are data and kept verbatim.
        """
        from core.models import Person
        from labour.models import (
            AlternativeSignupForm,
            InfoLink,
            Job,
            JobCategory,
            LabourEventMeta,
            Perk,
            PersonnelClass,
            Qualification,
            WorkPeriod,
        )
        from ...models import SignupExtra, SpecialDiet
        from django.contrib.contenttypes.models import ContentType

        labour_admin_group, = LabourEventMeta.get_or_create_groups(self.event, ['admins'])

        if self.test:
            from core.models import Person
            person, unused = Person.get_or_create_dummy()
            labour_admin_group.user_set.add(person.user)

        content_type = ContentType.objects.get_for_model(SignupExtra)

        labour_event_meta_defaults = dict(
            signup_extra_content_type=content_type,
            work_begins=datetime(2016, 7, 1, 8, 0, tzinfo=self.tz),
            work_ends=datetime(2016, 7, 3, 20, 0, tzinfo=self.tz),
            admin_group=labour_admin_group,
            contact_email='Finncon 2016 -tyรถvoimatiimi <tyovoima@finncon.org>',
        )

        if self.test:
            # In test mode, keep registration open around "now".
            t = now()
            labour_event_meta_defaults.update(
                registration_opens=t - timedelta(days=60),
                registration_closes=t + timedelta(days=60),
            )
        else:
            # labour_event_meta_defaults.update(
            #     registration_opens=datetime(2015, 1, 22, 0, 0, tzinfo=self.tz),
            #     registration_closes=datetime(2015, 3, 14, 0, 0, tzinfo=self.tz),
            # )
            pass

        labour_event_meta, unused = LabourEventMeta.objects.get_or_create(
            event=self.event,
            defaults=labour_event_meta_defaults,
        )

        # Personnel classes: (name, slug, owning app).
        for pc_name, pc_slug, pc_app_label in [
            (u'Conitea', 'conitea', 'labour'),
            (u'Ylivรคnkรคri', 'ylivankari', 'labour'),
            (u'Tyรถvoima', 'tyovoima', 'labour'),
            (u'Ohjelmanjรคrjestรคjรค', 'ohjelma', 'programme'),
            (u'Guest of Honour', 'goh', 'programme'),
            (u'Media', 'media', 'badges'),
            (u'Myyjรค', 'myyja', 'badges'),
            (u'Vieras', 'vieras', 'badges'),
        ]:
            personnel_class, created = PersonnelClass.objects.get_or_create(
                event=self.event,
                slug=pc_slug,
                defaults=dict(
                    name=pc_name,
                    app_label=pc_app_label,
                    priority=self.get_ordering_number(),
                ),
            )

        tyovoima = PersonnelClass.objects.get(event=self.event, slug='tyovoima')
        conitea = PersonnelClass.objects.get(event=self.event, slug='conitea')
        ylivankari = PersonnelClass.objects.get(event=self.event, slug='ylivankari')
        ohjelma = PersonnelClass.objects.get(event=self.event, slug='ohjelma')

        # Job categories: (name, description, eligible personnel classes).
        for name, description, pcs in [
            (u'Conitea', u'Tapahtuman jรคrjestelytoimikunnan eli Conitean jรคsen', [conitea]),
            (u'Info', u'Infopisteen henkilรถkunta vastaa kรคvijรถiden kysymyksiin ja ratkaisee heidรคn ongelmiaan tapahtuman paikana. Tehtรคvรค edellyttรครค asiakaspalveluasennetta, tervettรค jรคrkeรค ja ongelmanratkaisukykyรค.', [tyovoima, ylivankari]),
            (u'Narikka', u'Narikassa ja isotavara- eli asenarikassa sรคilytetรครคn tapahtuman aikana kรคvijรถiden omaisuutta. Tehtรคvรค ei vaadi erikoisosaamista.', [tyovoima, ylivankari]),
            (u'Green room', u'Tyรถvoiman ruokahuolto green roomissa. Edellyttรครค hygieniapassia.', [tyovoima, ylivankari]),
            (u'Salivรคnkรคri', u'Salivรคnkรคri vastaa ohjelmasalien toiminnasta. He pitรคvรคt huolen, ettรค ohjelmat alkavat ja loppuvat ajallaan ja ettรค ohjelmanjรคrjestรคjillรค on kaikki mitรค he tarvitsevat salissa.', [tyovoima, ylivankari]),
            (u'Yleisvรคnkรคri', u'Sekalaisia tehtรคviรค laidasta laitaan, jotka eivรคt vaadi erikoisosaamista. Voit halutessasi kirjata lisรคtietoihin, mitรค osaat ja haluaisit tehdรค.', [tyovoima, ylivankari]),
            (u'Jรคrjestyksenvalvoja', u'Kรคvijรถiden turvallisuuden valvominen conipaikalla. Edellyttรครค voimassa olevaa JV-korttia ja asiakaspalveluasennetta. HUOM! Et voi valita tรคtรค tehtรคvรครค hakemukseesi, ellet ole tรคyttรคnyt tietoihisi JV-kortin numeroa (oikealta ylhรครคltรค oma nimesi > Pรคtevyydet).', [tyovoima, ylivankari]),
            (u'Iltabileiden lipunmyyjรค', u'Iltabileiden pรครคsylippujen myyntiรค sekรค tarkastamista. Myyjiltรค edellytetรครคn tรคysi-ikรคisyyttรค, asiakaspalveluhenkeรค ja huolellisuutta rahankรคsittelyssรค. Vuoroja myรถs perjantaina.', [tyovoima, ylivankari]),
            (u'Iltabileiden jรคrjestyksenvalvoja', u'Kรคvijรถiden turvallisuuden valvominen iltabileissรค. Edellyttรครค voimassa olevaa JV-korttia ja asiakaspalveluasennetta. HUOM! Et voi valita tรคtรค tehtรคvรครค hakemukseesi, ellet ole tรคyttรคnyt tietoihisi JV-kortin numeroa (oikealta ylhรครคltรค oma nimesi > Pรคtevyydet).', [tyovoima, ylivankari]),
            (u'Ensiapu', 'Toimit osana tapahtuman omaa ensiapuryhmรครค. Vuoroja pรคivisin ja รถisin tapahtuman aukioloaikoina. Vaaditaan vรคhintรครคn voimassa oleva EA1 -kortti ja osalta myรถs voimassa oleva EA2 -kortti. Kerro Tyรถkokemus -kohdassa osaamisestasi, esim. oletko toiminut EA-tehtรคvissรค tapahtumissa tai oletko sairaanhoitaja/lรคhihoitaja koulutuksestaltasi.', [tyovoima, ylivankari]),
            (u'Erikoistehtรคvรค', u'Mikรคli olet sopinut erikseen tyรถtehtรคvistรค ja/tai sinut on ohjeistettu tรคyttรคmรครคn lomake, valitse tรคmรค ja kerro tarkemmin Vapaa alue -kentรคssรค mihin tehtรคvรครคn ja kenen toimesta sinut on valittu.', [tyovoima, ylivankari]),
            (u'Ohjelmanpitรคjรค', u'Luennon tai muun vaativan ohjelmanumeron pitรคjรค', [ohjelma]),
        ]:
            job_category, created = JobCategory.objects.get_or_create(
                event=self.event,
                name=name,
                defaults=dict(
                    description=description,
                    slug=slugify(name),
                )
            )

            if created:
                # Direct M2M assignment -- works on the Django version this
                # targets (pre-2.0 style); newer Django would need .set().
                job_category.personnel_classes = pcs
                job_category.save()

        labour_event_meta.create_groups()

        # Hide internal categories from the public signup form.
        for slug in [u'conitea']:
            JobCategory.objects.filter(event=self.event, slug=slug).update(public=False)

        # Attach required qualifications to security-guard categories.
        for jc_name, qualification_name in [
            (u'Jรคrjestyksenvalvoja', u'JV-kortti'),
            (u'Iltabileiden jรคrjestyksenvalvoja', u'JV-kortti'),
        ]:
            jc = JobCategory.objects.get(event=self.event, name=jc_name)
            qual = Qualification.objects.get(name=qualification_name)
            jc.required_qualifications = [qual]
            jc.save()

        # Work periods (start times still TODO for this event).
        period_length = timedelta(hours=8)
        for period_description, period_start in [
            (u'TODO', None),
            # (u"Perjantain kasaus (pe klo 14-18)", None),
            # (u"Lauantain aamuvuoro (la klo 08-11)", None),
            # (u"Lauantain pรคivรคvuoro (la klo 11-15)", None),
            # (u"Lauantain iltapรคivรคvuoro (la klo 15-18)", None),
            # (u"Sunnuntain aamuvuoro (su klo 08-11)", None),
            # (u"Sunnuntain pรคivรคvuoro (su klo 11-15)", None),
            # (u"Sunnuntain purkuvuoro (su klo 15-17)", None),
        ]:
            WorkPeriod.objects.get_or_create(
                event=self.event,
                description=period_description,
                defaults=dict(
                    start_time=period_start,
                    end_time=(period_start + period_length) if period_start else None,
                )
            )

        for diet_name in [
            u'Gluteeniton',
            u'Laktoositon',
            u'Maidoton',
            u'Vegaaninen',
            u'Lakto-ovo-vegetaristinen',
        ]:
            SpecialDiet.objects.get_or_create(name=diet_name)

        # Separate signup form for the organizing committee.
        AlternativeSignupForm.objects.get_or_create(
            event=self.event,
            slug=u'conitea',
            defaults=dict(
                title=u'Conitean ilmoittautumislomake',
                signup_form_class_path='events.finncon2016.forms:OrganizerSignupForm',
                signup_extra_form_class_path='events.finncon2016.forms:OrganizerSignupExtraForm',
                active_from=datetime(2015, 8, 18, 0, 0, 0, tzinfo=self.tz),
                active_until=None,
            ),
        )

        for wiki_space, link_title, link_group in [
            ('FINNCONWORK', 'Tyรถvoimawiki', 'accepted'),
        ]:
            InfoLink.objects.get_or_create(
                event=self.event,
                title=link_title,
                defaults=dict(
                    url='https://confluence.tracon.fi/display/{wiki_space}'.format(wiki_space=wiki_space),
                    group=labour_event_meta.get_group(link_group),
                )
            )
    def setup_programme(self):
        """Provision programme-related data for Finncon 2016: admin group,
        event meta, host role, categories, schedule time blocks and room views.
        Idempotent: uses get_or_create throughout."""
        # Lazy imports keep app dependencies local to this management command.
        from labour.models import PersonnelClass
        from programme.models import (
            Category,
            Programme,
            ProgrammeEventMeta,
            Role,
            Room,
            SpecialStartTime,
            TimeBlock,
            View,
        )
        # Programme admin group + per-event programme metadata (kept non-public
        # until the schedule is published).
        programme_admin_group, = ProgrammeEventMeta.get_or_create_groups(self.event, ['admins'])
        programme_event_meta, unused = ProgrammeEventMeta.objects.get_or_create(event=self.event, defaults=dict(
            public=False,
            admin_group=programme_admin_group,
            contact_email='Finncon 2016 -ohjelmatiimi <ohjelma@2016.finncon.org>',
        ))
        # Default role assigned to programme hosts.
        personnel_class = PersonnelClass.objects.get(event=self.event, slug='ohjelma')
        role, unused = Role.objects.get_or_create(
            personnel_class=personnel_class,
            title=u'Ohjelmanjรคrjestรคjรค',
            defaults=dict(
                is_default=True,
                require_contact_info=True,
            )
        )
        # Seed categories only on first run; `style` values are CSS class names
        # reused from other events.
        have_categories = Category.objects.filter(event=self.event).exists()
        if not have_categories:
            for title, style in [
                (u'Puheohjelma', u'anime'),
                (u'Akateeminen ohjelma', u'cosplay'),
                (u'Miitti', u'miitti'),
                (u'Tyรถpaja', u'rope'),
                (u'Muu ohjelma', u'muu'),
            ]:
                Category.objects.get_or_create(
                    event=self.event,
                    style=style,
                    defaults=dict(
                        title=title,
                    )
                )
        # Schedule grid: one TimeBlock per convention day (Fri-Sun, July 2016).
        for start_time, end_time in [
            (
                datetime(2016, 7, 1, 12, 0, 0, tzinfo=self.tz),
                datetime(2016, 7, 1, 18, 0, 0, tzinfo=self.tz),
            ),
            (
                datetime(2016, 7, 2, 10, 0, 0, tzinfo=self.tz),
                datetime(2016, 7, 2, 18, 0, 0, tzinfo=self.tz),
            ),
            (
                datetime(2016, 7, 3, 10, 0, 0, tzinfo=self.tz),
                datetime(2016, 7, 3, 18, 0, 0, tzinfo=self.tz),
            ),
        ]:
            TimeBlock.objects.get_or_create(
                event=self.event,
                start_time=start_time,
                defaults=dict(
                    end_time=end_time
                )
            )
        # SpecialStartTime.objects.get_or_create(
        #     event=self.event,
        #     start_time=datetime(2016, 9, 5, 10, 30, 0, tzinfo=self.tz),
        # )
        # Room views: rooms are created on demand with a running ordering number.
        for view_name, room_names in [
            (u'Pรครคohjelmatilat', [
                u'Juhlasali',
                u'Auditorio A1',
                u'Luentosali A3',
                u'Luentosali A4',
            ]),
            (u'Toissijaiset ohjelmatilat', [
                u'Auditorio D10a',
                u'Auditorio D10b',
                u'Auditorio D11',
                u'Luentosali A05',
            ]),
        ]:
            rooms = [
                Room.objects.get_or_create(
                    venue=self.venue,
                    name=room_name,
                    defaults=dict(
                        order=self.get_ordering_number(),
                    )
                )[0]
                for room_name in room_names
            ]
            view, created = View.objects.get_or_create(event=self.event, name=view_name)
            if created:
                # NOTE(review): direct many-to-many assignment works only on
                # Django < 2.0; newer versions require view.rooms.set(rooms).
                view.rooms = rooms
                view.save()
class Command(BaseCommand):
    """Django management command entry point for the Finncon 2016 setup."""
    args = ''
    help = 'Setup finncon2016 specific stuff'
    def handle(self, *args, **opts):
        # Delegate all work to Setup; `test` follows DEBUG — presumably the
        # Setup helper shifts event dates in test mode (confirm in Setup.setup).
        Setup().setup(test=settings.DEBUG)
|
from __future__ import annotations
import asyncio
from datetime import datetime
from typing import AsyncIterator, Awaitable, Callable, Generic, List, Optional, TypeVar
T = TypeVar('T')
U = TypeVar('U')
V = TypeVar('V')
class Middleware(Generic[T, U]):  # pylint: disable=unsubscriptable-object
    """An async-stream transformer: consumes items of T, produces items of U."""

    async def __call__(self, inp: AsyncIterator[T]) -> AsyncIterator[U]:  # pragma: no cover
        # Yield from an empty sequence so the interpreter treats this abstract
        # implementation as an async generator even though it always raises.
        nothing: List[U] = []
        for item in nothing:
            yield item
        raise NotImplementedError

    def __or__(self, other: Middleware[U, V]) -> _Composition[T, U, V]:
        """`a | b` pipes the output of `a` into `b`."""
        return _Composition(first=self, second=other)

    @staticmethod
    def from_callable(
        func: Callable[[AsyncIterator[T]], AsyncIterator[U]],
    ) -> _FromCallable[T, U]:
        """Wrap a plain async-generator function as a Middleware."""
        return _FromCallable(func)
class _Composition(Middleware[T, V], Generic[T, U, V]):  # pylint: disable=unsubscriptable-object
    """Middleware formed by chaining two middlewares end to end."""

    first: Middleware[T, U]
    second: Middleware[U, V]

    def __init__(
        self,
        first: Middleware[T, U],
        second: Middleware[U, V],
    ) -> None:
        self.first = first
        self.second = second

    async def __call__(self, inp: AsyncIterator[T]) -> AsyncIterator[V]:
        # Feed the upstream through `first`, then relay whatever `second` emits.
        intermediate = self.first(inp)
        async for produced in self.second(intermediate):
            yield produced
class _FromCallable(Middleware[T, U]):
    """Adapter lifting an async-generator function into a Middleware."""

    def __init__(
        self,
        func: Callable[[AsyncIterator[T]], AsyncIterator[U]],
    ) -> None:
        self._func = func

    async def __call__(self, inp: AsyncIterator[T]) -> AsyncIterator[U]:
        # Pure delegation to the wrapped callable.
        async for produced in self._func(inp):
            yield produced
class ToBulks(Middleware[T, List[T]]):
    """Group incoming items into lists ("bulks"), flushing a bulk when it
    reaches ``max_bulk_size`` items and/or when ``bulk_timeout`` seconds have
    elapsed since the first item of the current bulk arrived."""
    # Maximum number of items per emitted bulk (None = unbounded).
    max_bulk_size: Optional[int]
    # Maximum age in seconds of a non-empty bulk before it is flushed (None = no limit).
    bulk_timeout: Optional[float]
    def __init__(self, max_bulk_size: Optional[int] = None, bulk_timeout: Optional[float] = None) -> None:
        # At least one flush criterion is required, otherwise nothing would
        # ever be emitted until the stream ends.
        assert (
            max_bulk_size is not None
            or bulk_timeout is not None
        ), '`max_bulk_size` or `bulk_timeout` must be specified'
        self.max_bulk_size = max_bulk_size
        self.bulk_timeout = bulk_timeout
    async def __call__(self, inp: AsyncIterator[T]) -> AsyncIterator[List[T]]:
        items: List[T] = []
        # Timestamp of the first item in the current bulk; None while empty.
        bulk_start: Optional[datetime] = None
        nxt = asyncio.ensure_future(inp.__anext__())
        try:
            while True:
                # Remaining time before the current bulk must be flushed.
                # Only computed for a non-empty bulk, so a timeout never fires
                # with an empty `items` list.
                timeout: Optional[float] = None
                if bulk_start is not None and self.bulk_timeout is not None:
                    timeout = self.bulk_timeout - (datetime.now() - bulk_start).total_seconds()
                try:
                    # shield() keeps `nxt` alive when wait_for cancels on
                    # timeout, so the pending __anext__ is reused next round.
                    item = await asyncio.wait_for(asyncio.shield(nxt), timeout)
                except asyncio.TimeoutError:
                    yield items
                    items = []
                    bulk_start = None
                    continue
                except StopAsyncIteration:
                    break
                bulk_start = bulk_start or datetime.now()
                items.append(item)
                if self.max_bulk_size is not None and len(items) == self.max_bulk_size:
                    yield items
                    items = []
                    bulk_start = None
                nxt = asyncio.ensure_future(inp.__anext__())
        finally:
            # Don't leak the in-flight __anext__ task if we exit early.
            nxt.cancel()
        if items:
            # Flush the final partial bulk.
            yield items
class Filter(Middleware[T, T]):
    """Pass through only the items for which an async predicate holds."""

    def __init__(self, predicate: Callable[[T], Awaitable[bool]]) -> None:
        self._predicate = predicate

    async def __call__(self, inp: AsyncIterator[T]) -> AsyncIterator[T]:
        async for candidate in inp:
            keep = await self._predicate(candidate)
            if keep:
                yield candidate
class Map(Middleware[T, U]):
    """Apply an async transform to every item of the stream."""

    def __init__(self, func: Callable[[T], Awaitable[U]]) -> None:
        self._func = func

    async def __call__(self, inp: AsyncIterator[T]) -> AsyncIterator[U]:
        async for original in inp:
            transformed = await self._func(original)
            yield transformed
class FilterNones(Middleware[Optional[T], T]):
    """Drop None items, passing every non-None value through unchanged.

    Fix: the previous implementation tested truthiness (``if item:``), which
    silently dropped legitimate falsy values such as 0, '' and []. The
    declared type ``Middleware[Optional[T], T]`` promises to remove only None.
    """
    async def __call__(self, inp: AsyncIterator[Optional[T]]) -> AsyncIterator[T]:
        async for item in inp:
            if item is not None:
                yield item
class SkipAll(Middleware[T, None]):
    """Consume and discard the entire input stream, yielding nothing."""
    async def __call__(self, inp: AsyncIterator[T]) -> AsyncIterator[None]:
        # Drain the upstream completely, purely for its side effects.
        async for _ in inp:
            pass
        # ensure function to be generator
        empty_list: List[None] = []
        for x in empty_list:
            yield x  # pragma: no cover
|
from sp_api.api import Reports
from sp_api.base import Marketplaces, Schedules, SellingApiBadRequestException, SellingApiServerException
def test_create_report():
    """Creating a listings report returns the stubbed report id."""
    marketplaces = [
        "A1PA6795UKMFR9",
        "ATVPDKIKX0DER",
    ]
    res = Reports().create_report(
        reportType='GET_MERCHANT_LISTINGS_ALL_DATA',
        dataStartTime='2019-12-10T20:11:24.000Z',
        marketplaceIds=marketplaces,
    )
    assert res.payload.get('reportId') == 'ID323'
def test_create_report_expect_400():
    """A known-bad report type must raise SellingApiBadRequestException.

    Fix: the original passed silently when no exception was raised at all;
    the ``else`` branch now fails the test in that case. The unused ``res``
    binding is also dropped.
    """
    try:
        Reports().create_report(
            reportType="BAD_FEE_DISCOUNTS_REPORT",
            dataStartTime="2019-12-10T20:11:24.000Z",
            marketplaceIds=[
                "A1PA6795UKMFR9",
                "ATVPDKIKX0DER"
            ])
    except SellingApiBadRequestException as br:
        assert br.code == 400
    else:
        raise AssertionError("expected SellingApiBadRequestException to be raised")
def test_create_report_expect_500():
    """A server-side failure must surface as SellingApiServerException.

    Fix: the original passed silently when no exception was raised at all;
    the ``else`` branch now fails the test in that case. The unused ``res``
    binding is also dropped.
    """
    try:
        Reports().create_report(
            reportType="BAD_FEE_DISCasdafsdsfsdfsdOUNTS_REPORT",
            dataStartTime="2019-12-10T20:11:24.000Z",
            marketplaceIds=[
                "A1PA6asfd795UKMFR9",
                "ATVPDKIKX0DER"
            ])
    except SellingApiServerException as br:
        assert br.code == 500
    else:
        raise AssertionError("expected SellingApiServerException to be raised")
def test_get_report():
    """Fetching a report by id returns the stubbed payload."""
    payload = Reports().get_report('ID323').payload
    assert payload.get('reportId') == 'ReportId1'
    assert payload.get('reportType') == 'FEE_DISCOUNTS_REPORT'
def test_get_report_document_n_decrypt():
    """With decrypt=False the response must not embed the document body."""
    document_id = '0356cf79-b8b0-4226-b4b9-0ee058ea5760'
    response = Reports().get_report_document(document_id, decrypt=False)
    assert response.errors is None
    assert 'document' not in response.payload
def test_create_report_schedule():
    """Scheduling a report returns a schedule id and no errors."""
    response = Reports().create_report_schedule(
        reportType='FEE_DISCOUNTS_REPORT',
        period=Schedules.MINUTES_5.value,
        nextReportCreationTime="2019-12-10T20:11:24.000Z",
        marketplaceIds=["A1PA6795UKMFR9", "ATVPDKIKX0DER"],
    )
    assert response.errors is None
    assert 'reportScheduleId' in response.payload
def test_delete_schedule_by_id():
    """Deleting a schedule succeeds without errors."""
    response = Reports().delete_report_schedule('ID')
    assert response.errors is None
def test_get_schedule_by_id():
    """Fetching a schedule returns its period and report type."""
    response = Reports().get_report_schedule('ID323')
    assert response.errors is None
    payload = response.payload
    assert 'period' in payload
    assert payload.get('reportType') == 'FEE_DISCOUNTS_REPORT'
|
# -*- coding: utf-8 -*-
import json
import warnings
from typing import List
from requests import Response
from TM1py.Exceptions.Exceptions import TM1pyException
from TM1py.Objects.Dimension import Dimension
from TM1py.Services.HierarchyService import HierarchyService
from TM1py.Services.ObjectService import ObjectService
from TM1py.Services.ProcessService import ProcessService
from TM1py.Services.RestService import RestService
from TM1py.Services.SubsetService import SubsetService
from TM1py.Utils.Utils import case_and_space_insensitive_equals, format_url, CaseAndSpaceInsensitiveSet
class DimensionService(ObjectService):
    """ Service to handle Object Updates for TM1 Dimensions
    """
    def __init__(self, rest: RestService):
        super().__init__(rest)
        # Companion services reused by create/update below.
        self.hierarchies = HierarchyService(rest)
        self.subsets = SubsetService(rest)
    def create(self, dimension: Dimension, **kwargs) -> Response:
        """ Create a dimension
        :param dimension: instance of TM1py.Dimension
        :return: response
        """
        # If Dimension exists. throw Exception
        if self.exists(dimension.name):
            raise RuntimeError("Dimension '{}' already exists".format(dimension.name))
        # If not all subsequent calls successful -> undo everything that has been done in this function
        try:
            # Create Dimension, Hierarchies, Elements, Edges.
            url = "/api/v1/Dimensions"
            response = self._rest.POST(url, dimension.body, **kwargs)
            # Create ElementAttributes
            # The 'Leaves' hierarchy is maintained implicitly by TM1 and is
            # therefore skipped here.
            for hierarchy in dimension:
                if not case_and_space_insensitive_equals(hierarchy.name, "Leaves"):
                    self.hierarchies.update_element_attributes(hierarchy, **kwargs)
        except TM1pyException as e:
            # undo everything if problem in step 1 or 2
            if self.exists(dimension.name, **kwargs):
                self.delete(dimension.name)
            raise e
        return response
    def get(self, dimension_name: str, **kwargs) -> Dimension:
        """ Get a Dimension
        :param dimension_name: name of the dimension
        :return: instance of TM1py.Dimension
        """
        url = format_url("/api/v1/Dimensions('{}')?$expand=Hierarchies($expand=*)", dimension_name)
        response = self._rest.GET(url, **kwargs)
        return Dimension.from_json(response.text)
    def update(self, dimension: Dimension, **kwargs):
        """ Update an existing dimension
        :param dimension: instance of TM1py.Dimension
        :return: None
        """
        # delete hierarchies that have been removed from the dimension object
        # (start from all remote hierarchies, discard the ones still present).
        hierarchies_to_be_removed = CaseAndSpaceInsensitiveSet(
            *self.hierarchies.get_all_names(dimension.name, **kwargs))
        for hierarchy in dimension.hierarchy_names:
            hierarchies_to_be_removed.discard(hierarchy)
        # update all Hierarchies except for the implicitly maintained 'Leaves' Hierarchy
        for hierarchy in dimension:
            if not case_and_space_insensitive_equals(hierarchy.name, "Leaves"):
                if self.hierarchies.exists(hierarchy.dimension_name, hierarchy.name, **kwargs):
                    self.hierarchies.update(hierarchy, **kwargs)
                else:
                    self.hierarchies.create(hierarchy, **kwargs)
        # Edge case: elements in leaves hierarchy that do not exist in other hierarchies
        if "Leaves" in dimension:
            existing_leaves = CaseAndSpaceInsensitiveSet(
                self.hierarchies.elements.get_leaf_element_names(dimension.name, "Leaves"))
            leaves_to_create = list()
            for leaf in dimension.get_hierarchy("Leaves"):
                if leaf.name not in existing_leaves:
                    leaves_to_create.append(leaf)
            if leaves_to_create:
                self.hierarchies.elements.add_elements(
                    dimension_name=dimension.name,
                    hierarchy_name="Leaves",
                    elements=leaves_to_create)
        # Finally drop the hierarchies no longer present on the local object.
        for hierarchy_name in hierarchies_to_be_removed:
            if not case_and_space_insensitive_equals(hierarchy_name, "Leaves"):
                self.hierarchies.delete(dimension_name=dimension.name, hierarchy_name=hierarchy_name, **kwargs)
    def update_or_create(self, dimension: Dimension, **kwargs):
        """ update if exists else create
        :param dimension: instance of TM1py.Dimension
        :return: None
        """
        if self.exists(dimension_name=dimension.name, **kwargs):
            self.update(dimension=dimension, **kwargs)
        else:
            self.create(dimension=dimension, **kwargs)
    def delete(self, dimension_name: str, **kwargs) -> Response:
        """ Delete a dimension
        :param dimension_name: Name of the dimension
        :return: response
        """
        url = format_url("/api/v1/Dimensions('{}')", dimension_name)
        return self._rest.DELETE(url, **kwargs)
    def exists(self, dimension_name: str, **kwargs) -> bool:
        """ Check if dimension exists
        :param dimension_name: Name of the dimension
        :return: True if the dimension exists
        """
        url = format_url("/api/v1/Dimensions('{}')", dimension_name)
        return self._exists(url, **kwargs)
    def get_all_names(self, skip_control_dims: bool = False, **kwargs) -> List[str]:
        """Ask TM1 Server for list of all dimension names
        :skip_control_dims: bool, True to skip control dims
        :Returns:
            List of Strings
        """
        # ModelDimensions() excludes the }-prefixed control dimensions.
        url = format_url(
            "/api/v1/{}?$select=Name",
            'ModelDimensions()' if skip_control_dims else 'Dimensions'
        )
        response = self._rest.GET(url, **kwargs)
        dimension_names = list(entry['Name'] for entry in response.json()['value'])
        return dimension_names
    def get_number_of_dimensions(self, skip_control_dims: bool = False, **kwargs) -> int:
        """Ask TM1 Server for number of dimensions
        :skip_control_dims: bool, True to exclude control dims from count
        :return: Number of dimensions
        """
        if skip_control_dims:
            # $top=0&$count returns only the count, no entity payload.
            response = self._rest.GET("/api/v1/ModelDimensions()?$select=Name&$top=0&$count", **kwargs)
            return response.json()['@odata.count']
        return int(self._rest.GET("/api/v1/Dimensions/$count", **kwargs).text)
    def execute_mdx(self, dimension_name: str, mdx: str, **kwargs) -> List:
        """ Execute MDX against Dimension.
        Requires }ElementAttributes_ Cube of the dimension to exist !
        :param dimension_name: Name of the Dimension
        :param mdx: valid Dimension-MDX Statement
        :return: List of Element names
        """
        warnings.warn("execute_mdx() will be deprecated; use ElementService execute_set_mdx.", DeprecationWarning,
                      stacklevel=2)
        # Doubled braces escape literal '{'/'}' in the format string; the MDX
        # runs against the dimension's }ElementAttributes_ attribute cube.
        mdx_skeleton = "SELECT " \
                       "{} ON ROWS, " \
                       "{{ [}}ElementAttributes_{}].DefaultMember }} ON COLUMNS " \
                       "FROM [}}ElementAttributes_{}]"
        mdx_full = mdx_skeleton.format(mdx, dimension_name, dimension_name)
        request = '/api/v1/ExecuteMDX?$expand=Axes(' \
                  '$filter=Ordinal eq 1;' \
                  '$expand=Tuples($expand=Members($select=Ordinal;$expand=Element($select=Name))))'
        payload = {"MDX": mdx_full}
        response = self._rest.POST(request, json.dumps(payload, ensure_ascii=False), **kwargs)
        raw_dict = response.json()
        # Only axis 1 (rows) is expanded; collect the element name per tuple.
        return [row_tuple['Members'][0]['Element']['Name'] for row_tuple in raw_dict['Axes'][0]['Tuples']]
    def create_element_attributes_through_ti(self, dimension: Dimension, **kwargs):
        """ Create element attributes for every hierarchy via a TI AttrInsert call.
        :param dimension. Instance of TM1py.Objects.Dimension class
        :return: None
        """
        process_service = ProcessService(self._rest)
        for h in dimension:
            statements = ["AttrInsert('{}', '', '{}', '{}');".format(dimension.name, ea.name, ea.attribute_type[0])
                          for ea
                          in h.element_attributes]
            process_service.execute_ti_code(lines_prolog=statements, **kwargs)
|
from rest_framework import routers
from shipping.api.v1.viewsets import ContinentViewSet, CountryViewSet, ShipperViewSet, PackageViewSet, \
OnlineShipmentViewSet, PackageShipmentViewSet
router = routers.DefaultRouter()

# Register every shipping endpoint; each entry is (URL prefix, viewset).
for prefix, viewset in (
    (r'continents', ContinentViewSet),
    (r'countries', CountryViewSet),
    (r'shippers', ShipperViewSet),
    (r'packages', PackageViewSet),
    (r'shipments/packages', PackageShipmentViewSet),
    (r'shipments/online', OnlineShipmentViewSet),
):
    router.register(prefix, viewset)
|
import json
import frappe
from frappe import _
from frappe.utils import now_datetime
from erpnext.selling.doctype.customer import customer
from erpnext.regional.india.utils import PAN_NUMBER_FORMAT
# Preserve a reference to the stock implementation before the monkey-patch
# further down this module replaces customer.make_address.
custom_make_address = customer.make_address
# Map GSTIN registration types (presumably the lookup's `dty` field — see
# get_gst_category below) to the GST category names ERPNext expects.
gst_category_map = {
    "Regular": "Registered Regular",
    "Input Service Distributor (ISD)": "Registered Regular",
    "Composition": "Registered Composition",
    "Tax Deductor": "Tax Deductor",
    "SEZ Unit": "SEZ",
    "SEZ Developer": "SEZ",
    "United Nation Body": "UIN Holders",
    "Consulate or Embassy of Foreign Country": "UIN Holders",
    "URP": "Unregistered",
}
# ####### SAMPLE DATA ########
# "Composition" - 36AASFP8573D2ZN
# "Input Service Distributor (ISD)" - 29AABCF8078M2ZW - Flipkart
# "Tax Deductor" - 06DELI09652G1DA, 09ALDN00287A1DD, 27AAFT56212B1DO, 19AAACI1681G1DV
# "SEZ Developer" - 27AAJCS5738D1Z6 -
# "United Nation Body" - 0717UNO00157UNO, 0717UNO00211UN2, 2117UNO00002UNF
# "Consulate or Embassy of Foreign Country" - 0717UNO00154UNU
# "Tax Collector (Electronic Commerce Operator)" - 29AABCF8078M1C8, 27AAECG3736E1C2 - Cannot be a part of GSTR1
# "Non Resident Online Services Provider" - 9917SGP29001OST - Google - Cannot be a part of GSTR1
# "Non Resident Taxable Person" -
# "Government Department ID" -
def make_address(args, is_primary_address=1):
    """Create and insert an Address document for the given party args.

    Replaces erpnext's ``customer.make_address`` so the custom GST fields
    (gstin_custom, pincode_custom) and the derived GST category are carried
    onto the new Address. ``is_primary_address`` is kept for signature
    compatibility with the function being replaced.
    """
    missing = [
        "<li>" + field.title() + "</li>"
        for field in ("city", "country")
        if not args.get(field)
    ]
    if missing:
        msg = _("Following fields are mandatory to create address:")
        frappe.throw(
            "{0} <br><br> <ul>{1}</ul>".format(msg, "\n".join(missing)),
            title=_("Missing Values Required"),
        )

    gst_category = get_gst_category(args)

    address = frappe.get_doc(
        {
            "doctype": "Address",
            "address_title": args.get("name"),
            "address_line1": args.get("address_line1"),
            "address_line2": args.get("address_line2"),
            "city": args.get("city"),
            "state": args.get("state"),
            "pincode": args.get("pincode_custom"),
            "country": args.get("country"),
            "gstin": args.get("gstin_custom"),
            "gst_category": gst_category,
            "links": [
                {"link_doctype": args.get("doctype"), "link_name": args.get("name")}
            ],
        }
    ).insert()
    return address
customer.make_address = make_address
def validate_party(self, method=None):
    """Document hook: copy GSTIN lookup info onto the party and derive its PAN.

    Fix: the GSTIN guard previously used ``or`` — ``x != "URP" or x != "NA"``
    is a tautology (true for every x, including "URP" and "NA"), so the PAN
    extraction also ran for the unregistered placeholder values. It now
    correctly skips them.
    """
    if self.get("gstin_info"):
        self.gstin_info = json.loads(self.get("gstin_info"))
        self.gstin_info_updated_on = now_datetime()
        self.ctb = self.gstin_info.get("ctb")
        self.sts = self.gstin_info.get("sts")
        self.default_gstin = self.gstin_info.get("gstin")
        self.trade_name = self.gstin_info.get("tradeNam")
        self.gstin_info = json.dumps(self.get("gstin_info"))
    # PAN is characters 3-12 of a regular GSTIN; "URP"/"NA" carry no PAN.
    if self.get("gstin_custom") and self.get("gstin_custom") not in ("URP", "NA"):
        pan = self.get("gstin_custom")[2:12]
        if PAN_NUMBER_FORMAT.match(pan):
            self.pan = pan
def validate_address(self, method=None):
    """Address hook: mirror the custom GST fields onto the standard Address
    fields and link the address to its customer/supplier."""
    if self.get("gstin_custom"):
        self.gstin = self.gstin_custom
        gst_category = get_gst_category(self)
        if gst_category:
            self.gst_category = gst_category
        # NOTE(review): unconditional delete — this raises AttributeError if
        # `gstin_info` was never set on the doc; confirm callers always set it.
        del self.gstin_info
    if self.get("pincode_custom"):
        self.pincode = self.pincode_custom
    if self.get("customer"):
        self.address_title = self.get("customer")
        self.append(
            "links", {"link_doctype": "Customer", "link_name": self.get("customer")}
        )
    if self.get("supplier"):
        self.address_title = self.get("supplier")
        self.append(
            "links", {"link_doctype": "Supplier", "link_name": self.get("supplier")}
        )
def get_gst_category(self):
    """Map the party's GSTIN registration type to an ERPNext GST category.

    Returns None when no ``gstin_info`` is available. Fix: the original left
    ``gst_category`` unassigned in that case and raised UnboundLocalError at
    the return — crashing make_address for parties without lookup data.
    """
    gst_category = None
    if self.get("gstin_info"):
        self.gstin_info = json.loads(self.get("gstin_info"))
        if self.get("gstin_custom") == "URP" or self.get("gstin_custom") == "NA":
            gst_category = gst_category_map["URP"]
        else:
            # `dty` is the registration type reported by the GSTIN lookup.
            gst_category = gst_category_map[self.get("gstin_info").get("dty")]
        self.gstin_info = json.dumps(self.get("gstin_info"))
    return gst_category
|
# misc python stuff
import os
import re
import configparser
from slackclient import SlackClient # slack messaging when model is finished training
import pathlib # detecting if file exists in a path
import pickle # saving and loading keras tokenizer
# graphics libraries
import matplotlib.pyplot as plt
# numerical libraries
import numpy as np
import pandas as pd
# keras model building
import keras
from keras.models import Model, Sequential
from keras.layers import (Input, Dense, Embedding, SpatialDropout1D, concatenate, RepeatVector, Flatten, Conv1D,
GRU, Bidirectional, GlobalAveragePooling1D, GlobalMaxPooling1D, CuDNNGRU, CuDNNLSTM, MaxPooling1D, Layer,
Dropout, K, Activation, BatchNormalization, PReLU, add, Reshape)
from keras.preprocessing import text, sequence
from keras import optimizers
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
# sklearn feature/model building
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
from sklearn.base import ClassifierMixin, BaseEstimator
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline
from sklearn.model_selection import cross_val_score, GridSearchCV, RandomizedSearchCV, StratifiedKFold, KFold
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, classification_report
# embedding similar to t-SNE but with custom metrics e.g. cosine distance
import umap
# NLP library
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet, stopwords
|
import uuid
from datetime import datetime, timedelta
from django.db import models
from django.forms import ValidationError
from utils.icao import ICAO
# from account.models import User
# Create your models here.
def generate_uuid():
    """Return a fresh random UUID4 as a string.

    Used as the default for ``Aircraft.serial_number``. Fix: return ``str``
    rather than a ``uuid.UUID`` instance so the in-memory default matches
    what the CharField stores and reloads from the database.
    """
    return str(uuid.uuid4())
def generate_icao(airport):
    """Look up the ICAO code that uniquely identifies *airport*.

    The code comes from the static ICAO table; an unknown airport name
    raises KeyError.
    """
    code = ICAO[airport]
    return code
def validate_future_flight(date):
    """Validate that *date* lies more than 24 hours in the future.

    Returns True when the datetime (compared as naive local time) is at
    least one day ahead of now; otherwise raises ValidationError.

    Fixes: the previous version round-tripped ``now`` through
    strftime/fromisoformat merely to drop microseconds, and the error
    message contained a typo ("form" for "from").
    """
    # Dropping microseconds mirrors the old strftime("%S") truncation.
    now = datetime.now().replace(microsecond=0)
    if date.replace(tzinfo=None) > now + timedelta(days=1):
        return True
    raise ValidationError("you can only create a flight from 24 hours into the future")
# print(generate_icao('ABE'))
class CustomDateTimeField(models.DateTimeField):
    """DateTimeField serialized as ISO-8601 without microseconds.

    Fix: ``datetime.replace`` returns a new object; the original discarded
    the result, so microseconds were never actually stripped from the
    serialized value.
    """
    def value_to_string(self, obj):
        # Empty string for null/empty values, truncated isoformat otherwise.
        val = self.value_from_object(obj)
        if val:
            val = val.replace(microsecond=0)
            return val.isoformat()
        return ""
# Create your models here.
class Aircraft(models.Model):
    """An airframe; serial_number defaults to a per-instance random UUID."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=100, blank=True, null=True)
    # Callable default: a new UUID is generated for each created row.
    serial_number = models.CharField(max_length=100, unique=True, default=generate_uuid)
    manufacturer = models.CharField(blank=False, null=False, max_length=200)
    model = models.TextField(blank=False, null=False, max_length=400, default="")
    def __str__(self):
        return f"{self.manufacturer} {self.model}"
class Airport(models.Model):
    """An airport; `code` is derived from `name` via the static ICAO table."""
    id = models.AutoField(primary_key=True)
    location = models.TextField(blank=False, null=False, max_length=400, default="")
    name = models.CharField(blank=False, null=False, unique=True, max_length=200)
    code = models.CharField(max_length=100, blank=True, null=True)
    def __str__(self):
        return f"{self.name}"
    def save(self, *args, **kwargs):
        """
        Override the save method to generate a unique icao code
        """
        # Recomputed on every save; raises KeyError for a name missing from
        # the ICAO table — presumably names are validated upstream (confirm).
        self.code = generate_icao(self.name)
        super(Airport, self).save(*args, **kwargs)
class Flight(models.Model):
    """A scheduled flight between two airports on a given aircraft."""
    id = models.AutoField(primary_key=True)
    description = models.TextField(blank=False, null=False, max_length=400, default="")
    aircraft = models.ForeignKey(to=Aircraft, blank=True, null=True, on_delete=models.CASCADE)
    departure_airport = models.ForeignKey(to=Airport, blank=True, null=True, related_name="depart", on_delete=models.CASCADE)
    arrival_airport = models.ForeignKey(to=Airport, blank=True, null=True, related_name="arrive", on_delete=models.CASCADE)
    # user = models.ForeignKey(to=Employee, on_delete=models.CASCADE)
    # Field-level validator rejects departures less than 24h in the future.
    departure_time = models.DateTimeField(validators=[validate_future_flight])
    arrival_time = models.DateTimeField(blank=False, null=False)
    def clean(self) -> None:
        # Cross-field validation: arrival must be present and at least
        # 30 minutes after departure; tzinfo is stripped for the comparison.
        if self.arrival_time is None:
            raise ValidationError("arrival time is required")
        departure = self.departure_time.replace(tzinfo=None)
        arrive = self.arrival_time.replace(tzinfo=None)
        if arrive - departure < timedelta(minutes=30):
            raise ValidationError("arrival time must be at least 30 minutes after departure time")
        return super().clean()
    def save(self, *args, **kwargs):
        """
        Calls the overridden clean method (via full_clean) to validate the
        flight's arrival time before saving.
        """
        self.full_clean()
        super(Flight, self).save(*args, **kwargs)
|
#!/usr/bin/env python
#######################################################################
#######################################################################
## Skeleton copied on March 25, 2020 from nf-core/atacseq
#######################################################################
#######################################################################
from __future__ import print_function
import os
import sys
import requests
import argparse
############################################
############################################
## FUNCTIONS
############################################
############################################
def file_base_name(file_name):
    """Return *file_name* truncated at its first '.' (or unchanged if none)."""
    # str.partition yields the full string as `base` when no '.' is present,
    # matching the original index-based behaviour.
    base, _sep, _rest = file_name.partition('.')
    return base
def path_base_name(path):
    """Return the basename of *path* without any extension(s)."""
    # Strip directories, then drop everything from the first dot onward.
    file_name = os.path.basename(path)
    return file_name.split('.', 1)[0]
############################################
############################################
## PARSE ARGUMENTS
############################################
############################################
Description = 'Reformat nfcore/slamseq design file and check its contents.'
Epilog = "Example usage: python check_design.py <DESIGN_FILE_IN> <DESIGN_FILE_OUT>"

argParser = argparse.ArgumentParser(description=Description, epilog=Epilog)
## REQUIRED PARAMETERS
argParser.add_argument('DESIGN_FILE_IN', help="Input design file.")
argParser.add_argument('DESIGN_FILE_OUT', help="Output design file.")
args = argParser.parse_args()

############################################
############################################
## MAIN FUNCTION
############################################
############################################

ERROR_STR = 'ERROR: Please check design file'
HEADER = ['group', 'condition', 'control', 'reads']
EXTHEADER = ['group', 'condition', 'control', 'reads','name','type','time']

fout = open(args.DESIGN_FILE_OUT,'w')
with open(args.DESIGN_FILE_IN, 'r') as f:
    ## The header must match either the short (4-column) or the extended
    ## (7-column) layout exactly.
    header = next(f)
    header = header.rstrip().split("\t")
    if header != HEADER and header != EXTHEADER:
        print("{} header: {} != {}".format(ERROR_STR,','.join(header),','.join(HEADER)))
        sys.exit(1)

    ## The output is always written in the extended layout.
    fout.write("\t".join(EXTHEADER) + "\n")

    extendedDesign = len(header) == 7

    for line in f:
        fields = line.rstrip().split("\t")
        group = fields[0]
        condition = fields[1]
        control = fields[2]
        reads = fields[3]
        ## Optional columns only exist in a fully populated extended row.
        ## (`sample_type` avoids shadowing the builtin `type`.)
        if extendedDesign and len(fields) == 7:
            name = fields[4]
            sample_type = fields[5]
            time = fields[6]
        else:
            name = ""
            sample_type = ""
            time = ""

        ## Fill defaults for missing optional columns.
        if name == "":
            name = path_base_name(reads)
        if sample_type == "":
            sample_type = "pulse"
        if time == "":
            time = "0"

        if sample_type != "pulse" and sample_type != "chase":
            print("{} type needs to be either 'pulse' or 'chase'!\nLine: '{}'".format(ERROR_STR,line.strip()))
            sys.exit(1)

        if control != "0" and control != "1":
            print("{} control needs to be either '0' or '1'!\nLine: '{}'".format(ERROR_STR,line.strip()))
            sys.exit(1)

        ## CHECK TIME COLUMN IS INTEGER
        if not time.isdigit():
            print("{}: Time needs to be an integer!\nLine: '{}'".format(ERROR_STR,line.strip()))
            sys.exit(1)

        ## Bug fix: the original error message referenced the undefined name
        ## `fastq`, raising NameError whenever a bad extension was reported.
        if reads[-9:] != '.fastq.gz' and reads[-6:] != '.fq.gz':
            print("{}: Reads FastQ file has incorrect extension (has to be '.fastq.gz' or 'fq.gz') - {}\nLine: '{}'".format(ERROR_STR,reads,line.strip()))
            sys.exit(1)

        fout.write("\t".join([group, condition, control, reads, name, sample_type, time]) + "\n")

fout.close()
|
#!/usr/bin/env python3
#python
import time
from os import mkdir, listdir
from os.path import isdir, isfile
from itertools import chain
from pickle import load
from random import choice
#external
import numpy as np
from scipy.optimize import least_squares, minimize, approx_fprime
from scipy.signal import hilbert
from matplotlib import pyplot as plt
import h5py
#mine
from LoLIM.prettytable import PrettyTable
from LoLIM.utilities import logger, processed_data_dir, v_air, SId_to_Sname, Sname_to_SId_dict, RTD
#from LoLIM.IO.binary_IO import read_long, write_long, write_double_array, write_string, write_double
from LoLIM.antenna_response import LBA_ant_calibrator
from LoLIM.porta_code import code_logger, pyplot_emulator
from LoLIM.signal_processing import parabolic_fit, remove_saturation, data_cut_at_index
from LoLIM.IO.raw_tbb_IO import filePaths_by_stationName, MultiFile_Dal1
from LoLIM.findRFI import window_and_filter
from LoLIM.stationTimings.autoCorrelator_tools import stationDelay_fitter
#from RunningStat import RunningStat
inv_v_air = 1.0/v_air
#### some random utilities
def none_max(lst):
    """Return the largest non-None entry of *lst* (-inf when nothing qualifies)."""
    best = -np.inf
    for value in lst:
        if value is None:
            continue
        if value > best:
            best = value
    return best
def get_radius_ze_az( XYZ ):
    """Decompose a Cartesian 3-vector into spherical coordinates.

    Returns (radius, zenith, azimuth): zenith measured from +Z, azimuth from
    +X toward +Y, both in radians.
    """
    radius = np.linalg.norm( XYZ )
    zenith = np.arccos( XYZ[2]/radius )
    azimuth = np.arctan2( XYZ[1], XYZ[0] )
    return radius, zenith, azimuth
#### main code
class stochastic_fitter_dt:
    """Fit station timing delays plus per-source XYZT locations by repeated
    randomly-perturbed least-squares runs.

    Relies on module-level globals: sorted_antenna_names, station_order,
    ant_locs, station_to_antenna_index_list, current_delays_guess, and the
    stationDelay_fitter class.
    """
    def __init__(self, source_object_list, initial_guess=None, quick_kill=None):
        print("running stochastic fitter")
        self.quick_kill = quick_kill
        self.source_object_list = source_object_list

        ## assume globals:
        # max_itters_per_loop
        # itters_till_convergence
        # max_jitter_width
        # min_jitter_width
        # cooldown
        # sorted_antenna_names
        self.num_antennas = len(sorted_antenna_names)
        self.num_measurments = self.num_antennas*len(source_object_list)
        self.num_delays = len(station_order)

        # Map each antenna index to its station index.
        # Fix: np.int was removed in NumPy 1.24; the builtin int is the exact
        # alias it used to be, so behaviour is unchanged on older NumPy too.
        self.station_indeces = np.empty( len(ant_locs), dtype=int )
        for station_index, index_range in enumerate(station_to_antenna_index_list):
            first,last = index_range
            self.station_indeces[first:last] = station_index

        self.fitter = stationDelay_fitter(ant_locs, self.station_indeces, len(self.source_object_list), self.num_delays)
        for source in self.source_object_list:
            self.fitter.set_event( source.pulse_times )

        #### make guess ####
        # Parameter layout: [station delays | X,Y,Z,T for each source].
        self.num_DOF = -self.num_delays
        self.solution = np.zeros( self.num_delays+4*len(source_object_list) )
        self.solution[:self.num_delays] = current_delays_guess
        param_i = self.num_delays
        for PSE in source_object_list:
            self.solution[param_i:param_i+4] = PSE.guess_XYZT
            param_i += 4
            self.num_DOF += PSE.num_DOF()

        if initial_guess is not None: ## use initial guess instead, if given
            self.solution = initial_guess

        self.fitter.prep_for_random_pert()

    def rerun(self, deviation, antenna_error_deviation):
        """Randomly perturb the measurements and re-fit.

        :return: (new_station_delays, total_RMS)
        """
        self.fitter.random_perturbation( deviation, antenna_error_deviation )

        # NOTE(review): `new_guess` is built but never handed to
        # least_squares (the fit starts from self.solution), so the jittered
        # start point is unused — confirm whether that is intentional.
        new_guess = np.array(self.solution)
        new_guess[:self.num_delays] += np.random.normal(scale=100E-9, size=self.num_delays)
        new_guess[self.num_delays+3::4] += np.random.normal(scale=100E-9, size=len(self.source_object_list))
        new_guess[self.num_delays::4] = 0.0
        new_guess[self.num_delays+1::4] = 0.0
        new_guess[self.num_delays+2::4] = 0.0

        fit_res = least_squares(self.fitter.objective_fun, self.solution, jac='2-point', method='lm', xtol=1.0E-15, ftol=1.0E-15, gtol=1.0E-15, x_scale='jac')

        self.last_fit = fit_res.x
        new_station_delays = fit_res.x[:self.num_delays]
        total_RMS = self.fitter.RMS(fit_res.x, self.num_DOF)
        return new_station_delays, total_RMS

    def employ_result(self, source_object_list):
        """Append the fitted XYZT of each source to its solution history."""
        param_i = self.num_delays
        for PSE in source_object_list:
            PSE.append_solution( self.last_fit[param_i:param_i+4] )
            param_i += 4
class stochastic_fitter_dt_loc:
    """Like stochastic_fitter_dt, but fits only the source locations (XYZT)
    while the station delays are held fixed per rerun() call.

    Relies on the same module-level globals as stochastic_fitter_dt.
    """
    def __init__(self, source_object_list):
        self.source_object_list = source_object_list

        ## assume globals:
        # max_itters_per_loop
        # itters_till_convergence
        # max_jitter_width
        # min_jitter_width
        # cooldown
        # sorted_antenna_names
        self.num_antennas = len(sorted_antenna_names)
        self.num_measurments = self.num_antennas*len(source_object_list)
        self.num_delays = len(station_order)

        # Fix: np.int was removed in NumPy 1.24; builtin int is equivalent.
        self.station_indeces = np.empty( len(ant_locs), dtype=int )
        for station_index, index_range in enumerate(station_to_antenna_index_list):
            first,last = index_range
            self.station_indeces[first:last] = station_index

        self.fitter = stationDelay_fitter(ant_locs, self.station_indeces, len(self.source_object_list), self.num_delays)
        for source in self.source_object_list:
            self.fitter.set_event( source.pulse_times )

        #### make guess ####
        # Only XYZT per source is fitted here; delays are supplied externally.
        self.num_DOF = -self.num_delays
        self.solution = np.zeros( 4*len(source_object_list) )
        param_i = 0
        for PSE in source_object_list:
            self.solution[param_i:param_i+4] = PSE.guess_XYZT
            param_i += 4
            self.num_DOF += PSE.num_DOF()

        self.fitter.prep_for_random_pert()

        # Scratch buffer [fixed delays | location params] because the
        # underlying fitter expects the full parameter vector.
        self.tmp_array = np.zeros( self.num_delays+4*len(source_object_list) )
        self.current_delays = current_delays_guess

    def obj_func(self, vals):
        """Objective over location parameters only; delays stay fixed."""
        self.tmp_array[:self.num_delays] = self.current_delays
        self.tmp_array[self.num_delays:] = vals
        return self.fitter.objective_fun( self.tmp_array )

    def rerun(self, station_delays, deviation, antenna_error_deviation):
        """Perturb measurements, re-fit locations against the given fixed
        station delays, and return the total RMS."""
        self.fitter.random_perturbation( deviation, antenna_error_deviation )
        self.current_delays = station_delays

        # NOTE(review): as in stochastic_fitter_dt, `new_guess` is never
        # passed to least_squares (fit starts from self.solution) — confirm.
        new_guess = np.array(self.solution)
        new_guess[3::4] += np.random.normal(scale=100E-9, size=len(self.source_object_list))
        new_guess[::4] = 0.0
        new_guess[1::4] = 0.0
        new_guess[2::4] = 0.0

        fit_res = least_squares(self.obj_func, self.solution, jac='2-point', method='lm', xtol=1.0E-15, ftol=1.0E-15, gtol=1.0E-15, x_scale='jac')

        self.last_fit = fit_res.x
        self.tmp_array[:self.num_delays] = self.current_delays
        self.tmp_array[self.num_delays:] = fit_res.x
        total_RMS = self.fitter.RMS(self.tmp_array, self.num_DOF)
        return total_RMS

    def employ_result(self, source_object_list):
        """Append the fitted XYZT of each source to its solution history."""
        param_i = 0
        for PSE in source_object_list:
            PSE.append_solution( self.last_fit[param_i:param_i+4] )
            param_i += 4
#### source object ####
## represents a potential source
## keeps track of a stations on the prefered station, and stations on other stations that could correlate and are considered correlated
## contains utilities for fitting, and for finding RMS in total and for each station
## also contains utilities for plotting and saving info
## need to handle inseartion of random error, and that choosen SSPE can change
class source_object():
    ## assume: guess_location , ant_locs, station_to_antenna_index_list, station_to_antenna_index_dict, referance_station, station_order,
    # sorted_antenna_names, station_locations,
    # are global
    def __init__(self, ID, input_fname, location, stations_to_exclude, antennas_to_exclude, num_runs ):
        # ID: combined source identifier (see Part1_input_manager encoding)
        # input_fname: HDF5 file holding per-station/per-antenna pulse data
        # location: guess XYZT of the source
        # num_runs: number of Monte-Carlo iterations to reserve storage for
        self.ID = ID
        self.stations_to_exclude = stations_to_exclude
        self.antennas_to_exclude = antennas_to_exclude
        # NOTE(review): the file handle is never explicitly closed; acceptable
        # for a one-shot analysis script but leaks for long-lived use.
        self.data_file = h5py.File(input_fname, "r")
        self.guess_XYZT = np.array( location )
        # one XYZT row per Monte-Carlo iteration, filled via append_solution()
        self.solutions = np.empty( (num_runs, 4), dtype=np.double )
        self.sol_i = 0

    def prep_for_fitting(self, polarization, station_delay_guess_dict):
        # Fill self.pulse_times (one slot per antenna, NaN = unused) with
        # model-predicted arrival times for all non-excluded stations.
        # polarization: 0 = even (PolE), otherwise odd (PolO) channel.
        self.polarization = polarization
        self.pulse_times = np.empty( len(sorted_antenna_names) )
        self.pulse_times[:] = np.nan

        #### first add times from referance_station
        for sname in chain(station_order):
            if sname not in self.stations_to_exclude:
                self.add_known_station(sname, station_delay_guess_dict[sname])
        if referance_station not in self.stations_to_exclude:
            self.add_known_station(referance_station, 0.0)  # reference station has zero delay by definition

        #### setup some temp storage for fitting
        self.tmp_LS2_data = np.empty( len(sorted_antenna_names) )

    def remove_station(self, sname):
        # Blank out all pulse times belonging to one station.
        antenna_index_range = station_to_antenna_index_dict[sname]
        self.pulse_times[ antenna_index_range[0]:antenna_index_range[1] ] = np.nan

    #
    # def has_station(self, sname):
    # antenna_index_range = station_to_antenna_index_dict[sname]
    # return np.sum(np.isfinite(self.pulse_times[ antenna_index_range[0]:antenna_index_range[1] ])) > 0

    def add_known_station(self, sname, delay):
        # Set pulse times for every antenna of station sname present in the
        # data file; returns the number of finite times for that station
        # (0 if the station is absent from the file).
        self.remove_station( sname )
        if sname in self.data_file:
            station_group= self.data_file[sname]
        else:
            return 0

        antenna_index_range = station_to_antenna_index_dict[sname]
        for ant_i in range(antenna_index_range[0], antenna_index_range[1]):
            ant_name = sorted_antenna_names[ant_i]
            if ant_name in station_group:
                ant_data = station_group[ant_name]

                # start_time = ant_data.attrs['starting_index']*5.0E-9
                pt = ant_data.attrs['PolE_peakTime'] if self.polarization==0 else ant_data.attrs['PolO_peakTime']
                waveform = ant_data[1,:] if self.polarization==0 else ant_data[3,:]
                # start_time += ant_data.attrs['PolE_timeOffset'] if self.polarization==0 else ant_data.attrs['PolO_timeOffset']
                amp = np.max(waveform)

                if not np.isfinite(pt):
                    pt = np.nan
                if amp<min_antenna_amplitude or (ant_name in self.antennas_to_exclude) or (ant_name in bad_antennas):
                    pt = np.nan

                # The stored time is the MODEL prediction (geometric delay from
                # the guess location plus station delay) -- this script injects
                # synthetic data for error estimation.
                # NOTE(review): pt is computed and NaN-filtered above but never
                # used; excluded/low-amplitude antennas still receive a model
                # time here. Presumably this line was meant to be guarded by
                # np.isfinite(pt) -- TODO confirm intended behavior.
                self.pulse_times[ ant_i ] = np.linalg.norm( ant_locs[ant_i] - self.guess_XYZT[0:3] )*inv_v_air + self.guess_XYZT[3] + delay

        return np.sum(np.isfinite( self.pulse_times[antenna_index_range[0]:antenna_index_range[1]] ) )

    def num_DOF(self):
        # Degrees of freedom contributed by this source: finite measurements
        # minus the fitted location parameters.
        return np.sum( np.isfinite(self.pulse_times) ) - 3 ## minus three or four?

    def append_solution(self, new_solution):
        # Store one fitted XYZT row; called once per Monte-Carlo iteration.
        self.solutions[self.sol_i, : ] = new_solution
        self.sol_i += 1
# def try_location_LS(self, delays, XYZT_location, out):
# X,Y,Z,T = XYZT_location
# Z = np.abs(Z)
#
# delta_X_sq = ant_locs[:,0]-X
# delta_Y_sq = ant_locs[:,1]-Y
# delta_Z_sq = ant_locs[:,2]-Z
#
# delta_X_sq *= delta_X_sq
# delta_Y_sq *= delta_Y_sq
# delta_Z_sq *= delta_Z_sq
#
#
# out[:] = T - self.pulse_times
#
# ##now account for delays
# for index_range, delay in zip(station_to_antenna_index_list, delays):
# first,last = index_range
#
# out[first:last] += delay ##note the wierd sign
#
#
# out *= v_air
# out *= out ##this is now delta_t^2 *C^2
#
# out -= delta_X_sq
# out -= delta_Y_sq
# out -= delta_Z_sq
# def try_location_JAC(self, delays, XYZT_location, out_loc, out_delays):
# X,Y,Z,T = XYZT_location
# Z = np.abs(Z)
#
# out_loc[:,0] = X
# out_loc[:,0] -= ant_locs[:,0]
# out_loc[:,0] *= -2
#
# out_loc[:,1] = Y
# out_loc[:,1] -= ant_locs[:,1]
# out_loc[:,1] *= -2
#
# out_loc[:,2] = Z
# out_loc[:,2] -= ant_locs[:,2]
# out_loc[:,2] *= -2
#
#
# out_loc[:,3] = T - self.pulse_times
# out_loc[:,3] *= 2*v_air*v_air
#
# delay_i = 0
# for index_range, delay in zip(station_to_antenna_index_list, delays):
# first,last = index_range
#
# out_loc[first:last,3] += delay*2*v_air*v_air
# out_delays[first:last,delay_i] = out_loc[first:last,3]
#
# delay_i += 1
#
# def try_location_LS2(self, delays, XYZT_location, out):
# X,Y,Z,T = XYZT_location
## Z = np.abs(Z)
#
# self.tmp_LS2_data[:] = ant_locs[:,0]
# self.tmp_LS2_data[:] -= X
# self.tmp_LS2_data[:] *= self.tmp_LS2_data[:]
# out[:] = self.tmp_LS2_data
#
# self.tmp_LS2_data[:] = ant_locs[:,1]
# self.tmp_LS2_data[:] -= Y
# self.tmp_LS2_data[:] *= self.tmp_LS2_data[:]
# out[:] += self.tmp_LS2_data
#
# self.tmp_LS2_data[:] = ant_locs[:,2]
# self.tmp_LS2_data[:] -= Z
# self.tmp_LS2_data[:] *= self.tmp_LS2_data[:]
# out[:] += self.tmp_LS2_data
#
# np.sqrt( out, out=out )
# out *= inv_v_air
#
# out += T
# out -= self.pulse_times
#
# ##now account for delays
# for index_range, delay in zip(station_to_antenna_index_list, delays):
# first,last = index_range
#
# out[first:last] += delay ##note the wierd sign
#
# def try_location_JAC2(self, delays, XYZT_location, out_loc, out_delays):
# X,Y,Z,T = XYZT_location
## Z = np.abs(Z)
#
# out_loc[:,0] = X
# out_loc[:,0] -= ant_locs[:,0]
#
# out_loc[:,1] = Y
# out_loc[:,1] -= ant_locs[:,1]
#
# out_loc[:,2] = Z
# out_loc[:,2] -= ant_locs[:,2]
#
#
# out_delays[:,0] = out_loc[:,0] ## use as temporary storage
# out_delays[:,0] *= out_delays[:,0]
# out_loc[:,3] = out_delays[:,0] ## also use as temporary storage
#
# out_delays[:,0] = out_loc[:,1] ## use as temporary storage
# out_delays[:,0] *= out_delays[:,0]
# out_loc[:,3] += out_delays[:,0]
#
# out_delays[:,0] = out_loc[:,2] ## use as temporary storage
# out_delays[:,0] *= out_delays[:,0]
# out_loc[:,3] += out_delays[:,0]
#
# np.sqrt( out_loc[:,3], out = out_loc[:,3] )
#
# out_loc[:,0] /= out_loc[:,3]
# out_loc[:,1] /= out_loc[:,3]
# out_loc[:,2] /= out_loc[:,3]
#
# out_loc[:,0] *= inv_v_air
# out_loc[:,1] *= inv_v_air
# out_loc[:,2] *= inv_v_air
#
# out_loc[:,3] = 1
#
# delay_i = 0
# out_delays[:] = 0.0
# for index_range, delay in zip(station_to_antenna_index_list, delays):
# first,last = index_range
#
# out_delays[first:last,delay_i] = 1
#
# delay_i += 1
#
#
# def estimate_T(self, delays, XYZT_location):
# X,Y,Z,T = XYZT_location
# Z = np.abs(Z)
#
# delta_X_sq = ant_locs[:,0]-X
# delta_Y_sq = ant_locs[:,1]-Y
# delta_Z_sq = ant_locs[:,2]-Z
#
# delta_X_sq *= delta_X_sq
# delta_Y_sq *= delta_Y_sq
# delta_Z_sq *= delta_Z_sq
#
#
# workspace = delta_X_sq+delta_Y_sq
# workspace += delta_Z_sq
#
## print(delta_X_sq)
# np.sqrt(workspace, out=workspace)
#
## print(self.pulse_times)
## print(workspace)
# workspace[:] -= self.pulse_times*v_air ## this is now source time
#
# ##now account for delays
# for index_range, delay in zip(station_to_antenna_index_list, delays):
# first,last = index_range
# workspace[first:last] += delay*v_air ##note the wierd sign
#
## print(workspace)
# ave_error = np.nanmean( workspace )
# return -ave_error/v_air
# def SSqE_fit(self, delays, XYZT_location):
# X,Y,Z,T = XYZT_location
# Z = np.abs(Z)
#
# delta_X_sq = ant_locs[:,0]-X
# delta_Y_sq = ant_locs[:,1]-Y
# delta_Z_sq = ant_locs[:,2]-Z
#
# delta_X_sq *= delta_X_sq
# delta_Y_sq *= delta_Y_sq
# delta_Z_sq *= delta_Z_sq
#
# distance = delta_X_sq
# distance += delta_Y_sq
# distance += delta_Z_sq
#
# np.sqrt(distance, out=distance)
# distance *= 1.0/v_air
#
# distance += T
# distance -= self.pulse_times
#
# ##now account for delays
# for index_range, delay in zip(station_to_antenna_index_list, delays):
# first,last = index_range
# if first is not None:
# distance[first:last] += delay ##note the wierd sign
#
# distance *= distance
# return np.nansum(distance)
# def RMS_fit_byStation(self, delays, XYZT_location):
# X,Y,Z,T = XYZT_location
# Z = np.abs(Z)
#
# delta_X_sq = ant_locs[:,0]-X
# delta_Y_sq = ant_locs[:,1]-Y
# delta_Z_sq = ant_locs[:,2]-Z
#
# delta_X_sq *= delta_X_sq
# delta_Y_sq *= delta_Y_sq
# delta_Z_sq *= delta_Z_sq
#
# distance = delta_X_sq
# distance += delta_Y_sq
# distance += delta_Z_sq
#
# np.sqrt(distance, out=distance)
# distance *= 1.0/v_air
#
# distance += T
# distance -= self.pulse_times
#
# ##now account for delays
# for index_range, delay in zip(station_to_antenna_index_list, delays):
# first,last = index_range
# if first is not None:
# distance[first:last] += delay ##note the wierd sign
#
# distance *= distance
#
# ret = []
# for index_range in station_to_antenna_index_list:
# first,last = index_range
#
# data = distance[first:last]
# nDOF = np.sum( np.isfinite(data) )
# if nDOF == 0:
# ret.append( None )
# else:
# ret.append( np.sqrt( np.nansum(data)/nDOF ) )
#
# ## need to do referance station
# first,last = station_to_antenna_index_dict[ referance_station ]
# data = distance[first:last]
# nDOF = np.sum( np.isfinite(data) )
# if nDOF == 0:
# ret.append( None )
# else:
# ret.append( np.sqrt( np.nansum(data)/nDOF ) )
#
# return ret
#
# def plot_waveforms(self, station_timing_offsets, fname=None):
#
# if fname is None:
# plotter = plt
# else:
# CL = code_logger(fname)
# CL.add_statement("import numpy as np")
# plotter = pyplot_emulator(CL)
#
# most_min_t = np.inf
# snames_not_plotted = []
#
# for sname, offset in zip( chain(station_order,[referance_station]), chain(station_timing_offsets,[0.0]) ):
# index_range = station_to_antenna_index_dict[sname]
#
# if sname in self.data_file:
# station_data = self.data_file[sname]
# else:
# continue
#
#
# min_T = np.inf
#
# max_T = -np.inf
# for ant_i in range(index_range[0], index_range[1]):
# ant_name = sorted_antenna_names[ant_i]
# if ant_name not in station_data:
# continue
#
# ant_data = station_data[ant_name]
#
# PolE_peak_time = ant_data.attrs['PolE_peakTime'] - offset
# PolO_peak_time = ant_data.attrs['PolO_peakTime'] - offset
#
# PolE_hilbert = ant_data[1,:]
# PolO_hilbert = ant_data[3,:]
#
# PolE_trace = ant_data[0,:]
# PolO_trace = ant_data[2,:]
#
# PolE_T_array = (np.arange(len(PolE_hilbert)) + ant_data.attrs['starting_index'] )*5.0E-9 + ant_data.attrs['PolE_timeOffset']
# PolO_T_array = (np.arange(len(PolO_hilbert)) + ant_data.attrs['starting_index'] )*5.0E-9 + ant_data.attrs['PolO_timeOffset']
#
# PolE_T_array -= offset
# PolO_T_array -= offset
#
# PolE_amp = np.max(PolE_hilbert)
# PolO_amp = np.max(PolO_hilbert)
# amp = max(PolE_amp, PolO_amp)
# PolE_hilbert = PolE_hilbert/(amp*3.0)
# PolO_hilbert = PolO_hilbert/(amp*3.0)
# PolE_trace = PolE_trace/(amp*3.0)
# PolO_trace = PolO_trace/(amp*3.0)
#
#
# if PolE_amp < min_antenna_amplitude:
# PolE_peak_time = np.inf
# if PolO_amp < min_antenna_amplitude:
# PolO_peak_time = np.inf
#
# plotter.plot( PolE_T_array, ant_i+PolE_hilbert, 'g' )
# plotter.plot( PolE_T_array, ant_i+PolE_trace, 'g' )
# plotter.plot( [PolE_peak_time, PolE_peak_time], [ant_i, ant_i+2.0/3.0], 'g')
#
# plotter.plot( PolO_T_array, ant_i+PolO_hilbert, 'm' )
# plotter.plot( PolO_T_array, ant_i+PolO_trace, 'm' )
# plotter.plot( [PolO_peak_time, PolO_peak_time], [ant_i, ant_i+2.0/3.0], 'm')
#
# plotter.annotate( ant_name, xy=[PolO_T_array[-1], ant_i], size=7)
#
# max_T = max(max_T, PolE_T_array[-1], PolO_T_array[-1])
# min_T = min(min_T, PolE_T_array[0], PolO_T_array[0])
# most_min_t = min(most_min_t, min_T)
#
# if min_T<np.inf:
# plotter.annotate( sname, xy=[min_T, np.average(index_range)], size=15)
# else:
# snames_not_plotted.append( sname )
#
# for sname in snames_not_plotted:
# index_range = station_to_antenna_index_dict[sname]
# plotter.annotate( sname, xy=[most_min_t, np.average(index_range)], size=15)
#
# plotter.show()
#
# if fname is not None:
# CL.save()
#
# def plot_selected_waveforms(self, station_timing_offsets, fname=None):
#
# if fname is None:
# plotter = plt
# else:
# CL = code_logger(fname)
# CL.add_statement("import numpy as np")
# plotter = pyplot_emulator(CL)
#
# most_min_t = np.inf
# snames_not_plotted = []
#
# for sname, offset in zip( chain(station_order,[referance_station]), chain(station_timing_offsets,[0.0]) ):
# index_range = station_to_antenna_index_dict[sname]
#
# min_T = np.inf
# max_T = -np.inf
# for ant_i in range(index_range[0], index_range[1]):
# ant_name = sorted_antenna_names[ant_i]
#
# pulse_time = self.pulse_times[ ant_i ]
# waveform = self.waveforms[ ant_i ]
# startTime = self.waveform_startTimes[ ant_i ]
#
# if not np.isfinite( pulse_time ):
# continue
#
# T_array = np.arange(len(waveform))*5.0E-9 + (startTime - offset)
#
#
# amp = np.max(waveform)
# waveform = waveform/(amp*3.0)
#
# plotter.plot( T_array, ant_i+waveform, 'g' )
# plotter.plot( [pulse_time-offset, pulse_time-offset], [ant_i, ant_i+2.0/3.0], 'm')
#
# plotter.annotate( ant_name, xy=[T_array[-1], ant_i], size=7)
#
# max_T = max(max_T, T_array[-1])
# min_T = min(min_T, T_array[0])
# most_min_t = min(most_min_t, min_T)
#
# if min_T<np.inf:
# plotter.annotate( sname, xy=[min_T, np.average(index_range)], size=15)
# else:
# snames_not_plotted.append( sname )
#
# for sname in snames_not_plotted:
# index_range = station_to_antenna_index_dict[sname]
# plotter.annotate( sname, xy=[most_min_t, np.average(index_range)], size=15)
#
# plotter.show()
#
# if fname is not None:
# CL.save()
class Part1_input_manager:
def __init__(self, input_files):
self.max_num_input_files = 10
if len(input_files) > self.max_num_input_files:
print("TOO MANY INPUT FOLDERS!!!")
quit()
self.input_files = input_files
self.input_data = []
for folder_i, folder in enumerate(input_files):
input_folder = processed_data_folder + "/" + folder +'/'
file_list = [(int(f.split('_')[1][:-3])*self.max_num_input_files+folder_i ,input_folder+f) for f in listdir(input_folder) if f.endswith('.h5')] ## get all file names, and get the 'ID' for the file name
file_list.sort( key=lambda x: x[0] ) ## sort according to ID
self.input_data.append( file_list )
def known_source(self, ID):
file_i = int(ID/self.max_num_input_files)
folder_i = ID - file_i*self.max_num_input_files
file_list = self.input_data[ folder_i ]
return [info for info in file_list if info[0]==ID][0]
# Print full arrays at high precision in the log output.
np.set_printoptions(precision=10, threshold=np.inf)

## some global settings
num_stat_per_table = 10  # stations per printed table (legacy setting -- TODO confirm still used)

#### these globals are holdovers
#station_locations = None ## to be set
#station_to_antenna_index_list = None## to be set
#stations_with_fits = None## to be set
#station_to_antenna_index_dict = None
def run_fitter(timeID, output_folder, pulse_input_folders, guess_timings, souces_to_fit, guess_source_locations,
               source_polarizations, source_stations_to_exclude, source_antennas_to_exclude, bad_ants,
               ref_station="CS002", min_ant_amplitude=10, num_itters=1000, error_deviation=0.5E-9, antenna_error=0.5E-9,
               source_XYZ_to_test=[]):
    """Monte-Carlo estimate of station-timing and source-location errors.

    Repeatedly perturbs the (synthetic) pulse times by error_deviation /
    antenna_error, re-fits station delays and source locations num_itters
    times, and prints the spread (std) of the fitted delays and locations.

    Args:
        timeID: identifier of the observation; used to locate data on disk.
        output_folder: subfolder (created if needed) for logs and plots.
        pulse_input_folders: part-1 output folders with the pulse .h5 files.
        guess_timings: dict station -> initial delay guess (relative to ref).
        souces_to_fit: combined IDs of the sources used to fit delays.
        guess_source_locations: dict source_ID -> guess XYZT.
        source_polarizations / source_stations_to_exclude /
        source_antennas_to_exclude: per-source fitting configuration.
        bad_ants: antennas excluded everywhere.
        ref_station: station whose delay is defined as zero.
        num_itters: number of Monte-Carlo iterations.
        source_XYZ_to_test: optional extra XYZ positions whose location
            error is estimated with the delays held fixed.
            NOTE(review): mutable default argument; safe here because it is
            only read, but a None-sentinel would be more robust.
    """
    ##### holdovers. These globals need to be fixed, so not global....
    global station_locations, station_to_antenna_index_list, stations_with_fits, station_to_antenna_index_dict
    global referance_station, station_order, sorted_antenna_names, min_antenna_amplitude, ant_locs, bad_antennas
    global current_delays_guess, processed_data_folder

    referance_station = ref_station
    min_antenna_amplitude = min_ant_amplitude
    bad_antennas = bad_ants

    # re-express all delay guesses relative to the reference station
    if referance_station in guess_timings:
        ref_T = guess_timings[referance_station]
        guess_timings = {station:T-ref_T for station,T in guess_timings.items() if station != referance_station}

    processed_data_folder = processed_data_dir(timeID)

    data_dir = processed_data_folder + "/" + output_folder
    if not isdir(data_dir):
        mkdir(data_dir)

    logging_folder = data_dir + '/logs_and_plots'
    if not isdir(logging_folder):
        mkdir(logging_folder)

    #Setup logger and open initial data set
    log = logger()
    log.set(logging_folder + "/log_out.txt") ## TODo: save all output to a specific output folder
    log.take_stderr()
    log.take_stdout()

    # echo the full configuration into the log for reproducibility
    print("timeID:", timeID)
    print("date and time run:", time.strftime("%c") )
    print("input folders:", pulse_input_folders)
    print("source IDs to fit:", souces_to_fit)
    print("guess locations:", guess_source_locations)
    print("polarization to use:", source_polarizations)
    print("source stations to exclude:", source_stations_to_exclude)
    print("source antennas to exclude:", source_antennas_to_exclude)
    print("bad antennas:", bad_ants)
    print("referance station:", ref_station)
    print("guess delays:", guess_timings)
    print('pulse error:', error_deviation)
    print('antenna error:', antenna_error)
    print()

    #### open data and data processing stuff ####
    print("loading data")
    raw_fpaths = filePaths_by_stationName(timeID)
    # only open the stations we actually fit (guesses plus the reference)
    raw_data_files = {sname:MultiFile_Dal1(fpaths, force_metadata_ant_pos=True) for sname,fpaths in raw_fpaths.items() if sname in chain(guess_timings.keys(), [referance_station]) }

    #### sort antennas and stations ####
    station_order = list(guess_timings.keys())## note this doesn't include reference station
    sorted_antenna_names = []
    station_to_antenna_index_dict = {}
    ant_loc_dict = {}

    for sname in station_order + [referance_station]:
        first_index = len(sorted_antenna_names)

        stat_data = raw_data_files[sname]
        # every second antenna: keep only the even-polarization channels
        even_ant_names = stat_data.get_antenna_names()[::2]
        even_ant_locs = stat_data.get_LOFAR_centered_positions()[::2]

        sorted_antenna_names += even_ant_names
        for ant_name, ant_loc in zip(even_ant_names,even_ant_locs):
            ant_loc_dict[ant_name] = ant_loc

        # half-open index range [first, last) of this station's antennas
        station_to_antenna_index_dict[sname] = (first_index, len(sorted_antenna_names))

    ant_locs = np.zeros( (len(sorted_antenna_names), 3))
    for i, ant_name in enumerate(sorted_antenna_names):
        ant_locs[i] = ant_loc_dict[ant_name]

    # each station's "location" is the position of its first antenna
    station_locations = {sname:ant_locs[station_to_antenna_index_dict[sname][0]] for sname in station_order + [referance_station]}
    station_to_antenna_index_list = [station_to_antenna_index_dict[sname] for sname in station_order + [referance_station]]

    #### sort the delays guess, and account for station locations ####
    current_delays_guess = np.array([guess_timings[sname] for sname in station_order])
    # original_delays = np.array( current_delays_guess )

    #### open info from part 1 ####
    input_manager = Part1_input_manager( pulse_input_folders )

    #### first we fit the known sources ####
    current_sources = []
    # next_source = 0
    for knownID in souces_to_fit:
        source_ID, input_name = input_manager.known_source( knownID )
        print("prep fitting:", source_ID)

        location = guess_source_locations[source_ID]

        ## make source
        source_to_add = source_object(source_ID, input_name, location, source_stations_to_exclude[source_ID], source_antennas_to_exclude[source_ID], num_itters )
        current_sources.append( source_to_add )

        polarity = source_polarizations[source_ID]
        source_to_add.prep_for_fitting(polarity, guess_timings)

    print("prepping test sources")
    # test sources reuse the data of a randomly chosen known source but are
    # placed at the requested XYZ (T = 0)
    test_sources = []
    for XYZ in source_XYZ_to_test:
        XYZT = np.append(XYZ, [0.0])
        base_ID = choice(souces_to_fit)

        ## make source
        source_ID, input_name = input_manager.known_source( base_ID )
        source_to_add = source_object(source_ID, input_name, XYZT, source_stations_to_exclude[source_ID], source_antennas_to_exclude[source_ID], num_itters )

        polarity = source_polarizations[source_ID]
        source_to_add.prep_for_fitting(polarity, guess_timings)

        test_sources.append( source_to_add )

    fitter = stochastic_fitter_dt(current_sources)
    location_fitter = stochastic_fitter_dt_loc( test_sources )

    all_delays = np.empty( (num_itters, fitter.num_delays), dtype=np.double )
    all_RMSs = np.empty( num_itters, dtype=np.double )
    loc_RMSs = np.empty( num_itters, dtype=np.double )

    # Monte-Carlo loop: perturb timings, re-fit delays, then (optionally)
    # re-fit the test-source locations with those delays held fixed.
    for i in range(num_itters):
        all_delays[ i, :], all_RMSs[i] = fitter.rerun(error_deviation, antenna_error)
        fitter.employ_result( current_sources )
        print('run', i, 'RMS:', all_RMSs[i])

        if len(test_sources) != 0:
            station_delays = all_delays[ i ]
            loc_RMSs[i] = location_fitter.rerun(station_delays, error_deviation, antenna_error)
            location_fitter.employ_result( test_sources )
            print(" loc. RMS", loc_RMSs[i])

    print()
    print()
    # spread of the fitted delays over all iterations = timing error estimate
    print("station timing errors:")
    for i, sname in zip( range(fitter.num_delays), station_order):
        print(sname, ":", np.std(all_delays[:,i]) )
    print()
    print()

    ### get average X, Y, Z for each itteraion
    ave_X = np.zeros( num_itters )
    ave_Y = np.zeros( num_itters )
    ave_Z = np.zeros( num_itters )
    for source in current_sources:
        ave_X += source.solutions[: , 0]
        ave_Y += source.solutions[: , 1]
        ave_Z += source.solutions[: , 2]
    ave_X /= len(current_sources)
    ave_Y /= len(current_sources)
    ave_Z /= len(current_sources)

    # spread of the per-iteration mean position = absolute location error
    print("absolute location errors:")
    print("X", np.std(ave_X), "Y", np.std(ave_Y), "Z", np.std(ave_Z))
    print()
    print()

    # spread of each source around the per-iteration mean = relative error
    print("relative location errors")
    for source in current_sources:
        source.solutions[: , 0] -= ave_X
        source.solutions[: , 1] -= ave_Y
        source.solutions[: , 2] -= ave_Z

        print("source", source.ID)
        print("  ", np.std(source.solutions[:,0]), np.std(source.solutions[:,1]), np.std(source.solutions[:,2]))
    print()
    print()
    print("average RMS", np.average(all_RMSs), "std of RMS", np.std(all_RMSs))

    ### same for location fits
    if len(test_sources) != 0:
        print()
        print()
        print("location source tests")

        ave_X = np.zeros( num_itters )
        ave_Y = np.zeros( num_itters )
        ave_Z = np.zeros( num_itters )
        for source in test_sources:
            ave_X += source.solutions[: , 0]
            ave_Y += source.solutions[: , 1]
            ave_Z += source.solutions[: , 2]
        ave_X /= len(test_sources)
        ave_Y /= len(test_sources)
        ave_Z /= len(test_sources)

        print("absolute location errors:")
        print("X", np.std(ave_X), "Y", np.std(ave_Y), "Z", np.std(ave_Z))
        print()
        print()

        print("relative location errors")
        for i, source in enumerate(test_sources):
            source.solutions[: , 0] -= ave_X
            source.solutions[: , 1] -= ave_Y
            source.solutions[: , 2] -= ave_Z

            print("loc. source", i)
            print("  ", np.std(source.solutions[:,0]), np.std(source.solutions[:,1]), np.std(source.solutions[:,2]))
        print()
        print()
        print("average RMS", np.average(loc_RMSs), "std of RMS", np.std(loc_RMSs))
|
# python3 steven
import cv2
import argparse
import numpy as np
# ----------------------------------------------
# usgae: python yolo_opencv.py -i dog.jpg -c yolov3.cfg -w ./darknet-master/yolov3.weights -cl yolov3.txt
# usgae: python yolo_opencv.py -i dog.jpg -c yolov3.cfg -w ./darknet-master/yolov3.weights -cl yolov3.txt -s test.jpg
# usgae: python yolo_opencv.py --image dog.jpg --config yolov3.cfg --weights ./darknet-master/yolov3.weights --classes yolov3.txt -s test.jpg
# ----------------------------------------------
def cmd_line():
    """Parse the command-line arguments for the YOLO detection script."""
    parser = argparse.ArgumentParser()
    # (short flag, long flag, help text) for all required path arguments
    required_options = [
        ('-i', '--image', 'path to input image'),
        ('-c', '--config', 'path to yolo config file'),
        ('-w', '--weights', 'path to yolo pre-trained weights'),
        ('-cl', '--classes', 'path to text file containing class names'),
    ]
    for short_flag, long_flag, help_text in required_options:
        parser.add_argument(short_flag, long_flag, required=True, help=help_text)
    # optional output path for the annotated image
    parser.add_argument('-s', '--save', help='save the detection image')
    return parser.parse_args()
def get_output_layers(net):
    """Return the names of the network's unconnected (output) layers.

    getUnconnectedOutLayers() returns 1-based indices; old OpenCV versions
    return them as nested [[i], ...] arrays while OpenCV >= 4.5.4 returns a
    flat array (the old `i[0]` indexing crashes there). Flattening with
    reshape(-1) handles both shapes.
    """
    layer_names = net.getLayerNames()
    out_indices = np.asarray(net.getUnconnectedOutLayers()).reshape(-1)
    return [layer_names[i - 1] for i in out_indices]
def draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h, classes, colors):
    """Draw one labelled bounding box onto img (modified in place).

    The box uses the per-class colour; the caption ("name 97.3%") is drawn
    in white on a green background strip just above the box.
    """
    bg_color = (10, 160, 10)        # caption background (green)
    text_color = (255, 255, 255)    # caption foreground (white)
    font = cv2.FONT_HERSHEY_SIMPLEX  # cv2.FONT_HERSHEY_PLAIN #
    text_scale = 0.5

    # bounding box in the class colour
    cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), colors[class_id], 2)
    # print('color=',color)

    # caption text: class name + confidence as a percentage
    caption = '%s %s' % (str(classes[class_id]), "{0:.1%}".format(confidence))

    # fill text background just above the box
    (caption_w, caption_h) = cv2.getTextSize(caption, font, fontScale=text_scale, thickness=1)[0]
    top_left = (x - 2, y - 24)
    bottom_right = (x - 2 + caption_w + 5, y - 24 + caption_h + 12)
    cv2.rectangle(img, top_left, bottom_right, bg_color, cv2.FILLED)

    # draw text label
    cv2.putText(img, caption, (x, y - 8), font, text_scale, text_color, 1, cv2.LINE_AA)
def detectionImg(image, net, classes, colors):
    """Run YOLO on image, draw the NMS-filtered detections, return the image."""
    width = image.shape[1]
    height = image.shape[0]
    scale = 0.00392  # ~1/255: scales 8-bit pixel values to [0, 1]

    # 416x416 is the YOLOv3 network input size; swapRB=True for BGR input
    blob = cv2.dnn.blobFromImage(image, scale, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    # cv2.dnn_Net.dumpToFile(net,'./net.text')
    # net.dumpToFile('./net.text')
    outs = net.forward(get_output_layers(net))

    class_ids = []
    confidences = []
    boxes = []
    conf_threshold = 0.5
    nms_threshold = 0.4

    # print('outs=',len(outs))
    for out in outs:
        # print('out=',len(out))
        for detection in out:
            # print('detection=',len(detection))
            # detection layout: [cx, cy, w, h, objectness, class scores...]
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            # print('scores=',scores,'class_id=',class_id,'confidence=',confidence)
            if confidence > 0.5:
                print('scores=', scores, 'class_id=', class_id, 'confidence=', confidence)
                # box centre/size are normalized to [0, 1]; convert to pixels
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                x = center_x - w / 2
                y = center_y - h / 2
                class_ids.append(class_id)
                confidences.append(float(confidence))
                boxes.append([x, y, w, h])

    # non-maximum suppression: drop overlapping boxes of the same object
    indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
    # print('indices=',len(indices))
    # print('class_ids=',class_ids)
    for i in indices:
        # NOTE(review): i[0] assumes old OpenCV (< 4.5.4) where NMSBoxes
        # returns nested [[i], ...]; newer versions return a flat array and
        # this line would fail -- TODO confirm the target OpenCV version.
        i = i[0]
        box = boxes[i]
        x = box[0]
        y = box[1]
        w = box[2]
        h = box[3]
        draw_prediction(image, class_ids[i], confidences[i], round(x), round(y), round(x + w), round(y + h), classes, colors)

    return image
def getCalsses(file):
    """Read class names from a text file, one name per line (whitespace
    stripped).

    NOTE: the function name carries a historical typo ('Calsses'); it is
    kept unchanged for backward compatibility with existing callers.
    """
    # iterate the file lazily instead of readlines(); the redundant
    # `classes = None` pre-assignment was dropped
    with open(file, 'r') as f:
        return [line.strip() for line in f]
def main():
    """Entry point: load image and model, run detection, show and save."""
    args = cmd_line()

    image = cv2.imread(args.image)
    classes = getCalsses(args.classes)
    # one random colour per class, used for the bounding boxes
    colors = np.random.uniform(0, 255, size=(len(classes), 3))
    # colors = [200,0,0]

    net = cv2.dnn.readNet(args.weights, args.config)
    image = detectionImg(image, net, classes, colors)

    # display the annotated image; waits for a keypress before saving
    cv2.imshow("object detection", image)
    cv2.waitKey()

    # save to the requested path, or a default name when -s was not given
    if args.save:
        print(args.save)
        cv2.imwrite(args.save, image)
    else:
        cv2.imwrite("object-detection.jpg", image)
    cv2.destroyAllWindows()
# script entry point: run detection only when executed directly
if __name__ == '__main__':
    main()
|
"""
Copyright (c) 2019 Imperial College London.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn as nn
import common.mytorch as mytorch
from common.mytorch.fft import fft2, ifft2
from common.mytorch.mri import (
adjointSoftSenseOpNoShift,
forwardSoftSenseOpNoShift,
)
class DataIDLayer(nn.Module):
    """
    Placeholder for data layer.

    Identity stand-in used when no data-consistency term is wanted; the
    constructor and forward() accept and ignore any extra arguments so it
    is call-compatible with the real data layers in this file.
    """
    def __init__(self, *args, **kwargs):
        super(DataIDLayer, self).__init__()

    def forward(self, x, *args, **kwargs):
        """Return x unchanged; extra arguments (y, smaps, mask, ...) are ignored."""
        return x

    def __repr__(self):
        # plain string: the original used an f-string with no placeholders
        return 'DataIDLayer()'
class DataGDLayer(nn.Module):
    """
    DataLayer computing the gradient on the L2 dataterm.

    One gradient-descent step on ||A x - y||^2, where A is the soft-SENSE
    forward operator; the step size (data_weight) is a learnable parameter.
    """
    def __init__(self, lambda_init, learnable=True):
        """
        Args:
            lambda_init (float): Init value of data term weight lambda.
            learnable (bool): Whether the weight is trained.
        """
        super(DataGDLayer, self).__init__()
        self.lambda_init = lambda_init
        self.data_weight = torch.nn.Parameter(torch.Tensor(1))
        self.data_weight.data = torch.tensor(
            lambda_init,
            dtype=self.data_weight.dtype,
        )
        self.data_weight.requires_grad = learnable

    def forward(self, x, y, smaps, mask):
        """Return x - lambda * A^H (A x - y)."""
        A_x_y = forwardSoftSenseOpNoShift(x, smaps, mask) - y
        gradD_x = adjointSoftSenseOpNoShift(A_x_y, smaps, mask)
        return x - self.data_weight * gradD_x

    def __repr__(self):
        # fixed: the original reported the wrong class name ('DataLayer');
        # the shown value is the CURRENT weight, labelled like the sibling
        # layers' extra_repr
        return f'DataGDLayer(lambda_init={self.data_weight.item():.4g})'
class DataProxCGLayer(torch.nn.Module):
    """ Solving the prox wrt. dataterm using Conjugate Gradient as proposed by
    Aggarwal et al.
    """
    def __init__(self, lambda_init, tol=1e-6, iter=10, learnable=True):
        # lambda_init: initial data-term weight; tol / iter: CG stopping
        # criteria; learnable: whether lambda is trained.
        # NOTE: the parameter name 'iter' shadows the builtin; kept for
        # interface compatibility with existing callers.
        super(DataProxCGLayer, self).__init__()
        self.lambdaa = torch.nn.Parameter(torch.Tensor(1))
        self.lambdaa.data = torch.tensor(lambda_init)
        self.lambdaa_init = lambda_init
        self.lambdaa.requires_grad = learnable

        self.tol = tol
        self.iter = iter

        # custom autograd Function (defined later in this file) that runs the
        # CG solve with an implicit backward pass
        self.op = MyCG

    def forward(self, x, f, smaps, mask):
        # Prox of the L2 data term at x: solve
        # (lambda * A^H A + I) z = lambda * A^H f + x via CG.
        return self.op.apply(
            x,
            self.lambdaa,
            f,
            smaps,
            mask,
            self.tol,
            self.iter,
        )

    def extra_repr(self):
        return (f"lambda_init={self.lambdaa.item():.4g}, tol={self.tol}"
                f" iter={self.iter} learnable={self.lambdaa.requires_grad}")

    def set_learnable(self, flag):
        # toggle training of lambda after construction
        self.lambdaa.requires_grad = flag
class MyCG(torch.autograd.Function):
    # Conjugate-gradient solve of M(x) = rhs with M = lambda*A^H A + I, plus
    # a custom backward pass (implicit differentiation), used by
    # DataProxCGLayer. Complex tensors are represented with a trailing
    # (re, im) dimension -- TODO confirm expected layout.
    @staticmethod
    def complexDot(data1, data2):
        # batched complex dot product <data1, conj(data2)>; returns
        # (nBatch, 2) with real and imaginary parts stacked in the last dim
        nBatch = data1.shape[0]
        mult = mytorch.complex.complex_mult_conj(data1, data2)
        re, im = torch.unbind(mult, dim=-1)
        return torch.stack([torch.sum(re.view(nBatch, -1), dim=-1),
                            torch.sum(im.view(nBatch, -1), dim=-1)], -1)

    @staticmethod
    def solve(x0, M, tol, max_iter):
        # Standard CG iteration for M(x) = x0, starting from x = 0 (so the
        # initial residual and search direction both equal x0).
        nBatch = x0.shape[0]
        x = torch.zeros(x0.shape).to(x0.device)
        r = x0.clone()   # residual
        p = x0.clone()   # search direction
        x0x0 = (x0.pow(2)).view(nBatch, -1).sum(-1)  # norm of rhs, for relative tolerance
        rr = torch.stack([
            (r.pow(2)).view(nBatch, -1).sum(-1),
            torch.zeros(nBatch).to(x0.device)
        ], dim=-1)

        it = 0
        # stop when the smallest relative residual over the batch is below
        # tol, or after max_iter iterations
        while torch.min(rr[..., 0] / x0x0) > tol and it < max_iter:
            it += 1
            q = M(p)
            # alpha = <r, r> / <p, M p> (complex division)
            alpha = mytorch.complex.complex_div(rr, MyCG.complexDot(p, q))
            # alpha = torch.stack([rr[...,0] / MyCG.complexDot(p, q)[...,0],
            #                      torch.zeros(nBatch).to(x0.device)], dim=-1)
            x += mytorch.complex.complex_mult(
                alpha.reshape(nBatch, 1, 1, 1, -1), p.clone())
            r -= mytorch.complex.complex_mult(
                alpha.reshape(nBatch, 1, 1, 1, -1), q.clone())
            rr_new = torch.stack([
                (r.pow(2)).view(nBatch, -1).sum(-1),
                torch.zeros(nBatch).to(x0.device)
            ], dim=-1)
            # beta = <r_new, r_new> / <r, r>
            beta = torch.stack([
                rr_new[..., 0] / rr[..., 0],
                torch.zeros(nBatch).to(x0.device)
            ], dim=-1)
            p = r.clone() + mytorch.complex.complex_mult(
                beta.reshape(nBatch, 1, 1, 1, -1), p)
            rr = rr_new.clone()
            # print(it, rr[...,0]/x0x0)
        return x

    @staticmethod
    def forward(ctx, z, lambdaa, y, smaps, mask, tol, max_iter):
        # Solve (lambda*A^H A + I) x = lambda*A^H y + z, i.e. the prox of the
        # L2 data term evaluated at z.
        ctx.tol = tol
        ctx.max_iter = max_iter

        def A(x):
            return mytorch.mri.forwardSoftSenseOpNoShift(x, smaps, mask)

        def AT(y):
            return mytorch.mri.adjointSoftSenseOpNoShift(y, smaps, mask)

        def M(p):
            return lambdaa * AT(A(p)) + p

        x0 = lambdaa * AT(y) + z
        ctx.save_for_backward(AT(y), x0, smaps, mask, lambdaa)

        return MyCG.solve(x0, M, ctx.tol, ctx.max_iter)

    @staticmethod
    def backward(ctx, grad_x):
        # Implicit differentiation through the CG solve:
        # grad_z = M^{-1} grad_x, and grad_lambda via the chain rule on the
        # rhs and on M (both depend on lambda).
        ATy, rhs, smaps, mask, lambdaa = ctx.saved_tensors

        def A(x):
            return mytorch.mri.forwardSoftSenseOpNoShift(x, smaps, mask)

        def AT(y):
            return mytorch.mri.adjointSoftSenseOpNoShift(y, smaps, mask)

        def M(p):
            return lambdaa * AT(A(p)) + p

        Qe = MyCG.solve(grad_x, M, ctx.tol, ctx.max_iter)
        QQe = MyCG.solve(Qe, M, ctx.tol, ctx.max_iter)

        grad_z = Qe

        grad_lambdaa = mytorch.complex.complex_dotp(Qe, ATy).sum() \
            - mytorch.complex.complex_dotp(QQe, rhs).sum()

        # one gradient (or None) per forward() argument
        return grad_z, grad_lambdaa, None, None, None, None, None
class DataVSLayer(nn.Module):
    """
    DataLayer using variable splitting formulation.

    Applies a soft data-consistency blend in k-space (weighted by alpha),
    followed by a weighted average with the input image (weighted by beta).
    """
    def __init__(self, alpha_init, beta_init, learnable=True):
        """
        Args:
            alpha_init (float): Init value of data consistency block (DCB)
            beta_init (float): Init value of weighted averaging block (WAB)
        """
        super(DataVSLayer, self).__init__()
        self.alpha = torch.nn.Parameter(torch.Tensor(1))
        self.beta = torch.nn.Parameter(torch.Tensor(1))
        self.alpha.data = torch.tensor(alpha_init, dtype=self.alpha.dtype)
        self.beta.data = torch.tensor(beta_init, dtype=self.beta.dtype)

        self.learnable = learnable
        self.set_learnable(learnable)

    def forward(self, x, y, smaps, mask):
        # data-consistency block: blend measured k-space into the forward op
        forward_kspace = mytorch.mri.forwardSoftSenseOpNoShift(x, smaps, 1.)
        blended = self.alpha * forward_kspace + (1 - self.alpha) * y
        k_dc = (1 - mask) * forward_kspace + mask * blended
        x_dc = mytorch.mri.adjointSoftSenseOpNoShift(k_dc, smaps, 1.)
        # weighted-averaging block
        return self.beta * x + (1 - self.beta) * x_dc

    def extra_repr(self):
        return (
            f"alpha={self.alpha.item():.4g},"
            f"beta={self.beta.item():.4g},"
            f"learnable={self.learnable}"
        )

    def set_learnable(self, flag):
        # toggle training of both blend weights together
        self.learnable = flag
        self.alpha.requires_grad = flag
        self.beta.requires_grad = flag
class DCLayer(nn.Module):
    """
    Data Consistency layer from DC-CNN; intended mainly for single-coil data.
    """
    def __init__(self, lambda_init=0., learnable=True):
        """
        Args:
            lambda_init (float): Init value of data consistency block (DCB)
            learnable (bool): Whether lambda receives gradients.
        """
        super(DCLayer, self).__init__()
        # 0-dim learnable consistency weight in the default floating dtype.
        self.lambda_ = torch.nn.Parameter(
            torch.tensor(lambda_init, dtype=torch.get_default_dtype()))
        self.learnable = learnable
        self.set_learnable(learnable)
    def forward(self, x, y, mask):
        """Blend predicted and measured k-space, return the image estimate."""
        k_pred = fft2(x)
        # Keep unsampled k-space as predicted; blend sampled entries between
        # prediction and measurement y with weight lambda_.
        k_mixed = (1 - mask) * k_pred + mask * (
            self.lambda_ * k_pred + (1 - self.lambda_) * y)
        return ifft2(k_mixed)
    def extra_repr(self):
        """Summary string shown in the module repr."""
        return "lambda={:.4g},learnable={}".format(
            self.lambda_.item(), self.learnable)
    def set_learnable(self, flag):
        """Enable/disable gradient flow through lambda_."""
        self.learnable = flag
        self.lambda_.requires_grad = flag
|
from concurrent.futures import ThreadPoolExecutor, as_completed, TimeoutError, ProcessPoolExecutor
import time
import pymtg
# Docs: https://docs.python.org/3/library/concurrent.futures.html
class WorkParallelizer(object):
    """
    Run a queued batch of tasks concurrently on a thread or process pool.

    Usage:
        wp = WorkParallelizer()
        for i in range(30):
            wp.add_task(my_function, i, i + 1, kwarg1='one', kwarg2='two')
        wp.run()
        if wp.num_tasks_failed > 0:
            print('\nErrors:')
            wp.show_errors()
    """
    def __init__(self, show_widgets=True, use_threads=False):
        """
        Args:
            show_widgets (bool): try to show an ipywidgets progress bar.
            use_threads (bool): use a ThreadPoolExecutor instead of a
                ProcessPoolExecutor (threads suit I/O-bound tasks).
        """
        self.tasks = []
        self.futures = []
        self.executor = None
        # BUGFIX: was `self.starttime`, but start()/show_progress() use
        # `self.start_time` -- the attribute was never the one initialized.
        self.start_time = None
        self.progress_widget = None
        self.show_widgets = show_widgets
        if use_threads:
            self.pool_executor = ThreadPoolExecutor
        else:
            self.pool_executor = ProcessPoolExecutor
    def add_task(self, fn, *args, **kwargs):
        '''
        Queue `fn(*args, **kwargs)` for later execution.
        Use special kwarg task_id to specify an id for the task. Otherwise will use a count number.
        Returns False (and queues nothing) once computing has started.
        '''
        if self.has_started_computing:
            print('Can\'t add new tasks once computing has started.')
            return False
        if 'task_id' in kwargs:
            tid = kwargs['task_id']
            del kwargs['task_id']
        else:
            tid = len(self.tasks)
        self.tasks.append((fn, args, kwargs, tid))
        return True
    def tasks_running(self):
        """
        Return a list of `future` objects corresponding to all tasks that are
        currently being processed.
        """
        return [future for future in self.futures if future.running()]
    def tasks_completed(self):
        """
        Return a list of `future` objects corresponding to all tasks that have
        completed processing, including those that failed.
        """
        return [future for future in self.futures if future.done()]
    def tasks_failed(self):
        """
        Returns a list of `future` objects corresponding to all tasks that have
        raised an exception (i.e. that have failed).
        """
        to_return = []
        for future in self.futures:
            try:
                if future.exception(timeout=0.0) is not None:
                    to_return.append(future)
            except TimeoutError:
                # concurrent.futures.TimeoutError: the task has not finished
                # (or has not started) yet, so it cannot have failed.
                pass
        return to_return
    def tasks_succeeded(self):
        """
        Returns a list of `future` objects corresponding to all tasks that have been
        completed successfully. Tasks are considered to have finished successfully
        as long as no exceptions happened during their computation.
        Given a `future` object, the corresponding task result can be retrieved as `future.result()`
        Given a `future` object, you can get its given id by using `future.id`
        """
        to_return = []
        for future in self.futures:
            try:
                future.result(timeout=0.0)  # Raises if the task failed or is still pending
                to_return.append(future)
            except TimeoutError:
                # Task has not finished (or started) yet.
                pass
            except Exception:
                # Intentionally broad: any exception raised by the task
                # function itself means "not succeeded".
                pass
        return to_return
    @property
    def num_tasks(self):
        """Total number of queued tasks."""
        return len(self.tasks)
    @property
    def num_tasks_completed(self):
        """Number of tasks that finished (successfully or not)."""
        return len(self.tasks_completed())
    @property
    def num_tasks_running(self):
        """Number of tasks currently executing."""
        return len(self.tasks_running())
    @property
    def num_tasks_failed(self):
        """Number of tasks that raised an exception."""
        return len(self.tasks_failed())
    @property
    def num_tasks_succeeded(self):
        """Number of tasks that completed without raising."""
        return len(self.tasks_succeeded())
    @property
    def has_started_computing(self):
        """True once tasks have been submitted to an executor."""
        return len(self.futures) > 0
    def start(self, num_workers=4):
        """
        Starts the computation of the tasks. Returns False if computation couldn't start.
        Calling this method does not block the main thread.
        """
        if self.has_started_computing:
            print('Computing has already started, can\'t start again.')
            return False
        print('Submitting {0} tasks to {1} workers'.format(self.num_tasks, num_workers))
        self.executor = self.pool_executor(max_workers=num_workers)
        self.start_time = time.time()
        for (fn, args, kwargs, tid) in self.tasks:
            future = self.executor.submit(fn, *args, **kwargs)
            future.id = tid
            # Human-readable call representation, used by show_errors().
            future.command = '{0}({1}{2})'.format(
                fn.__name__,
                ', '.join([str(arg) for arg in args]) if args else '',
                ', ' + ', '.join(['{0}={1}'.format(str(key), str(value)) for key, value in kwargs.items()]) if kwargs else ''
            )
            self.futures.append(future)
        return True
    def show_progress(self, in_blocking_loop=False):
        """
        Get number of completed tasks and compute estimated remaining time.
        Display that information on screen; uses a FloatProgress widget if
        available. Returns True once computation of all tasks has finished.
        """
        num_tasks_completed = self.num_tasks_completed  # Iterate the futures only once
        if num_tasks_completed > 0:
            _, remaining_time = pymtg.time.time_stats(num_tasks_completed, self.num_tasks, self.start_time)
            remaining_time += ' remaining'
        else:
            remaining_time = '-'
        if self.show_widgets:
            try:
                from ipywidgets import FloatProgress
                from IPython.display import display
                use_widgets = True
            except ImportError:
                use_widgets = False
        else:
            use_widgets = False
        if use_widgets:
            if self.progress_widget is None or not in_blocking_loop:
                self.progress_widget = FloatProgress(min=0, max=self.num_tasks)
                display(self.progress_widget)
            self.progress_widget.value = num_tasks_completed
        print('\r[{0}/{1}, {2} running] {3}'.format(
            num_tasks_completed, self.num_tasks, self.num_tasks_running, remaining_time
        ), end='')
        if num_tasks_completed == self.num_tasks:
            # BUGFIX: the completion summary used to be printed on the
            # *unfinished* branch (after `return True`, the finished branch
            # printed nothing); also fixes the 'compelted' typo.
            print('\rAll tasks completed! [{0} succeeded, {1} failed]'.format(
                self.num_tasks_succeeded, self.num_tasks_failed))
            return True
        return False
    def show_progress_blocking(self, interval_seconds=0.2):
        """
        Check the progress of the computation every `interval_seconds` and
        display it on screen until all tasks have finished.
        """
        if not self.has_started_computing:
            print('Computing has not started yet, can\'t show progress.')
            return
        while True:
            time.sleep(interval_seconds)
            finished = self.show_progress(in_blocking_loop=True)
            if finished:
                break
    def show_errors(self):
        """
        Displays on screen information about the tasks that failed, including the command
        that was run and the exception that was raised.
        """
        for task in self.tasks_failed():
            print('* Task {0}\nCommand: {1}\nException: {2}\n'.format(
                task.id,
                task.command,
                task.exception()
            ))
    def run(self, num_workers=4):
        """
        Runs all the tasks that have been added to WorkParallelizer and shows the overall
        progress in periodic updates. This method blocks the main thread.
        """
        started = self.start(num_workers=num_workers)
        if started:
            self.show_progress_blocking()
from datetime import date, datetime, timedelta
from xm_s_common.raw_sql import exec_sql
from .frequent import fix_dimension
def fix_data():
    """Backfill fad_scores (sub_dimension_id=9) day by day from the value table.

    Walks each day from 2020-10-15 (exclusive end 2021-04-21); the very first
    day's msg_value becomes the reference baseline (base_val) against which
    every later day is scored. NOTE(review): base_val is only assigned on the
    first iteration and reused afterwards -- correct only because the loop
    starts exactly on 2020-10-15.
    """
    start_date = date(2020, 10, 15)
    while start_date < date(2021, 4, 21):
        if start_date == date(2020, 10, 15):
            # First day: fetch the baseline value and store it as the tag=1
            # reference row.
            sql_query_base = "SELECT msg_value FROM xm_s_explorer.value WHERE date=%s; "
            base_val = exec_sql(sql_query_base, (start_date,))[0][0]
            sql_update_base = "UPDATE xm_s_explorer.fad_scores SET ref_data =%s where day =%s and tag=1 and sub_dimension_id=9;"
            exec_sql(sql_update_base, (base_val, start_date))
        # Current day's raw value.
        sql_query_base = "SELECT msg_value FROM xm_s_explorer.value WHERE date=%s; "
        val = exec_sql(sql_query_base, (start_date,))[0][0]
        # Upsert the tag=0 score row: update when it exists, insert otherwise.
        sql_check = "SELECT * FROM xm_s_explorer.fad_scores WHERE tag = 0 and day = %s and sub_dimension_id=9 "
        res = exec_sql(sql_check, (start_date,))
        if res:
            sql_update_real = "UPDATE xm_s_explorer.fad_scores SET weighting_factor=1, ref_data=%s, real_time_data=%s, " \
                              "basic_scores=%s, weighing_scores=%s where day =%s and tag=0 and sub_dimension_id=9 ;"
            exec_sql(sql_update_real, (base_val, val, val / base_val, val / base_val, start_date))
        else:
            sql_insert_new = "INSERT INTO xm_s_explorer.fad_scores(weighting_factor, ref_data, real_time_data, basic_scores, " \
                             "weighing_scores, `day`, create_time, tag, sub_dimension_id)VALUES(1, %s, %s, %s, %s, %s, %s, 0, 9);"
            exec_sql(sql_insert_new, (base_val, val, val / base_val, val / base_val, start_date,
                                      datetime(start_date.year, start_date.month, start_date.day, 0, 0, 0)))
        start_date += timedelta(days=1)
# Run the backfill and then recompute the dimension aggregates.
fix_dimension()
|
"""
A toy example of playing against random bot on Mocsรกr
Using env "mocsar" and 'human_mode'. It implies using random agent.
"""
import rlcard3
# Make environment and enable human mode
env = rlcard3.make(env_id='mocsar', config={'human_mode': True})
# Reset environment
state = env.reset()
while not env.is_over():
legal_actions = state['legal_actions']
legal_actions.insert(0, 0)
action = input('>> You choose action (integer): ')
if action == '-1':
print('Break the game...')
break
while not action.isdigit() \
or int(action) not in legal_actions:
print('Action illegal...')
action = input('>> Re-choose action (integer): ')
state, reward, done = env.step(int(action))
|
#Task https://adventofcode.com/2020/day/9
# Sample puzzle input from the problem statement, one integer per line.
Input="""35
20
15
25
47
40
62
55
65
95
102
117
150
182
127
219
299
277
309
576"""
def parseInput(i):
    """Split the newline-separated puzzle input into a list of ints."""
    return list(map(int, i.split("\n")))
somearray = parseInput(Input)  # parsed sample input; NOTE(review): appears unused below
def preambule(arr, size):
    """Return the first value that is not the sum of two distinct numbers
    among the preceding `size` values (AoC 2020 day 9, part 1).

    Fixes the original implementation, which looked at a window of size+1
    values (including the current one), allowed a number to be paired with
    itself, and could raise IndexError by probing arr[idx + 1] at the end.

    Args:
        arr: list of ints (preamble followed by the data stream).
        size: length of the preamble / sliding window.

    Returns:
        The first invalid number, or None if every value checks out.
    """
    for idx in range(size, len(arr)):
        window = arr[idx - size:idx]  # exactly the `size` preceding values
        target = arr[idx]
        # Two *distinct* positions must sum to the target.
        if not any(a + b == target
                   for i, a in enumerate(window)
                   for b in window[i + 1:]):
            return target
    return None
print("1st part:",preambule(parseInput(Input),5))
def findNumbers(invalid_num, arr):
    """Find a contiguous run of arr summing to invalid_num (AoC day 9 part 2).

    Returns (min, max, "Sum:", min + max) for the first such run, or None
    when no run matches.
    """
    lo = 0
    total = 0
    for hi, value in enumerate(arr):
        total += value
        # Shrink the window from the left while it overshoots the target.
        while total > invalid_num and hi >= lo:
            total -= arr[lo]
            lo += 1
        if total == invalid_num:
            run = arr[lo:hi + 1]
            return min(run), max(run), "Sum:", min(run) + max(run)
# Change 5 to your preferred preamble size.
# Part 2: min+max of the contiguous run summing to the part-1 answer.
print("2nd part:",findNumbers(preambule(parseInput(Input),5),parseInput(Input)))
|
# Copyright 2014 SolidBuilds.com. All rights reserved #
# Authors: Ling Thio <ling.thio@gmail.com>
from datetime import datetime
from flask import current_app, flash
from flask import Blueprint, redirect, render_template
from flask import request, url_for
from flask_user import current_user, login_required, roles_accepted
from flask_user.views import _get_safe_next_param, render, _send_registered_email, _endpoint_url, _do_login_user
from flask_user import signals
from webapp import db
from webapp.models.user_models import User, Role, AdminRegisterForm, EmployerRegisterForm, EmployeeRegisterForm
from webapp.models.user_models import AdminProfileForm, EmployerProfileForm, EmployeeProfileForm, SuspendUserForm
from webapp.models.user_models import TrainingVideoForm
from MappingCommon import MappingCommon
# When using a Flask app factory we must use a blueprint to avoid needing 'app' for '@app.route'
# All views below register against this blueprint under the 'main.' endpoint prefix.
main_blueprint = Blueprint('main', __name__, template_folder='templates')
@main_blueprint.route('/')
def base_page():
    """Redirect the bare site root to the home page."""
    home = url_for('main.home_page')
    return redirect(home)
# The Home page is accessible to anyone
@main_blueprint.route('/home')
def home_page():
    """Render the public landing page (no authentication required)."""
    return render_template('pages/home_page.html')
# ----------------------------------------------------------------
# The Administrator page is accessible to authenticated users with the 'admin' role
@main_blueprint.route('/admin')
@roles_accepted('admin')
@login_required
def admin_page():
    """Render the admin dashboard; requires an authenticated 'admin' user."""
    return render_template('pages/admin_page.html')
# The Administrator submenu is accessible to authenticated users with the 'admin' role
@main_blueprint.route('/admin/list_admins_employers')
@roles_accepted('admin')
@login_required
def list_admins_employers():
    """List every admin and employer account, each group sorted by name."""
    # One query for both roles; classification happens below on the role string.
    accounts = User.query.filter(
        User.roles.any((Role.name == 'admin') | (Role.name == 'employer'))).all()
    admin_list = sorted(
        (acct.last_name, acct.first_name, acct.email)
        for acct in accounts if acct.get_roles_string() == 'admin')
    employer_list = sorted(
        (acct.company_name, acct.last_name, acct.first_name, acct.email)
        for acct in accounts if acct.get_roles_string() == 'employer')
    return render_template('pages/list_admins_employers_page.html', admin_list=admin_list, employer_list=employer_list)
# The Administrator submenu is accessible to authenticated users with the 'admin' role.
@main_blueprint.route('/employer/list_employees_by_admin')
@roles_accepted('admin')
@login_required
def list_employees_by_admin():
    """List every employer's employees (admin view), grouped by company."""
    employers = User.query.filter(User.roles.any(Role.name == 'employer')).all()
    employer_list = []
    for employer in employers:
        # Employees are the users this employer invited.
        invited = User.query.filter(User.invited_by == employer.id).all()
        employee_list = sorted(
            (person.last_name, person.first_name, person.email)
            for person in invited)
        employer_list.append((employer.company_name, employee_list))
    employer_list.sort()
    return render_template('pages/list_employees_by_admin_page.html', employer_list=employer_list)
# The Administrator submenu is accessible to authenticated users with the 'admin' role
@main_blueprint.route('/admin/admin_employer_invite')
@roles_accepted('admin')
@login_required
def admin_employer_invite():
    """Forward admins to Flask-User's generic invitation view."""
    return redirect(url_for('user.invite'))
# The Administrator submenu is accessible to authenticated users with the 'admin' role
@main_blueprint.route('/admin/suspend_admin_employer_employee', methods=['GET', 'POST'])
@roles_accepted('admin')
@login_required
def suspend_admin_employer_employee():
    """Suspend or reactivate any account (admin-only), looked up by email."""
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    form = SuspendUserForm(request.form)
    # Process valid POST
    if request.method == 'POST' and form.validate():
        # Validate the specified email address.
        email = form.email.data
        user = User.query.filter(User.email == email).first()
        if not user:
            flash("No such user", "error")
            return redirect(url_for('main.suspend_admin_employer_employee'))
        # activate_flag: truthy -> reactivate the account, falsy -> suspend it.
        if int(form.activate_flag.data):
            activate = True
            verb = 'reactivated.'
        else:
            activate = False
            verb = 'suspended.'
        db_adapter.update_object(user, active=activate)
        # Save modified user record
        db_adapter.commit()
        flash('User has been successfully ' + verb, 'success')
    # Process GET or invalid POST
    return render_template('pages/suspend_admin_employer_employee_page.html', form=form)
# ----------------------------------------------------------------
# The Employer page is accessible to authenticated users with the 'employer' or 'admin' role.
@main_blueprint.route('/employer')
@roles_accepted('employer', 'admin')
@login_required
def employer_page():
    """Render the employer dashboard; requires 'employer' or 'admin' role."""
    return render_template('pages/employer_page.html')
# The Employer submenu is accessible to authenticated users with the 'employer' role.
# NOTE(review): this route contains a literal space ('...list_employees_by employer');
# url_for() still generates a working percent-encoded URL, but confirm whether any
# external links rely on it before renaming.
@main_blueprint.route('/employer/list_employees_by employer')
@roles_accepted('employer')
@login_required
def list_employees_by_employer():
    """List the employees invited by the currently logged-in employer."""
    # Get all users invited by this employer.
    users = User.query.filter(User.invited_by == current_user.id).all()
    employee_list = []
    for user in users:
        employee_list.append((user.last_name, user.first_name, user.email))
    employee_list.sort()
    # Re-fetch the employer record to display the company name.
    employer = User.query.filter(User.id == current_user.id).first()
    return render_template('pages/list_employees_by_employer_page.html', company_name=employer.company_name, employee_list=employee_list)
# The Employer submenu is accessible to authenticated users with the 'employer' role
@main_blueprint.route('/employer/employee_invite')
@roles_accepted('employer')
@login_required
def employee_invite():
    """Forward employers to Flask-User's generic invitation view."""
    return redirect(url_for('user.invite'))
# The Employer submenu is accessible to authenticated users with the 'employer' role
@main_blueprint.route('/employer/suspend_employee', methods=['GET', 'POST'])
@roles_accepted('employer')
@login_required
def suspend_employee():
    """Suspend or reactivate one of the current employer's own employees."""
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    form = SuspendUserForm(request.form)
    # Process valid POST
    if request.method == 'POST' and form.validate():
        # Validate the specified email address; restrict the lookup to users
        # invited by the current employer so they can't touch other accounts.
        email = form.email.data
        user = User.query.filter((User.email == email) & (User.invited_by == current_user.id)).first()
        if not user:
            flash("No such employee", "error")
            return redirect(url_for('main.suspend_employee'))
        # activate_flag: truthy -> reactivate the account, falsy -> suspend it.
        if int(form.activate_flag.data):
            activate = True
            verb = 'reactivated.'
        else:
            activate = False
            verb = 'suspended.'
        db_adapter.update_object(user, active=activate)
        # Save modified user record
        db_adapter.commit()
        flash('Employee has been successfully ' + verb, 'success')
    # Process GET or invalid POST
    return render_template('pages/suspend_employee_page.html', form=form)
# ----------------------------------------------------------------
# The Employee page is accessible to authenticated users with the 'employee' or 'admin' role.
@main_blueprint.route('/employee')
@roles_accepted('employee', 'admin')
@login_required  # Limits access to authenticated users
def employee_page():
    """Render the employee dashboard; requires 'employee' or 'admin' role."""
    return render_template('pages/employee_page.html')
# The Employee submenu is accessible to authenticated users with the 'employee' role
@main_blueprint.route('/employee/training')
@roles_accepted('employee')
@login_required  # Limits access to authenticated users
def training():
    """Render the training page, populated with video URLs/dimensions
    read from the MappingCommon configuration store."""
    trainingForm = TrainingVideoForm(request.form)
    mapc = MappingCommon()
    # Read configuration parameters.
    videoUrl = mapc.getConfiguration('VideoUrl')
    introVideo = mapc.getConfiguration('QualTest_IntroVideo')
    introWidth = mapc.getConfiguration('QualTest_IntroVideoWidth')
    introHeight = mapc.getConfiguration('QualTest_IntroVideoHeight')
    instructionalVideo = mapc.getConfiguration('QualTest_InstructionalVideo')
    instructionalWidth = mapc.getConfiguration('QualTest_InstructionalVideoWidth')
    instructionalHeight = mapc.getConfiguration('QualTest_InstructionalVideoHeight')
    # Build the full video URLs from the base URL and file names.
    introUrl = "%s/%s" % (videoUrl, introVideo)
    instructionalUrl = "%s/%s" % (videoUrl, instructionalVideo)
    # Load up the training form.
    trainingForm.introUrl.data = introUrl
    trainingForm.introWidth.data = introWidth
    trainingForm.introHeight.data = introHeight
    trainingForm.instructionalUrl.data = instructionalUrl
    trainingForm.instructionalWidth.data = instructionalWidth
    trainingForm.instructionalHeight.data = instructionalHeight
    return render_template('pages/training_page.html', form=trainingForm)
# ----------------------------------------------------------------
# The registration page is accessible to all users by invitation only.
def register():
    """Display the invitation-only registration form and create a new User.

    Overrides Flask-User's stock register view to pick a role-specific
    registration form (admin / employer / employee) from the invitation.
    """
    # BUGFIX: `quote` was used below but never imported anywhere in this module.
    from urllib.parse import quote

    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    safe_next = _get_safe_next_param('next', user_manager.after_login_endpoint)
    safe_reg_next = _get_safe_next_param('reg_next', user_manager.after_register_endpoint)
    # invite token used to determine validity of registeree
    invite_token = request.values.get("token")
    # require invite without a token should disallow the user from registering
    if user_manager.require_invitation and not invite_token:
        flash("Registration is invite only", "error")
        return redirect(url_for('user.login'))
    user_invite = None
    if invite_token and db_adapter.UserInvitationClass:
        user_invite = db_adapter.find_first_object(db_adapter.UserInvitationClass, token=invite_token)
        if user_invite is None:
            flash("Invalid invitation token", "error")
            return redirect(url_for('user.login'))
    # Initialize form
    login_form = user_manager.login_form()  # for login_or_register.html
    # BUGFIX: `user_invite.role` used to be read without a None check and
    # `register_form` was left unbound for unknown roles (both crashed later).
    invite_role = user_invite.role if user_invite else None
    if invite_role == 'admin':
        register_form = AdminRegisterForm(request.form)
    elif invite_role == 'employer':
        register_form = EmployerRegisterForm(request.form)
    elif invite_role == 'employee':
        register_form = EmployeeRegisterForm(request.form)
    else:
        flash("Unknown or missing invitation role", "error")
        return redirect(url_for('user.login'))
    if user_invite:
        register_form.invite_token.data = invite_token
    if request.method != 'POST':
        login_form.next.data = register_form.next.data = safe_next
        login_form.reg_next.data = register_form.reg_next.data = safe_reg_next
        if user_invite:
            register_form.email.data = user_invite.email
            if hasattr(db_adapter.UserInvitationClass, 'role'):
                register_form.role.data = user_invite.role
    # Process valid POST
    if request.method == 'POST' and register_form.validate():
        # Create a User object using Form fields that have a corresponding User field
        User = db_adapter.UserClass
        user_class_fields = User.__dict__
        user_fields = {}
        # Create a UserEmail object using Form fields that have a corresponding UserEmail field
        if db_adapter.UserEmailClass:
            UserEmail = db_adapter.UserEmailClass
            user_email_class_fields = UserEmail.__dict__
            user_email_fields = {}
        # Create a UserAuth object using Form fields that have a corresponding UserAuth field
        if db_adapter.UserAuthClass:
            UserAuth = db_adapter.UserAuthClass
            user_auth_class_fields = UserAuth.__dict__
            user_auth_fields = {}
        Role = db_adapter.RoleClass
        # Enable user account; the attribute name depends on the data model.
        if db_adapter.UserProfileClass:
            if hasattr(db_adapter.UserProfileClass, 'active'):
                user_auth_fields['active'] = True
            elif hasattr(db_adapter.UserProfileClass, 'is_enabled'):
                user_auth_fields['is_enabled'] = True
            else:
                user_auth_fields['is_active'] = True
        else:
            if hasattr(db_adapter.UserClass, 'active'):
                user_fields['active'] = True
            elif hasattr(db_adapter.UserClass, 'is_enabled'):
                user_fields['is_enabled'] = True
            else:
                user_fields['is_active'] = True
        # For all form fields
        role = None
        for field_name, field_value in register_form.data.items():
            # Hash password field
            if field_name == 'password':
                hashed_password = user_manager.hash_password(field_value)
                if db_adapter.UserAuthClass:
                    user_auth_fields['password'] = hashed_password
                else:
                    user_fields['password'] = hashed_password
            elif field_name == 'role':
                role = Role.query.filter(Role.name == field_value).first()
            # Store corresponding Form fields into the User object and/or UserProfile object
            else:
                if field_name in user_class_fields:
                    user_fields[field_name] = field_value
                if db_adapter.UserEmailClass:
                    if field_name in user_email_class_fields:
                        user_email_fields[field_name] = field_value
                if db_adapter.UserAuthClass:
                    if field_name in user_auth_class_fields:
                        user_auth_fields[field_name] = field_value
        if user_invite:
            user_fields['invited_by'] = user_invite.invited_by
        # Add User record using named arguments 'user_fields'
        user = db_adapter.add_object(User, **user_fields)
        if role:
            user.roles.append(role)
        if db_adapter.UserProfileClass:
            user_profile = user
        # Add UserEmail record using named arguments 'user_email_fields'
        if db_adapter.UserEmailClass:
            user_email = db_adapter.add_object(UserEmail,
                                               user=user,
                                               is_primary=True,
                                               **user_email_fields)
        else:
            user_email = None
        # Add UserAuth record using named arguments 'user_auth_fields'
        if db_adapter.UserAuthClass:
            user_auth = db_adapter.add_object(UserAuth, **user_auth_fields)
            if db_adapter.UserProfileClass:
                user = user_auth
            else:
                user.user_auth = user_auth
        require_email_confirmation = True
        if user_invite:
            if user_invite.email == register_form.email.data:
                require_email_confirmation = False
                db_adapter.update_object(user, confirmed_at=datetime.utcnow())
                # Clear token so invite can only be used once.
                user_invite.token = None
        db_adapter.commit()
        # Send 'registered' email and delete new User object if send fails
        if user_manager.send_registered_email:
            try:
                # Send 'registered' email
                _send_registered_email(user, user_email, require_email_confirmation)
            except Exception:
                # delete new User object if send fails
                db_adapter.delete_object(user)
                db_adapter.commit()
                raise
        # Send user_registered signal
        signals.user_registered.send(current_app._get_current_object(),
                                     user=user,
                                     user_invite=user_invite)
        # Redirect if USER_ENABLE_CONFIRM_EMAIL is set
        if user_manager.enable_confirm_email and require_email_confirmation:
            safe_reg_next = user_manager.make_safe_url_function(register_form.reg_next.data)
            return redirect(safe_reg_next)
        # Auto-login after register or redirect to login page
        if 'reg_next' in request.args:
            safe_reg_next = user_manager.make_safe_url_function(register_form.reg_next.data)
        else:
            safe_reg_next = _endpoint_url(user_manager.after_confirm_endpoint)
        if user_manager.auto_login_after_register:
            return _do_login_user(user, safe_reg_next)  # auto-login
        else:
            return redirect(url_for('user.login') + '?next=' + quote(safe_reg_next))  # redirect to login page
    # Process GET or invalid POST
    return render(user_manager.register_template,
                  form=register_form,
                  login_form=login_form,
                  register_form=register_form)
# ----------------------------------------------------------------
@main_blueprint.route('/user/profile', methods=['GET', 'POST'])
@login_required
def user_profile():
    """Show and update the profile form matching the current user's role."""
    # Pick the role-specific form; BUGFIX: fall back to the home page for
    # accounts holding none of the three roles (previously `form` stayed
    # unbound and the view raised UnboundLocalError).
    if current_user.has_role('admin'):
        form = AdminProfileForm(request.form)
    elif current_user.has_role('employer'):
        form = EmployerProfileForm(request.form)
    elif current_user.has_role('employee'):
        form = EmployeeProfileForm(request.form)
    else:
        return redirect(url_for('main.home_page'))
    # Process valid POST
    if request.method == 'POST' and form.validate():
        # Copy form fields to user_profile fields
        form.populate_obj(current_user)
        # Save user_profile
        db.session.commit()
        # Redirect to user_profile page
        return redirect(url_for('main.user_profile'))
    # Process GET or invalid POST
    return render_template('pages/user_profile_page.html', form=form)
# ----------------------------------------------------------------
@main_blueprint.route('/select_role_page')
@login_required
def select_role_page():
    """Send the user to the landing page of their highest-priority role."""
    # Order matters: admin wins over employer, which wins over employee.
    for role, endpoint in (('admin', 'main.admin_page'),
                           ('employer', 'main.employer_page'),
                           ('employee', 'main.employee_page')):
        if current_user.has_role(role):
            return redirect(url_for(endpoint))
    return redirect(url_for('main.home_page'))
|
#!/usr/bin/env python
# coding: utf-8
# # important libraries
# In[1]:
import numpy as np
import pandas as pd
import os
import re
from matplotlib import pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
import pickle
# Nltk for tekenize and stopwords
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import TreebankWordTokenizer
from nltk import RegexpTokenizer
nltk.download("stopwords")
# **reading data**
# In[2]:
data = pd.read_csv("data.csv")
data1 = pd.read_csv("data1.csv")
# **Exploring data**
# In[3]:
data.columns
# In[4]:
data1.columns
# In[5]:
data.drop("selected_text" , axis = 1 , inplace = True)
# In[6]:
train = pd.concat([data , data1] , axis = 0 )
# In[7]:
train.shape
# In[8]:
train.isnull().sum()
# In[9]:
train = train.dropna()
# In[10]:
count = train.sentiment.value_counts()
percentage = train.sentiment.value_counts(normalize = True)
df = pd.DataFrame({"count":count , "percentage":percentage})
# **Looking sentimentwise**
# In[11]:
df.head()
# In[13]:
train.describe()
# # cleaning part
# In[14]:
def standardize_text(df, text_field):
    """Normalize tweet text in-place in df[text_field] and return df.

    Steps: strip URLs and @mentions, replace non-allowed characters with a
    space, then lowercase.

    BUGFIX: the regex patterns are now passed with explicit ``regex=True``.
    Since pandas 2.0 the default for ``Series.str.replace`` is
    ``regex=False``, which would have treated these patterns as literals.
    """
    df[text_field] = df[text_field].str.replace(r"http\S+", "", regex=True)   # full URLs
    df[text_field] = df[text_field].str.replace(r"http", "", regex=True)      # leftover scheme fragments
    df[text_field] = df[text_field].str.replace(r"@\S+", "", regex=True)      # @mentions
    # Replace every char outside the allowed set with a space.
    df[text_field] = df[text_field].str.replace(r"[^A-Za-z0-9(),!?@\'\`\"\_\n]", " ", regex=True)
    df[text_field] = df[text_field].str.replace("@", "at", regex=False)       # stray '@' -> 'at'
    df[text_field] = df[text_field].str.lower()
    return df
# Apply URL/mention stripping and lowercasing to the tweet text column.
train = standardize_text(train, "text")
# In[15]:
def rep(text):
    """re.sub callback: collapse a run of repeated word chars to one char."""
    matched = text.group(0)
    # Change the slice length here to keep more repetitions.
    return matched[:1] if len(matched) > 1 else None
def unique_char(rep, sentence):
    """Apply `rep` to every run of a repeated word character in `sentence`."""
    collapsed = re.sub(r'(\w)\1+', rep, sentence)
    return collapsed
# In[16]:
# Collapse runs of repeated characters (e.g. "sooo" -> "so") via rep/unique_char.
train['text']=train['text'].apply(lambda x : unique_char(rep,x))
# In[17]:
train.head()
# In[18]:
# Word-only tokenizer (drops punctuation).
tokenizer = RegexpTokenizer(r"\w+")
# In[19]:
train.text = train["text"].apply(lambda x:tokenizer.tokenize(x))
# In[20]:
train.head()
# In[21]:
stop = set(stopwords.words('english'))
# In[22]:
# Remove English stopwords and rejoin the tokens into a single string.
train['text'] = train['text'].apply(lambda x: ' '.join([word for word in x if word not in (stop)]))
# In[23]:
train.head()
# In[24]:
# Lemmatize each token (variable is named `stemmer` but it is a lemmatizer).
tokenizer = TreebankWordTokenizer()
stemmer = nltk.WordNetLemmatizer()
train["text"] = train["text"].apply(lambda x: tokenizer.tokenize(x))
train["text"] = train["text"].apply(lambda x:" ".join([stemmer.lemmatize(token) for token in x]))
# # model building part
# In[26]:
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
# In[27]:
# Encode sentiment strings as integer class labels.
le = LabelEncoder()
train["sentiment"] = le.fit_transform(train.sentiment)
# In[28]:
train.sentiment.unique()
# In[29]:
corpus = train["text"].tolist()
labels = train["sentiment"].tolist()
# In[30]:
# 80/20 train/test split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(corpus,labels, test_size=0.2, random_state=40)
# In[32]:
def tfidf(data):
    """Fit a TF-IDF vectorizer on `data` and return (matrix, fitted vectorizer)."""
    vectorizer = TfidfVectorizer()
    matrix = vectorizer.fit_transform(data)
    return matrix, vectorizer
# Vectorize: fit TF-IDF on the training split, reuse the fit on the test split.
X_train_tfidf, tfidf_vectorizer = tfidf(x_train)
X_test_tfidf = tfidf_vectorizer.transform(x_test)
# In[34]:
# Persist the fitted vectorizer for inference-time reuse.
pickle.dump(tfidf_vectorizer, open('tfidf-transform.pkl', 'wb'))
# In[35]:
from sklearn.linear_model import LogisticRegression
# Multinomial logistic regression; class_weight balances the label skew.
clf_tfidf = LogisticRegression(C=30.0, class_weight='balanced', solver='newton-cg', multi_class='multinomial', n_jobs=-1, random_state=40)
clf_tfidf.fit(X_train_tfidf, y_train)
y_predicted_tfidf = clf_tfidf.predict(X_test_tfidf)
# In[36]:
# Persist the trained classifier.
filename = 'tweet-sentiment-lc-model.pkl'
pickle.dump(clf_tfidf, open(filename, 'wb'))
# In[ ]:
|
from contact import Contact
import unittest #model that helps in running the test
import pyperclip
class TestContact(unittest.TestCase):
    '''
    Test class that defines test cases for the Contact class behaviours.
    '''
    def setUp(self):
        '''
        Set-up method that runs before each test case.
        '''
        self.new_contact = Contact("James","Muriuki","0702901315","shawnnjoga@gmail.com")
    #FIRST-TEST
    def test_init(self):
        '''
        Check that contact objects are initialized correctly.
        '''
        self.assertEqual(self.new_contact.first_name,"James")
        self.assertEqual(self.new_contact.second_name,"Muriuki")
        # BUGFIX: was `self.asserEqual`, which raised AttributeError at runtime.
        self.assertEqual(self.new_contact.phone_number,"0702901315")
        self.assertEqual(self.new_contact.email,"shawnnjoga@gmail.com")
    #SECOND-TEST(SAVING )
    def test_save_contact(self):
        '''
        Test that a contact is saved into the class-level contact list.
        '''
        self.new_contact.save_contact()
        # BUGFIX: was `self.asserEqual`, which raised AttributeError at runtime.
        self.assertEqual(len(Contact.contact_list),1)
    #THIRD-TEST(SAVINGMULTIPLECONTACT)
    def test_save_multiple_contact(self):
        '''
        Test saving multiple contacts.
        '''
        self.new_contact.save_contact()
        test_contact = Contact("Test","User","0722348613","test@gmail.com")
        test_contact.save_contact()
        self.assertEqual(len(Contact.contact_list),2)
    def tearDown(self):
        '''
        tearDown method that does clean up after each test case has run.
        '''
        Contact.contact_list = []
    #FOURTH-TEST(DELETING)
    def test_delete_contact(self):
        '''
        Test deleting a contact from the contact list.
        '''
        self.new_contact.save_contact()
        test_contact = Contact("Test","User","0722348613","test@gmail.com")
        test_contact.save_contact()
        self.new_contact.delete_contact()  # deleting a contact
        self.assertEqual(len(Contact.contact_list),1)
    def test_contact_exist(self):
        '''
        Test whether a contact can be found by phone number.
        '''
        self.new_contact.save_contact()
        test_contact = Contact("Test","User","0722348613","test@gmail.com")
        test_contact.save_contact()
        contact_exists = Contact.contact_exists("0722348613")
        self.assertTrue(contact_exists)
    def test_copy_email(self):
        '''
        Confirm that the email address of a found contact is copied to
        the clipboard.
        '''
        self.new_contact.save_contact()
        Contact.copy_email("0702901315")
        self.assertEqual(self.new_contact.email,pyperclip.paste())
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
from moabb.datasets import BNCI2014001, Cho2017, PhysionetMI
from moabb.paradigms import MotorImagery
import numpy as np
from numpy.random import RandomState
import pickle
import time
import torch
import os
import pandas as pd
import mne
from NeurIPS_2.util.support import (
expand_data_dim,generate_common_chan_test_data,load_Cho2017,load_Physionet,load_BCI_IV,
correct_EEG_data_order,relabel,process_target_data,relabel_target,load_dataset_A,load_dataset_B,modify_data,
generate_data_file,print_dataset_info,print_info,get_dataset_A_ch,get_dataset_B_ch,shuffle_data,EuclideanAlignment,reduce_dataset,LabelAlignment,
generate_common_target_chans,create_epoch_array,reformat,load_source_data,load_target_data,combine
)
from NeurIPS_2.util.setup_dataset import (
setup_datasets,setup_specific_subject_dataset_EA
)
def relabel(l):
    """Map a motor-imagery event name to an integer class id (unknown -> 3)."""
    label_map = {'left_hand': 0, 'right_hand': 1, 'feet': 2}
    return label_map.get(l, 3)
def relabel_target(l):
    """Keep integer labels 0-2 unchanged; collapse anything else to class 3."""
    return l if l in (0, 1, 2) else 3
# --- Environment and reproducibility setup ---------------------------------
# Allow duplicate OpenMP runtimes (works around the common MKL/torch clash).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
cuda = torch.cuda.is_available()
print('gpu: ', cuda)
device = 'cuda' if cuda else 'cpu'
# Fix every RNG we rely on so dataset generation is repeatable.
seed = 42
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
rng = RandomState(seed)
# data_path = "C:\\Users\\wduong\\mne_data\\finalMI"
data_path = "/home/vernon/mne_data/finalMI"
#get common channel between dataset A and dataset B
dataset_A_channels = get_dataset_A_ch()
dataset_B_channels = get_dataset_B_ch()
# Load one subject from each public source dataset just to read channel names.
X_src1,_,_ = load_Cho2017(subjects=[1])
X_src2, _, _ = load_Physionet(subjects=[1])
X_src3, _, _ = load_BCI_IV(subjects=[1])
import matplotlib
matplotlib.use('TkAgg')
Cho2017_channels = X_src1.ch_names
Physionet_channels = X_src2.ch_names
BCI_IV_channels = X_src3.ch_names
# Intersect channel lists across all datasets so every epoch shares one montage.
common_channel_A_B = generate_common_target_chans(target_chan=BCI_IV_channels,source_chans=[dataset_A_channels,dataset_B_channels,Cho2017_channels,Physionet_channels])
montage = None
# common_channel_A_B = generate_common_target_chans(target_chan=dataset_A_channels,source_chans=[dataset_B_channels,Cho2017_channels,Physionet_channels])
print("common chan A_B size : ",len(common_channel_A_B))
print("common chan A_B : ",common_channel_A_B)
subject_ids = None  # None -> load every available subject
# subject_ids = [1,2,3]
# Full source-data loading, restricted to the common channel set.
X_src1,y_src1,m_src1 = load_source_data(target_channels=common_channel_A_B,relabel_func=relabel,dataset_name="cho2017",subject_ids=subject_ids)
X_src2,y_src2,m_src2 = load_source_data(target_channels=common_channel_A_B,relabel_func=relabel,dataset_name="physionet",subject_ids=subject_ids)
X_src3,y_src3,m_src3 = load_source_data(target_channels=common_channel_A_B,relabel_func=relabel,dataset_name="BCI_IV",montage=montage,subject_ids=subject_ids)
# #
print("before update meta data : ",m_src2)
# Physionet metadata needs reformatting and the dataset is down-sampled.
X_src2,y_src2,m_src2 = reformat(X_src2,y_src2,m_src2)
X_src2,y_src2,m_src2 = reduce_dataset(X_src2,y_src2,m_src2)
#
source_datasets = [
    (X_src1,y_src1,m_src1,"cho2017"),
    (X_src2,y_src2,m_src2, "physionet"),
    (X_src3,y_src3,m_src3, "BCI_IV")
]
#test 0
## shuffle data
# Output locations for the two target datasets (competition tasks A and B).
target_dataset_A_name = "dataset_A"
save_folder_A = "../da_dataset/task_2/final_MI_A_1"
test_folder_A = "../da_dataset/task_2/final_MI_test_A_1"
target_dataset_B_name = "dataset_B"
save_folder_B = "../da_dataset/task_2/final_MI_B_1"
test_folder_B = "../da_dataset/task_2/final_MI_test_B_1"
generate_data=True
convert_EA = True  # apply Euclidean Alignment while building the files
# Dataset A uses subject ids [start_id, end_id) -- presumably end-exclusive;
# TODO confirm against setup_specific_subject_dataset_EA.
start_id=1
end_id=4
setup_specific_subject_dataset_EA(source_datasets,target_dataset_A_name,common_channel_A_B,path=data_path,save_folder=save_folder_A,test_folder=test_folder_A,generate_folder_data=generate_data,start_id=start_id,end_id=end_id,convert_EA=convert_EA)
# Dataset B uses the remaining subject ids.
start_id=4
end_id=6
setup_specific_subject_dataset_EA(source_datasets,target_dataset_B_name,common_channel_A_B,path=data_path,save_folder=save_folder_B,test_folder=test_folder_B,generate_folder_data=generate_data,start_id=start_id,end_id=end_id,convert_EA=convert_EA)
|
import tensorflow as tf
import torch
from .resnet import ResNet
# _url_format = 'https://s3.us-west-1.wasabisys.com/resnest/torch/{}-{}.pth'
_url_format = 'https://github.com/zhanghang1989/ResNeSt/releases/download/weights_step1/{}-{}.pth'
# Map model name -> checksum prefix embedded in the checkpoint file name.
_model_sha256 = {name: checksum for checksum, name in [
    ('528c19ca', 'resnest50'),
    ('22405ba7', 'resnest101'),
    ('75117900', 'resnest200'),
    ('0cc87c48', 'resnest269'),
]}
def short_hash(name):
    """Return the leading 8 characters of the checksum registered for *name*."""
    checksum = _model_sha256.get(name)
    if checksum is None:
        raise ValueError('Pretrained model for {name} is not available.'.format(name=name))
    return checksum[:8]
# Pre-built download URL for every known ResNeSt variant.
resnest_model_urls = {name: _url_format.format(name, short_hash(name)) for
                      name in _model_sha256.keys()
                      }
def load_weight(keras_model, torch_url, group_size=2):
    """
    Copy pretrained ResNeSt weights from a torch checkpoint into *keras_model*.

    https://s3.us-west-1.wasabisys.com/resnest/torch/resnest50-528c19ca.pth > https://github.com/zhanghang1989/ResNeSt/releases/download/weights_step1/resnest50-528c19ca.pth
    https://s3.us-west-1.wasabisys.com/resnest/torch/resnest101-22405ba7.pth > https://github.com/zhanghang1989/ResNeSt/releases/download/weights_step1/resnest101-22405ba7.pth
    https://s3.us-west-1.wasabisys.com/resnest/torch/resnest200-75117900.pth > https://github.com/zhanghang1989/ResNeSt/releases/download/weights_step1/resnest200-75117900.pth
    https://s3.us-west-1.wasabisys.com/resnest/torch/resnest269-0cc87c48.pth > https://github.com/zhanghang1989/ResNeSt/releases/download/weights_step1/resnest269-0cc87c48.pth
    """
    torch_weight = torch.hub.load_state_dict_from_url(torch_url, progress=True, check_hash=True)
    weight = {}
    for k, v in dict(torch_weight).items():
        if k.split(".")[-1] in ["weight", "bias", "running_mean", "running_var"]:
            # Conv kernels: torch (out, in, kh, kw) -> keras (kh, kw, in, out).
            if ("downsample" in k or "conv" in k) and "weight" in k and v.ndim == 4:
                v = v.permute(2, 3, 1, 0)
            elif "fc.weight" in k:
                # Dense kernels are transposed between torch and keras.
                v = v.t()
            weight[k] = v.data.numpy()
    g = 0
    downsample = []
    keras_weight = []
    # Renamed loop variable (was `torch_weight`, shadowing the state dict above).
    for i, (torch_name, torch_tensor) in enumerate(weight.items()):
        if i + g < len(keras_model.weights):
            keras_name = keras_model.weights[i + g].name
            if "downsample" in torch_name:
                # Downsample weights are applied separately at the end.
                downsample.append(torch_tensor)
                continue
            elif "group" in keras_name:
                # A grouped torch conv maps onto `group_size` keras weights.
                g += (group_size - 1)
                torch_tensor = tf.split(torch_tensor, group_size, axis=-1)
            else:
                torch_tensor = [torch_tensor]
            keras_weight += torch_tensor
    for w in keras_model.weights:
        if "downsample" in w.name:
            new_w = downsample.pop(0)
        else:
            new_w = keras_weight.pop(0)
        try:
            tf.keras.backend.set_value(w, new_w)
        except Exception as exc:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
            # and hid the actual failure reason.
            print("load weight error !", exc, "\n", new_w)
    return keras_model
def resnest50(include_top=True, weights="imagenet", input_tensor=None, input_shape=None, classes=1000):
    """Build ResNeSt-50 (radix 2, layers [3, 4, 6, 3], stem width 32)."""
    if input_tensor is None:
        img_input = tf.keras.layers.Input(shape=input_shape)
    elif tf.keras.backend.is_keras_tensor(input_tensor):
        img_input = input_tensor
    else:
        img_input = tf.keras.layers.Input(tensor=input_tensor, shape=input_shape)
    backbone_out = ResNet(img_input, [3, 4, 6, 3], classes, include_top, radix=2,
                          group_size=1, block_width=64, stem_width=32,
                          deep_stem=True, avg_down=True, avd=True, avd_first=False)
    model = tf.keras.Model(img_input, backbone_out)
    if weights == "imagenet":
        # 2 keras weights per grouped torch weight (radix 2 * group_size 1).
        load_weight(model, resnest_model_urls["resnest50"], group_size=2 * 1)
    elif weights is not None:
        model.load_weights(weights)
    return model
def resnest101(include_top=True, weights="imagenet", input_tensor=None, input_shape=None, classes=1000):
    """Build ResNeSt-101 (radix 2, layers [3, 4, 23, 3], stem width 64)."""
    if input_tensor is None:
        img_input = tf.keras.layers.Input(shape=input_shape)
    elif tf.keras.backend.is_keras_tensor(input_tensor):
        img_input = input_tensor
    else:
        img_input = tf.keras.layers.Input(tensor=input_tensor, shape=input_shape)
    backbone_out = ResNet(img_input, [3, 4, 23, 3], classes, include_top, radix=2,
                          group_size=1, block_width=64, stem_width=64,
                          deep_stem=True, avg_down=True, avd=True, avd_first=False)
    model = tf.keras.Model(img_input, backbone_out)
    if weights == "imagenet":
        load_weight(model, resnest_model_urls["resnest101"], group_size=2 * 1)
        print("ImageNet pretrained weight is Loaded OK \n\n")
    elif weights is not None:
        model.load_weights(weights)
    return model
def resnest200(include_top=True, weights="imagenet", input_tensor=None, input_shape=None, classes=1000):
    """Build ResNeSt-200 (radix 2, layers [3, 24, 36, 3], stem width 64)."""
    if input_tensor is None:
        img_input = tf.keras.layers.Input(shape=input_shape)
    elif tf.keras.backend.is_keras_tensor(input_tensor):
        img_input = input_tensor
    else:
        img_input = tf.keras.layers.Input(tensor=input_tensor, shape=input_shape)
    backbone_out = ResNet(img_input, [3, 24, 36, 3], classes, include_top, radix=2,
                          group_size=1, block_width=64, stem_width=64,
                          deep_stem=True, avg_down=True, avd=True, avd_first=False)
    model = tf.keras.Model(img_input, backbone_out)
    if weights == "imagenet":
        load_weight(model, resnest_model_urls["resnest200"], group_size=2 * 1)
    elif weights is not None:
        model.load_weights(weights)
    return model
def resnest269(include_top=True, weights="imagenet", input_tensor=None, input_shape=None, classes=1000):
    """Build ResNeSt-269 (radix 2, layers [3, 30, 48, 8], stem width 64)."""
    if input_tensor is None:
        img_input = tf.keras.layers.Input(shape=input_shape)
    elif tf.keras.backend.is_keras_tensor(input_tensor):
        img_input = input_tensor
    else:
        img_input = tf.keras.layers.Input(tensor=input_tensor, shape=input_shape)
    backbone_out = ResNet(img_input, [3, 30, 48, 8], classes, include_top, radix=2,
                          group_size=1, block_width=64, stem_width=64,
                          deep_stem=True, avg_down=True, avd=True, avd_first=False)
    model = tf.keras.Model(img_input, backbone_out)
    if weights == "imagenet":
        load_weight(model, resnest_model_urls["resnest269"], group_size=2 * 1)
    elif weights is not None:
        model.load_weights(weights)
    return model
from netapp.netapp_object import NetAppObject
class WaflSyncHandle(NetAppObject):
    """
    Handle representing a specific wafl-sync operation on a
    volume.
    """
    # Volume name backing the `volume` property (None until assigned).
    _volume = None
    @property
    def volume(self):
        """
        Volume name on which wafl-sync operation being performed.
        """
        return self._volume
    @volume.setter
    def volume(self, val):
        # Validate before storing; None clears the attribute without validation.
        if val != None:
            self.validate('volume', val)
        self._volume = val
    # Opaque operation handle backing the `handle` property.
    _handle = None
    @property
    def handle(self):
        """
        Opaque handle that represents a specific wafl-sync
        operation.
        """
        return self._handle
    @handle.setter
    def handle(self, val):
        if val != None:
            self.validate('handle', val)
        self._handle = val
    # Volume UUID backing the `volume_uuid` property.
    _volume_uuid = None
    @property
    def volume_uuid(self):
        """
        Volume UUID on which wafl-sync operation being performed.
        """
        return self._volume_uuid
    @volume_uuid.setter
    def volume_uuid(self, val):
        if val != None:
            self.validate('volume_uuid', val)
        self._volume_uuid = val
    @staticmethod
    def get_api_name():
        # ONTAP API element name for this object type.
        return "wafl-sync-handle"
    @staticmethod
    def get_desired_attrs():
        # Attribute names in API spelling (hyphenated) to request from the server.
        return [
            'volume',
            'handle',
            'volume-uuid',
        ]
    def describe_properties(self):
        # Property schema used by NetAppObject for (de)serialization.
        # NOTE(review): `basestring` exists only in Python 2 -- this generated
        # binding appears to target Python 2; confirm before running on Python 3.
        return {
            'volume': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'handle': { 'class': int, 'is_list': False, 'required': 'required' },
            'volume_uuid': { 'class': basestring, 'is_list': False, 'required': 'optional' },
        }
|
from random import randint
def reverseList(List):
    """Reverse *List* in place and return it.

    Bug fixed: the original looped over ``range((len(List) - 1) // 2)``,
    which skips the middle swap for even-length lists (e.g. [1, 2, 3, 4]
    became [4, 2, 3, 1]).  Iterating to ``len(List) // 2`` covers every
    pair.  The pointless ``try/except IndexError`` is gone: the computed
    indices are always in range.
    """
    for x in range(len(List) // 2):
        endSpot = len(List) - 1 - x
        List[x], List[endSpot] = List[endSpot], List[x]
    return List
# Demo: read a size from the user, build a random list of that many ints
# (each drawn from [0, myint]), print it, then print it reversed in place.
myint = int(input("Give int"))
rndList = []
for y in range(0, myint):
    rndList.append(randint(0, myint))
print(rndList)
print(reverseList(rndList))
#coding:utf-8
def checkBST(root):
    """Return True iff the binary tree rooted at *root* is a strict BST.

    Performs an iterative in-order traversal; a valid BST yields a strictly
    increasing sequence of node values (duplicates are rejected).

    Robustness fix: the original used ``prev == None`` as a "no previous
    node yet" sentinel, which breaks if a node legitimately stores ``None``
    (or an object whose ``__eq__`` treats None oddly).  An explicit boolean
    flag removes the ambiguity.  An empty tree returns True.
    """
    stack = []
    prev = None
    have_prev = False  # True once the first in-order node has been visited
    while root or stack:
        # Descend to the leftmost unvisited node.
        while root:
            stack.append(root)
            root = root.left
        root = stack.pop()
        # In-order predecessor must be strictly smaller.
        if have_prev and root.data <= prev:
            return False
        prev = root.data
        have_prev = True
        root = root.right
    return True
|
import numpy as np
import networkx as nx
import warnings
import os
from networkx.algorithms.shortest_paths.weighted import dijkstra_path
from utils import *
# Module-wide registry of simulation-scoped values (time horizon, clock, ...).
_sim_info = {}


def push_to_sim_info(key, value):
    """Store *value* under *key* in the shared registry.

    Warns (but still overwrites) when the key is already present.
    """
    global _sim_info
    # Idiom fix: membership test directly on the dict instead of `.keys()`.
    if key in _sim_info:
        warnings.warn("Key already exists! Replacing its current value...")
    _sim_info[key] = value


def pull_from_sim_info(key):
    """Return the value registered under *key*; raises KeyError if absent."""
    return _sim_info[key]
class Clock:
    """Simulation clock that sweeps forward over [start_time, max_time] and
    can be flipped to sweep backward (used for the cost back-propagation pass).
    """

    def __init__(self, max_time, start_time=0, interval=1, mode='forward'):
        self._max_time = max_time
        self._start_time = self._time = start_time
        self._interval = interval
        # Remember the configured step so reset() can restore it.  The previous
        # implementation hard-coded the reset interval to 1, silently discarding
        # any non-unit interval passed to the constructor.
        self._init_interval = interval
        self._mode = mode

    def get_time(self):
        """Return the current simulation time."""
        return self._time

    def tick(self):
        """Advance (forward mode) or rewind (backward mode) by one interval."""
        self._time += self._interval

    def switch_mode(self):
        """Flip sweep direction, repositioning time at the opposite end."""
        if self._mode == 'forward':
            self._time = self._max_time
            self._mode = 'backward'
        else:
            self._time = self._start_time
            self._mode = 'forward'
        self._interval *= -1

    def reset(self):
        """Return to the initial time, direction, and step size."""
        self._time = self._start_time
        self._mode = 'forward'
        self._interval = self._init_interval
class Controller:
    """
    Orchestrates a dynamic traffic assignment experiment: builds the Network
    from link data, initializes departure-time choices (`dtchoices`) and
    diverge split rates (`srates`), and iterates toward dynamic user
    equilibrium via `calc_DUE`.
    """
    def __init__(self, link_info, T, Tm, models, demands, cost_params, dtchoices=None, srates=None):
        """
        Args:
            link_info: edge data accepted by ``nx.DiGraph`` describing the network.
            T (int): total number of simulation time steps.
            Tm (int): number of departure time steps (Tm <= T).
            models (dict): maps 'link'/'source'/'sink'/'diverge'/'merge' to model classes.
            demands: per-source demand data, keyed by source node id.
            cost_params (dict): cost constants pushed into the sim-info registry.
            dtchoices, srates: optional warm-start choices; randomly
                initialized by `init_choices` when None.
        """
        self.link_info = link_info
        self.T = T
        self.Tm = Tm
        self.models = models
        self.demands = demands
        self.cost_params = cost_params
        self.dtchoices = dtchoices
        self.srates = srates
        tmpgraph = nx.DiGraph(link_info)
        self.sources, self.sinks, self.diverges, self.merges = get_node_types(tmpgraph)
        # One commodity "type" per sink.
        self.ntypes = len(self.sinks)
        self.init_choices(tmpgraph)
        # Annotate each node with the data its model constructor expects.
        # NOTE(review): `graph.node[...]` is the pre-2.4 networkx attribute
        # API -- this module appears pinned to an old networkx; confirm.
        for nodeid in self.sources:
            tmpgraph.node[nodeid]['nodeid'] = nodeid
            tmpgraph.node[nodeid]['nodetype'] = 'source'
            tmpgraph.node[nodeid]['demands'] = demands[nodeid]
            tmpgraph.node[nodeid]['dtchoices'] = self.dtchoices[nodeid]
        for iterid,nodeid in enumerate(self.sinks):
            tmpgraph.node[nodeid]['nodeid'] = nodeid
            tmpgraph.node[nodeid]['nodetype'] = 'sink'
            tmpgraph.node[nodeid]['sinkno'] = iterid
        for nodeid in self.diverges:
            tmpgraph.node[nodeid]['nodeid'] = nodeid
            tmpgraph.node[nodeid]['nodetype'] = 'diverge'
            tmpgraph.node[nodeid]['srates'] = self.srates[nodeid]
        for nodeid in self.merges:
            tmpgraph.node[nodeid]['nodeid'] = nodeid
            tmpgraph.node[nodeid]['nodetype'] = 'merge'
        # Publish shared constants before model construction reads them.
        push_to_sim_info('T', T)
        push_to_sim_info('ntypes', self.ntypes)
        for key,value in cost_params.items():
            push_to_sim_info(key,value)
        self.network = Network(tmpgraph, models)
    def init_choices(self, init_graph):
        """Randomly initialize `dtchoices`/`srates` (only those that are None),
        seeding split rates along shortest paths from each source to each sink."""
        paths = {snknode:[dijkstra_path(init_graph, srcnode, snknode, weight='length') for srcnode in self.sources] for snknode in self.sinks}
        def dtchoice_init():
            # Spread departures uniformly over 4 random departure steps.
            choices = np.zeros((self.T, self.ntypes))
            choices[np.random.choice(self.Tm, 4),:] = 0.25
            return choices
        def srate_init(nodeid):
            # Route all flow for sink i toward the shortest-path successor of
            # this diverge; fall back to a random outlink if none applies.
            out_degree = init_graph.out_degree(nodeid)
            choices = np.zeros((self.T, out_degree, self.ntypes))
            for i in np.arange(self.ntypes):
                for j,srcnodeid in enumerate(self.sources):
                    if nodeid in paths[self.sinks[i]][j]:
                        nxtnodeid = paths[self.sinks[i]][j][paths[self.sinks[i]][j].index(nodeid)+1]
                        try:
                            idx = list(init_graph.out_edges(nodeid)).index((nodeid, nxtnodeid))
                            choices[:,idx,i] = 1
                        except ValueError:
                            pass
                        break
                # Rows must sum to one; patch any row left unassigned.
                if np.any(~np.isclose(1, np.sum(choices[:,:,i], axis=1))):
                    choices[:,np.random.choice(out_degree, 1),i] = 1
            return choices
        if self.dtchoices == None:
            self.dtchoices = dict()
            for nodeid in self.sources:
                self.dtchoices[nodeid] = dtchoice_init()
        if self.srates == None:
            self.srates = dict()
            for nodeid in self.diverges:
                self.srates[nodeid] = srate_init(nodeid)
    def calc_DUE(self, niter, learning_rate, out_file='out.csv', print_iter_measures=False):
        """Iterate toward dynamic user equilibrium.

        Each iteration: run the simulation, back-propagate costs, then project
        updated departure-time choices and split rates via `solveqp`.
        Convergence measures are appended to *out_file* every iteration.

        Returns:
            (dtchoices, srates) after the final iteration.
        """
        with open(out_file, 'w') as f:
            for iterno in np.arange(niter):
                self.network.runsim()
                self.network.compute_costs()
                dtchoices_old = self.dtchoices
                srates_old = self.srates
                # Collect outlink costs at sources (departure-time costs) and
                # diverges (split-rate costs).
                dtccosts = dict()
                srcosts = dict()
                for node in self.network.nodes:
                    if node.nodetype == 'source':
                        dtccosts[node.nodeid] = node.get_outlink_costs().squeeze() # since only one outlink
                    elif node.nodetype == 'diverge':
                        srcosts[node.nodeid] = node.get_outlink_costs()
                dtchoices_new = dict()
                srates_new = dict()
                # update dtchoices
                for sourcenodeid in self.sources:
                    dtchoices_new[sourcenodeid] = np.zeros_like(self.dtchoices[sourcenodeid])
                    for iterid,sinknodeid in enumerate(self.sinks):
                        tmp = solveqp(self.dtchoices[sourcenodeid][:self.Tm, iterid], dtccosts[sourcenodeid][:self.Tm, iterid], lr=learning_rate)['x']
                        # Zero tiny proportions, then renormalize to a distribution.
                        tmp[tmp<MIN_PROP_VALUE] = 0
                        dtchoices_new[sourcenodeid][:self.Tm, iterid] = tmp/np.sum(tmp)
                        assert np.isclose(np.sum(dtchoices_new[sourcenodeid][:self.Tm, iterid]), 1.)
                # update srates
                for divergenodeid in self.diverges:
                    srates_new[divergenodeid] = np.zeros_like(self.srates[divergenodeid])
                    for iterid,sinknodeid in enumerate(self.sinks):
                        for t in np.arange(self.T):
                            tmp = solveqp(self.srates[divergenodeid][t, :, iterid], srcosts[divergenodeid][t, :, iterid], lr=learning_rate)['x']
                            tmp[tmp<MIN_PROP_VALUE] = 0
                            srates_new[divergenodeid][t, :, iterid] = tmp/np.sum(tmp)
                            assert np.isclose(np.sum(srates_new[divergenodeid][t, :, iterid]), 1.)
                measures = get_measures(dtchoices_new, srates_new, dtchoices_old, srates_old, dtccosts, srcosts)
                f.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(iterno, *measures))
                # reset all
                self.reset(dtchoices_new, srates_new)
                if print_iter_measures:
                    print('{0:<5} {1:<14.10} {2:<14.10} {3:<14.10}\t {4:<14.10} {5:<14.10} {6:<14.10}'.format(iterno, *measures))
        return self.dtchoices, self.srates
    def reset(self, dtchoices_new, srates_new):
        """Install new choices and reset the network for the next iteration."""
        self.dtchoices = dtchoices_new
        self.srates = srates_new
        self.network.reset(dtchoices_new, srates_new)
class Network(nx.DiGraph):
    """Directed graph whose nodes/edges hold instantiated Node/Link models;
    drives the forward simulation pass and the backward cost pass."""
    def __init__(self, init_graph, models):
        self.T = pull_from_sim_info('T')
        self.ntypes = pull_from_sim_info('ntypes')
        # Shared clock, published so Link/Node instances can read the time.
        self.clock = Clock(max_time = self.T-1)
        push_to_sim_info('clock', self.clock)
        # Instantiate a link model on every edge.
        self.links = []
        for upnodeid, downnodeid, edgedata in init_graph.edges(data=True):
            edgedata['link'] = models['link'](**edgedata)
            self.links.append(edgedata['link'])
        # Instantiate the node model matching each node's declared type.
        nodes = {}
        for nodeid,nodedata in init_graph.nodes(data=True):
            inlinks = [edgedata['link'] for _, _, edgedata in init_graph.in_edges(nodeid, data=True)]
            outlinks = [edgedata['link'] for _, _, edgedata in init_graph.out_edges(nodeid, data=True)]
            nodes[nodeid] = models[nodedata['nodetype']](inlinks, outlinks, **nodedata)
        # Rebuild the graph over the model objects and wire links to nodes.
        graph_data = []
        for upnodeid, downnodeid, edgedata in init_graph.edges(data=True):
            graph_data.append([nodes[upnodeid], nodes[downnodeid], {'link': edgedata['link']}])
            edgedata['link'].connect_nodes(nodes[upnodeid], nodes[downnodeid])
        super().__init__(graph_data)
    def runsim(self):
        """Forward pass: propagate flows through every time step, then flip
        the clock into backward mode for the cost pass."""
        while self.clock.get_time() < self.T:
            for iterid,node in enumerate(self.nodes):
                node.determine_flows()
            for iterid,link in enumerate(self.links):
                link.update()
            self.clock.tick()
        self.clock.switch_mode()
    def compute_costs(self):
        """Backward pass: accumulate costs from the last time step down to
        t=0, then flip the clock back to forward mode."""
        for link in self.links:
            link.init_cost_computation()
        for node in self.nodes:
            node.init_cost_computation()
        while self.clock.get_time() >= 0:
            for link in self.links:
                link.compute_costs()
            for node in self.nodes:
                node.compute_costs()
            self.clock.tick()
        self.clock.switch_mode()
    def reset(self, dtchoices_new, srates_new):
        """Reset clock, links, and nodes; sources/diverges receive their
        newly computed choices."""
        self.clock.reset()
        for link in self.links:
            link.reset()
        for node in self.nodes:
            if node.nodetype == 'source':
                node.reset(dtchoices_new[node.nodeid])
            elif node.nodetype == 'diverge':
                node.reset(srates_new[node.nodeid])
            else:
                node.reset()
class Link:
    """Base class for link models.

    Subclasses must implement `update`, `init_cost_computation`, and
    `compute_costs`; `reset` allocates the per-timestep state arrays.
    """
    def __init__(self, length, **kwargs):
        # Number of cells the link is divided into -- presumably a cell
        # transmission style discretization; confirm in the link model.
        self.length = length
        self.T = pull_from_sim_info('T')
        self.ntypes = pull_from_sim_info('ntypes')
        # Only auto-reset the concrete base class; subclasses trigger their
        # own reset after their extra state is set up.
        if type(self) == Link:
            self.reset()
    def get_time(self):
        """Return the current simulation time from the shared clock."""
        return pull_from_sim_info('clock').get_time()
    def connect_nodes(self, upnode, downnode):
        """Attach the upstream and downstream Node model instances."""
        self.upnode = upnode
        self.downnode = downnode
    def init_cost_computation(self):
        raise NotImplementedError
    def update(self):
        raise NotImplementedError
    def compute_costs(self):
        raise NotImplementedError
    def reset(self):
        """(Re)allocate occupancy, flow, and cost arrays for a fresh run."""
        self.occs = np.zeros((self.T+1, self.length, self.ntypes))
        self.flows = np.zeros((self.T, self.length+1, self.ntypes))
        self.occs_agg = np.zeros((self.T+1, self.length))
        self.flows_agg = np.zeros((self.T, self.length+1))
        self.inflow_costs = np.zeros((self.T, self.ntypes))
class Node:
    """Base class for node models (source/sink/diverge/merge).

    Subclasses must implement `determine_flows`, `init_cost_computation`,
    and `compute_costs`; `reset` allocates per-link flow and cost arrays.
    """
    def __init__(self, inlinks, outlinks, nodeid, nodetype):
        self.nodeid = nodeid
        self.inlinks = inlinks
        self.outlinks = outlinks
        self.nodetype = nodetype
        self.in_degree = len(self.inlinks)
        self.out_degree = len(self.outlinks)
        self.T = pull_from_sim_info('T')
        self.ntypes = pull_from_sim_info('ntypes')
        # Only auto-reset the concrete base class; subclasses reset themselves.
        if type(self) == Node:
            self.reset()
    def get_time(self):
        """Return the current simulation time from the shared clock."""
        return pull_from_sim_info('clock').get_time()
    def get_outflows(self, outlink):
        """Per-type outflows into *outlink* at the current time step."""
        return self.outflows[outlink][self.get_time(),:]
    def get_inflows(self, inlink):
        """Per-type inflows from *inlink* at the current time step."""
        return self.inflows[inlink][self.get_time(),:]
    def get_inflow_costs(self, inlink, timesteps):
        """Per-type inflow costs for *inlink* at the given time step(s)."""
        return self.inflow_costs[inlink][timesteps,:]
    def get_outflow_costs(self, outlink, timestep):
        """Per-type outflow costs for *outlink* at the given time step(s).

        Bug fixed: the body previously referenced the undefined name
        ``timesteps`` (the parameter is ``timestep``), so every call
        raised NameError.
        """
        return self.outflow_costs[outlink][timestep,:]
    def init_cost_computation(self):
        raise NotImplementedError
    def determine_flows(self):
        raise NotImplementedError
    def compute_costs(self):
        raise NotImplementedError
    def get_outlink_costs(self):
        """Stack outflow costs of all outlinks into a (T, out_degree, ntypes) array."""
        return np.stack([self.outflow_costs[outlink] for outlink in self.outlinks], axis=1)
    def reset(self):
        """(Re)allocate per-link flow and cost arrays for a fresh run."""
        self.inflows = dict()
        self.inflow_costs = dict()
        for inlink in self.inlinks:
            self.inflows[inlink] = np.zeros((self.T, self.ntypes))
            self.inflow_costs[inlink] = np.zeros((self.T, self.ntypes))
        self.outflows = dict()
        self.outflow_costs = dict()
        for outlink in self.outlinks:
            self.outflows[outlink] = np.zeros((self.T, self.ntypes))
            self.outflow_costs[outlink] = np.zeros((self.T, self.ntypes))
        self.cost_calc_info = dict()
|
# Copyright(c) Max Kolosov 2009 maxkolosov@inbox.ru
# http://vosolok2008.narod.ru
# BSD license
__version__ = "0.2"
__versionTime__ = "2013-01-22"
__author__ = "Max Kolosov <maxkolosov@inbox.ru>"
__doc__ = """
pybass_ape.py - is ctypes python module for
BASS_APE - extension to the BASS audio library
that enables the playback of Monkey's Audio streams.
"""
import sys, ctypes, platform
from scribepy.pybass import pybass
from pathlib import Path
# Directory containing this module; used to locate the bundled .so below.
pybass_module = Path(__file__).parent
# Re-export the ctypes aliases the function prototypes need.
QWORD = pybass.QWORD
HSTREAM = pybass.HSTREAM
BASS_FILEPROCS = pybass.BASS_FILEPROCS
# Load the BASS_APE shared library with the platform's calling convention
# (stdcall via WinDLL on Windows, cdecl via CDLL elsewhere).
if platform.system().lower() == "windows":
    bass_ape_module = ctypes.WinDLL("bass_ape")
    func_type = ctypes.WINFUNCTYPE
else:
    bass_ape_module = ctypes.CDLL(
        f"{pybass_module}/../BASS_modules/libbass_ape.so"
    )
    func_type = ctypes.CFUNCTYPE
# Additional tags available from BASS_StreamGetTags
BASS_TAG_APE = 6  # APE tags
# BASS_CHANNELINFO type
BASS_CTYPE_STREAM_APE = 0x10700
# HSTREAM BASSAPEDEF(BASS_APE_StreamCreateFile)(BOOL mem, const void *file, QWORD offset, QWORD length, DWORD flags);
BASS_APE_StreamCreateFile = func_type(
    HSTREAM, ctypes.c_byte, ctypes.c_void_p, QWORD, QWORD, ctypes.c_ulong
)(("BASS_APE_StreamCreateFile", bass_ape_module))
# HSTREAM BASSAPEDEF(BASS_APE_StreamCreateFileUser)(DWORD system, DWORD flags, const BASS_FILEPROCS *procs, void *user);
BASS_APE_StreamCreateFileUser = func_type(
    HSTREAM,
    ctypes.c_ulong,
    ctypes.c_ulong,
    ctypes.POINTER(BASS_FILEPROCS),
    ctypes.c_void_p,
)(("BASS_APE_StreamCreateFileUser", bass_ape_module))
# Smoke test: initialize BASS, play test.ape, then free the library.
if __name__ == "__main__":
    if not pybass.BASS_Init(-1, 44100, 0, 0, 0):
        print(
            "BASS_Init error %s"
            % pybass.get_error_description(pybass.BASS_ErrorGetCode())
        )
    else:
        handle = BASS_APE_StreamCreateFile(False, b"test.ape", 0, 0, 0)
        pybass.play_handle(handle)
        if not pybass.BASS_Free():
            print(
                "BASS_Free error %s"
                % pybass.get_error_description(pybass.BASS_ErrorGetCode())
            )
|
'''
A tasker on top of a SQL database.
'''
import logging
from securitybot.tasker.tasker import Task, Tasker, STATUS_LEVELS, Escalation
from securitybot.sql import SQLEngine
from typing import List
# Note: this order is provided to match the SQLTask constructor
GET_ALERTS = '''
SELECT alerts.hash,
title,
alerts.ldap,
reason,
description,
url,
performed,
comment,
authenticated,
status,
event_time
FROM alerts
JOIN user_responses ON alerts.hash = user_responses.hash
JOIN alert_status ON alerts.hash = alert_status.hash
WHERE status = %s
'''
GET_ESCALATION = '''SELECT ldap, delay_in_sec, escalated_at FROM escalation WHERE hash=%s'''
SET_ESCALATED = '''UPDATE escalation SET escalated_at=NOW() WHERE hash = %s AND ldap=%s AND delay_in_sec=%s'''
class SQLTasker(Tasker):
    """Tasker implementation backed by the alerts SQL tables."""

    def _get_tasks(self, level):
        # type: (int) -> List[Task]
        '''
        Gets all tasks of a certain level.

        Args:
            level (int): One of STATUS_LEVELS
        Returns:
            List of SQLTasks.
        '''
        tasks = []
        for alert in SQLEngine.execute(GET_ALERTS, (level,)):
            task = SQLTask(*alert)
            escalation_rows = SQLEngine.execute(GET_ESCALATION, (task.hash,))
            task.escalation = [Escalation(*row) for row in escalation_rows]
            logging.debug("Fetched task from DB: {0}".format(task))
            tasks.append(task)
        return tasks

    def get_new_tasks(self):
        # type: () -> List[Task]
        '''All newly opened tasks.'''
        return self._get_tasks(STATUS_LEVELS.OPEN)

    def get_active_tasks(self):
        # type: () -> List[Task]
        '''All tasks currently in progress.'''
        return self._get_tasks(STATUS_LEVELS.INPROGRESS)

    def get_pending_tasks(self):
        # type: () -> List[Task]
        '''All tasks awaiting verification.'''
        return self._get_tasks(STATUS_LEVELS.VERIFICATION)
SET_STATUS = '''
UPDATE alert_status
SET status=%s
WHERE hash=%s
'''
SET_RESPONSE = '''
UPDATE user_responses
SET comment=%s,
ldap=%s,
performed=%s,
authenticated=%s,
updated_at=NOW()
WHERE hash=%s
'''
class SQLTask(Task):
    """Task persisted in the alerts/user_responses/alert_status SQL tables."""
    def __init__(self, hsh, title, username, reason, description, url,
                 performed, comment, authenticated, status, event_time, escalation=None):
        # type: (str, str, str, str, str, str, bool, str, bool, int, Any, Any) -> None
        '''
        Args:
            hsh (str): SHA256 primary key hash.
        '''
        super(SQLTask, self).__init__(title, username, reason, description, url,
                                      performed, comment, authenticated, status, event_time=event_time, escalation=escalation)
        self.hash = hsh
    def _set_status(self, status):
        # type: (int) -> None
        '''
        Sets the status of a task in the DB.

        Args:
            status (int): The new status to use.
        '''
        self.status = status
        SQLEngine.execute(SET_STATUS, (status, self.hash))
    def _set_response(self):
        # type: () -> None
        '''
        Updates the user response for this task.
        '''
        SQLEngine.execute(SET_RESPONSE, (self.comment,
                                         self.username,
                                         self.performed,
                                         self.authenticated,
                                         self.hash))
    def set_open(self):
        # Mark the alert as newly opened.
        self._set_status(STATUS_LEVELS.OPEN)
    def set_in_progress(self):
        # Mark the alert as being worked on.
        self._set_status(STATUS_LEVELS.INPROGRESS)
    def set_verifying(self):
        # Move to verification and persist the user's response in one step.
        self._set_status(STATUS_LEVELS.VERIFICATION)
        self._set_response()
    def set_escalated(self, escalation):
        # Record the escalation notification time both in memory and in the DB.
        escalation.set_notified()
        SQLEngine.execute(SET_ESCALATED, (self.hash, escalation.ldap, escalation.delay_in_sec))
|
import runchecks
import setup
import socket
import settings
import request_handler
import threading
print('Identifying IP!')
# Resolve this machine's primary IPv4 address via its hostname.
HOST = socket.gethostbyname(socket.gethostname())
print('Your IPv4, is ' + str(HOST))
ADDR = (HOST, settings.PORT)  # (ip, port) pair passed to bind()
print('Starting Server on {}:{}'.format(HOST, settings.PORT))
# starting TCP protocol over IPv4
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
    try:
        server.bind(ADDR)
        server.listen()
        print(f'server successfully started at {HOST}:{settings.PORT}!')
        print('Please disallow for Private Network Firewall (will be prompted), ignore if already disallowed!')
    except OSError as exc:
        # bind()/listen() failures surface as OSError; the original bare
        # `except:` swallowed the real cause (and even KeyboardInterrupt).
        # Chaining with `from exc` keeps the underlying error visible.
        # Also fixed: the two adjacent string literals were missing a space
        # and rendered as "...proxysettings...".
        raise Exception(
            'could not start server, this could be some issue with your proxy '
            'settings or firewall settings or some other app is using this PORT!') from exc
    # making server running continuously: one handler thread per connection
    while True:
        connection, address = server.accept()
        thread = threading.Thread(target=request_handler.gate_way, args=(connection, address))
        print(f'Starting Connection with {address}')
        thread.start()
        # threading.activeCount() is a deprecated alias of active_count().
        # Subtract 2 for the main thread and the just-started handler's bookkeeping.
        print(f'active connections = {threading.active_count() - 2}')
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""module for contextimpl: ContextImpl"""
from heronpy.streamlet.context import Context
class ContextImpl(Context):
    """ContextImpl: adapts a Heron topology context to the streamlet Context API."""

    def __init__(self, top_context, state, emitter):
        self._top_context = top_context
        self._state = state
        self._emitter = emitter

    def get_task_id(self):
        """Task id of this instance within the topology."""
        return self._top_context.get_task_id()

    def get_config(self):
        """Cluster-level configuration of the topology."""
        return self._top_context.get_cluster_config()

    def get_stream_name(self):
        """Name (id) of the first declared input stream."""
        sources = list(self._top_context.get_this_sources().keys())
        return sources[0].id

    def get_num_partitions(self):
        """Number of tasks running this component."""
        component = self._top_context.get_component_id()
        return len(self._top_context.get_component_tasks(component))

    def get_partition_index(self):
        """Index of this task among the sorted task ids of its component."""
        component = self._top_context.get_component_id()
        tasks = self._top_context.get_component_tasks(component)
        tasks.sort()  # in-place, matching the original behaviour
        return tasks.index(self.get_task_id())

    def get_state(self):
        """Per-instance state object supplied at construction."""
        return self._state

    def emit(self, values):
        """Emit a single value tuple on the 'output' stream."""
        self._emitter.emit([values], stream='output')
|
# Copyright (c) 2019 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@uavcan.org>
import os
import sys
import time
import gzip
import typing
import pickle
import base64
import pathlib
import logging
import itertools
import dataclasses
import pydsdl
import nunavut
import nunavut.jinja
import nunavut.postprocessors
# Path-like arguments accepted by the public API of this module.
_AnyPath = typing.Union[str, pathlib.Path]
# Jinja templates ship alongside this module in the `_templates` directory.
_TEMPLATE_DIRECTORY: pathlib.Path = pathlib.Path(__file__).absolute().parent / pathlib.Path("_templates")
_OUTPUT_FILE_PERMISSIONS = 0o444
"""
Read-only for all because the files are autogenerated and should not be edited manually.
"""
_logger = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class GeneratedPackageInfo:
    """Describes a Python package produced from a DSDL root namespace."""
    path: pathlib.Path
    """
    Path to the directory that contains the top-level ``__init__.py``.
    """
    models: typing.Sequence[pydsdl.CompositeType]
    """
    List of PyDSDL objects describing the source DSDL definitions.
    This can be used for arbitrarily complex introspection and reflection.
    """
    name: str
    """
    The name of the generated package, which is the same as the name of the DSDL root namespace unless
    the name had to be stropped. See ``nunavut.lang.py.PYTHON_RESERVED_IDENTIFIERS``.
    """
def generate_package(
    root_namespace_directory: _AnyPath,
    lookup_directories: typing.Optional[typing.List[_AnyPath]] = None,
    output_directory: typing.Optional[_AnyPath] = None,
    allow_unregulated_fixed_port_id: bool = False,
) -> typing.Optional[GeneratedPackageInfo]:
    """
    This function runs the DSDL compiler, converting a specified DSDL root namespace into a Python package.
    In the generated package, nested DSDL namespaces are represented as Python subpackages,
    DSDL types as Python classes, type version numbers as class name suffixes separated via underscores
    (like ``Type_1_0``), constants as class attributes, fields as properties.
    For a more detailed information on how to use generated types, just generate them and read the resulting
    code -- it is made to be human-readable and contains docstrings.

    Generated packages can be freely moved around the file system or even deployed on other systems --
    they are fully location-invariant.

    Generated packages do not automatically import their nested subpackages. For example, if the application
    needs to use ``uavcan.node.Heartbeat.1.0``, it has to ``import uavcan.node`` explicitly; doing just
    ``import uavcan`` is not sufficient.

    If the source definition contains identifiers, type names, namespace components, or other entities whose
    names are listed in ``nunavut.lang.py.PYTHON_RESERVED_IDENTIFIERS``,
    the compiler applies stropping by suffixing such entities with an underscore ``_``.
    A small subset of applications may require access to a generated entity without knowing in advance whether
    its name is a reserved identifier or not (i.e., whether it's stropped or not). To simplify usage,
    this submodule provides helper functions
    :func:`pyuavcan.dsdl.get_attribute` and :func:`pyuavcan.dsdl.set_attribute` that provide access to generated
    class/object attributes using their original names before stropping.
    Likewise, the function :func:`pyuavcan.dsdl.get_model` can find a generated type even if any of its name
    components are stropped; e.g., a DSDL type ``str.Type.1.0`` would be imported as ``str_.Type_1_0``.
    None of it, however, is relevant for an application that does not require genericity (vast majority of
    applications don't), so a much easier approach in that case is just to look at the generated code and see
    if there are any stropped identifiers in it, and then just use appropriate names statically.

    The recommended usage pattern for this function is lazy generation.
    First, add the ``output_directory`` (if not specified it defaults to the current working directory)
    to :data:`sys.path` or to the ``PYTHONPATH`` environment variable to make the generated package(s) importable.
    Then try importing the target DSDL-generated package. If the attempt is successful, our job here is done.
    Otherwise, the package(s) need(s) to be generated by invoking this function,
    and then another import attempt will have to be made.
    Beware that before retrying the import it's necessary to invoke :func:`importlib.invalidate_caches`.

    A package generated for a particular version of PyUAVCAN may be incompatible with any other version of the
    library. If your application relies on lazy generation, consider including the library version string
    :data:`pyuavcan.__version__` in ``output_directory``, so that the generated package cache is
    invalidated automatically when a different version of the library is used.

    Having generated a package, consider updating the include path set of your Python IDE to take advantage
    of code completion and static type checking.

    When using PyUAVCAN from an interactive session (e.g., REPL or Jupyter), it is usually more convenient
    to generate packages using the command-line tool rather than invoking this function manually.
    Please refer to the command-line tool documentation for details.

    :param root_namespace_directory: The source DSDL root namespace directory path. The last component of the path
        is the name of the root namespace. For example, to generate package for the root namespace ``uavcan``,
        the path would be like ``foo/bar/uavcan``.

    :param lookup_directories: An iterable of DSDL root namespace directory paths where to search for referred DSDL
        definitions. The format of each path is the same as for the previous parameter; i.e., the last component
        of each path is a DSDL root namespace name. If you are generating code for a vendor-specific DSDL root
        namespace, make sure to provide at least the path to the standard ``uavcan`` namespace directory here.

    :param output_directory: The generated Python package directory will be placed into this directory.
        If not specified or None, the current working directory is used.
        For example, if this argument equals ``foo/bar``, and the DSDL root namespace name is ``uavcan``,
        the top-level ``__init__.py`` of the generated package will end up in ``foo/bar/uavcan/__init__.py``.
        The directory tree will be created automatically if it does not exist (like ``mkdir -p``).
        If the destination exists, it will be silently written over.
        In production, applications are recommended to shard the output directory by the library version number
        to avoid compatibility issues with code generated by older versions of the library.
        Don't forget to add the output directory to ``PYTHONPATH``, even if it's the current working directory.

    :param allow_unregulated_fixed_port_id: If True, the DSDL processing front-end will not reject unregulated
        data types with fixed port-ID. If you are not sure what it means, do not use it, and read the UAVCAN
        specification first. The default is False.

    :return: An instance of :class:`GeneratedPackageInfo` describing the generated package,
        unless the root namespace is empty, in which case it's None.

    :raises: :class:`OSError` if required operations on the file system could not be performed;
        :class:`pydsdl.InvalidDefinitionError` if the source DSDL definitions are invalid;
        :class:`pydsdl.InternalError` if there is a bug in the DSDL processing front-end;
        :class:`ValueError` if any of the arguments are otherwise invalid.

    The following table is an excerpt from the UAVCAN specification. Observe that *unregulated fixed port identifiers*
    are prohibited by default, but it can be overridden.

    +-------+---------------------------------------------------+----------------------------------------------+
    |Scope  | Regulated                                         | Unregulated                                  |
    +=======+===================================================+==============================================+
    |Public |Standard and contributed (e.g., vendor-specific)   |Definitions distributed separately from the   |
    |       |definitions. Fixed port identifiers are allowed;   |UAVCAN specification. Fixed port identifiers  |
    |       |they are called *"regulated port-IDs"*.            |are *not allowed*.                            |
    +-------+---------------------------------------------------+----------------------------------------------+
    |Private|Nonexistent category.                              |Definitions that are not available to anyone  |
    |       |                                                   |except their authors. Fixed port identifiers  |
    |       |                                                   |are permitted (although not recommended); they|
    |       |                                                   |are called *"unregulated fixed port-IDs"*.    |
    +-------+---------------------------------------------------+----------------------------------------------+

    Here is a brief usage example:

    >>> import sys
    >>> import pathlib
    >>> import tempfile
    >>> import importlib
    >>> import pyuavcan
    >>> dsdl_generated_dir = pathlib.Path(tempfile.gettempdir(), 'dsdl-for-my-program', pyuavcan.__version__)
    >>> dsdl_generated_dir.mkdir(parents=True, exist_ok=True)
    >>> sys.path.insert(0, str(dsdl_generated_dir))
    >>> try:
    ...     import sirius_cyber_corp
    ...     import uavcan.si.sample.volumetric_flow_rate
    ... except (ImportError, AttributeError):
    ...     _ = pyuavcan.dsdl.generate_package(root_namespace_directory='tests/dsdl/namespaces/sirius_cyber_corp',
    ...                                        lookup_directories=['tests/public_regulated_data_types/uavcan'],
    ...                                        output_directory=dsdl_generated_dir)
    ...     _ = pyuavcan.dsdl.generate_package(root_namespace_directory='tests/public_regulated_data_types/uavcan',
    ...                                        output_directory=dsdl_generated_dir)
    ...     importlib.invalidate_caches()
    ...     import sirius_cyber_corp
    ...     import uavcan.si.sample.volumetric_flow_rate
    """
    # Wall-clock start, used only for the "generated in N seconds" log line.
    started_at = time.monotonic()

    if isinstance(lookup_directories, (str, bytes, pathlib.Path)):
        # A single path is a common user error; an iterable of paths is required.
        # https://forum.uavcan.org/t/nestedrootnamespaceerror-in-basic-usage-demo/794
        raise TypeError(f"Lookup directories shall be an iterable of paths, not {type(lookup_directories).__name__}")

    output_directory = pathlib.Path(pathlib.Path.cwd() if output_directory is None else output_directory).resolve()
    root_namespace_directory = pathlib.Path(root_namespace_directory).resolve()
    if root_namespace_directory.parent == output_directory:
        # https://github.com/UAVCAN/pyuavcan/issues/133 and https://github.com/UAVCAN/pyuavcan/issues/127
        raise ValueError(
            "The specified destination may overwrite the DSDL root namespace directory. "
            "Consider specifying a different output directory instead."
        )

    # Read the DSDL definitions
    composite_types = pydsdl.read_namespace(
        root_namespace_directory=str(root_namespace_directory),
        lookup_directories=list(map(str, lookup_directories or [])),
        allow_unregulated_fixed_port_id=allow_unregulated_fixed_port_id,
    )
    if not composite_types:
        _logger.info("Root namespace directory %r does not contain DSDL definitions", root_namespace_directory)
        return None
    # Single-element unpacking fails loudly if definitions from more than one
    # root namespace were somehow read.
    (root_namespace_name,) = set(map(lambda x: x.root_namespace, composite_types))  # type: str,
    _logger.info("Read %d definitions from root namespace %r", len(composite_types), root_namespace_name)

    # Template primitives: extra Jinja filters made available to the templates.
    filters = {
        "pickle": _pickle_object,
        "numpy_scalar_type": _numpy_scalar_type,
    }

    # Generate code
    assert isinstance(output_directory, pathlib.Path)
    language_context = nunavut.lang.LanguageContext("py", namespace_output_stem="__init__")
    root_ns = nunavut.build_namespace_tree(
        types=composite_types,
        root_namespace_dir=str(root_namespace_directory),
        output_dir=str(output_directory),
        language_context=language_context,
    )
    generator = nunavut.jinja.DSDLCodeGenerator(
        namespace=root_ns,
        generate_namespace_types=nunavut.YesNoDefault.YES,
        templates_dir=_TEMPLATE_DIRECTORY,
        followlinks=True,
        additional_filters=filters,
        post_processors=[
            # Outputs are marked read-only and whitespace-normalized because
            # they are autogenerated and must not be hand-edited.
            nunavut.postprocessors.SetFileMode(_OUTPUT_FILE_PERMISSIONS),
            nunavut.postprocessors.LimitEmptyLines(2),
            nunavut.postprocessors.TrimTrailingWhitespace(),
        ],
    )
    generator.generate_all()

    _logger.info(
        "Generated %d types from the root namespace %r in %.1f seconds",
        len(composite_types),
        root_namespace_name,
        time.monotonic() - started_at,
    )

    # A minor UX improvement; see https://github.com/UAVCAN/pyuavcan/issues/115
    # (for-else: the warning fires only when no sys.path entry matches).
    for p in sys.path:
        if pathlib.Path(p).resolve() == pathlib.Path(output_directory):
            break
    else:
        if os.name == "nt":
            quick_fix = f'Quick fix: `$env:PYTHONPATH += ";{output_directory.resolve()}"`'
        elif os.name == "posix":
            quick_fix = f'Quick fix: `export PYTHONPATH="{output_directory.resolve()}"`'
        else:
            quick_fix = "Quick fix is not available for this OS."
        _logger.info(
            "Generated package is stored in %r, which is not in Python module search path list. "
            "The package will fail to import unless you add the destination directory to sys.path or PYTHONPATH. %s",
            str(output_directory),
            quick_fix,
        )

    return GeneratedPackageInfo(
        path=pathlib.Path(output_directory) / pathlib.Path(root_namespace_name),
        models=composite_types,
        name=root_namespace_name,
    )
def _pickle_object(x: typing.Any) -> str:
pck: str = base64.b85encode(gzip.compress(pickle.dumps(x, protocol=4))).decode().strip()
segment_gen = map("".join, itertools.zip_longest(*([iter(pck)] * 100), fillvalue=""))
return "\n".join(repr(x) for x in segment_gen)
def _numpy_scalar_type(t: pydsdl.Any) -> str:
    """Map a PyDSDL primitive type to the source-text name of the matching
    NumPy scalar type (``_np_`` is the alias NumPy is imported under in the
    generated code).  Non-primitive types map to ``object_``.
    """

    def pick_width(w: int) -> int:
        # Round the DSDL bit length up to the nearest machine width.
        for o in (8, 16, 32, 64):
            if w <= o:
                return o
        raise ValueError(f"Invalid bit width: {w}")  # pragma: no cover

    if isinstance(t, pydsdl.BooleanType):
        # np.bool (the builtin alias) was deprecated in NumPy 1.20 and removed
        # in 1.24; the scalar type is np.bool_.
        return "_np_.bool_"
    if isinstance(t, pydsdl.SignedIntegerType):
        return f"_np_.int{pick_width(t.bit_length)}"
    if isinstance(t, pydsdl.UnsignedIntegerType):
        return f"_np_.uint{pick_width(t.bit_length)}"
    if isinstance(t, pydsdl.FloatType):
        return f"_np_.float{pick_width(t.bit_length)}"
    assert not isinstance(t, pydsdl.PrimitiveType), "Forgot to handle some primitive types"
    return "_np_.object_"
|
class ke2600(object):
    """Thin TSP command wrapper for Keithley 2600-series SourceMeters.

    Methods are deliberately written without ``self``: the first positional
    argument is always the VISA instrument handle, so they are meant to be
    called on the class, e.g. ``ke2600.on(instr, ch=2)``.
    Channel 1 maps to SMU "a", channel 2 to SMU "b".

    All measurement helpers return the instrument's reply string verbatim.
    """

    def _smu(ch):
        """Map a channel number to its SMU name; raise ValueError otherwise.

        (Previously an unknown channel was silently ignored by on/off and
        crashed the measurement helpers with NameError.)
        """
        try:
            return {1: "smua", 2: "smub"}[ch]
        except KeyError:
            raise ValueError("ch must be 1 or 2, got %r" % (ch,))

    def off(instr, ch=1):
        """Switch the selected channel's source output off."""
        smu = ke2600._smu(ch)
        instr.write("%s.source.output=%s.OUTPUT_OFF" % (smu, smu))

    def on(instr, ch=1):
        """Switch the selected channel's source output on."""
        smu = ke2600._smu(ch)
        instr.write("%s.source.output=%s.OUTPUT_ON" % (smu, smu))

    def forImeasV(instr, current, volt_cmpl, ch=1):
        """Source *current* (A) with voltage compliance *volt_cmpl* (V),
        enable the output, and return the measured voltage string."""
        smu = ke2600._smu(ch)
        s = smu[-1]  # 'a'/'b' suffix used for the TSP-side variable names
        instr.write('display.%s.measure.func = display.MEASURE_DCVOLTS' % smu)
        instr.write('%s.source.func = %s.OUTPUT_DCAMPS' % (smu, smu))
        instr.write('%s.source.limitv = %f' % (smu, volt_cmpl))
        instr.write("%s.source.leveli = %f" % (smu, current))
        instr.write("%s.source.output=%s.OUTPUT_ON" % (smu, smu))
        instr.write("current%s, voltage%s = %s.measure.iv()" % (s, s, smu))
        return instr.query("print(voltage%s)" % s)

    def forVmeasI(instr, voltage, curr_cmpl, ch=1):
        """Source *voltage* (V) with current compliance *curr_cmpl* (A),
        enable the output, and return the measured current string."""
        smu = ke2600._smu(ch)
        s = smu[-1]
        instr.write('display.%s.measure.func = display.MEASURE_DCAMPS' % smu)
        instr.write('%s.source.func = %s.OUTPUT_DCVOLTS' % (smu, smu))
        instr.write('%s.source.limiti = %f' % (smu, curr_cmpl))
        instr.write("%s.source.levelv = %f" % (smu, voltage))
        instr.write("%s.source.output=%s.OUTPUT_ON" % (smu, smu))
        instr.write("current%s, voltage%s = %s.measure.iv()" % (s, s, smu))
        return instr.query("print(current%s)" % s)

    def forVmeasV(instr, voltage, volt_cmpl, ch=1):
        """Source *voltage* with compliance *volt_cmpl* using remote (4-wire)
        sensing and return the measured voltage string."""
        smu = ke2600._smu(ch)
        s = smu[-1]
        instr.write('%s.sense = %s.SENSE_REMOTE' % (smu, smu))
        instr.write('display.%s.measure.func = display.MEASURE_DCVOLTS' % smu)
        instr.write('%s.source.func = %s.OUTPUT_DCVOLTS' % (smu, smu))
        instr.write('%s.source.limitv = %f' % (smu, volt_cmpl))
        instr.write("%s.source.levelv = %f" % (smu, voltage))
        instr.write("%s.source.output=%s.OUTPUT_ON" % (smu, smu))
        instr.write("current%s, voltage%s = %s.measure.iv()" % (s, s, smu))
        return instr.query("print(voltage%s)" % s)

    def autorang_forVmeasI(instr, ch=1):
        """Enable current-measure and voltage-source autoranging."""
        smu = ke2600._smu(ch)
        instr.write('%s.measure.autorangei = %s.AUTORANGE_ON' % (smu, smu))
        instr.write('%s.source.autorangev = %s.AUTORANGE_ON' % (smu, smu))

    def autorang_forImeasV(instr, ch=1):
        """Enable voltage-measure and current-source autoranging."""
        smu = ke2600._smu(ch)
        instr.write('%s.measure.autorangev = %s.AUTORANGE_ON' % (smu, smu))
        instr.write('%s.source.autorangei = %s.AUTORANGE_ON' % (smu, smu))
|
# TODO: Create variables
import tensorflow as tf
import matplotlib.pyplot as plt

# NOTE(review): this script uses the TensorFlow 1.x graph/session API
# (placeholder, Session, reset_default_graph); it will not run under TF 2.x
# without tf.compat.v1 — confirm the intended TF version.
tf.reset_default_graph()

# Placeholders for training inputs/targets (shape=None accepts any shape).
input_data = tf.placeholder(dtype=tf.float32, shape=None)
output_data = tf.placeholder(dtype=tf.float32, shape=None)

# Trainable parameters of the linear model y = slope * x + intercept.
slope = tf.Variable(0.5, dtype=tf.float32)
intercept = tf.Variable(0.1, dtype=tf.float32)
model_operation = slope * input_data + intercept

# Mean-squared-error loss, minimized with plain gradient descent.
error = model_operation - output_data
squared_error = tf.square(error)
loss = tf.reduce_mean(squared_error)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.005)
train = optimizer.minimize(loss)

# TODO: Run a session
init = tf.global_variables_initializer()

# Training data lying exactly on y = 2x + 1.
x_values = [0, 1, 2, 3, 4]
y_values = [1, 3, 5, 7, 9]

with tf.Session() as sess:
    sess.run(init)
    for i in range(2000):
        sess.run(train, feed_dict={input_data: x_values, output_data: y_values})
        if i % 100 == 0:
            # Periodically report the fitted parameters and plot the current fit.
            print(sess.run([slope, intercept]))
            plt.plot(x_values, sess.run(model_operation, feed_dict={input_data: x_values}))
    print(sess.run(loss, feed_dict={input_data: x_values, output_data: y_values}))
    # NOTE(review): 'Training Data' is passed positionally (interpreted as a
    # format string by pyplot) — probably label='Training Data' was intended.
    plt.plot(x_values, y_values, 'ro', 'Training Data')
    plt.plot(x_values, sess.run(model_operation, feed_dict={input_data: x_values}))
    plt.show()
|
from aetherling.modules.term_any_type import DefineTermAnyType
from magma import *
import fault
from aetherling.helpers.fault_helpers import compile_and_run
def test_term_empty():
    """Build a TermAnyType over a zero-length array and drive its valid_up port.

    The compiled simulation is intentionally disabled (guarded by ``if False``),
    so this only verifies that construction and tester wiring succeed.
    """
    dut = DefineTermAnyType(Array[0, Array[8, Bit]])
    harness = fault.Tester(dut)
    harness.circuit.valid_up = 1
    if False:  # flip to actually compile and run the simulation
        compile_and_run(harness)
|
# -*- coding: utf-8 -*-
from ReadData import *

# ===================== List holding each department/year course row =====================
# (original Korean comment was garbled by a bad encoding round-trip; translated)
sw_list = read_data("Computer.txt")

# test code — inspect the structure returned by read_data():
# sw_list is a list of rows; each row is a list of three strings.
print(sw_list)
print(type(sw_list))  # class 'list'
print(sw_list[0])
print(type(sw_list[0]))  # class 'list'
print(sw_list[0][0])
print(type(sw_list[0][0]))  # class 'str'
print(sw_list[0][1])
print(type(sw_list[0][1]))  # class 'str'
print(sw_list[0][2])
print(type(sw_list[0][2]))  # class 'str'
print(sw_list[1])
print(sw_list[2])
print(sw_list.__len__())

# Sanity check: single- and double-quoted literals are the same str type/value.
print(type('1'))  # class 'str'
print(type("1"))  # class 'str'
a = '1'
b = "1"
if a == b:
    print("ok")
# =========================== Per-year course lists ===========================
# (original Korean comments were garbled by encoding and split across lines,
# which would not even parse; translated to English)
first_list = []   # matching courses for year 1, as [name, 1]
second_list = []  # matching courses for year 2, as [name, 2]
third_list = []   # matching courses for year 3, as [name, 3]
fourth_list = []  # matching courses for year 4, as [name, 4]
main_list = []    # all matching courses, years 1-4 concatenated in order
'''
fileOut = open("Computer.txt", 'w')
for i in range(sw_list.__len__()):
    for j in range(3):
        a = sw_list[i][j] + '\n'
        fileOut.write(a)
fileOut.close()
'''
# Keyword to search for in each row's second field.
# NOTE: the literal below is mojibake of the original Korean keyword
# (presumably "programming"); kept byte-identical so matching is unchanged.
target = "ํ๋ก๊ทธ๋๋ฐ"

# Dispatch table: year string -> destination bucket (replaces the old
# duplicated if/elif chain and the C-style range(len(...)) index loop).
_year_buckets = {"1": first_list, "2": second_list, "3": third_list, "4": fourth_list}
for row in sw_list:  # row: [course_name, description, year-string] per usage here
    if target in row[1]:
        bucket = _year_buckets.get(row[2])
        if bucket is not None:
            bucket.append([row[0], int(row[2])])

main_list.extend(first_list)
main_list.extend(second_list)
main_list.extend(third_list)
main_list.extend(fourth_list)
'''
test code
======================== ํจ์์ฒ๋ผ ๊ตฌํํ๋ฉด list1,2,3,4๊ฐ ์์ฑ์ด ๋์ง ์๋๋ค. =====================
target = "ํ๋ก๊ทธ๋๋ฐ"
def find_keyword(target):
for i in range(sw_list.__len__()):
if target in sw_list[i][2]:
if sw_list[i][2] == "1":
first_list.append([sw_list[i][0], 1])
elif sw_list[i][2] == "2":
second_list.append([sw_list[i][0], 2])
elif sw_list[i][2] == "3":
third_list.append([sw_list[i][0], 3])
elif sw_list[i][2] == "4":
fourth_list.append([sw_list[i][0], 4])
main_list.extend(first_list)
main_list.extend(second_list)
main_list.extend(third_list)
main_list.extend(fourth_list)
'''
# Show the per-year buckets and the combined list (debug output).
for _bucket in (first_list, second_list, third_list, fourth_list, main_list):
    print(_bucket)
'''
test code
# ===========================์ ๋ ฌ ํ ํ๋
๋ณ ๊ณผ๋ชฉ ๋ฆฌ์คํธ===============================
list1 = [] # ์ ๋ ฌ์ด ๋ ํ 1ํ๋
์ต์ข
๋ฆฌ์คํธ
list2 = [] # ์ ๋ ฌ์ด ๋ ํ 2ํ๋
์ต์ข
๋ฆฌ์คํธ
list3 = [] # ์ ๋ ฌ์ด ๋ ํ 3ํ๋
์ต์ข
๋ฆฌ์คํธ
list4 = [] # ์ ๋ ฌ์ด ๋ ํ 4ํ๋
์ต์ข
๋ฆฌ์คํธ
final_list = [] # ์ ๋ ฌ์ด ๋ ํ ์ ์ฒด ์ต์ข
๋ฆฌ์คํธ
sorted(main_list, key=lambda x: x[1])
for i in range(main_list.__len__()):
final_list.append(main_list[i])
for i in range(first_list.__len__()):
list1.append(first_list[i])
for i in range(second_list.__len__()):
list2.append(second_list[i])
for i in range(third_list.__len__()):
list3.append(third_list[i])
for i in range(fourth_list.__len__()):
list4.append(fourth_list[i])
print(list1)
print(list2)
print(list3)
print(list4)
print(final_list)
'''
|
import numpy as np
from math import ceil
from scipy import sparse
from scipy.sparse import csr_matrix, kron
from scipy.sparse.linalg import lsqr as sparse_lsqr
from scipy.sparse.linalg import inv as sparse_inv
from scipy.signal import fftconvolve, correlate
from scipy.integrate import quad
from ..spline import (
discrete_spline_2D,
discrete_spline_3D,
discrete_spline,
discrete_spline_second_derivative,
)
from .base import (
PatchRegressorBase,
SplineRegressorBase,
TemplateCrossCorellatorBase,
)
from ..preprocessing import OrientationScoreTransformer
class R2Ridge(SplineRegressorBase, PatchRegressorBase):
    """Spline-parameterized linear ridge template regressor on the image plane.

    The template is represented by a coarse grid of spline coefficients
    (``splines_per_axis``) and fitted by ridge regression with an optional
    smoothness regularizer, solvable in primal or dual form.
    """

    def __init__(
        self,
        template_shape,
        splines_per_axis,
        spline_order=2,
        batch_size=50,
        mu=0,
        lbd=0,
        verbose=0,
        solver="dual",
        random_state=None,
        eye="left",
        n_jobs=1,
    ):
        # Only square templates are supported.
        assert template_shape[0] == template_shape[1]
        assert solver in ["primal", "dual"], "solver must be primal or dual"
        SplineRegressorBase.__init__(
            self, template_shape, splines_per_axis, spline_order=spline_order
        )
        PatchRegressorBase.__init__(
            self,
            patch_shape=template_shape,
            eye=eye,
            random_state=random_state,
            n_jobs=n_jobs
        )
        self.model_name = "Linear Ridge"
        self.mu = mu            # L2 (identity) regularization weight
        self.lbd = lbd          # smoothness (R-matrix) regularization weight
        self.verbose = verbose
        self.batch_size = batch_size
        self.solver = solver    # "primal" or "dual" normal-equation solver
        self._is_fitted = False
        # NOTE(review): this sets _cached_template, but the template getter
        # below reads self._template (presumably set by a base class or by
        # _reconstruct_template) — confirm which attribute is intended.
        self._cached_template = None
        self.n_jobs = n_jobs

    @TemplateCrossCorellatorBase.template.getter
    def template(self):
        """Return the fitted template, reconstructing it lazily if needed."""
        if self._is_fitted:
            if self._template is not None:
                return self._template
            else:
                return self._reconstruct_template()
        else:
            raise AttributeError("No template yet: Classifier not fitted")

    def _fit_patches(self, X, y):
        """Fit the spline coefficients by (regularized) ridge regression.

        Solves the primal or the dual normal equations depending on
        ``self.solver``; stores R (regularizer), S (design matrix), and the
        coefficient vector on the instance.
        """
        self._check_params(X)
        num_samples, _, _, _, _, _, _ = self._get_dims()
        S = self._create_s_matrix(X)   # spline design matrix (num_samples x Nk*Nl)
        R = self._create_r_matrix()    # smoothness regularizer
        print("Solving the ridge problem...", end='')
        if self.solver == "primal":
            c = np.linalg.lstsq(
                S.T @ S + num_samples * (self.lbd * R + self.mu * np.eye(S.shape[1])),
                S.T @ y,
                rcond=None,
            )[0]
        elif self.solver == "dual":
            if self.lbd == 0:
                # Pure L2 case: dual solution without forming B explicitly.
                c = (
                    S.T
                    @ np.linalg.inv(
                        S @ S.T + num_samples * self.mu * np.eye(S.shape[0])
                    )
                    @ y
                )
            else:
                B = num_samples * (
                    self.mu * sparse.eye(S.shape[1], format="csc") +
                    self.lbd * R)
                B_inv = sparse_inv(B)
                c = (
                    B_inv
                    @ S.T
                    @ np.linalg.inv(S @ B_inv @ S.T + np.eye(S.shape[0]))
                    @ y
                )
        else:
            raise ValueError(f"solver must be 'primal' or 'dual', got '{self.solver}'")
        print(" OK.")
        self._R, self._S, self._spline_coef = R, S, c

    def _reconstruct_template(self):
        """Render the template image by convolving the coefficient impulses
        with the unit spline kernel; caches it on the instance."""
        _, Nx, Ny, Nk, Nl, sk, sl = self._get_dims()
        B = self._make_unit_spline(sk, sl)
        impulses = np.zeros((Nx, Ny))
        # One impulse per spline knot, placed on the subsampled grid.
        impulses[::sk, ::sl] = self._spline_coef.reshape(Nk, Nl)
        self._template = fftconvolve(impulses, B, mode="same")
        return self._template

    def _create_s_matrix(self, X):
        """Build the design matrix S: each row is the input image convolved
        with the unit spline, sampled at the knot grid; columns normalized."""
        num_samples, Nx, Ny, Nk, Nl, sk, sl = self._get_dims()
        S = np.zeros((num_samples, Nk * Nl))
        B = self._make_unit_spline(sk, sl)
        B = B.reshape(1, *B.shape)  # leading axis broadcasts over the batch
        batch_size = min(self.batch_size, num_samples)
        for i in range(ceil(num_samples / batch_size)):
            X_batch = X[i * batch_size : (i + 1) * batch_size, :, :]
            convolved_X = fftconvolve(X_batch, B, mode="same")
            S[i * batch_size : (i + 1) * batch_size, :] = convolved_X[
                :, ::sk, ::sl
            ].reshape(len(X_batch), Nk * Nl)
        S /= np.linalg.norm(S, axis=0, keepdims=True)
        return S

    def _create_r_matrix(self):
        """Build the smoothness regularizer R from spline (second-)derivative
        Gram matrices via Kronecker products; returned in CSC format."""
        _, _, _, Nk, Nl, sk, sl = self._get_dims()
        k = np.linspace(-int(Nk / 2), int(Nk / 2), Nk)
        l = np.linspace(-int(Nl / 2), int(Nl / 2), Nl)
        # Pairwise knot differences along each axis.
        ks = np.array([[ki - kk for ki in k] for kk in k])
        ls = np.array([[li - lk for li in l] for lk in l])
        Bxk = csr_matrix(-1 / sk * discrete_spline_second_derivative(ks, 2 * self.spline_order + 1))
        Bxl = csr_matrix(sl * discrete_spline(ls, 2 * self.spline_order + 1))
        Byk = csr_matrix(sk * discrete_spline(ks, 2 * self.spline_order + 1))
        Byl = csr_matrix(-1 / sl * discrete_spline_second_derivative(ls, 2 * self.spline_order + 1))
        R = kron(Bxk, Bxl) + kron(Byk, Byl)
        # NOTE(review): sparse.csc.csc_matrix is a private/deprecated path in
        # newer SciPy; sparse.csc_matrix is the public spelling — confirm.
        return sparse.csc.csc_matrix(R)

    def _make_unit_spline(self, sk, sl):
        # Our spline is always defined on [-2.5, 2.5] (may be a problem if we
        # change the order of the spline, as B_k is defined on [-k/2, k/2]) the
        # granularity of the grid impacts the percieved width of the spline.
        x_grid = np.array(range(-int(5 * sk / 2), int(5 * sk / 2) + 1)) / sk
        y_grid = np.array(range(-int(5 * sl / 2), int(5 * sl / 2) + 1)) / sl
        B = discrete_spline_2D(x_grid, y_grid, self.spline_order)
        return B

    def _check_params(self, X):
        """Validate that the knot grid evenly divides the image grid."""
        _, Nx, Ny = X.shape  # Nx, Ny: training images shape
        Nk, Nl = self.splines_per_axis
        # The convention is that images are "centered" on the origin:
        # A valid image should thus have 2n + 1 pixels.
        assert ((Nx - 1) % (Nk - 1)) == 0
        assert ((Ny - 1) % (Nl - 1)) == 0
        self._X_shape = X.shape

    def _get_dims(self):
        """Return (num_samples, Nx, Ny, Nk, Nl, sk, sl) — image/knot grid
        sizes and the integer subsampling strides between them."""
        num_samples, Nx, Ny = self._X_shape
        Nk, Nl = self.splines_per_axis
        sk, sl = (Nx - 1) // (Nk - 1), (Ny - 1) // (Nl - 1)  # fmt: off
        return num_samples, Nx, Ny, Nk, Nl, sk, sl
class SE2Ridge(SplineRegressorBase, PatchRegressorBase):
    """Spline-parameterized ridge template regressor on SE(2) orientation scores.

    Images are lifted to orientation scores (x, y, theta) via an orientation
    score transform, and the template is fitted in that 3-D space with an
    anisotropic smoothness regularizer weighted by (Dxi, Deta, Dtheta).
    """

    def __init__(
        self,
        template_shape,
        splines_per_axis,
        wavelet_dim,
        num_orientation_slices=12,
        spline_order=2,
        batch_size=10,
        mu=0,
        lbd=0,
        Dxi=0,
        Deta=0,
        Dtheta=0,
        verbose=0,
        solver="dual",
        random_state=None,
        eye="left",
        n_jobs=1
    ):
        # Only square templates are supported.
        assert template_shape[0] == template_shape[1]
        assert solver in ["primal", "dual"], "solver must be primal or dual"
        SplineRegressorBase.__init__(
            self, template_shape, splines_per_axis, spline_order=spline_order
        )
        # NOTE(review): unlike R2Ridge, n_jobs is not forwarded to
        # PatchRegressorBase here — confirm whether that is intentional.
        PatchRegressorBase.__init__(
            self,
            patch_shape=template_shape,
            eye=eye,
            random_state=random_state,
        )
        self.name = "SE2 Ridge"
        self.mu = mu            # L2 (identity) regularization weight
        self.lbd = lbd          # smoothness (R-matrix) regularization weight
        self.Dxi = Dxi          # regularizer weight along the xi direction
        self.Deta = Deta        # regularizer weight along the eta direction
        self.Dtheta = Dtheta    # regularizer weight along the orientation axis
        self.verbose = verbose
        self.batch_size = batch_size
        self.solver = solver    # "primal" or "dual" normal-equation solver
        self._is_fitted = False
        self._template = None
        # Transformer lifting 2-D images to 3-D orientation scores.
        self._ost = OrientationScoreTransformer(
            wavelet_dim=wavelet_dim,
            num_slices=num_orientation_slices,
            batch_size=batch_size, n_jobs=n_jobs
        )
        self.n_jobs = n_jobs

    @TemplateCrossCorellatorBase.template.getter
    def template(self):
        """Return the fitted template, reconstructing it lazily if needed."""
        if self._is_fitted:
            if self._template is not None:
                return self._template
            else:
                return self._reconstruct_template()
        else:
            raise AttributeError("No template yet: Classifier not fitted")

    def predict(self, X):
        """Cross-correlate the template with the orientation scores of X.

        Returns the per-sample correlation volumes and the (x, y) position of
        the correlation maximum for each sample.
        """
        # TODO: put this method in a Mixin Class.
        X = abs(self._ost.transform(X))  # can take imag
        template = self.template.reshape(1, *self.template.shape)
        batch_size = min(self.batch_size, X.shape[0])
        convs = np.zeros(X.shape)
        for i in range(ceil(X.shape[0] / batch_size)):
            X_batch = X[i * batch_size : (i + 1) * batch_size, :, :]
            convs[i * batch_size : (i + 1) * batch_size, :, :] = correlate(
                X_batch, template, mode="same", method="fft"
            )
        positions = []
        for i in range(len(X)):
            # Argmax over the (y, x, theta) volume; theta index is discarded.
            (y, x, _) = np.unravel_index(np.argmax(convs[i]), convs[i].shape)
            positions.append([x, y])
        return convs, np.array(positions)

    def _fit_patches(self, X, y):
        """Fit the spline coefficients by (regularized) ridge regression on
        the orientation scores of X (primal or dual form)."""
        X = abs(self._ost.fit_transform(X))  # can also take the imag
        self._check_params(X)
        num_samples, _, _, _, _, _, _, _, _, _ = self._get_dims()
        S = self._create_s_matrix(X)   # spline design matrix
        R = self._create_r_matrix()    # anisotropic smoothness regularizer
        print("Solving the ridge problem...", end='')
        if self.solver == "primal":
            c = np.linalg.lstsq(
                S.T @ S + num_samples * (self.lbd * R +
                                         self.mu * np.eye(S.shape[1])),
                S.T @ y,
                rcond=None,
            )[0]
        elif self.solver == "dual":
            if self.lbd == 0:
                # Pure L2 case: dual solution without forming B explicitly.
                c = (
                    S.T
                    @ np.linalg.inv(
                        S @ S.T + num_samples * self.mu * np.eye(S.shape[0])
                    )
                    @ y
                )
            else:
                B = num_samples * (
                    self.mu * sparse.eye(S.shape[1], format="csc") +
                    self.lbd * R)
                B_inv = sparse_inv(B)
                c = (
                    B_inv
                    @ S.T
                    @ np.linalg.inv(S @ B_inv @ S.T + np.eye(S.shape[0]))
                    @ y
                )
        else:
            raise ValueError(f"solver must be 'primal' or 'dual', got '{self.solver}'")
        print("OK.")
        c /= np.linalg.norm(c)  # normalize coefficients to unit norm
        self._S, self._spline_coef = S, c

    def _create_s_matrix(self, X):
        """Build the design matrix S from the 3-D orientation scores: each row
        is the score volume convolved with the unit spline, sampled at the
        knot grid; columns normalized."""
        num_samples, Nx, Ny, Nt, Nk, Nl, Nm, sk, sl, sm = self._get_dims()
        S = np.zeros((num_samples, Nk * Nl * Nm))
        B = self._make_unit_spline(sk, sl, sm)
        B = B.reshape(1, *B.shape)  # leading axis broadcasts over the batch
        batch_size = min(self.batch_size, X.shape[0])
        for i in range(ceil(num_samples / batch_size)):
            X_batch = X[i * batch_size : (i + 1) * batch_size, :, :]
            convolved_X = fftconvolve(X_batch, B, mode="same")
            S[i * batch_size : (i + 1) * batch_size, :] = convolved_X[
                :, ::sk, ::sl, ::sm
            ].reshape(len(X_batch), Nk * Nl * Nm)
        S /= np.linalg.norm(S, axis=0, keepdims=True)
        return S

    def _create_r_matrix(self):
        """
        Create and returns R = Dxi * Rxi + Deta * Reta + Dtheta * Rtheta

        Inputs:
        -------
        Dxi, Deta, Dtheta (float):
            weights read from the instance; the three terms are assembled from
            Kronecker products of 1-D spline Gram matrices along x, y, theta
            (the theta factors are numerically integrated over [0, pi)).
        """
        _, _, _, _, Nk, Nl, Nm, sk, sl, sm = self._get_dims()
        k = np.linspace(-int(Nk / 2), int(Nk / 2), Nk)
        l = np.linspace(-int(Nl / 2), int(Nl / 2), Nl)
        m = np.linspace(-int(Nm / 2), int(Nm / 2), Nm)
        # Pairwise knot differences along each axis.
        ks = np.array([[ki - kk for ki in k] for kk in k])
        ls = np.array([[li - lk for li in l] for lk in l])
        ms = np.array([[mi - mk for mi in m] for mk in m])
        RIx = csr_matrix(-1 / sk * discrete_spline_second_derivative(ks, 2 * self.spline_order + 1))
        RIy = csr_matrix(sl * discrete_spline(ls, 2 * self.spline_order + 1))
        cos2_spline = lambda theta, m1, m2: np.cos(theta) ** 2 * self._util_spline(
            theta, m1, m2
        )
        RItheta = csr_matrix(
            [[quad(cos2_spline, 0, np.pi, args=(m1, m2))[0] for m1 in m] for m2 in m]
        )
        RIIx = csr_matrix(discrete_spline_second_derivative(ks, 2 * self.spline_order + 1))
        RIIy = csr_matrix(-discrete_spline_second_derivative(ls, 2 * self.spline_order + 1))
        sincos_spline = (
            lambda theta, m1, m2: np.cos(theta)
            * np.sin(theta)
            * self._util_spline(theta, m1, m2)
        )
        RIItheta = csr_matrix(
            [[quad(sincos_spline, 0, np.pi, args=(m1, m2))[0] for m1 in m] for m2 in m]
        )
        RIIIx = -RIIx.copy()
        RIIIy = -RIIy.copy()
        RIIItheta = RIItheta.copy()
        RIVx = csr_matrix(sk * discrete_spline(ks, 2 * self.spline_order + 1))
        RIVy = csr_matrix((
            -1 / sl * discrete_spline_second_derivative(ls, 2 * self.spline_order + 1)
        ))
        sin2_spline = lambda theta, m1, m2: np.sin(theta) ** 2 * self._util_spline(
            theta, m1, m2
        )
        RIVtheta = csr_matrix(
            [[quad(sin2_spline, 0, np.pi, args=(m1, m2))[0] for m1 in m] for m2 in m]
        )
        Rxtheta = csr_matrix(sk * discrete_spline(ks, 2 * self.spline_order + 1))
        Rytheta = csr_matrix(sl * discrete_spline(ls, 2 * self.spline_order + 1))
        Rthetatheta = csr_matrix(
            -1 / sm * discrete_spline_second_derivative(ms, 2 * self.spline_order + 1)
        )
        Rxi = (
            kron(kron(RIx, RIy), RItheta)
            + kron(kron(RIIx, RIIy), RIItheta)
            + kron(kron(RIIIx, RIIIy), RIIItheta)
            + kron(kron(RIVx, RIVy), RIVtheta)
        )
        Reta = (
            kron(kron(RIIx, RIIy), RIVtheta)
            - kron(kron(RIIx, RIIy), RIItheta)
            - kron(kron(RIIIx, RIIIy), RIIItheta)
            + kron(kron(RIVx, RIVy), RItheta)
        )
        Rtheta = kron(kron(Rxtheta, Rytheta), Rthetatheta)
        R = self.Dxi * Rxi + self.Deta * Reta + self.Dtheta * Rtheta
        # NOTE(review): sparse.csc.csc_matrix is a private/deprecated path in
        # newer SciPy; sparse.csc_matrix is the public spelling — confirm.
        return sparse.csc.csc_matrix(R)

    def _util_spline(self, theta, m1, m2):
        """Outer product of the two theta-axis spline basis functions centered
        at knots m1 and m2, evaluated at angle(s) theta."""
        _, _, _, _, _, _, _, _, _, sm = self._get_dims()
        Bm1 = discrete_spline(theta / sm - m1, self.spline_order)
        Bm2 = discrete_spline(theta / sm - m2, self.spline_order)
        return np.outer(Bm1, Bm2)

    def _reconstruct_template(self):
        """Render the 3-D template volume by convolving the coefficient
        impulses with the unit spline kernel; caches it on the instance."""
        _, Nx, Ny, Nt, Nk, Nl, Nm, sk, sl, sm = self._get_dims()
        B = self._make_unit_spline(sk, sl, sm)
        impulses = np.zeros((Nx, Ny, Nt))
        # One impulse per spline knot, placed on the subsampled 3-D grid.
        impulses[::sk, ::sl, ::sm] = self._spline_coef.reshape(Nk, Nl, Nm)
        self._template = fftconvolve(impulses, B, mode="same")
        return self._template

    def _make_unit_spline(self, sk, sl, st):
        # Our spline is always defined on [-2.5, 2.5] (may be a problem if we
        # change the order of the spline, as B_k is defined on [-k/2, k/2]) the
        # granularity of the grid impacts the percieved width of the spline.
        x_grid = np.array(range(-int(5 * sk / 2), int(5 * sk / 2) + 1)) / sk
        y_grid = np.array(range(-int(5 * sl / 2), int(5 * sl / 2) + 1)) / sl
        t_grid = np.array(range(-int(5 * st / 2), int(5 * st / 2) + 1)) / st
        B = discrete_spline_3D(x_grid, y_grid, t_grid, self.spline_order)
        return B

    def _check_params(self, X):
        """Validate that the knot grid evenly divides the score-volume grid."""
        _, Nx, Ny, Nt = X.shape  # Nx, Ny, Nt: training images shape
        Nk, Nl, Nm = self.splines_per_axis  # fmt: off
        assert (Nx - 1) % (Nk - 1) == 0
        assert (Ny - 1) % (Nl - 1) == 0
        assert Nt % Nm == 0  # theta is not centered
        self._X_shape = X.shape

    def _get_dims(self):
        """Return (num_samples, Nx, Ny, Nt, Nk, Nl, Nm, sk, sl, sm) — score
        volume sizes, knot grid sizes, and the subsampling strides."""
        num_samples, Nx, Ny, Nt = self._X_shape
        Nk, Nl, Nm = self.splines_per_axis  # fmt: off
        sk, sl = (Nx - 1) // (Nk - 1), (Ny - 1) // (Nl - 1)
        sm = Nt // Nm
        return num_samples, Nx, Ny, Nt, Nk, Nl, Nm, sk, sl, sm
|
import sys
import os
import subprocess
import avalon.api
from reveries.plugins import PackageLoader
"""
Deprecated
"""
def open(filepath):
    """Open *filepath* with the system default executable.

    NOTE(review): this deliberately shadows the builtin ``open`` within
    this module; any later file I/O added here must use ``io.open``.

    Raises:
        OSError: if the current platform is not recognized (previously
            this case was silently ignored, leaving the user with no
            feedback at all).
    """
    if sys.platform.startswith('darwin'):
        # macOS: `open` hands the path to LaunchServices.
        subprocess.call(('open', filepath))
    elif os.name == 'nt':
        # Windows: delegate to the shell file association.
        os.startfile(filepath)
    elif os.name == 'posix':
        # Linux/BSD desktops: xdg-open picks the registered default.
        subprocess.call(('xdg-open', filepath))
    else:
        raise OSError("Unsupported platform: %s" % sys.platform)
class PlayImageSequence(PackageLoader, avalon.api.Loader):
    """Play an image sequence with the system default application."""

    label = "Play sequence"
    order = -10
    icon = "play-circle"
    color = "orange"

    families = ["reveries.imgseq"]
    representations = ["imageSequence"]

    def load(self, context, name, namespace, data):
        from avalon.vendor import clique

        directory = self.package_path
        entries = os.listdir(directory)
        collections, remainder = clique.assemble(entries, minimum_items=1)
        # Stray files that belong to no frame collection indicate a
        # malformed package, so fail loudly rather than guess.
        assert not remainder, ("There shouldn't have been a remainder for "
                               "'%s': %s" % (directory, remainder))
        sequence = collections[0]
        first_frame = next(iter(sequence))
        filepath = os.path.normpath(os.path.join(directory, first_frame))
        self.log.info("Opening : {}".format(filepath))
        open(filepath)
class OpenImageSequence(PackageLoader, avalon.api.Loader):
    """Open an image sequence folder with the system default handler."""

    label = "Open sequence"
    order = -10
    icon = "folder-open"
    color = "orange"

    families = ["reveries.imgseq"]
    representations = ["imageSequence", "imageSequenceSet"]

    def load(self, context, name, namespace, data):
        # Open the whole package directory rather than a single frame.
        directory = self.package_path
        self.log.info("Opening : {}".format(directory))
        open(directory)
|
from src.ConfigReader import ConfigReader
from src.DocumentationBuilderTemplate import DocumentationBuilderTemplate
from sys import argv
from pathlib import Path
def main(generateDefault=False):
    """Build documentation from the configuration file named in argv[1].

    When *generateDefault* is true, only a placeholder message is printed
    (default-config generation is not implemented in this function).
    """
    if generateDefault:
        print("Generating default configuration...")
        return

    reader = ConfigReader(str(argv[1]))
    reader.readConfiguration()
    reader.printConfiguration()
    config = reader.getConfig()
    builder = DocumentationBuilderTemplate(
        config['language'],
        config['includeDirectory'],
        config['commentDenotion'],
        config['filterList'],
        config['outputDirectory'],
    )
    builder.createDocumentation()
if __name__ == "__main__":
    # CLI entry: expects either "--gen" or a path to a configuration file.
    if len(argv) <= 1:
        print("Invalid Amount of Arguments Given!")
        exit(1)
    use_defaults = str(argv[1]) == "--gen"
    if not use_defaults and not Path(str(argv[1])).exists():
        # Missing config file falls back to defaults instead of failing.
        print("Given configuration file does not exist. Using defaults!")
        use_defaults = True
    main(use_defaults)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.