content stringlengths 5 1.05M |
|---|
'''
Application-wide preferences.
'''
from transformers import RobertaTokenizerFast
import spacy
from . import TOKENIZER_PATH
class Config:
    """Application-wide configuration: tokenizer/dataset constants plus the
    shared spaCy pipeline.  `tokenizer` is populated at import time below,
    once the checkpoint choice is known."""
    vocab_size = 54_000  # tokenizer vocabulary size (used when training from scratch)
    max_length = 512  # tokens!
    min_char_length = 120  # characters
    # Dataset split fractions; max_eval/max_test additionally cap the
    # absolute number of examples placed in those splits.
    split_ratio = {
        'train': 0.7,
        'eval': 0.2,
        'test': 0.1,
        'max_eval': 10_000,
        'max_test': 10_000,
    }
    celery_batch_size = 1000  # documents handed to each Celery task
    from_pretrained = 'roberta-base'  # leave empty if training a language model from scratch
    tokenizer = None  # set below (pretrained checkpoint or local TOKENIZER_PATH)
    nlp = spacy.load('en_core_web_sm')  # shared spaCy pipeline, loaded once at import
# Shared, eagerly-initialised application config instance.
config = Config()

# Load the tokenizer from the pretrained checkpoint when fine-tuning,
# otherwise from the locally trained tokenizer files.
# (Fix: removed a stray trailing "|" export artifact that made this line a
# SyntaxError.)
if config.from_pretrained:
    config.tokenizer = RobertaTokenizerFast.from_pretrained(config.from_pretrained, max_len=config.max_length)
else:
    config.tokenizer = RobertaTokenizerFast.from_pretrained(TOKENIZER_PATH, max_len=config.max_length)
#!/usr/bin/env python3
"""
A program to filter tweets that contain links to a web archive. At the moment it
supports archive.org and archive.is, but please add more if you want!
"""
import json
import fileinput

# Hostnames whose presence in an expanded URL marks a tweet as archive-linking.
archives = ["archive.is", "web.archive.org", "wayback.archive.org"]


def has_archive_url(tweet):
    """Return True if any of the tweet's expanded URLs points at a known archive."""
    return any(
        host in url["expanded_url"]
        for url in tweet["entities"]["urls"]
        for host in archives
    )


def main():
    """Echo every input line (a JSON-encoded tweet) that links to a web archive.

    Each matching tweet is printed at most once, even when it contains several
    archive URLs.  (Fixes the original `done` flag, which was never
    initialised -- raising NameError on the first non-matching URL -- and
    never reset, so every tweet after the first match stopped scanning after
    its first URL.)
    """
    for line in fileinput.input():
        if has_archive_url(json.loads(line)):
            print(line, end="")


if __name__ == "__main__":
    main()
import numpy as np
import math
def upper_error(x, f, neg=False):
    """Upper bound on the polynomial-interpolation error term.

    Evaluates max|f| * max|prod_i (val - x_i)| / n! over a 50-point grid
    spanning [min(x), max(x)] (numpy.linspace default sample count).

    Args:
        x: interpolation nodes (non-empty sequence of numbers); n = len(x).
        f: callable -- presumably the n-th derivative of the interpolated
           function (TODO confirm against caller).
        neg: if True, evaluate -f instead of f.  Note |-f| == |f|, so this
             only matters if f has side effects; kept for compatibility.

    Returns:
        float error bound.

    Fix: removed a stray trailing "|" export artifact that made the return
    statement a SyntaxError.
    """
    n = len(x)
    nodes = np.asarray(x)
    grid = np.linspace(np.min(x), np.max(x))  # 50 evenly spaced samples
    max_fun = 0.0
    max_prod = 0.0
    for val in grid:
        fun = abs(-f(val)) if neg else abs(f(val))
        max_fun = max(max_fun, fun)
        # |prod (val - x_i)|: magnitude of the nodal polynomial at val.
        max_prod = max(max_prod, abs(np.prod(val - nodes)))
    return (max_fun * max_prod) / math.factorial(n)
# Maps Android --cpu values to the matching Go (cgo-enabled) toolchain platform.
_ANDROID_CPUS_TO_PLATFORMS = {
    "arm64-v8a": "@io_bazel_rules_go//go/toolchain:android_arm64_cgo",
    "armeabi-v7a": "@io_bazel_rules_go//go/toolchain:android_arm_cgo",
    "x86": "@io_bazel_rules_go//go/toolchain:android_386_cgo",
    "x86_64": "@io_bazel_rules_go//go/toolchain:android_amd64_cgo",
}

# Maps iOS --cpu values to the matching Go (cgo-enabled) toolchain platform.
_IOS_CPUS_TO_PLATFORMS = {
    "ios_arm64": "@io_bazel_rules_go//go/toolchain:ios_arm64_cgo",
    "ios_armv7": "@io_bazel_rules_go//go/toolchain:ios_arm_cgo",
    "ios_i386": "@io_bazel_rules_go//go/toolchain:ios_386_cgo",
    "ios_x86_64": "@io_bazel_rules_go//go/toolchain:ios_amd64_cgo",
}

def _go_platform_transition_impl(settings, attr):
    """Configuration transition: derive --platforms from --cpu / --crosstool_top.

    Android builds are recognised by their crosstool (either the legacy
    //external:android/crosstool alias or the androidndk workspace); iOS
    builds by their cpu value.  Any other configuration gets an empty
    platform string, i.e. no platform override.
    """
    platform = ""
    cpu = settings["//command_line_option:cpu"]
    crosstool_top = settings["//command_line_option:crosstool_top"]
    if str(crosstool_top) == "//external:android/crosstool" or crosstool_top.workspace_name == "androidndk":
        platform = _ANDROID_CPUS_TO_PLATFORMS[cpu]
    elif cpu in _IOS_CPUS_TO_PLATFORMS:
        platform = _IOS_CPUS_TO_PLATFORMS[cpu]
    return {
        "//command_line_option:platforms": platform,
    }

# Transition reading cpu/crosstool and writing only --platforms.
go_platform_transition = transition(
    implementation = _go_platform_transition_impl,
    inputs = [
        "//command_line_option:cpu",
        "//command_line_option:crosstool_top",
    ],
    outputs = [
        "//command_line_option:platforms",
    ],
)
|
"""
MozTrap root URLconf.
"""
from django.conf.urls import patterns, url, include
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin

from moztrap.model import mtadmin

# Replace the stock admin site with MozTrap's customised one *before*
# autodiscover registers each app's admins against it.
admin.site = mtadmin.MTAdminSite()
admin.autodiscover()

# Replace Django's cookie-based CSRF protection with session-based CSRF.
import session_csrf
session_csrf.monkeypatch()

# NOTE: `patterns` with dotted-string view names requires an old Django
# release (< 1.10); kept as-is for compatibility with the rest of MozTrap.
urlpatterns = patterns(
    "",
    url(r"^$", "moztrap.view.views.home", name="home"),
    # runtests ---------------------------------------------------------------
    url(r"^runtests/", include("moztrap.view.runtests.urls")),
    # users ------------------------------------------------------------------
    url(r"^users/", include("moztrap.view.users.urls")),
    # manage -----------------------------------------------------------------
    url(r"^manage/", include("moztrap.view.manage.urls")),
    # results ----------------------------------------------------------------
    url(r"^results/", include("moztrap.view.results.urls")),
    # admin ------------------------------------------------------------------
    url(r"^admin/", include(admin.site.urls)),
    # browserid --------------------------------------------------------------
    url(r"^browserid/", include("moztrap.view.users.browserid_urls")),
    # api --------------------------------------------------------------------
    url(r"^api/", include("moztrap.view.api.urls")),
    # open web apps-----------------------------------------------------------
    url("^owa/", include("moztrap.view.owa.urls")),
    # special /contribute.json endpoint --------------------------------------
    # Serves the static contribute.json from the project root.
    url(r"^(?P<path>contribute\.json)$", "django.views.static.serve",
        {'document_root': settings.BASE_PATH})
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # dev media serving
|
from hlt import *
from networking import *

# Standard Halite handshake: receive our player id and the initial map,
# then register the bot's name.
myID, gameMap = getInit()
sendInit("BasicPythonBot")

# Main game loop: one iteration per turn, forever (the engine kills us).
while True:
    moves = []
    gameMap = getFrame()
    for y in range(gameMap.height):
        for x in range(gameMap.width):
            site = gameMap.getSite(Location(x, y))
            if site.owner == myID:
                # Default to a random direction (`random` presumably comes in
                # via hlt's star import -- TODO confirm).
                direction = random.randint(0, 5)
                if site.strength < 5*site.production:
                    # Too weak to be useful: sit still and build strength.
                    direction = STILL
                else:
                    # Otherwise move toward the first adjacent site we don't own.
                    for d in CARDINALS:
                        if gameMap.getSite(Location(x, y), d).owner != myID:
                            direction = d
                            break
                moves.append(Move(Location(x, y), direction))
    sendFrame(moves)
|
import origami_rectangle as rect
import scadnano as sc
def create_design():
    """Assemble the complete barrel origami design and assign its DNA.

    Builds a 16-helix x 28-column rectangle (with flanking helices/columns),
    then applies, in order: major-tick layout, boundary-staple moves, seam
    domains and toeholds, adapters, twist-correction deletions, angle
    insertions/deletions, tiles, unzipper toeholds, and finally the M13
    scaffold sequence.  Returns the finished sc.Design.
    """
    design = rect.create(num_helices=16, num_cols=28, seam_left_column=12, assign_seq=False,
                         num_flanking_columns=2,
                         num_flanking_helices=2, edge_staples=False,
                         scaffold_nick_offset=102, use_idt_defaults=True)
    # # need this to match original design, but doesn't leave room for left-side adapters
    # design.move_strand_offsets(8)
    set_helix_major_tickets(design)
    move_top_and_bottom_staples_within_column_boundaries(design)
    add_domains_for_barrel_seam(design)
    add_toeholds_for_seam_displacement(design)
    add_adapters(design)
    add_twist_correct_deletions(design)
    add_angle_inducing_insertions_deletions(design)
    add_tiles_and_assign_dna(design)
    assign_dna_to_unzipper_toeholds(design)
    design.assign_m13_to_scaffold()
    return design
def set_helix_major_tickets(design):
    """Assign the non-uniform major-tick pattern to every helix in the design.

    Ticks run [11, 22, 32], then every 8 from 40 through 480, then [490, 501].
    (Function name kept as-is -- "tickets" looks like a typo for "ticks" --
    because create_design calls it by this name.)
    """
    ticks = [11, 22, 32] + list(range(40, 481, 8)) + [490, 501]
    for helix in design.helices.values():
        # Fresh copy per helix so they never share one mutable list.
        helix.major_ticks = list(ticks)
def add_twist_correct_deletions(design: sc.Design):
    """Add one twist-correction deletion per (column, helix) pair.

    Columns 4, 7, ..., 28 each get a hand-picked in-column offset of 3 or 4
    (chosen arbitrarily in the original design, so the list stays
    hard-coded), applied on helices 2..17.
    """
    per_column_offsets = [4, 3, 3, 4, 3, 3, 3, 3, 3]
    for column, offset in zip(range(4, 29, 3), per_column_offsets):
        position = 16 * column + offset
        for helix_idx in range(2, 18):
            design.add_deletion(helix_idx, position)
def move_top_and_bottom_staples_within_column_boundaries(design: sc.Design):
    """Pull boundary staples back inside their columns.

    Staples starting on the top row (helix 2) have their first domain's end
    moved 8 bases earlier; staples starting on the bottom row (helix 17) have
    their first domain's start moved 8 bases later.  The scaffold also starts
    on helix 17 and is excluded.
    """
    for staple in design.strands_starting_on_helix(2):
        first = staple.domains[0]
        design.set_end(first, first.end - 8)
    bottom_staples = design.strands_starting_on_helix(17)
    bottom_staples.remove(design.scaffold)  # scaffold starts here too
    for staple in bottom_staples:
        first = staple.domains[0]
        design.set_start(first, first.start + 8)
def add_domains_for_barrel_seam(design):
    """Extend boundary staples with single-stranded domains closing the barrel
    seam between the top (helix 2) and bottom (helix 17) rows.

    Staples are paired up positionally by sorting each group by offset.
    NOTE(review): ss_top (on helix 2) is inserted into the *bottom* 5' staple
    and ss_bot (on helix 17) into the *top* 5' staple -- this crossing appears
    intentional for the barrel geometry, but confirm against the design.
    """
    top_staples_5p = design.strands_starting_on_helix(2)
    top_staples_3p = design.strands_ending_on_helix(2)
    bot_staples_5p = design.strands_starting_on_helix(17)
    bot_staples_3p = design.strands_ending_on_helix(17)
    # remove scaffold (staples have at most 3 domains; the scaffold has many)
    top_staples_5p = [st for st in top_staples_5p if len(st.domains) <= 3]
    top_staples_3p = [st for st in top_staples_3p if len(st.domains) <= 3]
    bot_staples_5p = [st for st in bot_staples_5p if len(st.domains) <= 3]
    bot_staples_3p = [st for st in bot_staples_3p if len(st.domains) <= 3]
    # Sort all four groups by offset so zip() pairs neighbouring staples.
    top_staples_5p.sort(key=lambda stap: stap.offset_5p())
    top_staples_3p.sort(key=lambda stap: stap.offset_3p())
    bot_staples_5p.sort(key=lambda stap: stap.offset_5p())
    bot_staples_3p.sort(key=lambda stap: stap.offset_3p())
    for top_5p, top_3p, bot_5p, bot_3p in zip(top_staples_5p, top_staples_3p, bot_staples_5p, bot_staples_3p):
        # Loose domain spanning the gap between neighbouring staple domains.
        ss_top = sc.Domain(helix=2, forward=False,
                           start=top_5p.first_domain().end, end=top_3p.last_domain().start)
        ss_bot = sc.Domain(helix=17, forward=True,
                           start=bot_3p.last_domain().end, end=bot_5p.first_domain().start)
        design.insert_domain(bot_5p, 0, ss_top)
        design.insert_domain(top_5p, 0, ss_bot)
def add_toeholds_for_seam_displacement(design: sc.Design):
    """Prepend a single-stranded toehold domain to every seam staple.

    Staples starting on helix 2 get a forward toehold on helix 1; staples
    starting on helix 17 get a reverse toehold on helix 18.  Each toehold
    spans the staple's first bound domain.
    """
    for seam_helix, toe_helix in ((2, 1), (17, 18)):
        # <= 3 domains excludes the scaffold from the staple list.
        staples = [s for s in design.strands_starting_on_helix(seam_helix)
                   if len(s.domains) <= 3]
        staples.sort(key=lambda s: s.offset_5p())
        for staple in staples:
            bound = staple.first_bound_domain()
            toehold = sc.Domain(helix=toe_helix, forward=(seam_helix == 2),
                                start=bound.start, end=bound.end)
            design.insert_domain(staple, 0, toehold)
def add_adapters(design):
    """Add left and right adapter strands joining helix pairs at the seed edges.

    Each adapter pairs an even "bottom" helix with the helix above it (helix 2
    wraps around to 17) over a 26-base window just outside the seed.  Left
    adapters list the bottom domain first; right adapters list the top domain
    first (matching the original strand orientations).
    """
    def _add_side(side, inside, outside, bottom_first):
        lo, hi = min(inside, outside), max(inside, outside)
        for bot_helix in range(2, 18, 2):
            top_helix = bot_helix - 1 if bot_helix != 2 else 17
            dom_top = sc.Domain(helix=top_helix, forward=True, start=lo, end=hi)
            dom_bot = sc.Domain(helix=bot_helix, forward=False, start=lo, end=hi)
            idt = sc.IDTFields(name=f'adap-{side}-{top_helix}-{bot_helix}',
                               scale='25nm', purification='STD')
            domains = [dom_bot, dom_top] if bottom_first else [dom_top, dom_bot]
            design.add_strand(sc.Strand(domains=domains, idt=idt))

    # left adapters: 26 bases ending at offset 48
    _add_side('left', 48, 48 - 26, bottom_first=True)
    # right adapters: 26 bases starting at offset 464
    _add_side('right', 464, 464 + 26, bottom_first=False)
# IDT order lines for the 16 tiles: name,sequence,scale,purification.
# Spaces inside each sequence delimit its domains.
seq_lines = """tile1rot0,ACCAAGAACT TTGTCAACAAT AAACAAATCCA ATCTTTCCGT,25nm,STD
tile2rot0,TTGTCTAGAGT TTGGGATGTT AGTTCTTGGT ATTGTTGACAA,25nm,STD
tile3rot0,TTATCCACGT TTCCTCCTATT ACTCTAGACAA AACATCCCAA,25nm,STD
tile4rot0,AAGGAAGTAGA TTCGAAAGGT ACGTGGATAA AATAGGAGGAA,25nm,STD
tile5rot0,AACCTCGAAT TACCAGATTCT TCTACTTCCTT ACCTTTCGAA,25nm,STD
tile6rot0,AGAATAGTCGT TTGTCAGTGT ATTCGAGGTT AGAATCTGGTA,25nm,STD
tile7rot0,ATCTGCTCAT TCTGATCTCTT ACGACTATTCT ACACTGACAA,25nm,STD
tile8rot0,AATGGATAGGT AGGTGTCTTT ATGAGCAGAT AAGAGATCAGA,25nm,STD
tile9rot0,TCAAGTTCCA TATCCTTAGCA ACCTATCCATT AAAGACACCT,25nm,STD
tile10rot0,AGTGATGATCT TTTAGGCTGT TGGAACTTGA TGCTAAGGATA,25nm,STD
tile11rot0,ACCCATTCAT TTCCTGATACT AGATCATCACT ACAGCCTAAA,25nm,STD
tile12rot0,TGCGTTAAAAT AGATGCGTAT ATGAATGGGT AGTATCAGGAA,25nm,STD
tile13rot0,AACCTTCACA ATCGTCTCATA ATTTTAACGCA ATACGCATCT,25nm,STD
tile14rot0,ATTCAGAGAGT TGGCATGATA TGTGAAGGTT TATGAGACGAT,25nm,STD
tile15rot0,TACCATGCTT TTGACCAATTT ACTCTCTGAAT TATCATGCCA,25nm,STD
tile16rot0,TGGATTTGTTT ACGGAAAGAT AAGCATGGTA AAATTGGTCAA,25nm,STD""".split('\n')
# Keep every second tile (odd line indices: tile2, tile4, ..., tile16 -- the 8
# sequences needed for the 8 helix pairs) and strip the domain-separating
# spaces so each entry is a contiguous DNA sequence.
# BUGFIX: the original used ''.join(line.split(',')[1]), and ''.join(<str>)
# is a no-op, so the spaces were left inside the sequences handed to
# design.assign_dna.  Adding .split() removes all whitespace.
tile_dna_seqs = [''.join(line.split(',')[1].split())
                 for line_no, line in enumerate(seq_lines) if line_no % 2 == 1]
# print(tile_dna_seqs)
def add_tiles_and_assign_dna(design):
    """Add the left and right tile strands and assign their DNA sequences.

    Each tile pairs an even helix h with h+1 over a fixed offset window at the
    far left ([11, 32)) or far right ([480, 501)) of the design; the sequences
    come from the module-level tile_dna_seqs list.
    """
    def _add_side(side, start, end):
        for top_helix, seq in zip(range(2, 18, 2), tile_dna_seqs):
            bot_helix = top_helix + 1
            dom_top = sc.Domain(helix=top_helix, forward=True,
                                start=start, end=end)
            dom_bot = sc.Domain(helix=bot_helix, forward=False,
                                start=start, end=end)
            idt = sc.IDTFields(name=f'tile-{side}-{top_helix}-{bot_helix}',
                               scale='25nm', purification='STD')
            tile = sc.Strand(domains=[dom_bot, dom_top],
                             color=sc.Color(0, 0, 0), idt=idt)
            design.add_strand(tile)
            design.assign_dna(tile, seq)

    _add_side('left', 11, 32)
    _add_side('right', 480, 501)
def add_angle_inducing_insertions_deletions(design):
    """Add paired insertions/deletions that induce a bend angle.

    Every 32 bases from offset 59 over 12 columns: one set of helices gets an
    insertion at the offset and a deletion 16 later; the complementary set
    gets the deletion first and the insertion 16 later.
    """
    first_offset = 59
    last_offset = first_offset + (32 * 12)
    offsets = range(first_offset, last_offset, 32)
    # insertion followed by deletion
    for helix_idx in (3, 7, 9, 13, 15):
        for off in offsets:
            design.add_insertion(helix_idx, off, 1)
            design.add_deletion(helix_idx, off + 16)
    # deletion followed by insertion
    for helix_idx in (4, 6, 10, 12, 16):
        for off in offsets:
            design.add_deletion(helix_idx, off)
            design.add_insertion(helix_idx, off + 16, 1)
# Watson-Crick complements of the unzipper toehold sequences, 8 nt each
# (26 entries: 13 for helix 1 plus 13 for helix 18).
uz_toes_wc = """
CACCCCAC
CTTTCCTT
TTCACTAA
ACCAACCC
TCTCTTAA
CTTTCATA
ATAATAAA
AACTCACC
ACTTAATA
CAAATCAC
ACCATCCA
TACTCTAT
ATACCTTC
TTATTCAT
ATCCACAA
ATATTTTT
CCACCTAA
CTAAATTA
ATTACCCC
CACTAACA
ACACACTT
TTTTAATC
ACATTTAA
TCCACATC
CCTACCTT
TCCCTATA
""".split()
# above is in order from right to left on helix 1, followed by left to right on helix 18
def assign_dna_to_unzipper_toeholds(design):
    """Assign toehold sequences (WC complements of uz_toes_wc) to unzippers.

    Helix-1 strands are taken right-to-left and helix-18 strands
    left-to-right, matching the ordering of uz_toes_wc.  Only the first 8
    bases of each strand are fixed; the remainder stays wildcards.
    Offsets on a helix are assumed unique, so the sort order is total.
    """
    toeholds = [sc.wc(seq) for seq in uz_toes_wc]
    h1_strands = sorted(design.strands_starting_on_helix(1),
                        key=lambda s: s.first_domain().offset_5p(),
                        reverse=True)
    h18_strands = sorted(design.strands_starting_on_helix(18),
                         key=lambda s: s.first_domain().offset_5p())
    for strand, toe in zip(h1_strands + h18_strands, toeholds):
        wildcards = sc.DNA_base_wildcard * (strand.dna_length() - 8)
        design.assign_dna(strand, toe + wildcards)
if __name__ == '__main__':
    # Build the design and export the scadnano file plus the IDT bulk order.
    the_design = create_design()
    the_design.write_scadnano_file(directory='output_designs')
    the_design.write_idt_bulk_input_file(directory='idt')
    # the_design.write_idt_plate_excel_file(directory='idt', export_non_modified_strand_version=True)
|
import numpy as np
import pdb
from tensorflow import flags
from copy import deepcopy
FLAGS = flags.FLAGS
def euclidean_proj_simplex(v, s=1):
    """Euclidean projection of v onto the simplex {w : w >= 0, sum(w) = s}.

    Args:
        v: 1-D numpy array to project (unpacking raises ValueError otherwise).
        s: simplex radius; must be strictly positive.

    Returns:
        numpy array: the projection of v onto the simplex.
    """
    assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s
    n, = v.shape  # will raise ValueError if v is not 1-D
    # check if we are already on the simplex
    # FIX: np.alltrue was removed in NumPy 2.0; np.all is the supported spelling.
    if v.sum() == s and np.all(v >= 0):
        # best projection: itself!
        return v
    # get the array of cumulative sums of a sorted (decreasing) copy of v
    u = np.sort(v)[::-1]
    cssv = np.cumsum(u)
    # get the number of > 0 components of the optimal solution
    rho = np.nonzero(u * np.arange(1, n+1) > (cssv - s))[0][-1]
    # compute the Lagrange multiplier associated to the simplex constraint
    theta = (cssv[rho] - s) / (rho + 1.0)
    # compute the projection by thresholding v using theta
    w = (v - theta).clip(min=0)
    return w
def optimize_q(c, sq_norms):
    '''Return q propto sqrt(sq_norms / marginal cost).

    Args:
        c: cumulative compute-cost sequence (c[i] is the cost through level i).
        sq_norms: array of per-level squared norms.

    Returns:
        numpy array q summing to 1: the sampling distribution over levels.
    '''
    # Marginal cost of each level (the first entry is its own cost).
    c_diffs = np.array([c[i] - (0. if i < 1 else c[i-1]) for i in range(len(c))])
    c_diffs[np.logical_not(np.isfinite(c_diffs))] = 0.0
    # Unnormalized tail probabilities P(j >= i) of the optimal RT sampler.
    p_j_geq_i_unnormalized = np.sqrt(sq_norms / (c_diffs+1e-14))
    # Enforce that the tails are non-increasing, sweeping from the right
    # (running maximum).  BUGFIX: the original compared index -(i+2) against
    # (-i + 1), which for i >= 1 points at the *front* of the array; adjacent
    # tail entries are at -(i+2) and -(i+1).
    for i in range(len(p_j_geq_i_unnormalized)-1):
        if p_j_geq_i_unnormalized[-(i+2)] < p_j_geq_i_unnormalized[-(i+1)]:
            p_j_geq_i_unnormalized[-(i+2)] = p_j_geq_i_unnormalized[-(i+1)]
    # q_i = P(j >= i) - P(j >= i+1).
    q_unnormalized = np.array(
        [p - (0. if i+1 == len(p_j_geq_i_unnormalized)
              else p_j_geq_i_unnormalized[i+1])
         for i, p in enumerate(p_j_geq_i_unnormalized)
        ])
    q_unnormalized = np.maximum(0., q_unnormalized)
    q = np.array(q_unnormalized) / sum(q_unnormalized)
    return q
def compute_and_variance(q, c, sq_norms):
    """Return (expected compute, expected variance) of the RT estimator
    under sampling distribution q with costs c and squared norms sq_norms."""
    # Inverse tail probabilities 1 / P(j >= i), used as importance weights.
    tail_inv = np.array([1.0 / q[i:].sum() for i in range(len(q))])
    # Running sum of weighted squared norms through each level.
    weighted_running = np.cumsum(tail_inv * sq_norms)
    variance = np.dot(q, weighted_running)
    compute = np.dot(q, c)
    return compute, variance
def get_sq_norm_seq(sq_norms_matrix, idxs):
    """Gather the squared norms for consecutive index spans.

    Entry i is sq_norms_matrix[row_i, idxs[i]], where row_i is 0 for the
    first span and idxs[i-1] + 1 for every later one (i.e. the norm of the
    difference against the previously chosen index).
    """
    rows = [0] + [prev + 1 for prev in idxs[:-1]]
    return np.array([sq_norms_matrix[r, col] for r, col in zip(rows, idxs)])
def get_c_seq(c, idxs):
    """Return the compute-cost sequence for the chosen idxs.

    When the partial_update/compute_penalty flags are set, costs accumulate
    (evaluating level i requires all earlier chosen levels); otherwise each
    entry is simply the cost of its own index.
    """
    try:
        penalized = FLAGS.partial_update or FLAGS.compute_penalty
    except Exception:
        # FLAGS unavailable/undefined (e.g. outside a TensorFlow app context).
        penalized = False
    chosen = [c[j] for j in idxs]
    if penalized:
        return np.array(list(np.cumsum(chosen)))
    return np.array(chosen)
def cost(sq_norm_matrix, c, idxs, return_cv=False):
    """Scalar objective compute * variance**FLAGS.variance_weight for idxs.

    With return_cv=True also returns the raw compute and variance terms.
    """
    sq_norms = get_sq_norm_seq(sq_norm_matrix, idxs)
    c_seq = get_c_seq(c, idxs)
    q = optimize_q(c_seq, sq_norms)
    compute, variance = compute_and_variance(q, c_seq, sq_norms)
    costval = compute * (variance ** FLAGS.variance_weight)
    if not np.isfinite(costval):
        # Deliberate debugging hook: drop into pdb on a non-finite objective.
        pdb.set_trace()
    if return_cv:
        return costval, compute, variance
    return costval
def get_q(sq_norm_matrix, c, idxs):
    """Optimal sampling distribution for the levels selected by idxs."""
    return optimize_q(get_c_seq(c, idxs),
                      get_sq_norm_seq(sq_norm_matrix, idxs))
def optimize_remove(sq_norm_matrix, c, idxs, verbose=False):
    """Greedy backward pass: starting from all of `idxs`, repeatedly drop the
    intermediate index whose removal lowers the cost, until no removal helps.
    The final index is always kept.

    Returns:
        (optimized idxs, optimal sampling distribution q over them).
    """
    idxs = deepcopy(idxs)
    baseline = cost(sq_norm_matrix, c, idxs)
    converged = False
    while not converged and len(idxs) > 1:
        if verbose:
            print("Not yet converged")
        converged = True
        # Scan candidates back-to-front; the last index is mandatory.
        i = len(idxs)-2
        while i >= 0:
            # Try eliminating every intermediate value
            idxs_minus_i = idxs[:i] + idxs[i+1:]
            cost_minus_i = cost(sq_norm_matrix, c, idxs_minus_i)
            if cost_minus_i < baseline:
                if verbose:
                    print("{}, trial cost {} under baseline {}".format(
                        i, cost_minus_i, baseline))
                    print("removing idx {}, remaining {}".format(idxs[i], idxs_minus_i))
                baseline = cost_minus_i
                idxs = idxs_minus_i
                converged = False
                # Restart the scan from the (new) end after any improvement.
                break
            else:
                if verbose:
                    print("{}, trial cost {} not under baseline {}".format(
                        i, cost_minus_i, baseline))
                i -= 1
    q = get_q(sq_norm_matrix, c, idxs)
    if verbose:
        print("Converged. Final idxs: {}. Final ps: {}".format(idxs, q))
    return idxs, q
def idxs_from_negative(negative_idxs, idxs):
    """Return idxs with every member of negative_idxs filtered out,
    preserving the original order."""
    excluded = set(negative_idxs)
    return [idx for idx in idxs if idx not in excluded]
def optimize_add(sq_norm_matrix, c, idxs, verbose=False, logger=None):
    """Greedy forward pass: start from only the last index (everything else
    excluded via `negative_idxs`) and repeatedly re-add the excluded index
    whose inclusion lowers the cost, until no addition helps.

    `logger` is accepted for interface symmetry but unused here.

    Returns:
        (optimized idxs, optimal sampling distribution q over them).
    """
    idxs = deepcopy(idxs)
    # Initially exclude every index except the mandatory final one.
    negative_idxs = idxs[:-1]
    baseline = cost(sq_norm_matrix, c, idxs_from_negative(negative_idxs, idxs))
    converged = False
    while not converged and len(negative_idxs) > 0:
        if verbose:
            print("Not yet converged")
        converged = True
        i = 0
        while i <= len(negative_idxs)-1:
            # Removing index i from the exclusion list == adding it back.
            idxs_minus_i = negative_idxs[:i] + negative_idxs[i+1:]
            cost_minus_i = cost(sq_norm_matrix, c,
                                idxs_from_negative(idxs_minus_i, idxs))
            if cost_minus_i < baseline:
                if verbose:
                    print("{}, trial cost {} under baseline {}".format(
                        i, cost_minus_i, baseline))
                    print("adding idx {}, giving {}".format(
                        negative_idxs[i],
                        idxs_from_negative(idxs_minus_i, idxs)))
                baseline = cost_minus_i
                negative_idxs = idxs_minus_i
                converged = False
                # Restart the scan after any improvement.
                break
            else:
                if verbose:
                    print("{}, trial cost {} not under baseline {}".format(
                        i, cost_minus_i, baseline))
                i += 1
    idxs = idxs_from_negative(negative_idxs, idxs)
    q = get_q(sq_norm_matrix, c, idxs)
    if verbose:
        print("Converged. Final idxs: {}. Final ps: {}".format(idxs, q))
    return idxs, q
def optimize_greedy_roulette(sq_norm_matrix, c,
                             idxs, verbose=False, logger=None):
    '''Greedily optimize a RT sampler.

    Runs both greedy directions (remove-from-all and add-from-none) and keeps
    whichever achieves the lower cost; FLAGS.force_all_idxs short-circuits to
    using every index.  When a logger is supplied, logs a comparison against
    the deterministic (last-index-only) estimator.

    Args:
        sq_norm_matrix: N+1 x N array
            entries [0, j]: sq norm of g_j
            entries [i+1, j]: sq norm of g_j - g_i
        idxs: all remaining nodes under consideration

    Returns:
        (chosen idxs, sampling distribution q over them).
    '''
    # Deterministic baseline: always evaluate only the final index.
    base_cost, base_c, base_v = cost(sq_norm_matrix, c, [idxs[-1]],
                                     return_cv=True)
    try:
        force_all_idxs = FLAGS.force_all_idxs
    except Exception as e:
        # Flag undefined (e.g. used outside the TF app): optimize normally.
        force_all_idxs = False
    if force_all_idxs:
        if verbose:
            print("Forcing using all idxs")
        q = get_q(sq_norm_matrix, c, idxs)
    else:
        # Try greedily optimizing idxs from both directions and keep the best.
        idxs_remove, q_remove = optimize_remove(sq_norm_matrix, c, idxs, verbose)
        idxs_add, q_add = optimize_add(sq_norm_matrix, c, idxs, verbose)
        cost_remove = cost(sq_norm_matrix, c, idxs_remove)
        cost_add = cost(sq_norm_matrix, c, idxs_add)
        if cost_remove < cost_add:
            if verbose:
                print("Greedy remove cost {} < greedy add cost {}.".format(
                    cost_remove, cost_add))
                print("Returning greedy remove idxs {} instead of greedy add "
                      "idxs {}".format(idxs_remove, idxs_add))
            idxs = idxs_remove
            q = q_remove
        else:
            if verbose:
                print("Greedy add cost {} <= greedy remove cost {}.".format(
                    cost_add, cost_remove))
                print("Returning greedy add idxs {} instead of greedy remove "
                      "idxs {}".format(idxs_add, idxs_remove))
            idxs = idxs_add
            q = q_add
    costval, cval, vval = cost(sq_norm_matrix, c, idxs, return_cv=True)
    if logger:
        logger.info(
            "Optimized RT. idxs: {}. q: {}".format(idxs, q))
        logger.info("RT estimator has cost " +
                    "{:.2f}, compute: {:.2f}, variance: {:.2f}".format(
                        costval, cval, vval
                    ))
        logger.info("Deterministic estimator has " +
                    "cost: {:.2f}, compute {:.2f}, variance {:.2f}".format(
                        base_cost, base_c, base_v
                    ))
        logger.info("Change factors are " +
                    "cost {:.2f}, compute {:.2f}, variance {:.2f}.".format(
                        costval/base_cost, cval/base_c, vval/base_v
                    ))
    return idxs, q
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from setuptools import find_packages, setup
def get_version(package):
    """
    Return the package version as listed in `__version__` in `__init__.py`.
    """
    init_path = os.path.join(package, "__init__.py")
    with open(init_path) as init_file:
        contents = init_file.read()
    match = re.search(r"__version__ = ['\"]([^'\"]+)['\"]", contents)
    return match.group(1)
def get_long_description():
    """
    Return the contents of README.md (used as the PyPI long description).
    """
    with open("README.md", encoding="utf8") as readme:
        return readme.read()
# Runtime dependencies.
# FIX: ">=5.4.*" is not a valid PEP 440 specifier (the ".*" wildcard is only
# allowed with == and !=); pip warns on it and newer resolvers reject it, so
# PyYAML is pinned with a plain ">=".
INSTALL_REQUIRES = [
    "loguru>=0.5.0",
    "confluent-kafka>=1.6.0",
    "starlette>=0.16.0",
    "pydantic>=1.0.0",
    "PyYAML>=5.4",
    "aiohttp>=3.7.0",
    "janus>=1.0.0",
    "blinker>=1.4",
    "sentry-sdk>=1.5.0",
]

setup(
    name="eventbus",
    version=get_version("eventbus"),
    author="Benn Ma",
    author_email="bennmsg@gmail.com",
    description="A reliable event/message hub for boosting Event-Driven architecture & big data ingestion.",
    long_description=get_long_description(),
    long_description_content_type="text/markdown",
    python_requires=">=3.7.0",
    url="https://github.com/thenetcircle/event-bus-3",
    project_urls={
        "Bug Tracker": "https://github.com/thenetcircle/event-bus-3/issues",
    },
    license="Apache License, Version 2.0",
    packages=find_packages(include=("eventbus*",)),
    install_requires=INSTALL_REQUIRES,
    keywords=("event-bus eventbus eventhub messagehub event-driven micro-services"),
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
    ],
)
|
# Read and display the whole file, then re-read it in pieces to demonstrate
# how successive read(n) calls advance the file position.
# FIX: use context managers so the file handle is closed even if a read or
# print raises, instead of manual open()/close() pairs.
with open("noman.txt", "r") as file_to_work:
    content = file_to_work.read()
print(content)

print("\n\n\n\n\n")

with open("noman.txt", "r") as file_to_work:
    just_one_character = file_to_work.read(1)
    print(just_one_character)
    # read(4) continues from the current position: the next four characters.
    remaining_four_characters = file_to_work.read(4)
    print(remaining_four_characters)
    # read() with no size returns everything from the position to EOF.
    rest_of_the_file = file_to_work.read()
    print(rest_of_the_file)
|
from abc import ABC
from ...batch.lazy.lazy_evaluation import PipeLazyEvaluationConsumer, LazyEvaluation
from ...common.types.bases.j_obj_wrapper import JavaObjectWrapper
from ...common.types.conversion.type_converters import j_value_to_py_value
from ...common.utils.printing import print_with_title
from ...py4j_util import get_java_class
class HasLazyPrintTrainInfo(JavaObjectWrapper, ABC):
    """Mixin for pipeline trainers that can lazily print training info.

    The Java side collects train info while the pipeline fits; the registered
    callback converts it to a Python value and prints it under `title`.
    """

    def enableLazyPrintTrainInfo(self, title=None):
        # Enable lazy train-info collection on the wrapped Java trainer.
        self.get_j_obj().enableLazyPrintTrainInfo(title)
        j_pipeline_lazy_printer_cls = get_java_class("com.alibaba.alink.common.lazy.PipelineLazyCallbackUtils")
        # Bridge object whose value is filled in from the Java side, then
        # converted to a Python value for the print callback.
        j_lazy_train_info = LazyEvaluation()
        py_lazy_train_info = j_lazy_train_info.transform(j_value_to_py_value)
        py_lazy_train_info.addCallback(lambda d: print_with_title(d, title))
        j_consumer = PipeLazyEvaluationConsumer(j_lazy_train_info)
        # The Java callback API expects a java.util.List of consumers.
        j_array_list_cls = get_java_class("java.util.ArrayList")
        j_consumer_list = j_array_list_cls()
        j_consumer_list.add(j_consumer)
        j_pipeline_lazy_printer_cls.callbackForTrainerLazyTrainInfo(self.get_j_obj(), j_consumer_list)
        # Fluent API: allow call chaining on the trainer.
        return self
|
# Compare the number of cars in the two salons and report who has more.
alex_salon = ['Audi R8 e tron', 'Mercedes Benz gle 400', 'Tesla Model S']
oleg_salon = ['Лада Калина', 'Запорожець']

len_alex_salon = len(alex_salon)
len_oleg_salon = len(oleg_salon)

# FIX: the original two-way branch reported "Alex has more" even when the
# counts were equal; handle the tie explicitly.
if len_alex_salon < len_oleg_salon:
    print("У Олега машин більше")
elif len_alex_salon > len_oleg_salon:
    print("В Алексія машин більше")
else:
    print("Кількість машин однакова")
|
#! /usr/bin/env python
"""
------------------------------------------------------------------------------------------------------------------------
____ __ __ __ __ __
/ __ \__ __/ /_/ /_ ____ ____ / / / /__ ____ _____/ /__ _____
____________ / /_/ / / / / __/ __ \/ __ \/ __ \ / /_/ / _ \/ __ `/ __ / _ \/ ___/ ____________
/_____/_____/ / ____/ /_/ / /_/ / / / /_/ / / / / / __ / __/ /_/ / /_/ / __/ / /_____/_____/
/_/ \__, /\__/_/ /_/\____/_/ /_/ /_/ /_/\___/\__,_/\__,_/\___/_/
/____/
------------------------------------------------------------------------------------------------------------------------
:FILE: DjangoBackend-ReactFrontend/CoreRoot/settings.py
:AUTHOR: Nathan E White, PhD
:ABOUT: Sets a variety of project and site settings consumed throughout the Django project
------------------------------------------------------------------------------------------------------------------------
:NOTES: For more information on this file, see:
https://docs.djangoproject.com/en/3.1/topics/settings/
For a full list of settings in Django applications:
https://docs.djangoproject.com/en/3.1/ref/settings/
For a list of 'quick 'n' dirty' settings suitable for dev but not production:
https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
------------------------------------------------------------------------------------------------------------------------
"""
# <BOF>
# Imports --- Python STL Imports: For manipulating filesystem paths
from pathlib import Path
# Imports --- Python STL Imports: For importing environmental variables
# noinspection PyUnresolvedReferences
import os
# ----------------------------------------------------------------------------------------------------------------------
# Application definitions
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Read the secret key from the environment when available; the hard-coded
# literal is only a development fallback (behaviour is unchanged when
# DJANGO_KEY is unset).
# TODO: drop the fallback before production -- this literal key is public.
SECRET_KEY = os.getenv('DJANGO_KEY', 'qkl+xdr8aimpf-&x(mi7)dwt^-q77aji#j*d#02-5usa32r9!y')
# TODO: NEVER SET TO TRUE IN A LIVE WEBSITE -- YOU WILL BE HACKED PRONTO
# noinspection DjangoDebugModeSettings
DEBUG = True
# Empty list: with DEBUG=True Django allows localhost only.
ALLOWED_HOSTS = []
# Primary-key type for models that don't declare one explicitly.
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
INSTALLED_APPS = [
    # Django built-ins
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party: CORS header handling for the React frontend
    'corsheaders',
    # Project apps
    'core',
    'core.user'
]
# Use the project's custom user model from the core.user app.
AUTH_USER_MODEL = 'core_user.User'
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # CorsMiddleware must run before CommonMiddleware to add CORS headers.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CoreRoot.urls'
# ----------------------------------------------------------------------------------------------------------------------
# Templates
# https://docs.djangoproject.com/en/3.2/topics/templates/
# Standard Django template engine with app-directory template discovery.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],  # no project-level template directories
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# ----------------------------------------------------------------------------------------------------------------------
# WSGI: (Python) Web Service Gateway Interface
# https://wsgi.readthedocs.io/en/latest/
# https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
WSGI_APPLICATION = 'CoreRoot.wsgi.application'
# ----------------------------------------------------------------------------------------------------------------------
# Database Settings
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Development default: file-backed SQLite next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# ----------------------------------------------------------------------------------------------------------------------
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# ----------------------------------------------------------------------------------------------------------------------
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# ----------------------------------------------------------------------------------------------------------------------
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# ----------------------------------------------------------------------------------------------------------------------
# REST Framework Definitions
# https://www.django-rest-framework.org/
# JWT-only authentication, JSON-only responses (no browsable API renderer).
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_simplejwt.authentication.JWTAuthentication',
    ),
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
    )
}
# ----------------------------------------------------------------------------------------------------------------------
# CORS Headers
# https://www.stackhawk.com/blog/django-cors-guide/
# React dev server (port 3000) and Django itself (port 8000), both hostnames.
CORS_ALLOWED_ORIGINS = [
    "http://localhost:3000",
    "http://127.0.0.1:3000",
    "http://localhost:8000",
    "http://127.0.0.1:8000",
]
# <EOF>
|
#!/usr/bin/env python
'''
Created on 25 Apr 2021
This file contains functions to get coordinates for animations of
lightcones in causal sets.
@author: Christoph Minz
@license: BSD 3-Clause
'''
from __future__ import annotations
from typing import List, Tuple
import numpy as np
from math import sqrt
default_eps: float = 0.001  # default relative time offset between layers


def move_centre(C: List[List[float]]) -> List[List[float]]:
    '''Translate all coordinate tuples in C so that their bounding box is
    centred at the origin.  Modifies C in place and returns it.'''
    coords: np.ndarray = np.array(C)
    centre = (coords.max(axis=0) + coords.min(axis=0)) / 2
    for i, row in enumerate(coords):
        C[i] = (row - centre).tolist()
    return C
def get_1simplex(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 1-simplex.
    `edge` sets the size; the optional `eps` adds a small time offset to each
    layer.  Black-hole spacetimes use special spatial coordinates.
    '''
    half: float = edge / 2
    t_layer: float = 0.75 * half * (1 + eps)
    coords: List[List[float]]
    if spacetime in {'black hole', 'Schwarzschild'}:
        coords = [[-t_layer, 0.47], [-t_layer, 0.87], [t_layer, 0.25]]
    else:
        coords = [[-t_layer, -half], [-t_layer, half], [t_layer, 0.]]
    return ('1-simplex', [2, 1, 3], coords)
def get_2simplex(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 2-simplex.
    The first argument sets the size, the optional second argument sets
    a small time offset for each layer.
    '''
    P: List[int] = [4, 2, 6, 1, 5, 3, 7]
    r: float = edge / sqrt(3.)  # radius (from vertex to center)
    r_h: float = r / 2  # radius half
    a: float = edge / 2  # half edge length
    tmax: float = a + r_h  # time coordinate of the apex event
    t: np.ndarray
    if spacetime in {'black hole', 'Schwarzschild'}:
        raise ValueError('2D black hole spacetime not supported')
    elif spacetime == 'de Sitter':
        # time layers stretched by 1.5 (presumably for the de Sitter
        # embedding -- TODO confirm), recentred around zero
        t = (np.array([0, 1.5 * a, 1.5 * tmax]) - 0.75 * tmax) * (1 + eps)
    else:
        t = (np.array([0, a, tmax]) - tmax / 2) * (1 + eps)
    # Rows are [time, x, y]: three base vertices at t[0], three mid-layer
    # events at t[1], and the apex at t[2].
    C: List[List[float]] = [[t[0], -a, -r_h],
                            [t[0], a, -r_h],
                            [t[1], 0., -r_h],
                            [t[0], 0., r],
                            [t[1], -a / 2, r_h / 2],
                            [t[1], a / 2, r_h / 2],
                            [t[2], 0., 0.]]
    return ('2-simplex', P, C)
def get_2simplexFlipped(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 2-simplex with one edge flipped.
    The first argument sets the size, the optional argument `eps` adds
    a small time offset for each layer.
    '''
    if spacetime in {'black hole', 'Schwarzschild'}:
        raise ValueError('2D black hole spacetime not supported')
    perm: List[int] = [1, 5, 3, 6, 2, 4]
    radius: float = edge / sqrt(3.)  # circumradius (vertex to centre)
    radius_h: float = radius / 2
    half: float = edge / 2
    if spacetime == 'de Sitter':
        times: np.ndarray = np.array([-1.5 * half, 0, 1.5 * half]) * (1 + eps)
    else:
        times = np.array([-half, 0, half]) * (1 + eps)
    coords: List[List[float]] = [
        [times[0], 0., -radius_h],
        [times[1], -half, -radius_h],
        [times[1], 0., radius],
        [times[2], -half / 2, radius_h / 2],
        [times[1], half, -radius_h],
        [times[2], half / 2, radius_h / 2]]
    return ('2-simplex with one edge flipped', perm, coords)
def get_2simplexFlipped2(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 2-simplex with two edges flipped.
    The first argument sets the size, the optional argument `eps` adds
    a small time offset for each layer.
    '''
    if spacetime in {'black hole', 'Schwarzschild'}:
        raise ValueError('2D black hole spacetime not supported')
    perm: List[int] = [3, 5, 1, 4, 2, 6]
    radius: float = edge / sqrt(3.)  # circumradius (vertex to centre)
    radius_h: float = radius / 2
    half: float = edge / 2
    if spacetime == 'de Sitter':
        times: np.ndarray = np.array([-1.5 * half, 0, 1.5 * half]) * (1 + eps)
    else:
        times = np.array([-half, 0, half]) * (1 + eps)
    coords: List[List[float]] = [
        [times[0], -half / 2, radius_h / 2],
        [times[1], -half, -radius_h],
        [times[0], half / 2, radius_h / 2],
        [times[1], 0., radius],
        [times[1], half, -radius_h],
        [times[2], 0., -radius_h]]
    return ('2-simplex with two edges flipped', perm, coords)
def get_2simplexRotated3(edge: float, s: int = 1, eps: float = default_eps,
                         spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 2-simplex rotating right/left-handed (3 steps).
    The rotation is right-handed if s == 1, it is left-handed if s == -1.
    The first argument sets the size, the optional argument `eps` adds
    a small time offset for each layer.
    '''
    if spacetime in {'black hole', 'Schwarzschild'}:
        raise ValueError('2D black hole spacetime not supported')
    perm: List[int] = [6, 3, 5, 9, 7, 1, 8, 2, 4]
    radius: float = edge / sqrt(3.)  # circumradius (vertex to centre)
    radius_h: float = radius / 2
    half: float = edge / 2
    tstep: float = 2. * radius / (sqrt(6.) + sqrt(2.))
    if spacetime == 'de Sitter':
        times: np.ndarray = np.array([-1.5, 0.0, 1.5]) * tstep * (1 + eps)
    else:
        times = np.array([-1.0, 0.0, 1.0]) * tstep * (1 + eps)
    # the handedness factor s mirrors the spatial coordinates
    coords: List[List[float]] = [
        [times[0], -s * half, -radius_h],
        [times[1], -s * radius_h, -half],
        [times[0], s * half, -radius_h],
        [times[2], 0., -radius],
        [times[1], s * radius, 0.],
        [times[0], 0., radius],
        [times[1], -s * radius_h, half],
        [times[2], -s * half, radius_h],
        [times[2], s * half, radius_h]]
    handedness: str = 'right' if s == 1 else 'left'
    return ('2-simplex rotating \n' + handedness + '-handed (3 steps)', perm, coords)
def get_2simplexRotated5(edge: float, s: int = 1, eps: float = default_eps,
                         spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 2-simplex rotating right/left-handed (5 steps).
    The rotation is right-handed if s == 1, it is left-handed if s == -1.
    The first argument sets the size, the optional argument `eps` adds
    a small time offset for each layer.
    '''
    if spacetime in {'black hole', 'Schwarzschild'}:
        raise ValueError('2D black hole spacetime not supported')
    perm: List[int] = [7, 10, 3, 1, 12, 5, 2, 14, 8, 11, 4, 15, 13, 6, 9]
    radius: float = edge / sqrt(3.)  # circumradius (vertex to centre)
    radius_h: float = radius / 2
    half: float = edge / 2
    tstep: float = 2. * radius / (sqrt(6.) + sqrt(2.))
    if spacetime == 'de Sitter':
        times: np.ndarray = np.array([-3.0, -1.5, 0.0, 1.5, 3.0]) * tstep * (1 + eps)
    else:
        times = np.array([-2.0, -1.0, 0.0, 1.0, 2.0]) * tstep * (1 + eps)
    # the handedness factor s mirrors the spatial coordinates
    coords: List[List[float]] = [
        [times[0], -s * half, -radius_h],
        [times[1], -s * radius_h, -half],
        [times[0], s * half, -radius_h],
        [times[2], 0., -radius],
        [times[1], s * radius, 0.],
        [times[3], s * radius_h, -half],
        [times[0], 0., radius],
        [times[2], s * half, radius_h],
        [times[4], s * half, -radius_h],
        [times[1], -s * radius_h, half],
        [times[3], s * radius_h, half],
        [times[2], -s * half, radius_h],
        [times[4], 0., radius],
        [times[3], -s * radius, 0.],
        [times[4], -s * half, -radius_h]]
    handedness: str = 'right' if s == 1 else 'left'
    return ('2-simplex rotating \n' + handedness + '-handed (5 steps)', perm, coords)
def get_3simplex(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 3-simplex.
    The first argument sets the size, the optional argument `eps` adds
    a small time offset for each layer.
    '''
    if spacetime not in ('Minkowski', ''):
        raise ValueError('Spacetime not supported')
    perm: List[int] = [6, 4, 12, 2, 10, 1, 8, 7, 14, 5, 13, 3, 11, 9, 15]
    radius: float = sqrt(3. / 8.) * edge  # circumradius (vertex to centre)
    half: float = edge / 2
    face: float = half / sqrt(3.)  # from edge centre to face centre
    cent: float = edge / sqrt(24.)  # from face centre to centre
    tmax: float = half + face + cent
    t: np.ndarray = (np.array([0, half, half + face, half + face + cent])
                     - tmax / 2) * (1 + eps)
    # vertices of the tetrahedron (time component filled in afterwards):
    v1: np.ndarray = np.array([0., -half, -face, -2 * cent])
    v2: np.ndarray = np.array([0., half, -face, -2 * cent])
    v3: np.ndarray = np.array([0., 0., 2. * face, -2 * cent])
    v4: np.ndarray = np.array([0., 0., 0., radius - cent])
    # events: vertices, edge midpoints, face centres, and the barycentre
    coords: np.ndarray = np.array([v1,
                                   v2,
                                   (v1 + v2) / 2,
                                   v3,
                                   (v1 + v3) / 2,
                                   v4,
                                   (v1 + v4) / 2,
                                   (v2 + v3) / 2,
                                   (v1 + v2 + v3) / 3,
                                   (v2 + v4) / 2,
                                   (v1 + v2 + v4) / 3,
                                   (v3 + v4) / 2,
                                   (v1 + v3 + v4) / 3,
                                   (v2 + v3 + v4) / 3,
                                   (v1 + v2 + v3 + v4) / 4])
    # time layer of every event, used to index into t
    layer: List[int] = [0, 0, 1, 0, 1, 0, 1, 1, 2, 1, 2, 1, 2, 2, 3]
    coords[:, 0] = t[layer]
    return ('3-simplex', perm, coords.tolist())
def get_latticeD2(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 2D-lattice.
    The first argument sets the size, the optional argument `eps` adds
    a small time offset for each layer.
    '''
    if spacetime not in ('Minkowski', ''):
        raise ValueError('Spacetime not supported')
    perm: List[int] = [1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15, 4, 8, 12, 16]
    s: float = edge / 2
    t: float = s * (1 + eps)
    # 4x4 diamond grid: event (row, col) sits at time (row + col - 3) * t
    # and spatial position (row - col) * s
    coords: List[List[float]] = [[(row + col - 3) * t, (row - col) * s]
                                 for row in range(4)
                                 for col in range(4)]
    return ('2D-lattice', perm, coords)
def get_latticeD3_oct(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 3D-lattice of octahedrons.
    The first argument sets the size, the optional second argument sets
    a small time offset for each layer.
    Only flat (Minkowski) spacetime is supported.
    '''
    if spacetime != 'Minkowski' and spacetime != '':
        raise ValueError('Spacetime not supported')
    # permutation of the 44 events for the Hasse diagram
    P: List[int] = [1, 13, 29, 41, 8, 23, 38, 5, 20, 35,
                    17, 33, 2, 14, 30, 42, 11, 27, 26, 9, 24, 39, 6, 21, 36, 19,
                    18, 34, 3, 15, 31, 43, 12, 28, 10, 25, 40, 7, 22, 37,
                    4, 16, 32, 44]
    s: float = edge / 2
    t: float = s * (1 + eps)  # time step; eps slightly separates the layers
    # event coordinates (t, x, y); the first factor counts the time layer
    C: List[List[float]] = [[-3 * t, 0 * s, 0 * s],
                            [-2 * t, 0 * s, -1 * s],
                            [-1 * t, 0 * s, -2 * s],
                            [0 * t, 0 * s, -3 * s],
                            [-2 * t, -1 * s, 0 * s],
                            [-1 * t, -1 * s, -1 * s],
                            [0 * t, -1 * s, -2 * s],
                            [-2 * t, 1 * s, 0 * s],
                            [-1 * t, 1 * s, -1 * s],
                            [0 * t, 1 * s, -2 * s],
                            [-1 * t, -2 * s, 0 * s],
                            [0 * t, -2 * s, -1 * s],
                            [-2 * t, 0 * s, 1 * s],
                            [-1 * t, 0 * s, 0 * s],
                            [0 * t, 0 * s, -1 * s],
                            [1 * t, 0 * s, -2 * s], # end part 1
                            [-1 * t, 2 * s, 0 * s],
                            [0 * t, 2 * s, -1 * s],
                            [0 * t, -3 * s, 0 * s],
                            [-1 * t, -1 * s, 1 * s],
                            [0 * t, -1 * s, 0 * s],
                            [1 * t, -1 * s, -1 * s],
                            [-1 * t, 1 * s, 1 * s],
                            [0 * t, 1 * s, 0 * s],
                            [1 * t, 1 * s, -1 * s],
                            [0 * t, 3 * s, 0 * s],
                            [0 * t, -2 * s, 1 * s],
                            [1 * t, -2 * s, 0 * s], # end part 2
                            [-1 * t, 0 * s, 2 * s],
                            [0 * t, 0 * s, 1 * s],
                            [1 * t, 0 * s, 0 * s],
                            [2 * t, 0 * s, -1 * s],
                            [0 * t, 2 * s, 1 * s],
                            [1 * t, 2 * s, 0 * s],
                            [0 * t, -1 * s, 2 * s],
                            [1 * t, -1 * s, 1 * s],
                            [2 * t, -1 * s, 0 * s],
                            [0 * t, 1 * s, 2 * s],
                            [1 * t, 1 * s, 1 * s],
                            [2 * t, 1 * s, 0 * s],
                            [0 * t, 0 * s, 3 * s],
                            [1 * t, 0 * s, 2 * s],
                            [2 * t, 0 * s, 1 * s],
                            [3 * t, 0 * s, 0 * s]] # end part 3
    return ('3D-lattice of octahedrons', P, C)
def get_latticeD4_oct(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 4D-lattice of octahedrons.
    The first argument sets the size, the optional second argument sets
    a small time offset for each layer.
    Only flat (Minkowski) spacetime is supported.
    '''
    if spacetime != 'Minkowski' and spacetime != '':
        raise ValueError('Spacetime not supported')
    # NOTE(review): no Hasse-diagram permutation is provided for this lattice
    P: List[int] = []
    s: float = edge / 2
    t: float = s * (1 + eps)  # time step; eps slightly separates the layers
    # event coordinates (t, x, y, z); the first factor counts the time layer
    C: List[List[float]] = [[-3 * t, 0 * s, 0 * s, 0 * s],
                            [-2 * t, -1 * s, 0 * s, 0 * s],
                            [-2 * t, 0 * s, -1 * s, 0 * s],
                            [-2 * t, 0 * s, 0 * s, -1 * s],
                            [-2 * t, 0 * s, 0 * s, 1 * s],
                            [-2 * t, 0 * s, 1 * s, 0 * s],
                            [-2 * t, 1 * s, 0 * s, 0 * s],
                            [-1 * t, -2 * s, 0 * s, 0 * s],
                            [-1 * t, -1 * s, -1 * s, 0 * s],
                            [-1 * t, -1 * s, 0 * s, -1 * s],
                            [-1 * t, -1 * s, 0 * s, 1 * s],
                            [-1 * t, -1 * s, 1 * s, 0 * s],
                            [-1 * t, 0 * s, -2 * s, 0 * s],
                            [-1 * t, 0 * s, -1 * s, -1 * s],
                            [-1 * t, 0 * s, -1 * s, 1 * s],
                            [-1 * t, 0 * s, 0 * s, -2 * s],
                            [-1 * t, 0 * s, 0 * s, 0 * s],
                            [-1 * t, 0 * s, 0 * s, 2 * s],
                            [-1 * t, 0 * s, 1 * s, -1 * s],
                            [-1 * t, 0 * s, 1 * s, 1 * s],
                            [-1 * t, 0 * s, 2 * s, 0 * s],
                            [-1 * t, 1 * s, -1 * s, 0 * s],
                            [-1 * t, 1 * s, 0 * s, -1 * s],
                            [-1 * t, 1 * s, 0 * s, 1 * s],
                            [-1 * t, 1 * s, 1 * s, 0 * s],
                            [-1 * t, 2 * s, 0 * s, 0 * s],
                            [0 * t, -3 * s, 0 * s, 0 * s],
                            [0 * t, -2 * s, 0 * s, -1 * s],
                            [0 * t, -1 * s, 0 * s, -2 * s],
                            [0 * t, 0 * s, 0 * s, -3 * s],
                            [0 * t, -2 * s, -1 * s, 0 * s],
                            [0 * t, -1 * s, -1 * s, -1 * s],
                            [0 * t, 0 * s, -1 * s, -2 * s],
                            [0 * t, -2 * s, 1 * s, 0 * s],
                            [0 * t, -1 * s, 1 * s, -1 * s],
                            [0 * t, 0 * s, 1 * s, -2 * s],
                            [0 * t, -1 * s, -2 * s, 0 * s],
                            [0 * t, 0 * s, -2 * s, -1 * s],
                            [0 * t, -2 * s, 0 * s, 1 * s],
                            [0 * t, -1 * s, 0 * s, 0 * s],
                            [0 * t, 0 * s, 0 * s, -1 * s],
                            [0 * t, 1 * s, 0 * s, -2 * s],
                            [0 * t, -1 * s, 2 * s, 0 * s],
                            [0 * t, 0 * s, 2 * s, -1 * s],
                            [0 * t, 0 * s, -3 * s, 0 * s],
                            [0 * t, -1 * s, -1 * s, 1 * s],
                            [0 * t, 0 * s, -1 * s, 0 * s],
                            [0 * t, 1 * s, -1 * s, -1 * s],
                            [0 * t, -1 * s, 1 * s, 1 * s],
                            [0 * t, 0 * s, 1 * s, 0 * s],
                            [0 * t, 1 * s, 1 * s, -1 * s],
                            [0 * t, 0 * s, 3 * s, 0 * s],
                            [0 * t, 0 * s, -2 * s, 1 * s],
                            [0 * t, 1 * s, -2 * s, 0 * s],
                            [0 * t, -1 * s, 0 * s, 2 * s],
                            [0 * t, 0 * s, 0 * s, 1 * s],
                            [0 * t, 1 * s, 0 * s, 0 * s],
                            [0 * t, 2 * s, 0 * s, -1 * s],
                            [0 * t, 0 * s, 2 * s, 1 * s],
                            [0 * t, 1 * s, 2 * s, 0 * s],
                            [0 * t, 0 * s, -1 * s, 2 * s],
                            [0 * t, 1 * s, -1 * s, 1 * s],
                            [0 * t, 2 * s, -1 * s, 0 * s],
                            [0 * t, 0 * s, 1 * s, 2 * s],
                            [0 * t, 1 * s, 1 * s, 1 * s],
                            [0 * t, 2 * s, 1 * s, 0 * s],
                            [0 * t, 0 * s, 0 * s, 3 * s],
                            [0 * t, 1 * s, 0 * s, 2 * s],
                            [0 * t, 2 * s, 0 * s, 1 * s],
                            [0 * t, 3 * s, 0 * s, 0 * s],
                            [1 * t, -2 * s, 0 * s, 0 * s],
                            [1 * t, -1 * s, -1 * s, 0 * s],
                            [1 * t, -1 * s, 0 * s, -1 * s],
                            [1 * t, -1 * s, 0 * s, 1 * s],
                            [1 * t, -1 * s, 1 * s, 0 * s],
                            [1 * t, 0 * s, -2 * s, 0 * s],
                            [1 * t, 0 * s, -1 * s, -1 * s],
                            [1 * t, 0 * s, -1 * s, 1 * s],
                            [1 * t, 0 * s, 0 * s, -2 * s],
                            [1 * t, 0 * s, 0 * s, 0 * s],
                            [1 * t, 0 * s, 0 * s, 2 * s],
                            [1 * t, 0 * s, 1 * s, -1 * s],
                            [1 * t, 0 * s, 1 * s, 1 * s],
                            [1 * t, 0 * s, 2 * s, 0 * s],
                            [1 * t, 1 * s, -1 * s, 0 * s],
                            [1 * t, 1 * s, 0 * s, -1 * s],
                            [1 * t, 1 * s, 0 * s, 1 * s],
                            [1 * t, 1 * s, 1 * s, 0 * s],
                            [1 * t, 2 * s, 0 * s, 0 * s],
                            [2 * t, -1 * s, 0 * s, 0 * s],
                            [2 * t, 0 * s, -1 * s, 0 * s],
                            [2 * t, 0 * s, 0 * s, -1 * s],
                            [2 * t, 0 * s, 0 * s, 1 * s],
                            [2 * t, 0 * s, 1 * s, 0 * s],
                            [2 * t, 1 * s, 0 * s, 0 * s],
                            [3 * t, 0 * s, 0 * s, 0 * s]]
    return ('4D-lattice of octahedrons', P, C)
def get_latticeD3_oct_cut(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 3D-lattice of octahedrons (diamond cut).
    The first argument sets the size, the optional second argument sets
    a small time offset for each layer.
    Only flat (Minkowski) spacetime is supported.
    '''
    if spacetime != 'Minkowski' and spacetime != '':
        raise ValueError('Spacetime not supported')
    # permutation of the 34 events for the Hasse diagram
    P: List[int] = [1, 11, 21, 31, 8, 18, 28, 5, 15, 25,
                    2, 12, 22, 32, 9, 19, 29, 6, 16, 26,
                    3, 13, 23, 33, 10, 20, 30, 7, 17, 27,
                    4, 14, 24, 34]
    s: float = edge / 2
    t: float = s * (1 + eps)  # time step; eps slightly separates the layers
    # event coordinates (t, x, y); the first factor counts the time layer
    C: List[List[float]] = [[-3 * t, 0 * s, 0 * s],
                            [-2 * t, 0 * s, -1 * s],
                            [-1 * t, 0 * s, -2 * s],
                            [0 * t, 0 * s, -3 * s],
                            [-2 * t, -1 * s, 0 * s],
                            [-1 * t, -1 * s, -1 * s],
                            [0 * t, -1 * s, -2 * s],
                            [-2 * t, 1 * s, 0 * s],
                            [-1 * t, 1 * s, -1 * s],
                            [0 * t, 1 * s, -2 * s], # end part 1
                            [-2 * t, 0 * s, 1 * s],
                            [-1 * t, 0 * s, 0 * s],
                            [0 * t, 0 * s, -1 * s],
                            [1 * t, 0 * s, -2 * s],
                            [-1 * t, -1 * s, 1 * s],
                            [0 * t, -1 * s, 0 * s],
                            [1 * t, -1 * s, -1 * s],
                            [-1 * t, 1 * s, 1 * s],
                            [0 * t, 1 * s, 0 * s],
                            [1 * t, 1 * s, -1 * s], # end part 2
                            [-1 * t, 0 * s, 2 * s],
                            [0 * t, 0 * s, 1 * s],
                            [1 * t, 0 * s, 0 * s],
                            [2 * t, 0 * s, -1 * s],
                            [0 * t, -1 * s, 2 * s],
                            [1 * t, -1 * s, 1 * s],
                            [2 * t, -1 * s, 0 * s],
                            [0 * t, 1 * s, 2 * s],
                            [1 * t, 1 * s, 1 * s],
                            [2 * t, 1 * s, 0 * s], # end part 3
                            [0 * t, 0 * s, 3 * s],
                            [1 * t, 0 * s, 2 * s],
                            [2 * t, 0 * s, 1 * s],
                            [3 * t, 0 * s, 0 * s]]
    # NOTE(review): returned name omits the "diamond cut" qualifier and so
    # matches get_latticeD3_oct — confirm whether that is intended
    return ('3D-lattice of octahedrons', P, move_centre(C))
def get_latticeD3_hcp(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 3D-lattice in hexagonal close packing (HCP).
    The first argument sets the size, the optional second argument sets
    a small time offset for each layer.
    Only flat (Minkowski) spacetime is supported.
    '''
    if spacetime != 'Minkowski' and spacetime != '':
        raise ValueError('Spacetime not supported')
    # permutation of the 64 events for the Hasse diagram
    P: List[int] = [13, 9, 29, 25, 45, 41, 61, 57, 5, 1, 21, 17, 37, 33, 53, 49,
                    14, 10, 30, 26, 46, 42, 62, 58, 6, 2, 22, 18, 38, 34, 54, 50,
                    15, 11, 31, 27, 47, 43, 63, 59, 7, 3, 23, 19, 39, 35, 55, 51,
                    16, 12, 32, 28, 48, 44, 64, 60, 8, 4, 24, 20, 40, 36, 56, 52]
    a: float = edge / 2
    r: float = edge / sqrt(3.) # radius (from vertex to center)
    t: float = r * (1 + eps)  # time step; eps slightly separates the layers
    # event coordinates (t, x, y); rows grouped in runs of four (see # 1..4)
    C: List[List[float]] = [[-3 * t, 0 * a, -2.5 * r],
                            [-2 * t, -1 * a, -2.0 * r],
                            [-1 * t, -2 * a, -2.5 * r],
                            [0 * t, -3 * a, -2.0 * r], # 1
                            [-3 * t, 0 * a, 0.5 * r],
                            [-2 * t, -1 * a, 1.0 * r],
                            [-1 * t, -2 * a, 0.5 * r],
                            [0 * t, -3 * a, 1.0 * r], # 2
                            [-3 * t, 1 * a, -1.0 * r],
                            [-2 * t, 0 * a, -0.5 * r],
                            [-1 * t, -1 * a, -1.0 * r],
                            [0 * t, -2 * a, -0.5 * r], # 3
                            [-3 * t, 1 * a, 2.0 * r],
                            [-2 * t, 0 * a, 2.5 * r],
                            [-1 * t, -1 * a, 2.0 * r],
                            [0 * t, -2 * a, 2.5 * r], # 4
                            [-2 * t, 1 * a, -2.0 * r],
                            [-1 * t, 0 * a, -2.5 * r],
                            [0 * t, -1 * a, -2.0 * r],
                            [1 * t, -2 * a, -2.5 * r], # 1
                            [-2 * t, 1 * a, 1.0 * r],
                            [-1 * t, 0 * a, 0.5 * r],
                            [0 * t, -1 * a, 1.0 * r],
                            [1 * t, -2 * a, 0.5 * r], # 2
                            [-2 * t, 2 * a, -0.5 * r],
                            [-1 * t, 1 * a, -1.0 * r],
                            [0 * t, 0 * a, -0.5 * r],
                            [1 * t, -1 * a, -1.0 * r], # 3
                            [-2 * t, 2 * a, 2.5 * r],
                            [-1 * t, 1 * a, 2.0 * r],
                            [0 * t, 0 * a, 2.5 * r],
                            [1 * t, -1 * a, 2.0 * r], # 4
                            [-1 * t, 2 * a, -2.5 * r],
                            [0 * t, 1 * a, -2.0 * r],
                            [1 * t, 0 * a, -2.5 * r],
                            [2 * t, -1 * a, -2.0 * r], # 1
                            [-1 * t, 2 * a, 0.5 * r],
                            [0 * t, 1 * a, 1.0 * r],
                            [1 * t, 0 * a, 0.5 * r],
                            [2 * t, -1 * a, 1.0 * r], # 2
                            [-1 * t, 3 * a, -1.0 * r],
                            [0 * t, 2 * a, -0.5 * r],
                            [1 * t, 1 * a, -1.0 * r],
                            [2 * t, 0 * a, -0.5 * r], # 3
                            [-1 * t, 3 * a, 2.0 * r],
                            [0 * t, 2 * a, 2.5 * r],
                            [1 * t, 1 * a, 2.0 * r],
                            [2 * t, 0 * a, 2.5 * r], # 4
                            [0 * t, 3 * a, -2.0 * r],
                            [1 * t, 2 * a, -2.5 * r],
                            [2 * t, 1 * a, -2.0 * r],
                            [3 * t, 0 * a, -2.5 * r], # 1
                            [0 * t, 3 * a, 1.0 * r],
                            [1 * t, 2 * a, 0.5 * r],
                            [2 * t, 1 * a, 1.0 * r],
                            [3 * t, 0 * a, 0.5 * r], # 2
                            [0 * t, 4 * a, -0.5 * r],
                            [1 * t, 3 * a, -1.0 * r],
                            [2 * t, 2 * a, -0.5 * r],
                            [3 * t, 1 * a, -1.0 * r], # 3
                            [0 * t, 4 * a, 2.5 * r],
                            [1 * t, 3 * a, 2.0 * r],
                            [2 * t, 2 * a, 2.5 * r],
                            [3 * t, 1 * a, 2.0 * r]] # 4
    return ('3D-lattice in HCP', P, move_centre(C))
def get_latticeD3_fcc(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 3D fcc-lattice.
    The first argument sets the size, the optional second argument sets
    a small time offset for each layer.
    Only flat (Minkowski) spacetime is supported.
    '''
    if spacetime != 'Minkowski' and spacetime != '':
        raise ValueError('Spacetime not supported')
    # permutation of the 64 events for the Hasse diagram
    P: List[int] = [1, 9, 21, 37, 5, 17, 33, 49, 13, 29, 45, 57, 25, 41, 53, 61,
                    2, 10, 22, 38, 6, 18, 34, 50, 14, 30, 46, 58, 26, 42, 54, 62,
                    3, 11, 23, 39, 7, 19, 35, 51, 15, 31, 47, 59, 27, 43, 55, 63,
                    4, 12, 24, 40, 8, 20, 36, 52, 16, 32, 48, 60, 28, 44, 56, 64]
    a: float = edge / 2
    r: float = edge / sqrt(3.) # radius (from vertex to center)
    b: float = r / 2
    t: float = r * (1 + eps)  # time step; eps slightly separates the layers
    # event coordinates (t, x, y); time shifted to the centre below
    C: np.ndarray = np.array([[0 * t, 0 * a, 0 * b],
                              [1 * t, 1 * a, 1 * b],
                              [2 * t, 2 * a, 2 * b],
                              [3 * t, 3 * a, 3 * b],
                              [1 * t, -1 * a, 1 * b],
                              [2 * t, 0 * a, 2 * b],
                              [3 * t, 1 * a, 3 * b],
                              [4 * t, 2 * a, 4 * b], # 8
                              [1 * t, 0 * a, -1 * b],
                              [2 * t, 1 * a, 0 * b],
                              [3 * t, 2 * a, 1 * b],
                              [4 * t, 3 * a, 2 * b],
                              [2 * t, -2 * a, 2 * b],
                              [3 * t, -1 * a, 3 * b],
                              [4 * t, 0 * a, 4 * b],
                              [5 * t, 1 * a, 5 * b], # 16
                              [2 * t, -1 * a, 0 * b],
                              [3 * t, 0 * a, 1 * b],
                              [4 * t, 1 * a, 2 * b],
                              [5 * t, 2 * a, 3 * b],
                              [2 * t, 0 * a, -2 * b],
                              [3 * t, 1 * a, -1 * b],
                              [4 * t, 2 * a, 0 * b],
                              [5 * t, 3 * a, 1 * b], # 24
                              [3 * t, -3 * a, 3 * b],
                              [4 * t, -2 * a, 4 * b],
                              [5 * t, -1 * a, 5 * b],
                              [6 * t, 0 * a, 6 * b], # end 1
                              [3 * t, -2 * a, 1 * b],
                              [4 * t, -1 * a, 2 * b],
                              [5 * t, 0 * a, 3 * b],
                              [6 * t, 1 * a, 4 * b], # 32
                              [3 * t, -1 * a, -1 * b],
                              [4 * t, 0 * a, 0 * b],
                              [5 * t, 1 * a, 1 * b],
                              [6 * t, 2 * a, 2 * b],
                              [3 * t, 0 * a, -3 * b],
                              [4 * t, 1 * a, -2 * b],
                              [5 * t, 2 * a, -1 * b],
                              [6 * t, 3 * a, 0 * b], # 40
                              [4 * t, -3 * a, 2 * b],
                              [5 * t, -2 * a, 3 * b],
                              [6 * t, -1 * a, 4 * b],
                              [7 * t, 0 * a, 5 * b], # end 2
                              [4 * t, -2 * a, 0 * b],
                              [5 * t, -1 * a, 1 * b],
                              [6 * t, 0 * a, 2 * b],
                              [7 * t, 1 * a, 3 * b], # 48
                              [4 * t, -1 * a, -2 * b],
                              [5 * t, 0 * a, -1 * b],
                              [6 * t, 1 * a, 0 * b],
                              [7 * t, 2 * a, 1 * b],
                              [5 * t, -3 * a, 1 * b],
                              [6 * t, -2 * a, 2 * b],
                              [7 * t, -1 * a, 3 * b],
                              [8 * t, 0 * a, 4 * b], # end 3
                              [5 * t, -2 * a, -1 * b],
                              [6 * t, -1 * a, 0 * b],
                              [7 * t, 0 * a, 1 * b],
                              [8 * t, 1 * a, 2 * b],
                              [6 * t, -3 * a, 0 * b],
                              [7 * t, -2 * a, 1 * b],
                              [8 * t, -1 * a, 2 * b],
                              [9 * t, 0 * a, 3 * b]])
    # shift the time coordinates (which run from 0 to 9 t) towards the middle
    C[:, 0] = C[:, 0] - 4.5 * t
    return ('3D-lattice in FCC', P, move_centre(C.tolist()))
def get_latticeD3_rho(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 3D-lattice of rhombohedrons.
    The first argument sets the size, the optional second argument sets
    a small time offset for each layer.
    Only flat (Minkowski) spacetime is supported.
    '''
    if spacetime != 'Minkowski' and spacetime != '':
        raise ValueError('Spacetime not supported')
    # permutation of the 64 events for the Hasse diagram
    P: List[int] = [13, 9, 29, 25, 45, 41, 61, 57, 5, 1, 21, 17, 37, 33, 53, 49,
                    14, 10, 30, 26, 46, 42, 62, 58, 6, 2, 22, 18, 38, 34, 54, 50,
                    15, 11, 31, 27, 47, 43, 63, 59, 7, 3, 23, 19, 39, 35, 55, 51,
                    16, 12, 32, 28, 48, 44, 64, 60, 8, 4, 24, 20, 40, 36, 56, 52]
    s: float = edge / 2
    t: float = s * (1 + eps)  # time step; eps slightly separates the layers
    # event coordinates (t, x, y); the first factor counts the time layer
    C: List[List[float]] = [[-3 * t, -1.5 * s, 0 * s],
                            [-2 * t, -1.5 * s, -1 * s],
                            [-1 * t, -1.5 * s, -2 * s],
                            [0 * t, -1.5 * s, -3 * s],
                            [-3 * t, 0.5 * s, 1 * s],
                            [-2 * t, 0.5 * s, 0 * s],
                            [-1 * t, 0.5 * s, -1 * s],
                            [0 * t, 0.5 * s, -2 * s],
                            [-3 * t, -0.5 * s, 0 * s],
                            [-2 * t, -0.5 * s, -1 * s],
                            [-1 * t, -0.5 * s, -2 * s],
                            [0 * t, -0.5 * s, -3 * s],
                            [-3 * t, 1.5 * s, 1 * s],
                            [-2 * t, 1.5 * s, 0 * s],
                            [-1 * t, 1.5 * s, -1 * s],
                            [0 * t, 1.5 * s, -2 * s],
                            [-2 * t, -1.5 * s, 1 * s],
                            [-1 * t, -1.5 * s, 0 * s],
                            [0 * t, -1.5 * s, -1 * s],
                            [1 * t, -1.5 * s, -2 * s],
                            [-2 * t, 0.5 * s, 2 * s],
                            [-1 * t, 0.5 * s, 1 * s],
                            [0 * t, 0.5 * s, 0 * s],
                            [1 * t, 0.5 * s, -1 * s],
                            [-2 * t, -0.5 * s, 1 * s],
                            [-1 * t, -0.5 * s, 0 * s],
                            [0 * t, -0.5 * s, -1 * s],
                            [1 * t, -0.5 * s, -2 * s],
                            [-2 * t, 1.5 * s, 2 * s],
                            [-1 * t, 1.5 * s, 1 * s],
                            [0 * t, 1.5 * s, 0 * s],
                            [1 * t, 1.5 * s, -1 * s],
                            [-1 * t, -1.5 * s, 2 * s],
                            [0 * t, -1.5 * s, 1 * s],
                            [1 * t, -1.5 * s, 0 * s],
                            [2 * t, -1.5 * s, -1 * s],
                            [-1 * t, 0.5 * s, 3 * s],
                            [0 * t, 0.5 * s, 2 * s],
                            [1 * t, 0.5 * s, 1 * s],
                            [2 * t, 0.5 * s, 0 * s],
                            [-1 * t, -0.5 * s, 2 * s],
                            [0 * t, -0.5 * s, 1 * s],
                            [1 * t, -0.5 * s, 0 * s],
                            [2 * t, -0.5 * s, -1 * s],
                            [-1 * t, 1.5 * s, 3 * s],
                            [0 * t, 1.5 * s, 2 * s],
                            [1 * t, 1.5 * s, 1 * s],
                            [2 * t, 1.5 * s, 0 * s],
                            [0 * t, -1.5 * s, 3 * s],
                            [1 * t, -1.5 * s, 2 * s],
                            [2 * t, -1.5 * s, 1 * s],
                            [3 * t, -1.5 * s, 0 * s],
                            [0 * t, 0.5 * s, 4 * s],
                            [1 * t, 0.5 * s, 3 * s],
                            [2 * t, 0.5 * s, 2 * s],
                            [3 * t, 0.5 * s, 1 * s],
                            [0 * t, -0.5 * s, 3 * s],
                            [1 * t, -0.5 * s, 2 * s],
                            [2 * t, -0.5 * s, 1 * s],
                            [3 * t, -0.5 * s, 0 * s],
                            [0 * t, 1.5 * s, 4 * s],
                            [1 * t, 1.5 * s, 3 * s],
                            [2 * t, 1.5 * s, 2 * s],
                            [3 * t, 1.5 * s, 1 * s]]
    return ('3D-lattice of rhombohedrons', P, move_centre(C))
def get_latticeD3_slab(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 3D-lattice slab.
    The first argument sets the size, the optional second argument sets
    a small time offset for each layer.
    Only flat (Minkowski) spacetime is supported.
    '''
    if spacetime != 'Minkowski' and spacetime != '':
        raise ValueError('Spacetime not supported')
    # permutation of the 82 events for the Hasse diagram
    P: List[int] = [17, 13, 27, 9, 22,
                    6, 18, 32, 4, 14, 28, 42, 2, 10, 23, 37,
                    1, 7, 19, 33, 47, 5, 15, 29, 43, 57, 3, 11, 24, 38, 52,
                    8, 20, 34, 48, 62, 16, 30, 44, 58, 71, 12, 25, 39, 53, 67,
                    21, 35, 49, 63, 75, 31, 45, 59, 72, 80, 26, 40, 54, 68, 78,
                    36, 50, 64, 76, 82, 46, 60, 73, 81, 41, 55, 69, 79,
                    51, 65, 77, 61, 74, 56, 70, 66]
    s: float = edge / sqrt(2)
    t: float = s * (1 + eps)  # time step; eps slightly separates the layers
    # event coordinates (t, x, y); the first factor counts the time layer
    C: List[List[float]] = [[-5 * t, -2 * s, 0 * s],
                            [-5 * t, -1 * s, -1 * s],
                            [-4 * t, -2 * s, -1 * s],
                            [-5 * t, -1 * s, 1 * s],
                            [-4 * t, -2 * s, 1 * s],
                            [-5 * t, 0 * s, 0 * s],
                            [-4 * t, -1 * s, 0 * s],
                            [-3 * t, -2 * s, 0 * s],
                            [-5 * t, 1 * s, -1 * s],
                            [-4 * t, 0 * s, -1 * s], # 10
                            [-3 * t, -1 * s, -1 * s],
                            [-2 * t, -2 * s, -1 * s],
                            [-5 * t, 1 * s, 1 * s],
                            [-4 * t, 0 * s, 1 * s],
                            [-3 * t, -1 * s, 1 * s],
                            [-2 * t, -2 * s, 1 * s],
                            [-5 * t, 2 * s, 0 * s],
                            [-4 * t, 1 * s, 0 * s],
                            [-3 * t, 0 * s, 0 * s],
                            [-2 * t, -1 * s, 0 * s], # 20
                            [-1 * t, -2 * s, 0 * s],
                            [-4 * t, 2 * s, -1 * s],
                            [-3 * t, 1 * s, -1 * s],
                            [-2 * t, 0 * s, -1 * s],
                            [-1 * t, -1 * s, -1 * s],
                            [0 * t, -2 * s, -1 * s],
                            [-4 * t, 2 * s, 1 * s],
                            [-3 * t, 1 * s, 1 * s],
                            [-2 * t, 0 * s, 1 * s],
                            [-1 * t, -1 * s, 1 * s], # 30
                            [0 * t, -2 * s, 1 * s],
                            [-3 * t, 2 * s, 0 * s],
                            [-2 * t, 1 * s, 0 * s],
                            [-1 * t, 0 * s, 0 * s],
                            [0 * t, -1 * s, 0 * s],
                            [1 * t, -2 * s, 0 * s],
                            [-2 * t, 2 * s, -1 * s],
                            [-1 * t, 1 * s, -1 * s],
                            [0 * t, 0 * s, -1 * s],
                            [1 * t, -1 * s, -1 * s], # 40
                            [2 * t, -2 * s, -1 * s],
                            [-2 * t, 2 * s, 1 * s],
                            [-1 * t, 1 * s, 1 * s],
                            [0 * t, 0 * s, 1 * s],
                            [1 * t, -1 * s, 1 * s],
                            [2 * t, -2 * s, 1 * s],
                            [-1 * t, 2 * s, 0 * s],
                            [0 * t, 1 * s, 0 * s],
                            [1 * t, 0 * s, 0 * s],
                            [2 * t, -1 * s, 0 * s], # 50
                            [3 * t, -2 * s, 0 * s],
                            [0 * t, 2 * s, -1 * s],
                            [1 * t, 1 * s, -1 * s],
                            [2 * t, 0 * s, -1 * s],
                            [3 * t, -1 * s, -1 * s],
                            [4 * t, -2 * s, -1 * s],
                            [0 * t, 2 * s, 1 * s],
                            [1 * t, 1 * s, 1 * s],
                            [2 * t, 0 * s, 1 * s],
                            [3 * t, -1 * s, 1 * s], # 60
                            [4 * t, -2 * s, 1 * s],
                            [1 * t, 2 * s, 0 * s],
                            [2 * t, 1 * s, 0 * s],
                            [3 * t, 0 * s, 0 * s],
                            [4 * t, -1 * s, 0 * s],
                            [5 * t, -2 * s, 0 * s],
                            [2 * t, 2 * s, -1 * s],
                            [3 * t, 1 * s, -1 * s],
                            [4 * t, 0 * s, -1 * s],
                            [5 * t, -1 * s, -1 * s], # 70
                            [2 * t, 2 * s, 1 * s],
                            [3 * t, 1 * s, 1 * s],
                            [4 * t, 0 * s, 1 * s],
                            [5 * t, -1 * s, 1 * s],
                            [3 * t, 2 * s, 0 * s],
                            [4 * t, 1 * s, 0 * s],
                            [5 * t, 0 * s, 0 * s],
                            [4 * t, 2 * s, -1 * s],
                            [5 * t, 1 * s, -1 * s],
                            [4 * t, 2 * s, 1 * s], # 80
                            [5 * t, 1 * s, 1 * s],
                            [5 * t, 2 * s, 0 * s]]
    return ('3D-lattice (slab)', P, move_centre(C))
def get_latticeD3_slabpinf(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 3D-lattice slab past (4-step past infinity).
    The first argument sets the size, the optional second argument sets
    a small time offset for each layer.
    Only flat (Minkowski) spacetime is supported.
    '''
    if spacetime != 'Minkowski' and spacetime != '':
        raise ValueError('Spacetime not supported')
    # permutation of the 30 events for the Hasse diagram
    P: List[int] = [17, 13, 24, 9, 21,
                    6, 18, 27, 4, 14, 25, 30, 2, 10, 22, 29,
                    1, 7, 19, 28, 5, 15, 26, 3, 11, 23,
                    8, 20, 16, 12]
    s: float = edge / sqrt(2)
    t: float = s * (1 + eps)  # time step; eps slightly separates the layers
    # event coordinates (t, x, y); the first factor counts the time layer
    C: List[List[float]] = [[-5 * t, -2 * s, 0 * s],
                            [-5 * t, -1 * s, -1 * s],
                            [-4 * t, -2 * s, -1 * s],
                            [-5 * t, -1 * s, 1 * s],
                            [-4 * t, -2 * s, 1 * s],
                            [-5 * t, 0 * s, 0 * s],
                            [-4 * t, -1 * s, 0 * s],
                            [-3 * t, -2 * s, 0 * s],
                            [-5 * t, 1 * s, -1 * s],
                            [-4 * t, 0 * s, -1 * s], # 10
                            [-3 * t, -1 * s, -1 * s],
                            [-2 * t, -2 * s, -1 * s],
                            [-5 * t, 1 * s, 1 * s],
                            [-4 * t, 0 * s, 1 * s],
                            [-3 * t, -1 * s, 1 * s],
                            [-2 * t, -2 * s, 1 * s],
                            [-5 * t, 2 * s, 0 * s],
                            [-4 * t, 1 * s, 0 * s],
                            [-3 * t, 0 * s, 0 * s],
                            [-2 * t, -1 * s, 0 * s], # 20
                            [-4 * t, 2 * s, -1 * s],
                            [-3 * t, 1 * s, -1 * s],
                            [-2 * t, 0 * s, -1 * s],
                            [-4 * t, 2 * s, 1 * s],
                            [-3 * t, 1 * s, 1 * s],
                            [-2 * t, 0 * s, 1 * s],
                            [-3 * t, 2 * s, 0 * s],
                            [-2 * t, 1 * s, 0 * s],
                            [-2 * t, 2 * s, -1 * s],
                            [-2 * t, 2 * s, 1 * s]] # 30
    return ('3D-lattice (slab, past inf.)', P, move_centre(C))
def get_latticeD3_slabfinf(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 3D-lattice slab future (4-step future infinity).
    The first argument sets the size, the optional second argument sets
    a small time offset for each layer.
    Only flat (Minkowski) spacetime is supported.
    '''
    if spacetime != 'Minkowski' and spacetime != '':
        raise ValueError('Spacetime not supported')
    # permutation of the 30 events for the Hasse diagram
    P: List[int] = [19, 15, 11, 23,
                    8, 20, 28, 5, 16, 26, 3, 12, 24, 30,
                    2, 9, 21, 29, 1, 6, 17, 27, 4, 13, 25,
                    10, 22, 7, 18, 14]
    s: float = edge / sqrt(2)
    t: float = s * (1 + eps)  # time step; eps slightly separates the layers
    # event coordinates (t, x, y); the first factor counts the time layer
    C: List[List[float]] = [[2 * t, -2 * s, -1 * s],
                            [2 * t, -2 * s, 1 * s],
                            [2 * t, -1 * s, 0 * s],
                            [3 * t, -2 * s, 0 * s],
                            [2 * t, 0 * s, -1 * s],
                            [3 * t, -1 * s, -1 * s],
                            [4 * t, -2 * s, -1 * s],
                            [2 * t, 0 * s, 1 * s],
                            [3 * t, -1 * s, 1 * s],
                            [4 * t, -2 * s, 1 * s], # 10
                            [2 * t, 1 * s, 0 * s],
                            [3 * t, 0 * s, 0 * s],
                            [4 * t, -1 * s, 0 * s],
                            [5 * t, -2 * s, 0 * s],
                            [2 * t, 2 * s, -1 * s],
                            [3 * t, 1 * s, -1 * s],
                            [4 * t, 0 * s, -1 * s],
                            [5 * t, -1 * s, -1 * s],
                            [2 * t, 2 * s, 1 * s],
                            [3 * t, 1 * s, 1 * s], # 20
                            [4 * t, 0 * s, 1 * s],
                            [5 * t, -1 * s, 1 * s],
                            [3 * t, 2 * s, 0 * s],
                            [4 * t, 1 * s, 0 * s],
                            [5 * t, 0 * s, 0 * s],
                            [4 * t, 2 * s, -1 * s],
                            [5 * t, 1 * s, -1 * s],
                            [4 * t, 2 * s, 1 * s],
                            [5 * t, 1 * s, 1 * s],
                            [5 * t, 2 * s, 0 * s]] # 30
    return ('3D-lattice (slab, future inf.)', P, move_centre(C))
def get_latticeD3_slabpert(edge: float, eps: float = default_eps, spacetime: str = '') -> \
        Tuple[str, List[int], List[List[float]]]:
    '''
    Returns the name, event permutation (for Hasse diagrams), and event
    coordinates of a 3D-lattice slab, perturbed.
    The first argument sets the size, the optional second argument
    sets a small time offset for each layer.
    Only flat (Minkowski) spacetime is supported.
    '''
    if spacetime != 'Minkowski' and spacetime != '':
        raise ValueError('Spacetime not supported')
    # permutation of the 85 events for the Hasse diagram
    P: List[int] = [17, 13, 27, 9, 22,
                    6, 18, 32, 4, 14, 28, 42, 2, 10, 23, 37,
                    1, 7, 19, 33, 49, 5, 15, 29, 43, 60, 3, 11, 24, 38, 55,
                    8, 20, 34, 51, 65, 16, 30, 35, 46, 45, 61, 74, 12, 25, 39, 50, 56, 70,
                    21, 44, 52, 66, 78, 31, 47, 62, 75, 83, 26, 40, 57, 71, 81,
                    36, 53, 67, 79, 85, 48, 63, 76, 84, 41, 58, 72, 82,
                    54, 68, 80, 64, 77, 59, 73,
                    69]
    s: float = edge / sqrt(2)
    t: float = s * (1 + eps)  # time step; eps slightly separates the layers
    # event coordinates (t, x, y) of the regular slab lattice; a few events
    # are displaced off the lattice sites (marked "pert" below)
    C: List[List[float]] = [[-5 * t, -2 * s, 0 * s],
                            [-5 * t, -1 * s, -1 * s],
                            [-4 * t, -2 * s, -1 * s],
                            [-5 * t, -1 * s, 1 * s],
                            [-4 * t, -2 * s, 1 * s],
                            [-5 * t, 0 * s, 0 * s],
                            [-4 * t, -1 * s, 0 * s],
                            [-3 * t, -2 * s, 0 * s],
                            [-5 * t, 1 * s, -1 * s],
                            [-4 * t, 0 * s, -1 * s], # 10
                            [-3 * t, -1 * s, -1 * s],
                            [-2 * t, -2 * s, -1 * s],
                            [-5 * t, 1 * s, 1 * s],
                            [-4 * t, 0 * s, 1 * s],
                            [-3 * t, -1 * s, 1 * s],
                            [-2 * t, -2 * s, 1 * s],
                            [-5 * t, 2 * s, 0 * s],
                            [-4 * t, 1 * s, 0 * s],
                            [-3 * t, 0 * s, 0 * s],
                            [-2 * t, -1 * s, 0 * s], # 20
                            [-1 * t, -2 * s, 0 * s],
                            [-4 * t, 2 * s, -1 * s],
                            [-3 * t, 1 * s, -1 * s],
                            [-2 * t, 0 * s, -1 * s],
                            [-1 * t, -1 * s, -1 * s], # 25
                            [0 * t, -2 * s, -1 * s],
                            [-4 * t, 2 * s, 1 * s],
                            [-3 * t, 1 * s, 1 * s],
                            [-2 * t, 0 * s, 1 * s],
                            [-1 * t, -1 * s, 1 * s], # 30
                            [0 * t, -2 * s, 1 * s],
                            [-3 * t, 2 * s, 0 * s],
                            [-2 * t, 1 * s, 0 * s],
                            [-1 * t, 0 * s, 0 * s],
                            [-0.5 * t, -0.5 * s, 0 * s], # pert 35
                            [1 * t, -2 * s, 0 * s],
                            [-2 * t, 2 * s, -1 * s],
                            [-1 * t, 1 * s, -1 * s],
                            [-0 * t, 0 * s, -1 * s],
                            [1 * t, -1 * s, -1 * s], # 40
                            [2 * t, -2 * s, -1 * s],
                            [-2 * t, 2 * s, 1 * s],
                            [-1 * t, 1 * s, 1 * s],
                            [0.5 * t, -5 / 6 * s, 5 / 6 * s], # pert 44
                            [0 * t, 0 * s, 1 * s],
                            [0 * t, 0.5 * s, -0.25 * s], # pert 46
                            [1 * t, -1 * s, 1 * s],
                            [2 * t, -2 * s, 1 * s],
                            [-1 * t, 2 * s, 0 * s],
                            [0.5 * t, 0.25 * s, 0.15 * s], # pert 50
                            [0 * t, 1 * s, 0 * s],
                            [1 * t, 0 * s, 0 * s],
                            [2 * t, -1 * s, 0 * s],
                            [3 * t, -2 * s, 0 * s],
                            [0 * t, 2 * s, -1 * s],
                            [1 * t, 1 * s, -1 * s],
                            [2 * t, 0 * s, -1 * s],
                            [3 * t, -1 * s, -1 * s],
                            [4 * t, -2 * s, -1 * s],
                            [0 * t, 2 * s, 1 * s], # 60
                            [1 * t, 1 * s, 1 * s],
                            [2 * t, 0 * s, 1 * s],
                            [3 * t, -1 * s, 1 * s],
                            [4 * t, -2 * s, 1 * s],
                            [1 * t, 2 * s, 0 * s],
                            [2 * t, 1 * s, 0 * s],
                            [3 * t, 0 * s, 0 * s],
                            [4 * t, -1 * s, 0 * s],
                            [5 * t, -2 * s, 0 * s],
                            [2 * t, 2 * s, -1 * s], # 70
                            [3 * t, 1 * s, -1 * s],
                            [4 * t, 0 * s, -1 * s],
                            [5 * t, -1 * s, -1 * s],
                            [2 * t, 2 * s, 1 * s],
                            [3 * t, 1 * s, 1 * s],
                            [4 * t, 0 * s, 1 * s],
                            [5 * t, -1 * s, 1 * s],
                            [3 * t, 2 * s, 0 * s],
                            [4 * t, 1 * s, 0 * s],
                            [5 * t, 0 * s, 0 * s], # 80
                            [4 * t, 2 * s, -1 * s],
                            [5 * t, 1 * s, -1 * s],
                            [4 * t, 2 * s, 1 * s],
                            [5 * t, 1 * s, 1 * s],
                            [5 * t, 2 * s, 0 * s]]
    return ('3D-lattice (slab, perturbed)', P, move_centre(C))
|
"""
Tests for hashtag.
"""
import responses
def test_get_info(helpers, api):
    """get_info should hit the hashtag node endpoint and parse the response."""
    hashtag_id = "17843826142012701"

    with responses.RequestsMock() as mock:
        mock.add(
            method=responses.GET,
            url=f"https://graph.facebook.com/{api.version}/{hashtag_id}",
            json=helpers.load_json(
                "testdata/instagram/apidata/hashtags/hashtag_info.json"
            ),
        )

        hashtag = api.hashtag.get_info(hashtag_id=hashtag_id)
        assert hashtag.id == hashtag_id

        hashtag_json = api.hashtag.get_info(
            hashtag_id=hashtag_id, fields="id,name", return_json=True
        )
        assert hashtag_json["id"] == hashtag_id
def test_get_batch(helpers, api):
    """Batch hashtag lookup should work in model and raw-json mode."""
    hashtag_ids = ["17843826142012701", "17841593698074073"]
    first_id = hashtag_ids[0]

    with responses.RequestsMock() as mock:
        mock.add(
            method=responses.GET,
            url=f"https://graph.facebook.com/{api.version}",
            json=helpers.load_json(
                "testdata/instagram/apidata/hashtags/hashtags_info.json"
            ),
        )

        hashtags = api.hashtag.get_batch(ids=hashtag_ids)
        assert hashtags[first_id].id == first_id

        hashtags_json = api.hashtag.get_batch(
            ids=hashtag_ids, fields="id,name", return_json=True
        )
        assert hashtags_json[first_id]["id"] == first_id
def test_get_top_media(helpers, api):
    """Top-media should page through results and honour the count parameter."""
    hashtag_id = "17841562426109234"
    endpoint = f"https://graph.facebook.com/{api.version}/{hashtag_id}/top_media"

    with responses.RequestsMock() as mock:
        # register both result pages for the paginated request
        for page in ("p1", "p2"):
            mock.add(
                method=responses.GET,
                url=endpoint,
                json=helpers.load_json(
                    f"testdata/instagram/apidata/hashtags/hashtag_top_medias_{page}.json"
                ),
            )

        top_media = api.hashtag.get_top_media(
            hashtag_id=hashtag_id, count=None, limit=25
        )
        assert len(top_media.data) == 50

        top_media_json = api.hashtag.get_top_media(
            hashtag_id=hashtag_id, count=10, return_json=True
        )
        assert len(top_media_json["data"]) == 10
def test_get_recent_media(helpers, api):
    """Recent-media paging: limit-driven full fetch and count-capped fetch."""
    target_id = "17841562426109234"
    endpoint = f"https://graph.facebook.com/{api.version}/{target_id}/recent_media"
    with responses.RequestsMock() as mocked:
        # Queue two pages; the client follows paging cursors in order.
        mocked.add(
            method=responses.GET,
            url=endpoint,
            json=helpers.load_json(
                "testdata/instagram/apidata/hashtags/hashtag_recent_medias_p1.json"
            ),
        )
        mocked.add(
            method=responses.GET,
            url=endpoint,
            json=helpers.load_json(
                "testdata/instagram/apidata/hashtags/hashtag_recent_medias_p2.json"
            ),
        )
        # count=None -> exhaust both pages (5 items each).
        all_media = api.hashtag.get_recent_media(
            hashtag_id=target_id, count=None, limit=5
        )
        assert len(all_media.data) == 10
        # count=5 -> stop early and hand back raw JSON.
        capped = api.hashtag.get_recent_media(
            hashtag_id=target_id, count=5, return_json=True
        )
        assert len(capped["data"]) == 5
|
"""oauth 1.0 flow for khan-api"""
from __future__ import print_function, unicode_literals
import requests_oauthlib
import requests
import urlparse
import webbrowser
from six.moves import input
# package specific
import pkaaw.constants
def get_request_tokens(consumer_key, consumer_secret):
    """Start the OAuth 1.0 dance by fetching a request token.

    Returns the OAuth1Session so later steps can reuse its credentials.
    """
    session = requests_oauthlib.OAuth1Session(
        client_key=consumer_key, client_secret=consumer_secret
    )
    session.fetch_request_token(pkaaw.constants.REQUEST_TOKEN_URL)
    return session
def console_auth(khan_auth):
    """Interactive authorization step for console sessions.

    Opens the provider's authorization page in a browser, then asks the
    user to paste the redirect URL back so the verifier can be extracted.
    """
    auth_page = khan_auth.authorization_url(pkaaw.constants.AUTHORIZATION_URL)
    webbrowser.open(auth_page, new=0, autoraise=True)
    pasted = input('Paste the full redirect URL here: ')
    khan_auth.parse_authorization_response(pasted)
    return khan_auth
def fetch_access_token(khan_auth):
    """Exchange the authorized request token for an access token.

    Args:
        khan_auth: an OAuth1Session that has completed the authorization
            step (its client carries the resource-owner token/secret).

    Returns:
        dict with 'access_token' and 'access_token_secret' strings.
    """
    # Local compat import: the module-level `import urlparse` is
    # Python-2-only and breaks this module on Python 3.
    try:
        from urllib.parse import parse_qs  # Python 3
    except ImportError:  # pragma: no cover - Python 2
        from urlparse import parse_qs
    keys = khan_auth.auth.client
    oauth = requests_oauthlib.OAuth1(
        client_key=keys.client_key,
        client_secret=keys.client_secret,
        resource_owner_key=keys.resource_owner_key,
        resource_owner_secret=keys.resource_owner_secret
    )
    r = requests.post(
        url=pkaaw.constants.ACCESS_TOKEN_URL,
        auth=oauth
    )
    # Parse the decoded text, not raw bytes: on Python 3, parse_qs(bytes)
    # yields bytes keys so the str lookups below would return None.
    credentials = parse_qs(r.text)
    tokens = {
        'access_token': credentials.get('oauth_token')[0],
        'access_token_secret': credentials.get('oauth_token_secret')[0]
    }
    return tokens
|
from multiprocessing import Process, Queue, Array
import ctypes
import sys
import os
import mappy as mp
import numpy as np
import pysam
from datetime import datetime
from datetime import date
from . import version
def load_manifest(path, preset):
    """Load the reference manifest, keeping only entries for `preset`.

    Each non-comment line must hold at least three whitespace-separated
    fields: reference name, reference path, and preset.

    Args:
        path: path to the manifest file.
        preset: minimap2 preset name used to select manifest rows.

    Returns:
        dict with the preset and a list of {"name", "path"} references.

    Exits the process (EX_CONFIG / EX_DATAERR) on a malformed or empty
    manifest, matching the original CLI behaviour.
    """
    manifest = {
        "preset": preset,
        "references": [
        ],
    }
    with open(path) as manifest_fh:
        for line_i, line in enumerate(manifest_fh):
            stripped = line.strip()
            # Skip blank lines and comments BEFORE validating the column
            # count; previously a blank line tripped the hard-exit below.
            if not stripped or stripped.startswith('#'):
                continue
            fields = stripped.split() # split on any whitespace if you have whitespace in your ref name you have bigger problems
            if len(fields) < 3:
                sys.stderr.write("[FAIL] Manifest did not contain a third column mapping a reference to a preset\n")
                sys.stderr.write("    Consult the README to ensure you are using a manifest suitable for dehumaniser >= 0.9.0\n")
                sys.exit(78) # EX_CONFIG
            if fields[2] != preset:
                continue
            manifest["references"].append({
                "name": fields[0],
                "path": fields[1],
            })
    if len(manifest["references"]) == 0:
        sys.stderr.write("[FAIL] Manifest did not contain any references for preset=%s\n" % preset)
        sys.stderr.write("    Consult the README to ensure your manifest is correctly configured and for\n")
        sys.stderr.write("    instructions on how to build your own indexes if needed\n")
        sys.exit(65) # EX_DATAERR
    else:
        sys.stderr.write("[NOTE] Detected %d references in manifest for preset=%s\n" % (len(manifest["references"]), preset))
    return manifest
def dh_bam(log, manifest, bad_set, args):
    """Dehumanize a BAM: drop reads hitting any manifest reference.

    Three passes over the dirty BAM:
      1. count reads (via the index when present),
      2. flag bad reads in a boolean mask (reference hits, under-aligned
         "trash" reads, and reads on the known-bad list),
      3. write every unflagged read to the clean BAM, also dropping any
         record whose QNAME was flagged elsewhere (collateral).

    Args:
        log: open handle receiving one tab-separated summary line.
        manifest: output of load_manifest (preset + reference list).
        bad_set: set of QNAMEs known to be dirty (always dropped).
        args: parsed CLI namespace (dirty, clean, minid, minlen, ...).
    """
    dirty_bam = pysam.AlignmentFile(args.dirty)

    # Append our own PG record to the clean BAM's header.
    dirty_header = dirty_bam.header.as_dict()
    pg_date = date.today().strftime("%Y%m%d")
    if args.pg_date:
        if len(args.pg_date) > 0:
            pg_date = args.pg_date
    if "PG" not in dirty_header:
        dirty_header["PG"] = []
    dirty_header["PG"].append({
        "ID": 'dehumanizer.%s' % pg_date,
        "PN": 'dehumanizer',
        "VN": version.__version__,
        "CL": " ".join(sys.argv),
    })
    clean_header = pysam.AlignmentHeader.from_dict(dirty_header)
    clean_bam = pysam.AlignmentFile(args.clean, "wb", header=clean_header)

    break_first = not args.nobreak # break on first hit, otherwise we can use this to 'survey' hits to different databases

    aligners = []
    each_dropped = []
    for ref_i, ref_manifest in enumerate(manifest["references"]):
        sys.stderr.write("[INFO] Init minimap2 aligner: %s (%s)\n" % (ref_manifest["path"], manifest["preset"]))
        aligners.append( mp.Aligner(ref_manifest["path"], preset=manifest["preset"]) )
        each_dropped.append(0)
    sys.stderr.write("[INFO] minimap2 aligners ready.\n")

    n_seqs = 0
    n_good = 0
    n_trash = 0
    n_known = 0
    n_collateral = 0
    n_baddies = 0
    bad_seen = set([])

    if dirty_bam.has_index():
        n_seqs = dirty_bam.mapped + dirty_bam.unmapped
    else:
        # First pass to get the number of sequences without an index
        for read in dirty_bam.fetch(until_eof=True):
            n_seqs += 1
    dirty_bam.close()

    # np.bool was removed in NumPy 1.24; the builtin bool is the
    # documented replacement for a boolean dtype here.
    bad_mask = np.zeros(n_seqs, dtype=bool)

    # Second pass to establish a bit mask of what to keep
    dirty_bam = pysam.AlignmentFile(args.dirty)
    for r_i, read in enumerate(dirty_bam.fetch(until_eof=True)):
        if not read.query_sequence:
            continue # supp alignment or something, its up to the user to trash these

        read_is_bad = False

        for ref_i, ref_manifest in enumerate(manifest["references"]):
            for hit in aligners[ref_i].map(read.query_sequence):
                # NOTE(review): when only ONE of --minlen/--minid is set,
                # this branch counts any hit as bad and skips the threshold
                # below, whereas dh_fastx applies each threshold
                # independently — confirm which behaviour is intended.
                if not args.minlen or not args.minid:
                    # a hit is a hit
                    read_is_bad = True
                else:
                    if args.minlen:
                        st = min(hit.q_st, hit.q_en)
                        en = max(hit.q_st, hit.q_en)
                        if ((en - st) / len(read.query_sequence)) * 100 >= args.minlen:
                            read_is_bad = True
                    if args.minid:
                        # http://lh3.github.io/2018/11/25/on-the-definition-of-sequence-identity
                        # "In the PAF format, column 10 divived by column 11 gives the BLAST identity."
                        bscore = hit.mlen / hit.blen
                        if bscore * 100 >= args.minid:
                            read_is_bad = True

                # Criteria satisifed
                if read_is_bad:
                    each_dropped[ref_i] += 1
                    if break_first:
                        break
            else:
                # Continue the outer loop to the next aligner, as no hit was found
                continue
            # Break the aligner loop as we've already break'ed a hit
            break

        if read_is_bad:
            n_baddies += 1

        # Check if the read is trash instead
        if not read_is_bad:
            if args.trash_minalen:
                try:
                    if (read.reference_length/read.query_length)*100.0 < args.trash_minalen:
                        read_is_bad = True
                        n_trash += 1
                except ZeroDivisionError:
                    read_is_bad = True
                    n_trash += 1

        # Check if the read is on the shitlist
        if not read_is_bad:
            if read.query_name in bad_set:
                read_is_bad = True
                n_known += 1

        if read_is_bad:
            bad_mask[r_i] = 1
            bad_seen.add(read.query_name)
    dirty_bam.close()

    # Third and final pass to write
    dirty_bam = pysam.AlignmentFile(args.dirty)
    for r_i, read in enumerate(dirty_bam.fetch(until_eof=True)):
        # If the read really is good, write it out
        if not bad_mask[r_i]:
            # Finally, check if the QNAME has been tossed out already
            if read.query_name in bad_seen:
                n_collateral += 1
                continue
            n_good += 1
            clean_bam.write(read)

    sys.stderr.write("[INFO] %d sequences in, %d sequences out\n" % (n_seqs, n_good))
    log.write("\t".join([str(x) for x in [
        os.path.basename(args.clean),
        n_seqs,
        n_seqs - n_good,
        n_good,
        n_baddies,
        n_trash,
        n_known,
        n_collateral,
        "-"
    ]] + [str(x) for x in each_dropped]) + '\n')
    dirty_bam.close()
    clean_bam.close()
# Need to think carefully about this however; as the mp.Aligner is primed to a particular reference and shared
def dh_fastx(log, manifest, args):
    """Dehumanize a FASTA/FASTQ file using a pool of minimap2 workers.

    Flags for every (sequence, reference) pair are stored in a shared
    boolean matrix; worker processes each boot their own aligners and
    pull sequences off a queue. A second read of the input then writes
    every unflagged record to the clean output.

    Args:
        log: open handle receiving one tab-separated summary line.
        manifest: output of load_manifest (preset + reference list).
        args: parsed CLI namespace (dirty, clean, threads, minid, ...).
    """
    fastx_path = args.dirty
    break_first = not args.nobreak # break on first hit, otherwise we can use this to 'survey' hits to different databases

    # Count sequences up front unless -n was given (needed to size the
    # shared flag matrix before workers start).
    n_seqs = 0
    if args.n:
        n_seqs = args.n
    else:
        for name, seq, qual in mp.fastx_read(fastx_path):
            n_seqs += 1

    sys.stderr.write("[INFO] Preparing memory for flags.\n")
    # Shared (lock-free) n_seqs x n_references boolean matrix; each worker
    # writes only its own sequence's row, so no locking is needed.
    super_flag_matrix = np.frombuffer(Array(ctypes.c_bool, n_seqs*len(manifest["references"]), lock=False), dtype=ctypes.c_bool)
    super_flag_matrix = super_flag_matrix.reshape(n_seqs, len(manifest["references"]))
    sys.stderr.write("[INFO] Raised %d x %d flags.\n" % (n_seqs, len(manifest["references"])))

    #aligners = []
    #for ref_i, ref_manifest in enumerate(manifest["references"]):
    #    aligners.append([])
    #    sys.stderr.write("[%d/%d] Booting minimap2 aligners.\n" % (ref_i+1, len(manifest["references"])))
    #
    #    for _ in range(args.threads):
    #        aligners[ref_i].append( mp.Aligner(ref_manifest["path"], preset=manifest["preset"]) )

    def map_seqs(work_q, manifest, break_first, block_i):
        # Worker: boot one aligner per reference, then consume work items
        # ({"i": index, "seq": sequence}) until a None sentinel arrives.
        aligners = []
        for ref_i, ref_manifest in enumerate(manifest["references"]):
            #sys.stderr.write("[%d:%d/%d] Booting minimap2 aligners.\n" % (block_i, ref_i+1, len(manifest["references"])))
            aligners.append( mp.Aligner(ref_manifest["path"], preset=manifest["preset"]) )
        sys.stderr.write("[%d:] minimap2 aligners ready.\n" % (block_i))

        while True:
            work = work_q.get()
            if work is None:
                return
            # Clear this sequence's row before testing each reference.
            for ref_i, ref_manifest in enumerate(manifest["references"]):
                super_flag_matrix[ work["i"] ][ref_i] = 0
            for ref_i, ref_manifest in enumerate(manifest["references"]):
                for hit in aligners[ref_i].map(work["seq"]):
                    # A hit only counts if it clears each enabled threshold.
                    if args.minlen:
                        st = min(hit.q_st, hit.q_en)
                        en = max(hit.q_st, hit.q_en)
                        if ((en - st) / len(work["seq"])) * 100 < args.minlen:
                            continue
                    if args.minid:
                        # http://lh3.github.io/2018/11/25/on-the-definition-of-sequence-identity
                        # "In the PAF format, column 10 divived by column 11 gives the BLAST identity."
                        bscore = hit.mlen / hit.blen
                        if bscore * 100 < args.minid:
                            continue
                    # Criteria satisifed
                    super_flag_matrix[ work["i"] ][ref_i] = 1
                    if break_first:
                        break
                else:
                    # Continue the outer loop to the next aligner, as no hit was found
                    continue
                # Break the aligner loop as we've already seen a hit
                break

    sys.stderr.write("[INFO] Counted %d sequences\n" % (n_seqs))
    sys.stderr.write("[INFO] %s\n" % (fastx_path))

    work_queue = Queue(maxsize=args.threads*5000) # Queue N seqs per process
    processes = []

    for _ in range(args.threads):
        p = Process(target=map_seqs, args=(work_queue,manifest,break_first,_))
        processes.append(p)
    for p in processes:
        p.start()

    # Begin adding seqs
    sys.stderr.write("[INFO] Feeding sequences to queue\n")
    start_clock = datetime.now()
    for read_i, read_tuple in enumerate(mp.fastx_read(fastx_path)):
        if read_i % args.blockrep == 0:
            end_clock = datetime.now()
            sys.stderr.write("[NOTE] Queued Read#%d. Last block pushed in %s (%s pseq.)\n" % (read_i, str(end_clock - start_clock), str((end_clock-start_clock)/args.blockrep) ))
            start_clock = datetime.now()
        if args.n:
            if read_i+1 > args.n:
                break

        # Align
        # queue will block until there's room
        work_queue.put({"i": read_i, "seq": read_tuple[1]})
    sys.stderr.write("[INFO] Finished feeding sequences\n")

    # Add sentinels to kill off processes
    sys.stderr.write("[INFO] Wait for queues to empty... be patient\n")
    for _ in range(args.threads):
        work_queue.put(None)

    # Wait for processes to complete work
    for p in processes:
        p.join()

    # A sequence is dropped if it was flagged against ANY reference.
    flat_dropped = ( super_flag_matrix.sum(axis=1) > 0 )
    total_dropped = flat_dropped.sum()
    sys.stderr.write("[INFO] Dropped %d sequences\n" % (flat_dropped.sum()))

    # Now...
    clean_fq_p = args.clean
    if args.clean == "-":
        clean_fq = sys.stdout
    else:
        fp = os.path.basename(fastx_path).split(".")
        clean_fq = open(clean_fq_p, 'w')
        sys.stderr.write("[INFO] Writing FASTX %s\n" % (clean_fq_p))

    # Output FASTX
    # Second pass over the input: emit every record not flagged above,
    # as FASTA when no quality string is present, FASTQ otherwise.
    n_good = 0
    for read_i, read_tuple in enumerate(mp.fastx_read(fastx_path)):
        if not flat_dropped[read_i]:
            n_good += 1
            if read_tuple[2] is None:
                out_read = ">%s\n%s\n" % (read_tuple[0],
                                          read_tuple[1])
            else:
                out_read = "@%s\n%s\n+\n%s\n" % (read_tuple[0],
                                                 read_tuple[1],
                                                 read_tuple[2])
            clean_fq.write(out_read)
    clean_fq.close()

    # Per-reference drop counts for the log line.
    each_dropped = list( super_flag_matrix.sum(axis=0) )
    log.write("\t".join([str(x) for x in [
        os.path.basename(clean_fq_p),
        n_seqs,
        n_seqs - n_good,
        n_good,
        total_dropped,
        0,
        0,
        0,
        "-"
    ]] + [str(x) for x in each_dropped]) + '\n')
def cli():
    """Command-line entry point: parse arguments and dispatch to dh_bam/dh_fastx."""
    import argparse
    ap = argparse.ArgumentParser()
    ap.add_argument("manifest", help="reference manifest")
    ap.add_argument("dirty", help="input dirty file")
    ap.add_argument("--known", help="new-line delimited list of reads known to be dirty")
    mode = ap.add_mutually_exclusive_group(required=True)
    mode.add_argument("--bam", action="store_true")
    mode.add_argument("--fastx", action="store_true")
    ap.add_argument("--preset", help="mappy aligner preset", required=True)
    ap.add_argument("-o", "--clean", help="output clean file [default -]", default="-")
    ap.add_argument("--log", help="log path [default <dirty>.dehumanizer.log.txt]", default=None)
    ap.add_argument("-t", "--threads", help="number of minimap2 process queues to spawn PER REFERENCE [1]", default=1, type=int)
    ap.add_argument("-n", help="number of reads (prevents having to count)", type=int)
    ap.add_argument("--minid", help="min %%proportion of (L-NM)/L to determine a hit [use all hits]", type=float, default=None)
    ap.add_argument("--minlen", help="min %%proportion of read aligned to accept a hit [use all hits]", type=float, default=None)
    ap.add_argument("--nobreak", help="dont break on the first database hit [False]", action="store_true", default=False)
    ap.add_argument("--blockrep", help="report progress after a block of N sequences [100000]", default=100000, type=int)
    # Not really the place for it, but whatever
    ap.add_argument("--trash-minalen", help="trash reads whose alignment length is less than this %%proportion of their size [keep everything] ignored if not BAM", type=float, default=None)
    ap.add_argument("--pg-date", help="datestamp to insert into BAM PG header [default today in format YYYYMMDD]", default="")
    ap.add_argument("--version", action="version", version="%(prog)s " + version.__version__)
    args = ap.parse_args()

    # Log to the user-supplied path, or next to the dirty input by default.
    log_path = args.log if args.log else args.dirty + ".dehumanizer.log.txt"
    log = open(log_path, 'w')

    manifest = load_manifest(args.manifest, args.preset)

    # Header row of the tab-separated run summary.
    log.write("\t".join([
        "name",
        "seqs_in",
        "seqs_total_dropped",
        "seqs_out",
        "n_hits",
        "n_clipped",
        "n_known",
        "n_collateral",
        "-"
    ] + [x["name"] for x in manifest["references"]]) + '\n')

    if args.fastx:
        dh_fastx(log, manifest, args)
    elif args.bam:
        known_reads = set([])
        if args.known:
            known_reads = set(x.strip() for x in open(args.known))
        dh_bam(log, manifest, known_reads, args)

    log.close()
if __name__ == "__main__":
cli()
|
"""Tests for the Dirac distributions."""
import unittest
import numpy as np
from probnum import random_variables as rvs
class TestDirac(unittest.TestCase):
    """General test case for the Dirac distributions."""

    def setUp(self):
        # Scalar, vector and matrix supports of assorted shapes.
        self.supports = [1, np.array([1, 2]), np.array([[0]]), np.array([[6], [-0.3]])]

    def test_logpdf(self):
        pass

    def test_sample_shapes(self):
        """Test whether samples have the correct shapes."""
        sizes = [1, (), 10, (4,), (3, 2)]
        for supp in self.supports:
            for size in sizes:
                with self.subTest():
                    drawn = rvs.Dirac(support=supp).sample(size=size)
                    if isinstance(size, tuple):
                        # () keeps the support's own shape; other tuples prepend.
                        self.assertEqual(size + np.shape(supp), np.shape(drawn))
                    else:
                        # Integer size prepends a single axis.
                        self.assertEqual((size,) + np.shape(supp), np.shape(drawn))
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/dataclassUtil.ipynb (unless otherwise specified).
__all__ = ['enforce_types']
# Cell
import inspect
import typing
from contextlib import suppress
from functools import wraps
# Cell
def enforce_types(callable):
    """Decorator enforcing runtime type checks based on annotations.

    Works on plain functions and on classes (where ``__init__`` is wrapped).
    Bare ``typing.Any``/``Union``/``ClassVar`` special forms are not checked;
    parameterised ``Union[...]``/``ClassVar[...]`` are checked against their
    argument types. Un-annotated parameters accept any value.
    """
    spec = inspect.getfullargspec(callable)

    def check_types(*args, **kwargs):
        # Map positional arguments onto their parameter names, then let
        # keyword arguments override/extend the mapping.
        bound = dict(zip(spec.args, args))
        bound.update(kwargs)
        for name, value in bound.items():
            with suppress(KeyError):  # un-annotated parameters can be any type
                hint = spec.annotations[name]
                if isinstance(hint, typing._SpecialForm):
                    # Bare typing.Any / typing.Union / typing.ClassVar.
                    continue
                # typing.get_origin equivalent that also works pre-3.8.
                checked = getattr(hint, '__origin__', hint)
                if isinstance(checked, typing._SpecialForm):
                    # typing.Union[...] / typing.ClassVar[...] -> check the args.
                    checked = hint.__args__
                if not isinstance(value, checked):
                    raise TypeError(
                        "Unexpected type for '{}' (expected {} but found {})".format(
                            name, hint, type(value)
                        )
                    )

    def decorate(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            check_types(*args, **kwargs)
            return func(*args, **kwargs)
        return wrapper

    if inspect.isclass(callable):
        # For classes, validate constructor arguments.
        callable.__init__ = decorate(callable.__init__)
        return callable
    return decorate(callable)
|
from django.conf import settings
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db import models
from django.utils.translation import ugettext_lazy as _
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.core.fields import RichTextField
from wagtail.core.models import Page
from wagtail.images import get_image_model
from django.shortcuts import render, redirect
from wagtail.contrib.routable_page.models import RoutablePageMixin, route
from taggit.models import Tag
IMAGE_ORDER_TYPES = (
(1, 'Image title'),
(2, 'Newest image first'),
)
class SimpleGalleryIndex(RoutablePageMixin, Page):
    """Gallery index page showing the images of one Wagtail collection."""
    intro_title = models.CharField(
        verbose_name=_('Intro title'),
        max_length=250,
        blank=True,
        help_text=_('Optional H1 title for the gallery page.')
    )
    intro_text = RichTextField(
        blank=True,
        verbose_name=_('Intro text'),
        help_text=_('Optional text to go with the intro text.')
    )
    collection = models.ForeignKey(
        'wagtailcore.Collection',
        verbose_name=_('Collection'),
        null=True,
        blank=False,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text=_('Show images in this collection in the gallery view.')
    )
    images_per_page = models.IntegerField(
        default=8,
        verbose_name=_('Images per page'),
        help_text=_('How many images there should be on one page.')
    )
    use_lightbox = models.BooleanField(
        verbose_name=_('Use lightbox'),
        default=True,
        help_text=_('Use lightbox to view larger images when clicking the thumbnail.')
    )
    # 1 = order by image title, 2 = newest first (see IMAGE_ORDER_TYPES).
    order_images_by = models.IntegerField(choices=IMAGE_ORDER_TYPES, default=1)
    content_panels = Page.content_panels + [
        FieldPanel('intro_title', classname='full title'),
        FieldPanel('intro_text', classname='full title'),
        FieldPanel('collection'),
        FieldPanel('images_per_page', classname='full title'),
        FieldPanel('use_lightbox'),
        FieldPanel('order_images_by'),
    ]
    @property
    def images(self):
        """All images of this page's collection (properties take no args;
        the previous dead `tags` parameter was dropped)."""
        return get_gallery_images(self.collection.name, self)
    @property
    def tags(self):
        """Sorted, de-duplicated tags across this gallery's images."""
        return self.get_gallery_tags()
    def get_context(self, request):
        """Add the paginated image list and tag list to the template context."""
        images = self.images
        tags = self.tags
        context = super(SimpleGalleryIndex, self).get_context(request)
        page = request.GET.get('page')
        paginator = Paginator(images, self.images_per_page)
        try:
            images = paginator.page(page)
        except PageNotAnInteger:
            images = paginator.page(1)
        except EmptyPage:
            images = paginator.page(paginator.num_pages)
        context['gallery_images'] = images
        context['gallery_tags'] = tags
        return context
    def get_gallery_tags(self, tags=None):
        """Collect tags used by the gallery's (optionally tag-filtered) images.

        A fresh list is built on every call. The previous implementation
        used a mutable default argument (`tags=[]`) and extended it in
        place, so tags leaked between calls/requests.
        """
        found = list(tags) if tags else []
        images = get_gallery_images(self.collection.name, self, tags=tags)
        for img in images:
            found += img.tags.all()
        return sorted(set(found))
    @route(r'^tags/$', name='tag_archive')
    @route(r'^tags/([\w-]+)/$', name='tag_archive')
    def tag_archive(self, request, tag=None):
        """Render the gallery filtered to a single tag slug."""
        try:
            tag = Tag.objects.get(slug=tag)
        except Tag.DoesNotExist:
            # Unknown (or missing) slug: fall back to the unfiltered index.
            return redirect(self.url)
        # Previously built via a try/except NameError dance; taglist is
        # always a fresh single-element list here.
        taglist = [tag]
        images = get_gallery_images(self.collection.name, self, tags=taglist)
        tags = self.get_gallery_tags(tags=taglist)
        paginator = Paginator(images, self.images_per_page)
        page = request.GET.get('page')
        try:
            images = paginator.page(page)
        except PageNotAnInteger:
            images = paginator.page(1)
        except EmptyPage:
            images = paginator.page(paginator.num_pages)
        context = self.get_context(request)
        context['gallery_images'] = images
        context['gallery_tags'] = tags
        context['current_tag'] = tag
        return render(request, 'wagtail_simple_gallery/simple_gallery_index.html', context)
    class Meta:
        verbose_name = _('Gallery index')
        verbose_name_plural = _('Gallery indices')
    template = getattr(settings, 'SIMPLE_GALLERY_TEMPLATE', 'wagtail_simple_gallery/simple_gallery_index.html')
def get_gallery_images(collection, page=None, tags=None):
    """Fetch images in `collection`, optionally ordered and tag-filtered.

    Args:
        collection: collection *name* (not a Collection instance).
        page: optional SimpleGalleryIndex whose `order_images_by` picks the
            ordering (1 = title, 2 = newest first).
        tags: optional list of tag names like ["Hasthag", "Kawabonga", "Winter is coming"].

    Returns:
        An image queryset, or None when the lookup fails — a deliberate
        best-effort so a misconfigured collection renders an empty gallery
        rather than erroring.
    """
    images = None
    try:
        images = get_image_model().objects.filter(collection__name=collection).prefetch_related("tags")
        if page:
            if page.order_images_by == 1:
                images = images.order_by('title')
            elif page.order_images_by == 2:
                images = images.order_by('-created_at')
    except Exception:
        # Swallow lookup errors on purpose; callers treat None as "no images".
        pass
    if images and tags:
        images = images.filter(tags__name__in=tags).prefetch_related("tags").distinct()
    return images
|
from django.db import models
from django.contrib.auth.models import User
class Client(models.Model):
    """A client organisation identified by a unique name and account code."""
    # Human-readable client name; unique across clients.
    name = models.CharField(max_length=100, unique=True)
    # External account identifier; also unique.
    account = models.CharField(max_length=20, unique=True)
    # Stamped once at creation (auto_now_add) and never changed.
    created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
    # Refreshed automatically on every save (auto_now).
    updated_at = models.DateTimeField(auto_now=True, auto_now_add=False)
    def __str__(self):
        # Display clients by name in admin and shell output.
        return self.name
    class Meta:
        # Newest records first by default.
        ordering = ['-created_at']
class ClientUser(models.Model):
    """Links one auth User to one Client (both sides optional/nullable)."""
    # Each auth user may be attached to at most one ClientUser record.
    user = models.OneToOneField(User, related_name="client_user", on_delete=models.CASCADE, null=True, blank=True)
    # Owning client; reverse accessor is `client.users`.
    client = models.ForeignKey(to=Client, related_name="users", on_delete=models.CASCADE, null=True, blank=True)
    # Stamped once at creation; nullable for pre-existing rows.
    created_at = models.DateTimeField(auto_now=False, auto_now_add=True, null=True)
    # Refreshed automatically on every save.
    updated_at = models.DateTimeField(auto_now=True, auto_now_add=False, null=True)
    class Meta:
        # Newest records first by default.
        ordering = ['-created_at']
|
"""
# Author
Jakob Krzyston (jakobk@gatech.edu)
# Purpose
Build architecture for I/Q modulation classification as seen in Krzyston et al. 2020
"""
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
##### LINEAR COMBINATION FOR COMPLEX CONVOLUTION #####
class LC(nn.Module):
    """Linear combination realising complex convolution from a real Conv2d.

    Collapses the 3-row Conv2d output back to a 2-row (I, Q) representation:
    the in-phase row is row0 - row2 and the quadrature row is row1.
    (The original comment said "adds"; the code subtracts.)
    """
    def __init__(self):
        super(LC, self).__init__()
    def forward(self, x):
        # x: (batch, channels, rows, width); rows 0..2 from the padded conv.
        in_phase = x[:, :, 0:1, :] - x[:, :, 2:3, :]
        quadrature = x[:, :, 1:2, :]
        return torch.cat([in_phase, quadrature], dim=2)
##### CLASSIFIER FROM KRZYSTON ET AL. 2020 #####
class Complex(nn.Module):
    """I/Q modulation classifier from Krzyston et al. 2020.

    Expects input of shape (batch, 1, 2, 128): one channel holding the
    2-row I/Q matrix over 128 samples. Returns raw class logits.
    """
    def __init__(self,
                 n_classes: int = 11
                 ):
        super(Complex, self).__init__()
        # define the dropout layer
        self.dropout = nn.Dropout(p = 0.5)
        # complex-convolution recombination stage; LC is stateless, so
        # registering it as a submodule changes neither state_dict nor RNG.
        self.lc = LC()
        # convolutional layers w/ Xavier weight initialization
        self.conv1 = nn.Conv2d(1, 256, kernel_size=(2,3), stride=1, padding = (1,1), bias = True)
        torch.nn.init.xavier_uniform_(self.conv1.weight)
        self.conv2 = nn.Conv2d(256, 80, kernel_size=(2,3), stride=1, padding = (0,1), bias = True)
        torch.nn.init.xavier_uniform_(self.conv2.weight)
        # dense layers w/ Kaiming weight initialization
        self.dense1 = nn.Linear(80*128, 256, bias =True)
        torch.nn.init.kaiming_normal_(self.dense1.weight, nonlinearity='relu')
        self.dense2 = nn.Linear(256,n_classes, bias = True)
        torch.nn.init.kaiming_normal_(self.dense2.weight, nonlinearity='sigmoid')
    # Defining the forward pass
    def forward(self, x):
        x = self.conv1(x)
        # Previously called LC.forward(self, x) with the *classifier* as
        # `self`; calling the registered submodule is equivalent and idiomatic.
        x = self.lc(x)
        x = F.relu(x)
        x = self.dropout(x)
        x = F.relu(self.conv2(x))
        x = self.dropout(x)
        # flatten to (batch, 80*128) for the dense head
        x = x.view(x.size(0), -1)
        x = F.relu(self.dense1(x))
        x = self.dense2(x)
        return x
return x |
#Main Sedov Code Module
#Ported to python from fortran code written by James R Kamm and F X Timmes
#Original Paper and code found at http://cococubed.asu.edu/papers/la-ur-07-2849.pdf
import numpy as np
from globalvars import comvars as gv
from sedov_1d import sed_1d
from matplotlib import pyplot as plt
gv.its = 10
# define sedov_main as a function
def sedov_main(geom_in, omega_in, time_in, blast_energy, gamma_in):
    """Compute and plot the 1D Sedov blast-wave solution at one time.

    Args:
        geom_in: geometry index (1.0 planar, 2.0 cylindrical, 3.0 spherical).
        omega_in: density power-law exponent (initial rho ~ r**-omega).
        time_in: time at which to evaluate the solution.
        blast_energy: energy deposited at the origin at t=0.
        gamma_in: ratio of specific heats.

    Returns:
        dict of solution arrays keyed by 'density', 'velocity', 'pressure',
        'total_energy' and 'position'. (This dict was previously built —
        per the "dictionary to pickle" comment — but never returned.)
    """
    ##Explicitly set variables
    ##Standard Cases
    ##Spherical constant density should reach r=1 at t=1
    nstep = 1024
    eblast = blast_energy
    gv.xgeom = geom_in
    gv.omega = omega_in
    ##input parameters
    time = time_in
    rho0 = 1.
    vel0 = 0.
    ener0 = 0.0
    pres0 = 0.0
    gv.gamma = gamma_in
    ##number of grid points, spatial domain, spatial stepsize.
    ##to match hydrocode output, use the mid-sell points.
    #zpos = array of spatial points
    zlo = 0.0
    zhi = 1.2
    zstep = (zhi - zlo)/float(nstep)
    zpos = np.arange(zlo + zstep, zhi + zstep, zstep)
    den, vel, pres, enertot, zpos = sed_1d(time, nstep, zpos, eblast, rho0, vel0, ener0, pres0, gv)
    # Bundle the profiles for the caller (e.g. for pickling or testing).
    single_time_output = {'density': den, 'velocity': vel, 'pressure': pres,
                          'total_energy': enertot, 'position': zpos}
    # Plot density / velocity / energy out to 1.5x the shock radius gv.r2.
    zmax = 1.5 * gv.r2
    plt.plot(zpos, den)
    plt.axis([0, zmax, 0, max(den)])
    plt.title('Density vs. Position')
    plt.ylabel('Density (kg/m^3)')
    plt.xlabel('Position (m)')
    plt.show()
    plt.plot(zpos, vel)
    plt.axis([0, zmax, 0, max(vel)])
    plt.title('Velocity vs. Position')
    plt.ylabel('Velocity (m/s)')
    plt.xlabel('Position (m)')
    plt.show()
    plt.plot(zpos, enertot)
    plt.axis([0, zmax, 0, max(enertot)])
    plt.title('Total Energy vs. Position')
    plt.ylabel('Energy (J)')
    plt.xlabel('Position (m)')
    plt.show()
    '''
    plt.plot(zpos, pres)
    plt.axis([0, zmax, 0, max(pres)])
    plt.title('Pressure vs. Position')
    plt.ylabel('Pressure (Pa)')
    plt.xlabel('Position (m)')
    plt.show()
    '''
    return single_time_output
def solve():
    """Run the standard cylindrical test case."""
    # Planar 1.0, Cylindrical = 2.0, Spherical = 3.0
    sedov_main(2.0, 0.0, 1.0, 0.311357, 1.4)
# Executed at import time: this module doubles as a script.
solve()
|
# Generated by Django 2.0.4 on 2018-04-04 23:37
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: make Assignment.date_created non-editable with a
    timezone-aware `now` default."""
    dependencies = [
        ('assignments', '0006_auto_20180404_2334'),
    ]
    operations = [
        migrations.AlterField(
            model_name='assignment',
            name='date_created',
            field=models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='Date Created'),
        ),
    ]
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch_geometric.nn import (GlobalAttention, Set2Set, global_add_pool,
global_max_pool, global_mean_pool)
from torch_geometric.utils import softmax
from torch_scatter import scatter_add
class MaxReadOut(torch.nn.Module):
    """Graph readout: element-wise max over each graph's node features."""
    def __init__(self, in_channels: int, **kwargs):
        super().__init__()
        # Output width equals the input feature size (pooling only).
        self.out_dim = in_channels
    def forward(self, x: torch.Tensor, batch: torch.LongTensor):
        return global_max_pool(x, batch)
class SumReadOut(torch.nn.Module):
    """Graph readout: element-wise sum over each graph's node features."""
    def __init__(self, in_channels: int, **kwargs):
        super().__init__()
        # Output width equals the input feature size (pooling only).
        self.out_dim = in_channels
    def forward(self, x: torch.Tensor, batch: torch.LongTensor):
        return global_add_pool(x, batch)
class MeanReadOut(torch.nn.Module):
    """Graph readout: element-wise mean over each graph's node features."""
    def __init__(self, in_channels: int, **kwargs):
        super().__init__()
        # Output width equals the input feature size (pooling only).
        self.out_dim = in_channels
    def forward(self, x: torch.Tensor, batch: torch.LongTensor):
        return global_mean_pool(x, batch)
class AttentionReadOut(GlobalAttention):
    """Attention-weighted readout, optionally with per-channel scores.

    With ``full=True`` a separate attention score is computed for every
    feature channel; otherwise a single scalar score per node is used
    (the stock GlobalAttention behaviour).
    """
    def __init__(self, in_channels: int, full: bool = True, out_channels: Optional[int] = None, **kwargs):
        """
        When full is set to true, attention is computed separately on each feature channel.
        """
        out = in_channels if out_channels is None else out_channels
        # One score per output channel in `full` mode, else one per node.
        self.attention_out_dim = out if full else 1
        gate_nn = torch.nn.Sequential(
            torch.nn.Linear(in_channels, in_channels),
            torch.nn.ReLU(),
            torch.nn.Linear(in_channels, self.attention_out_dim),
        )
        nn = torch.nn.Linear(in_channels, out_channels) if out_channels is not None else None
        super().__init__(gate_nn, nn)
        self.out_dim = out
    def forward(self, x: torch.Tensor, batch: torch.LongTensor):
        size = int(batch.max().item() + 1)
        if self.attention_out_dim == 1:
            # Scalar attention: defer to GlobalAttention's implementation.
            return super().forward(x, batch, size=size)
        else:
            x = x.unsqueeze(-1) if x.dim() == 1 else x
            gate = self.gate_nn(x).view(-1, self.attention_out_dim)
            x = self.nn(x) if self.nn is not None else x
            # BUG FIX: the original `not a == b and c == d` (precedence:
            # `(not a == b) and (c == d)`) only raised when the dims differed
            # AND the sizes matched; both conditions must hold to proceed.
            if not (gate.dim() == x.dim() and gate.size(0) == x.size(0)):
                raise ValueError(f"Wrong input dimension: {gate.shape}, {x.shape}")
            gate = softmax(gate, batch, num_nodes=size)
            out = scatter_add(gate * x, batch, dim=0, dim_size=size)
            return out
class MLPSumReadOut(torch.nn.Module):
    """Readout applying a two-layer MLP per node, then a per-graph sum pool."""
    def __init__(self, in_channels: int, out_channels: Optional[int] = None, **kwargs):
        super().__init__()
        final = in_channels if out_channels is None else out_channels
        # Hidden width matches the input width; ReLU between the layers.
        self.mlp = torch.nn.Sequential(
            torch.nn.Linear(in_channels, in_channels),
            torch.nn.ReLU(),
            torch.nn.Linear(in_channels, final),
        )
        self.out_dim = final
    def forward(self, x: torch.Tensor, batch: torch.LongTensor):
        # Transform node features first, then aggregate per graph.
        return global_add_pool(self.mlp(x), batch)
class Set2SetReadOut(Set2Set):
    """Set2Set readout; output concatenates query and readout vectors."""
    def __init__(self, in_channels: int, processing_steps=4, num_layers=2, **kwargs):
        super().__init__(in_channels, processing_steps, num_layers)
        # Set2Set emits [q*, r], doubling the feature width.
        self.out_dim = 2 * in_channels
class CombinedReadOut(torch.nn.Module):
    """Concatenation of several named readouts applied to the same graph."""
    def __init__(self, read_out_list: Union[Tuple[str, ...], List[str]], read_out_kwargs: dict):
        super().__init__()
        members = [get_read_out(name, read_out_kwargs) for name in read_out_list]
        self.read_outs = torch.nn.ModuleList(members)
        # Output width is the sum of member widths (results are concatenated).
        self.out_dim = sum(member.out_dim for member in self.read_outs)
    def forward(self, x: torch.Tensor, batch: torch.LongTensor):
        pooled = [member(x, batch) for member in self.read_outs]
        return torch.cat(pooled, dim=1)
# Registry mapping config-facing readout names to their classes;
# used by get_read_out below.
READOUT_FUNCTIONS = {
    "max": MaxReadOut,
    "sum": SumReadOut,
    "mean": MeanReadOut,
    "set2set": Set2SetReadOut,
    "attention": AttentionReadOut,
    "mlp_sum": MLPSumReadOut,
}
def get_read_out(read_out: Union[str, Tuple[str, ...], List[str]], read_out_kwargs: Dict):
    """Instantiate a readout module by name (or several, concatenated).

    Args:
        read_out: a READOUT_FUNCTIONS key, or a list/tuple of keys to
            combine via CombinedReadOut.
        read_out_kwargs: constructor keyword arguments; must include
            `in_channels`.

    Raises:
        ValueError: if `in_channels` is missing or the name is unknown.
    """
    if "in_channels" not in read_out_kwargs:
        raise ValueError("Can't instantiate read_out without `in_channels` argument")
    # Multiple names -> concatenated combination of the individual readouts.
    if isinstance(read_out, (tuple, list)):
        return CombinedReadOut(read_out, read_out_kwargs)
    read_out_fn = READOUT_FUNCTIONS.get(read_out)
    if read_out_fn is None:
        raise ValueError(f"Unknown read_out function : {read_out}")
    return read_out_fn(**read_out_kwargs)
|
import pandas as pd
from header_list import rename_list, match_list
import time
def main(filename, output_path="/Users/derrick/Desktop/done.csv"):
    """Cleans up the CSV file

    Takes the CSV file and puts it into a pandas dataframe then cleans it up
    to make it easier to upload into the RG bulk import tool.

    Args:
        filename::str
            Location of CSV file.
        output_path::str
            Where to write the cleaned CSV. Defaults to the previously
            hard-coded location, so existing callers are unaffected.

    Returns:
        Doesn't return anything; it just outputs the CSV file.
    """
    # dtype=str so columns don't sometimes have .0 added
    # (NOTE: the old comment claimed sep=None was passed; it was not)
    df = pd.read_csv(filename, dtype=str, encoding="ISO-8859-1")
    df.columns = [
        i.lower().replace(" ", "_") for i in df.columns
    ]  # lower case and replace spaces
    # removes empty rows then empty columns
    df = df.dropna(how="all")
    df = df.dropna(axis=1, how="all")
    # if first_name and last_name not in file then it tries to see if there is
    # something like name then split it into first_name and last_name
    if "first_name" not in df.columns or "last_name" not in df.columns:
        df = try_creating_first_and_last_name(df)
    # trying to change the headers of the file to ones that will automatically
    # match with ones in the system
    df = match_column_headers(df)
    # moves values in first_name column that are more than 256 characters
    # (the limit for the bulk import tool) to the long_first_name column so
    # the row is not rejected
    if (df["first_name"].str.len() > 256).any():
        df = move_long_names(df, "first_name")
    if (df["last_name"].str.len() > 256).any():
        df = move_long_names(df, "last_name")
    # tries to join things to make up address if it is not already a column
    if "address" not in df.columns:
        df = try_creating_address(df)
    # tries to join things to make up assigned_agent if missing
    if "assigned_agent" not in df.columns:
        df = try_creating_assigned_agent(df)
    # tries to join things to make up second_contact_name if missing
    if "second_contact_name" not in df.columns:
        df = try_creating_second_contact_name(df)
    # keep only one email per row, moving extras/"invalid" emails to
    # another column
    if "email" in df.columns:
        df = clean_email_column(df)
    # does similar to clean_email_column
    if "phone" in df.columns:
        df = clean_phone_column(df)
    # merges rows with the same email
    if "email" in df.columns:
        df = merge_rows(df, "email")
    # NOTE(review): phone cleanup runs a second time after the merge — looks
    # intentional (merging can re-introduce multi-phone rows) but confirm.
    if "phone" in df.columns:
        df = clean_phone_column(df)
    df = cleanup(df)
    # Convert header names back so system automatically catches it
    df.columns = [i.title().replace("_", " ") for i in df.columns]
    df.to_csv(output_path, index=False)
def try_creating_first_and_last_name(df):
    """Derive "first_name" and "last_name" from a combined name column.

    If the frame has a "name" column (LionDesk exports) it is split on the
    first space into "first_name" / "last_name". Otherwise, if it has a
    "contact" column (Top Producer exports, "Last, First" format) it is
    split on the first comma into "last_name" / "first_name".

    Args:
        df::pandas dataframe
            The CSV file as a dataframe.

    Returns:
        df::dataframe
            The updated dataframe with the new columns.
    """
    # for liondesk
    if "name" in df.columns:
        # NOTE: the original code branched on "last_name" in df.columns but
        # both branches performed exactly the same split, so the dead
        # branching is collapsed here. `n` is keyword-only in pandas 2.x.
        df[["first_name", "last_name"]] = df["name"].str.split(
            " ", n=1, expand=True
        )
    # for top producer
    elif "contact" in df.columns:
        df[["last_name", "first_name"]] = df["contact"].str.split(
            ",", n=1, expand=True
        )
    return df
def match_column_headers(df):
    """Rename df's headers to names the RG bulk import tool auto-matches.

    For each canonical header in ``rename_list``, every known alias in the
    corresponding sub-list of ``match_list`` is tried as a rename source
    until the canonical name appears in the dataframe. (An earlier version
    also fuzzy-guessed the four merger-critical columns — first_name,
    last_name, email, phone — but the merger no longer requires them, so
    that code was dropped.)

    Args:
        df::pandas dataframe
            The CSV file as a dataframe.

    Returns:
        df::dataframe
            The dataframe with renamed columns.

    Raises:
        Exception: Shouldn't ever be raised, but handled just in case.
    """
    for i, canonical in enumerate(rename_list):
        # All known header spellings for this canonical column name.
        aliases = match_list[i]
        for alias in aliases:
            if canonical in df.columns:
                # Already matched by an earlier alias; stop trying.
                break
            try:
                df.rename(columns={alias: canonical}, inplace=True)
                if canonical in df.columns:
                    # print(f"Matched {canonical} with {alias}.")
                    break
            except Exception as e:
                print(f"How did you get here!? - Exception: {e}")
                break
    return df
def move_long_names(df, type_of_name):
    """Move values longer than 256 characters into "long_{type_of_name}".

    The RG bulk import tool rejects name fields over 256 characters, so any
    over-long value is moved to a companion "long_..." column and the
    original cell is emptied (becomes NaN). (The previous docstring was
    copy-pasted from the name-splitting helper and described the wrong
    behavior; fixed here.)

    Args:
        df::pandas dataframe
            The CSV file as a dataframe.
        type_of_name::str
            The column name you want to check if it is too long and if so put
            it into another column labeled "long_{type_of_name}".

    Returns:
        df::dataframe
            The updated dataframe with the new columns.
    """
    # moves names over 256 characters to new column
    df[f"long_{type_of_name}"] = df[df[type_of_name].str.len() > 256][type_of_name]
    # only keeps values in the column if it is less than or equal to 256 characters
    df[type_of_name] = df[df[type_of_name].str.len() <= 256][type_of_name]
    return df
def try_creating_address(df):
    """Joins separate columns into an "address" column.

    If "address" is not already in the dataframe, the first address layout
    (below) whose columns are all present is space-joined into "address".

    Args:
        df::pandas dataframe
            The CSV file as a dataframe.

    Returns:
        df::dataframe
            The updated dataframe with the new columns.
    """
    # Known source layouts, checked in order; only the first match is used.
    layouts = [
        ["house_number", "dir_prefix", "street", "street_type",
         "dir_suffix", "suite", "po_box"],
        ["house_number", "direction_prefix", "street",
         "street_designator", "suite_no"],
    ]
    for columns in layouts:
        if set(columns).issubset(df.columns):
            # fillna("") (without astype(str)) avoids stray "nan" text in
            # the joined address.
            pieces = [df[name].fillna("") for name in columns]
            joined = pieces[0]
            for piece in pieces[1:]:
                joined = joined + " " + piece
            df["address"] = joined
            break
    return df
def try_creating_assigned_agent(df):
    """Joins separate columns into an "assigned_agent" column.

    If both member name columns exist, they are space-joined into
    "assigned_agent"; otherwise the dataframe is returned untouched.

    Args:
        df::pandas dataframe
            The CSV file as a dataframe.

    Returns:
        df::dataframe
            The updated dataframe with the new columns.
    """
    required = {"member_first_name", "member_last_name"}
    if required.issubset(df.columns):
        first = df["member_first_name"].fillna("")
        last = df["member_last_name"].fillna("")
        df["assigned_agent"] = first + " " + last
    return df
def try_creating_second_contact_name(df):
    """Joins separate columns into a "second_contact_name" column.

    If "second_contact_name" is not in the dataframe then it attempts to join
    other columns together to create a second_contact_name column.

    Args:
        df::pandas dataframe
            The CSV file as a dataframe.

    Returns:
        df::dataframe
            The updated dataframe with the new columns.
    """
    # if all of these things are columns in the df then combine them under
    # the column name 'second_contact_name'
    if {
        "secondary_title",
        "secondary_first_name",
        "secondary_nickname",
        "secondary_last_name",
    }.issubset(df.columns):
        df["second_contact_name"] = (
            df["secondary_title"].fillna("")
            + " "
            + df["secondary_first_name"].fillna("")
            + " "
            + df["secondary_nickname"].fillna("")
            # BUG FIX: the original omitted this separator, fusing the
            # nickname and last name together (e.g. "AnnieLee").
            + " "
            + df["secondary_last_name"].fillna("")
        )
    elif {"first_name_2", "last_name_2"}.issubset(df.columns):
        df["second_contact_name"] = (
            df["first_name_2"].fillna("") + " " + df["last_name_2"].fillna("")
        )
    return df
def clean_email_column(df):
    """Cleans the email column so there is only 1 valid email per row.

    1. Lower-cases the "email" column (so Example@Yahoo.com and
       example@yahoo.com are not treated as different emails).
    2. If a cell holds a comma-separated list, keeps the address before the
       first comma and moves the rest into "second_contact_email".
    3. Moves syntactically 'invalid' emails to "second_contact_email".

    This rewrite fixes two defects in the original:
    - the ``a, b = s.str.split(",", 1).str`` unpacking idiom was removed in
      pandas 1.0; it now uses ``expand=True``;
    - the two move-invalid-emails branches were identical except for
      creating the target column, so they are deduplicated.

    Args:
        df::pandas dataframe
            The CSV file as a dataframe.

    Returns:
        df::dataframe
            The updated dataframe with the new columns.
    """
    email_pattern = r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+$"
    df["email"] = df.email.astype(str).str.lower()
    # Split comma-separated email lists: the part before the first comma
    # stays in "email", the remainder becomes second-contact overflow.
    if (df["email"].str.contains(",")).any():
        split = df["email"].str.split(",", n=1, expand=True)
        df["email"] = split[0]
        overflow = split[1]
        if "second_contact_email" in df.columns:
            # Merge the overflow into the existing 'second_contact_email'.
            df["second_contact_email"] = (
                df["second_contact_email"].fillna("") + ", " + overflow.fillna("")
            )
            # clean up leading whitespace and stray commas from the merge
            df["second_contact_email"] = df["second_contact_email"].replace(
                to_replace=r"((, )$|[,]$)|(^\s)", value="", regex=True
            )
        else:
            df["second_contact_email"] = overflow
    # if there is a bad email then do stuff. its here to help with speed
    # and to stop adding a second_contact_email when its not needed
    if ~df.email.str.contains(email_pattern).all():
        if "second_contact_email" not in df.columns:
            df["second_contact_email"] = ""
        # validate emails and move the bad ones out of "email"
        is_valid = df["email"].str.contains(pat=email_pattern, case=False, na=False)
        df["temp_second_contact_email"] = df[~is_valid]["email"]
        df["email"] = df[is_valid]["email"]
        # merge so the original second_contact_email isn't replaced
        df["second_contact_email"] = (
            df["second_contact_email"].fillna("")
            + ", "
            + df["temp_second_contact_email"].fillna("")
        )
        del df["temp_second_contact_email"]
        # clean up leading whitespace and stray commas from the merge
        df["second_contact_email"] = df["second_contact_email"].replace(
            to_replace=r"((, )$|[,]$)|(^\s)", value="", regex=True
        )
        # collapse double spaces left behind by the merge
        df["second_contact_email"] = df["second_contact_email"].replace(
            to_replace=r"(  )", value=" ", regex=True
        )
    return df
def clean_phone_column(df):
    """Cleans the phone column so there is only 1 plausible number per row.

    1. If a cell holds a comma-separated list, keeps the number before the
       first comma and moves the rest into "second_contact_phone".
    2. Strips everything but digits from the phone column.
    3. Moves numbers shorter than 8 or longer than 15 digits into
       "second_contact_phone".

    This rewrite fixes defects in the original: the docstring wrongly said
    "email"; the removed ``.str`` tuple-unpack split idiom is replaced with
    ``expand=True``; and the duplicated with/without-column branches are
    merged.

    Args:
        df::pandas dataframe
            The CSV file as a dataframe.

    Returns:
        df::dataframe
            The updated dataframe with the new columns.
    """
    # Matches strings that are neither <=7 nor >=16 characters long,
    # i.e. plausible 8-15 digit phone numbers.
    length_pattern = r"^(?:(?!^.{,7}$|^.{16,}$).)*$"
    # Split comma-separated phone lists into phone + overflow.
    if df.phone.astype(str).str.contains(",").any():
        if "second_contact_phone" not in df.columns:
            df["second_contact_phone"] = ""
        split = df["phone"].str.split(",", n=1, expand=True)
        df["phone"] = split[0]
        df["temp_phone"] = split[1]
        df["second_contact_phone"] = (
            df["second_contact_phone"].astype(str).fillna("")
            + ", "
            + df["temp_phone"].astype(str).fillna("")
        )
        del df["temp_phone"]
    # only keep numbers in phone column
    df["phone"] = df["phone"].replace(to_replace=r"[^0-9]+", value="", regex=True)
    # if there is a bad phone then do stuff and hopefully stop adding a
    # second_contact_phone when its not needed
    if df.phone.astype(str).str.contains(length_pattern).any():
        if "second_contact_phone" not in df.columns:
            df["second_contact_phone"] = ""
        # moves phone numbers less than 8 and greater than 15 digits
        # then removes them from phone
        valid_len = (
            df["phone"].astype(str).str.contains(pat=length_pattern, case=False, na=False)
        )
        df["temp_second_contact_phone"] = df[~valid_len]["phone"]
        df["phone"] = df[valid_len]["phone"]
        # merge so the original second_contact_phone isn't replaced
        df["second_contact_phone"] = (
            df["second_contact_phone"].astype(str).fillna("")
            + ", "
            + df["temp_second_contact_phone"].astype(str).fillna("")
        )
        del df["temp_second_contact_phone"]
        # clean up leading whitespace, stray commas from the merge,
        # and stray ".0" suffixes from float-typed phone numbers
        df["second_contact_phone"] = df["second_contact_phone"].replace(
            to_replace=r"((, )$|[,]$|(^\s)|(\.0))", value="", regex=True
        )
        # collapse double spaces left behind by the merge
        df["second_contact_phone"] = df["second_contact_phone"].replace(
            to_replace=r"(  )", value=" ", regex=True
        )
    return df
def merge_rows(df, merge_on):
    """Merge rows that share the same value in the ``merge_on`` column.

    1. Builds a frame without "first_name"/"last_name"/"email" so merged
       rows don't end up as "Joe, Joe" or with duplicated emails.
    2. Marks the FIRST row of every duplicate group in a "first_dupe" column.
    3. Folds each later duplicate's values into its group's first row.
    4. Drops the later duplicates (rows with an empty key are kept as-is so
       all blank-keyed rows aren't collapsed into one).
    5. Deletes the helper "first_dupe" column.

    BUG FIX: the original tested ``row["first_dupe"] is True``, which is
    False for numpy bools (``np.True_ is True`` → False), silently skipping
    the merge; replaced with a plain truthiness check.

    Based on: https://github.com/khalido/notebooks/blob/master/pandas-dealing-with-dupes.ipynb

    Args:
        df::pandas dataframe
            The CSV file as a dataframe.
        merge_on::str
            The column to merge rows on when more than one row shares a value.

    Returns:
        df::dataframe
            The dataframe with duplicate rows merged.
    """
    started = time.time()
    # Columns whose values get combined; computed before "first_dupe" exists.
    combinable = df[df.columns.difference(["first_name", "last_name", "email"])]
    # True only on the first row of each duplicate group.
    df["first_dupe"] = df.duplicated(merge_on, keep=False) & ~df.duplicated(
        merge_on, keep="first"
    )

    def combine_rows(row, key=merge_on, cols_to_combine=combinable):
        """Fold the later duplicates of this row's group into this row."""
        if row["first_dupe"]:
            # all rows sharing this row's key, this row first
            dupes = df[df[key] == row[key]]
            # skipping the first row, since that's our first_dupe
            for _, dupe_row in dupes.iloc[1:].iterrows():
                for col in cols_to_combine:
                    dupe_value = str(dupe_row[col])
                    row[col] = str(row[col])
                    # don't append a value that's already present, e.g. avoid
                    # "buyer, buyer" when two merged rows both say "buyer"
                    if row[col].lower() not in dupe_value.lower():
                        row[col] += ", " + dupe_value
            # make sure first_dupe doesn't get processed again
            row.first_dupe = False
        return row

    df = df.apply(combine_rows, axis=1)
    # Drop later duplicates, keeping the merged first row; null keys are
    # exempt so rows with an empty cell aren't all deleted but the first.
    df = df[
        df[merge_on].isnull()
        | ~df[df[merge_on].notnull()].duplicated(subset=merge_on, keep="first")
    ]
    del df["first_dupe"]
    elapsed = time.time() - started
    print(f"Merger took {elapsed} seconds.")
    return df
def cleanup(df):
    """Cleans up the dataframe a bit.

    Removes stray 'nan' text and the leftover commas that merging rows and
    columns can produce. Not strictly necessary, but makes the output file
    less ugly.

    Args:
        df::pandas dataframe
            The CSV file as a dataframe.

    Returns:
        df::dataframe
            The tidied dataframe.
    """
    # (pattern, replacement) pairs, applied in order:
    # stray 'nan' text, leading commas, doubled commas, trailing commas.
    fixes = [
        (r"(?:^|\W)nan(?:$|\W)", ""),
        (r"^(, )|^(,)", ""),
        (r"(, , )", ", "),
        (r"[,]{1}$", ""),
    ]
    for pattern, replacement in fixes:
        df = df.replace(to_replace=pattern, value=replacement, regex=True)
    return df
if __name__ == "__main__":
    # Only executed when this module is run directly (not when imported).
    start = time.time()
    # Hard-coded input path — point this at the CSV file to be cleaned.
    file = "/Users/derrick/Documents/Random Stuff/WhyMeCSV/test-csvs/1.csv"
    main(file)
    finish = time.time() - start
    print(f"CSV has been printed in {finish} seconds.")
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 eNovance <licensing@enovance.com>
#
# Authors: Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from ceilometer.openstack.common import context
from ceilometer.openstack.common.rpc import proxy as rpc_proxy
from ceilometer.storage.models import Alarm
# Configuration options for the alarm notifier; registered below under the
# [alarm] group of the ceilometer configuration file.
OPTS = [
    cfg.StrOpt('notifier_rpc_topic',
               default='alarm_notifier',
               help='the topic ceilometer uses for alarm notifier messages'),
]
cfg.CONF.register_opts(OPTS, group='alarm')
class RPCAlarmNotifier(rpc_proxy.RpcProxy):
    """RPC proxy that casts alarm state transitions to the notifier topic."""

    def __init__(self):
        # Topic comes from the [alarm] notifier_rpc_topic option.
        topic = cfg.CONF.alarm.notifier_rpc_topic
        super(RPCAlarmNotifier, self).__init__(
            default_version='1.0', topic=topic)

    def notify(self, alarm, previous, reason):
        # Pick the action list attribute matching the alarm's current state.
        actions = getattr(alarm, Alarm.ALARM_ACTIONS_MAP[alarm.state])
        payload = {'actions': actions,
                   'alarm_id': alarm.alarm_id,
                   'previous': previous,
                   'current': alarm.state,
                   'reason': reason}
        msg = self.make_msg('notify_alarm', data=payload)
        # Fire-and-forget cast under an admin context.
        self.cast(context.get_admin_context(), msg)
|
from django.http.response import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django import forms
from django.utils.regex_helper import Choice
from . import util
def index(request):
    """List every encyclopedia entry on the landing page."""
    context = {
        "entries": util.list_entries(),
        "Status": "index",
    }
    return render(request, "encyclopedia/index.html", context)
def title(request, TITLE):
    """Render a single entry page, or an error page if TITLE is unknown."""
    import markdown2

    status = "title"
    entry = util.get_entry(TITLE)
    # Idiom fix: compare to None with `is`, not `==`.
    if entry is None:
        entry = "Entry not found!"
        status = "error"
    # Entries are stored as Markdown; convert to HTML for display.
    entry = markdown2.markdown(entry)
    return render(request, "encyclopedia/index.html", {
        "Title": TITLE,
        "Entry": entry,
        "Status": status
    })
def search(request):
    """Search entries by substring; an exact title match redirects directly."""
    query = request.GET["q"]
    if query == "":
        return HttpResponseRedirect(reverse("index"))
    matches = []
    for name in util.list_entries():
        if query.lower() == name.lower():
            # Exact title match: jump straight to the entry page.
            return HttpResponseRedirect(reverse("title", kwargs={'TITLE': query}))
        if query.lower() in name.lower():
            matches.append(name)
    if not matches:
        # No results: the title view renders its "not found" error page.
        return HttpResponseRedirect(reverse("title", kwargs={'TITLE': query}))
    return render(request, "encyclopedia/index.html", {
        "Title": "Search Results",
        "entries": matches,
        "Status": "search"
    })
class NewEntryForm(forms.Form):
    """Form for creating a new entry: a title plus its Markdown content."""
    Title = forms.CharField(label="Title ")
    Entry = forms.CharField(label="Entry", widget=forms.Textarea)
class EditEntryForm(forms.Form):
    """Form for editing an existing entry's Markdown content (title is fixed)."""
    Entry = forms.CharField(label="Entry", widget=forms.Textarea)
def new(request):
    """Create a new encyclopedia entry.

    GET renders an empty form; POST validates it, rejects duplicate titles
    (case-insensitively), saves the entry, and redirects to its page.
    """
    if request.method == "GET":
        return render(request, "encyclopedia/new.html", {
            "form": NewEntryForm()
        })
    elif request.method == "POST":
        form = NewEntryForm(request.POST)
        if not form.is_valid():
            # BUG FIX: re-render the bound form so the user's input and the
            # validation errors are preserved (the original returned a fresh
            # empty form, discarding both).
            return render(request, "encyclopedia/new.html", {
                "form": form
            })
        Title = form.cleaned_data["Title"]
        Entry = form.cleaned_data["Entry"]
        # Reject titles that already exist, ignoring case.
        for name in util.list_entries():
            if name.lower() == Title.lower():
                return render(request, "encyclopedia/index.html", {
                    "Title": "Already Exists",
                    "Entry": Title + " already exists!",
                    "Status": "error"
                })
        util.save_entry(title=Title, content=Entry)
        return HttpResponseRedirect(reverse("title", kwargs={'TITLE': Title}))
def edit(request, TITLE):
    """Edit an existing entry; unknown titles render an error page."""
    if request.method == "GET":
        content = util.get_entry(TITLE)
        if content is None:
            return render(request, "encyclopedia/index.html", {
                "Title": "Does not Exist",
                "Entry": TITLE + " does not exist!",
                "Status": "error"
            })
        # Pre-fill the form with the entry's current Markdown.
        return render(request, "encyclopedia/edit.html", {
            "Title": TITLE,
            "form": EditEntryForm(initial={"Entry": content})
        })
    elif request.method == "POST":
        form = EditEntryForm(request.POST)
        if not form.is_valid():
            return HttpResponseRedirect(reverse("edit", kwargs={'TITLE': TITLE}))
        Entry = form.cleaned_data["Entry"]
        # Only save when the title matches an existing entry (case-insensitive).
        for name in util.list_entries():
            if name.lower() == TITLE.lower():
                util.save_entry(title=TITLE, content=Entry)
                return HttpResponseRedirect(reverse("title", kwargs={'TITLE': TITLE}))
        return render(request, "encyclopedia/index.html", {
            "Title": "Does not Exist",
            "Entry": TITLE + " does not exist!",
            "Status": "error"
        })
def random(request):
    """Redirect to a randomly chosen entry page."""
    # Local import: the module-level name "random" is taken by this view.
    from random import choice
    entries = util.list_entries()
    return HttpResponseRedirect(reverse("title", kwargs={'TITLE': choice(entries)}))
|
# from django.contrib.admin.views.decorators import staff_member_required
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.http.request import split_domain_port
from django.shortcuts import render
from django.utils.translation import gettext_lazy as _
from cms.contexts.models import WebSite
# @staff_member_required
@login_required
def board_base(request):
    """Render the editorial board page; staff users only."""
    if not request.user.is_staff:
        raise PermissionDenied
    # When MAIN_WEBSITE is set in settings, /editorial-board is reachable
    # only via the main site's domain; otherwise any website may serve it.
    if hasattr(settings, 'MAIN_WEBSITE'):
        current_domain = split_domain_port(request.get_host())[0]
        main_website = WebSite.objects.filter(
            pk=getattr(settings, 'MAIN_WEBSITE')).first()
        main_domain = main_website.domain
        if main_domain != current_domain:
            raise Http404(_("Access this path from main domain {}".format(main_domain)))
    return render(request, "board.html")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""\
This script creates a triplet sparse matrix
"""
import diypy3
# Build a triplet sparse matrix; each tuple is presumably
# (row, column, value) — confirm against the diypy3 API.
d = diypy3.Diypy3()
d.triplet_sparse_matrix((1, 1, 'hello'),
                        (2, 1, 'how'),
                        (1, 2, 'are'),
                        (2, 2, 'you'))
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
import json
import os
from parlai.core import build_data
from parlai.utils.io import PathManager
# Files to download for the blended_skill_talk task; each entry pairs a
# download URL with its local filename and expected SHA-256 checksum.
# NOTE: the remote 'personas_list.txt' is deliberately saved locally as
# 'persona_list.txt', and 'safe_personas_2.txt' as 'safe_personas.txt'.
RESOURCES = [
    build_data.DownloadableFile(
        'http://parl.ai/downloads/blended_skill_talk/blended_skill_talk.tar.gz',
        'blended_skill_talk.tar.gz',
        '5fbed0068ee89e2d43b93c3ecb341e784617033efa5e8e911a219d4eda6134a6',
    ),
    build_data.DownloadableFile(
        'http://parl.ai/downloads/blended_skill_talk/personas_list.txt',
        'persona_list.txt',
        '59a51adedc78e806a380f16477de3740cefe3494d20f8a2a733841bedaaa3ee5',
        zipped=False,
    ),
    build_data.DownloadableFile(
        'http://parl.ai/downloads/blended_skill_talk/topic_to_persona_list.txt',
        'topic_to_persona_list.txt',
        '47cdb6cbee0516ca7400be35fa07761339b86c6c026425bf5dba00e5534e8182',
        zipped=False,
    ),
    build_data.DownloadableFile(
        'http://parl.ai/downloads/blended_skill_talk/ed_persona_topicifier__train__both_sides.json',
        'ed_persona_topicifier__train__both_sides.json',
        'ff2ea7c5fcb0449890d57a629cc3e8794ab95ac6db1057bf58d540c2b576e4cc',
        zipped=False,
    ),
    build_data.DownloadableFile(
        'http://parl.ai/downloads/blended_skill_talk/ed_persona_topicifier__train__experiencer_only.json',
        'ed_persona_topicifier__train__experiencer_only.json',
        '751f0ba2f421a11eee2bfc896d60ab70d669093c3a5f6cb30e8d202133a90ec7',
        zipped=False,
    ),
    build_data.DownloadableFile(
        'http://parl.ai/downloads/blended_skill_talk/ed_persona_topicifier__valid__experiencer_only.json',
        'ed_persona_topicifier__valid__experiencer_only.json',
        '15d5412f5990a8a9c892305009d8597a737322aafe878b03ec71143703b25ba0',
        zipped=False,
    ),
    build_data.DownloadableFile(
        'http://parl.ai/downloads/blended_skill_talk/ed_persona_topicifier__test__experiencer_only.json',
        'ed_persona_topicifier__test__experiencer_only.json',
        '2604e977787be0b5edc54561f7ce8a54c40758d235a3fee262fe20fe36b8cd15',
        zipped=False,
    ),
    build_data.DownloadableFile(
        'http://parl.ai/downloads/blended_skill_talk/safe_personas_2.txt',
        'safe_personas.txt',
        '2ee292aa0006ea002e9b23d4f7326fe9e17514ce5793d31fd8d679035d4366a7',
        zipped=False,
    ),
]
def build(opt):
    """Download and unpack the blended_skill_talk data if not already built."""
    dpath = os.path.join(opt['datapath'], 'blended_skill_talk')
    version = 'v1.4'
    # Guard clause: nothing to do when this version is already built.
    if build_data.built(dpath, version_string=version):
        return
    print('[building data: ' + dpath + ']')
    if build_data.built(dpath):
        # An older version exists, so remove these outdated files
        build_data.remove_dir(dpath)
    build_data.make_dir(dpath)
    # Download the data
    for resource in RESOURCES:
        resource.download_file(dpath)
    # Format it for use with ParlAIDialogTeacher
    _create_parlai_format(dpath)
    # Mark the data as built
    build_data.mark_done(dpath, version_string=version)
def _create_parlai_format(dpath: str):
    """
    Copy data into the format read by ParlAIDialogTeacher.

    'text' will be from the free Turker, who speaks first, and 'label' will be
    from the guided Turker.
    """
    for datatype in ('train', 'valid', 'test'):
        load_path = os.path.join(dpath, f'{datatype}.json')
        save_path = os.path.join(dpath, f'{datatype}.txt')
        print(f'Loading {load_path}.')
        with PathManager.open(load_path, 'r', encoding='utf8') as f_read:
            episodes = json.load(f_read)
        print(f'Saving to {save_path}')
        with PathManager.open(save_path, 'w', encoding='utf8') as f_write:
            for episode in episodes:
                # Every utterance must have matching suggestion records.
                lengths = {
                    len(episode['dialog']),
                    len(episode['suggestions']),
                    len(episode['chosen_suggestions']),
                }
                assert len(lengths) == 1
                # One output line per free/guided utterance pair.
                num_entries = len(episode['dialog']) // 2
                for entry_idx in range(num_entries):
                    line = _get_line(
                        episode=episode, num_entries=num_entries, entry_idx=entry_idx
                    )
                    f_write.write(f'{line} \n')
def _get_line(episode: dict, num_entries: int, entry_idx: int) -> str:
    """
    Return the line to print in the reformatted file.

    Serializes one free-Turker/guided-Turker exchange of ``episode``
    (selected by ``entry_idx``) as a tab-separated ParlAI dialog line;
    ``num_entries`` is the total number of exchanges and is used to mark
    the final line with ``episode_done``.
    """
    episode_done = entry_idx == num_entries - 1
    # Compile original context
    if entry_idx == 0:
        # Add those pieces of context that appear in the datasets that this one was
        # based on. Specifically:
        # - Your persona, but not your partner's persona (from ConvAI2)
        # - Topic (from Wizard of Wikipedia)
        # - **Not** the situation (from EmpatheticDialogues)
        persona_pieces = [
            f"your persona: {episode['personas'][1][0]}",
            f"your persona: {episode['personas'][1][1]}",
        ]
        if episode['context_dataset'] == 'wizard_of_wikipedia':
            additional_context_pieces = [episode['additional_context']]
        else:
            additional_context_pieces = []
        previous_utterance_pieces = [
            episode['free_turker_utterance'],
            episode['guided_turker_utterance'],
        ]
        original_context = (
            '\n'.join(
                persona_pieces + additional_context_pieces + previous_utterance_pieces
            )
            + '\n'
        )
    else:
        original_context = ''
    # Gather messages and suggestions; even dialog indices are the free
    # Turker, odd indices are the guided Turker.
    free_message = episode['dialog'][2 * entry_idx][1]
    guided_message = episode['dialog'][2 * entry_idx + 1][1]
    single_task_suggestions = {
        task: episode['suggestions'][2 * entry_idx + 1][task]
        for task in ['convai2', 'empathetic_dialogues', 'wizard_of_wikipedia']
    }
    guided_chosen_suggestion = episode['chosen_suggestions'][2 * entry_idx + 1]
    # Compile into text string; dict insertion order fixes the field order
    # of the serialized line.
    parts = {
        'text': original_context + free_message,
        'labels': guided_message,
        'context_dataset': episode['context_dataset'],
        'free_message': free_message,
        **single_task_suggestions,
        'guided_chosen_suggestion': guided_chosen_suggestion,
    }
    assert all([isinstance(part, str) for part in parts.values()])
    line = '\t'.join([f'{key}:{_escape(value)}' for key, value in parts.items()])
    # Add episode_done
    if episode_done:
        line += '\tepisode_done:True'
    # Add label_candidates
    if 'label_candidates' in episode:
        label_candidates = episode['label_candidates'][entry_idx]
        # Note that episode['dialog'] is indexed by utterance (from either Turker) and
        # episode['label_candidates'] is indexed by guided Turker response
        assert all([isinstance(cand, str) for cand in label_candidates])
        escaped_label_candidates = [_escape(cand) for cand in label_candidates]
        line += '\tlabel_candidates:' + '|'.join(escaped_label_candidates)
    return line
def _escape(value: str) -> str:
return value.replace('\t', '\\t').replace('\n', '\\n').replace('|', '__PIPE__')
|
import click
from ghutil.types import Release
@click.command()
@click.option('-f', '--force', is_flag=True, help='Delete without prompting')
@Release.argument('release', implicit=False)
@click.argument('asset')
def cli(release, asset, force):
    """ Delete a release asset """
    s = release.asset(asset)
    # Typo fix: the prompt previously read "Delete assert ..."
    if force or click.confirm(f'Delete asset {s} from release {release}?'):
        s.delete()
        click.echo(f'Asset {s} deleted')
    else:
        click.echo('Asset not deleted')
|
# Copyright (c) 2018 Turysaz <turysaz@posteo.org>
import pygame
from .ConfigurationService import create_configuration_parser
from .EventAggregator import EventAggregator
from .IoCContainer import IoCContainer
from .MainControl import MainControl
from .MainView import MainView
from .ObjectFactory import ObjectFactory
from .ProjectParser import ProjectParser
from .SoundProvider import SoundProvider
class Bootstrapper():
    """Composition root: registers all services in the IoC container and
    starts the game."""

    def __init__(self):
        self.ioc = IoCContainer()
        # All services are singletons.  The trailing string arguments are
        # presumably the names of dependencies the container resolves and
        # injects on construction — verify against IoCContainer.
        self.ioc.register_singleton("conf", create_configuration_parser)
        self.ioc.register_singleton("ea", EventAggregator)
        self.ioc.register_singleton("ctrl", MainControl, "conf", "ea", "view", "pp")
        self.ioc.register_singleton("view", MainView, "conf", "ea")
        self.ioc.register_singleton("of", ObjectFactory, "ea", "sp")
        self.ioc.register_singleton("pp", ProjectParser, "ea", "of", "sp")
        self.ioc.register_singleton("sp", SoundProvider)

    def bootstrap(self):
        """Initialize pygame, resolve the main control and run the game."""
        pygame.init()
        ctrl = self.ioc.get_instance("ctrl")
        ctrl.run_game()
|
# Question 6 Lab 03
# AB Satyaprakash (180123062)
# imports ----------------------------------------------------------------------
from math import factorial
from fractions import Fraction
import numpy as np
import sympy as sp
# ------------------------------------------------------------------------------
# functions --------------------------------------------------------------------
def forwardDiff(fArray):
    """Build the full forward-difference table of the sampled values.

    Row 0 is fArray itself; row k holds the k-th order forward
    differences, so each successive row is one element shorter.
    """
    table = [fArray]
    for order in range(1, len(fArray)):
        prev = table[order - 1]
        table.append([prev[k + 1] - prev[k] for k in range(len(prev) - 1)])
    return table
def newtonFDPoly(fArray, xArray):
    """Newton forward-difference interpolating polynomial.

    Assumes equally spaced nodes (spacing taken from the first two nodes)
    and returns numpy polynomial coefficients, highest degree first.
    """
    h = xArray[1] - xArray[0]
    # u(x) = (x - x0) / h, expressed as polynomial coefficients in x.
    u = np.array([1 / h, -xArray[0] / h])
    table = forwardDiff(fArray)
    poly = np.array([0])
    for order in range(len(fArray)):
        # binom accumulates u*(u-1)*...*(u-order+1) / order!  (i.e. C(u, order))
        binom = np.array([1])
        for k in range(order):
            binom = np.polymul(binom, np.polyadd(u, np.array([-k])))
            binom = binom / (k + 1)
        poly = np.polyadd(poly, binom * table[order][0])
    return poly
# ------------------------------------------------------------------------------
# The following data are given for a polynomial P(x) of unknown degree.
# P(0) = 4, P(1) = 9, P(2) = 15, P(3) = 18
# Determine the coefficient of x^3 in P(x), if all fourth-order forward differences are 1.
print('As we can simply observe, if all 4th order forward differences are 1, this means')
# Fix: "polynmial" typo in the printed explanation.
print('All 5th order forward differences will be 0, in other words degree of polynomial is 4')
X = [0, 1, 2, 3]
P = [4, 9, 15, 18]
px = newtonFDPoly(P, X)
# Since we also have information about the 4th order FD, we can add 1 more term to P(x):
# the term is x(x-1)(x-2)(x-3)/4! with the 4th-order difference value 1.
term = np.polymul([1, 0], np.polymul([1, -1], np.polymul([1, -2], [1, -3])))
term = (term*1)/factorial(4)
px = np.polyadd(px, term)
print('The polynomial is thus given as \n{}'.format(np.poly1d(px)))
print('Clearly, the coefficient of x^3 is {} or {}'.format(px[1], '-11/12'))
# Question 6 ends --------------------------------------------------------------
|
from . import file_hdf4, file_hdf5, file_idl, file_netcdf
|
import warnings
import re
import py
import pytest
from _pytest.recwarn import WarningsRecorder
def test_recwarn_functional(testdir):
    """End-to-end check that the ``recwarn`` fixture swaps in its own
    ``warnings.showwarning`` hook during a test and restores the original
    afterwards."""
    reprec = testdir.inline_runsource("""
        import warnings
        oldwarn = warnings.showwarning
        def test_method(recwarn):
            assert warnings.showwarning != oldwarn
            warnings.warn("hello")
            warn = recwarn.pop()
            assert isinstance(warn.message, UserWarning)
        def test_finalized():
            assert warnings.showwarning == oldwarn
    """)
    res = reprec.countoutcomes()
    # countoutcomes() -> (passed, skipped, failed): both inner tests pass.
    assert tuple(res) == (2, 0, 0), res
class TestWarningsRecorderChecker(object):
    """Unit tests for WarningsRecorder's recording, stack discipline and
    argument type checking."""

    def test_recording(self, recwarn):
        showwarning = py.std.warnings.showwarning
        rec = WarningsRecorder()
        with rec:
            # Entering the recorder replaces the global showwarning hook.
            assert py.std.warnings.showwarning != showwarning
            assert not rec.list
            py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13)
            assert len(rec.list) == 1
            py.std.warnings.warn(DeprecationWarning("hello"))
            assert len(rec.list) == 2
            warn = rec.pop()
            # pop() returns the first recorded warning.
            assert str(warn.message) == "hello"
            l = rec.list
            rec.clear()
            assert len(rec.list) == 0
            # clear() empties the list in place; same list object is kept.
            assert l is rec.list
            pytest.raises(AssertionError, "rec.pop()")
        # Leaving the context restores the original hook.
        assert showwarning == py.std.warnings.showwarning

    def test_typechecking(self):
        # WarningsChecker only accepts warning classes or tuples of them.
        from _pytest.recwarn import WarningsChecker
        with pytest.raises(TypeError):
            WarningsChecker(5)
        with pytest.raises(TypeError):
            WarningsChecker(('hi', RuntimeWarning))
        with pytest.raises(TypeError):
            WarningsChecker([DeprecationWarning, RuntimeWarning])

    def test_invalid_enter_exit(self):
        # wrap this test in WarningsRecorder to ensure warning state gets reset
        with WarningsRecorder():
            with pytest.raises(RuntimeError):
                rec = WarningsRecorder()
                rec.__exit__(None, None, None)  # can't exit before entering
            with pytest.raises(RuntimeError):
                rec = WarningsRecorder()
                with rec:
                    with rec:
                        pass  # can't enter twice
class TestDeprecatedCall(object):
    """test pytest.deprecated_call()"""

    def dep(self, i, j=None):
        # Helper: warns only when i == 0, so both outcomes can be tested.
        if i == 0:
            py.std.warnings.warn("is deprecated", DeprecationWarning,
                                 stacklevel=1)
        return 42

    def dep_explicit(self, i):
        # Helper using warn_explicit, again gated on i == 0.
        if i == 0:
            py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning,
                                          filename="hello", lineno=3)

    def test_deprecated_call_raises(self):
        # No warning emitted -> deprecated_call must fail.
        with pytest.raises(AssertionError) as excinfo:
            pytest.deprecated_call(self.dep, 3, 5)
        assert str(excinfo).find("did not produce") != -1

    def test_deprecated_call(self):
        pytest.deprecated_call(self.dep, 0, 5)

    def test_deprecated_call_ret(self):
        # deprecated_call passes through the wrapped function's return value.
        ret = pytest.deprecated_call(self.dep, 0)
        assert ret == 42

    def test_deprecated_call_preserves(self):
        # Global warnings state must be left untouched by deprecated_call.
        onceregistry = py.std.warnings.onceregistry.copy()
        filters = py.std.warnings.filters[:]
        warn = py.std.warnings.warn
        warn_explicit = py.std.warnings.warn_explicit
        self.test_deprecated_call_raises()
        self.test_deprecated_call()
        assert onceregistry == py.std.warnings.onceregistry
        assert filters == py.std.warnings.filters
        assert warn is py.std.warnings.warn
        assert warn_explicit is py.std.warnings.warn_explicit

    def test_deprecated_explicit_call_raises(self):
        with pytest.raises(AssertionError):
            pytest.deprecated_call(self.dep_explicit, 3)

    def test_deprecated_explicit_call(self):
        # Called twice on purpose: the "once" registry must not swallow
        # the second warning.
        pytest.deprecated_call(self.dep_explicit, 0)
        pytest.deprecated_call(self.dep_explicit, 0)

    def test_deprecated_call_as_context_manager_no_warning(self):
        with pytest.raises(pytest.fail.Exception) as ex:
            with pytest.deprecated_call():
                self.dep(1)
        assert str(ex.value).startswith("DID NOT WARN")

    def test_deprecated_call_as_context_manager(self):
        with pytest.deprecated_call():
            self.dep(0)

    def test_deprecated_call_pending(self):
        # PendingDeprecationWarning is also accepted.
        def f():
            py.std.warnings.warn(PendingDeprecationWarning("hi"))
        pytest.deprecated_call(f)

    def test_deprecated_call_specificity(self):
        # Any other warning category must not satisfy deprecated_call.
        other_warnings = [Warning, UserWarning, SyntaxWarning, RuntimeWarning,
                          FutureWarning, ImportWarning, UnicodeWarning]
        for warning in other_warnings:
            def f():
                py.std.warnings.warn(warning("hi"))
            with pytest.raises(AssertionError):
                pytest.deprecated_call(f)

    def test_deprecated_function_already_called(self, testdir):
        """deprecated_call should be able to catch a call to a deprecated
        function even if that function has already been called in the same
        module. See #1190.
        """
        testdir.makepyfile("""
            import warnings
            import pytest
            def deprecated_function():
                warnings.warn("deprecated", DeprecationWarning)
            def test_one():
                deprecated_function()
            def test_two():
                pytest.deprecated_call(deprecated_function)
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines('*=== 2 passed in *===')
class TestWarns(object):
    """Tests for pytest.warns() in its string, callable, tuple and
    context-manager forms."""

    def test_strings(self):
        # different messages, b/c Python suppresses multiple identical warnings
        source1 = "warnings.warn('w1', RuntimeWarning)"
        source2 = "warnings.warn('w2', RuntimeWarning)"
        source3 = "warnings.warn('w3', RuntimeWarning)"
        # String sources are executed; a category mismatch must fail.
        pytest.warns(RuntimeWarning, source1)
        pytest.raises(pytest.fail.Exception,
                      lambda: pytest.warns(UserWarning, source2))
        pytest.warns(RuntimeWarning, source3)

    def test_function(self):
        # Extra positional args are forwarded to the callable.
        pytest.warns(SyntaxWarning,
                     lambda msg: warnings.warn(msg, SyntaxWarning), "syntax")

    def test_warning_tuple(self):
        # A tuple of categories matches when any member matches.
        pytest.warns((RuntimeWarning, SyntaxWarning),
                     lambda: warnings.warn('w1', RuntimeWarning))
        pytest.warns((RuntimeWarning, SyntaxWarning),
                     lambda: warnings.warn('w2', SyntaxWarning))
        pytest.raises(pytest.fail.Exception,
                      lambda: pytest.warns(
                          (RuntimeWarning, SyntaxWarning),
                          lambda: warnings.warn('w3', UserWarning)))

    def test_as_contextmanager(self):
        with pytest.warns(RuntimeWarning):
            warnings.warn("runtime", RuntimeWarning)
        with pytest.warns(UserWarning):
            warnings.warn("user", UserWarning)
        # A wrong category fails with a DID NOT WARN message that lists
        # the warnings that actually were emitted.
        with pytest.raises(pytest.fail.Exception) as excinfo:
            with pytest.warns(RuntimeWarning):
                warnings.warn("user", UserWarning)
        excinfo.match(r"DID NOT WARN. No warnings of type \(.+RuntimeWarning.+,\) was emitted. "
                      r"The list of emitted warnings is: \[UserWarning\('user',\)\].")
        with pytest.raises(pytest.fail.Exception) as excinfo:
            with pytest.warns(UserWarning):
                warnings.warn("runtime", RuntimeWarning)
        excinfo.match(r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. "
                      r"The list of emitted warnings is: \[RuntimeWarning\('runtime',\)\].")
        with pytest.raises(pytest.fail.Exception) as excinfo:
            with pytest.warns(UserWarning):
                pass
        excinfo.match(r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. "
                      r"The list of emitted warnings is: \[\].")
        warning_classes = (UserWarning, FutureWarning)
        with pytest.raises(pytest.fail.Exception) as excinfo:
            with pytest.warns(warning_classes) as warninfo:
                warnings.warn("runtime", RuntimeWarning)
                warnings.warn("import", ImportWarning)
        message_template = ("DID NOT WARN. No warnings of type {0} was emitted. "
                            "The list of emitted warnings is: {1}.")
        excinfo.match(re.escape(message_template.format(warning_classes,
                                                        [each.message for each in warninfo])))

    def test_record(self):
        # The context manager records warnings for later inspection.
        with pytest.warns(UserWarning) as record:
            warnings.warn("user", UserWarning)
        assert len(record) == 1
        assert str(record[0].message) == "user"

    def test_record_only(self):
        # warns(None) records everything without asserting a category.
        with pytest.warns(None) as record:
            warnings.warn("user", UserWarning)
            warnings.warn("runtime", RuntimeWarning)
        assert len(record) == 2
        assert str(record[0].message) == "user"
        assert str(record[1].message) == "runtime"

    def test_double_test(self, testdir):
        """If a test is run again, the warning should still be raised"""
        testdir.makepyfile('''
            import pytest
            import warnings
            @pytest.mark.parametrize('run', [1, 2])
            def test(run):
                with pytest.warns(RuntimeWarning):
                    warnings.warn("runtime", RuntimeWarning)
        ''')
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(['*2 passed in*'])
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import os
import subprocess
import fnmatch
import xml.etree.ElementTree as ET
import string
def get_user_plist_filenames():
    """Return user account .plist filenames under ``basepath``, skipping
    system accounts.

    Bug fix: the original fnmatch pattern '[!_|!nobody]*.plist' was a
    *character class* ("first char not one of _|!nobody"), not the intended
    "not underscore-prefixed and not the nobody account" filter — it also
    wrongly excluded any user whose name starts with n, o, b, d or y.
    Filter explicitly instead.
    """
    files = []
    for filename in os.listdir(basepath):
        if not filename.endswith('.plist'):
            continue
        # Skip macOS service accounts (leading '_') and the nobody account.
        if filename.startswith('_') or filename == 'nobody.plist':
            continue
        files.append(filename)
    return files
def get_plist_contents_from(filename):
    """Read a user's ShadowHashData plist entry as an XML string.

    Shells out to ``defaults read`` (needs sudo), hex-decodes the binary
    blob and converts it to XML with plutil so it can be parsed with
    ElementTree.  stderr of defaults is discarded, so failures surface as
    an empty return string.
    """
    path = basepath + filename
    # NOTE(review): shell=True with an interpolated path is acceptable for
    # these fixed system paths, but would be unsafe for untrusted filenames.
    result = subprocess.run([
        u"sudo /usr/bin/defaults read {}".format(path) +
        u" ShadowHashData 2>/dev/null|tr -dc 0-9a-f|xxd -r -p|" +
        u"plutil -convert xml1 - -o -"
    ], universal_newlines=True, shell=True,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return result.stdout
def remove_whitespace(hash_str):
    """Return hash_str with every whitespace character removed."""
    deletion_table = {ord(ch): '' for ch in string.whitespace}
    return hash_str.translate(deletion_table)
def parse_plist(plist_str):
    """Pull entropy, iteration count and salt out of a ShadowHashData plist.

    Relies on document order: the first <data> element holds the entropy,
    the first <integer> the iteration count, the second <data> the salt.
    """
    root = ET.fromstring(plist_str)
    for node in root.findall(".//data[1]"):
        entropy = node.text.replace(" ", "").strip()
    for node in root.findall(".//integer[1]"):
        iterations = node.text.strip()
    for node in root.findall(".//data[2]"):
        salt = node.text.strip()
    return dict(entropy=entropy, iterations=iterations, salt=salt)
def format_hash(hash_components):
    """Assemble the ``$ml$<iterations>$<salt>$<entropy>`` hash string,
    stripping any whitespace left over from the plist text blocks."""
    joined = u"$ml$%s$%s$%s" % (
        hash_components["iterations"],
        hash_components["salt"],
        hash_components["entropy"],
    )
    return joined.translate({ord(ch): '' for ch in set(string.whitespace)})
def make_crypt_format(user, hash_str):
    """Join username and hash in crypt-style ``user:hash`` form."""
    return "%s:%s" % (user, hash_str)
if __name__ == '__main__':
    # macOS stores local account records here.
    basepath = '/var/db/dslocal/nodes/Default/users/'
    files = get_user_plist_filenames()
    for filename in files:
        user = filename.split('.')[0]
        plist_contents = get_plist_contents_from(filename)
        try:
            hash_components = parse_plist(plist_contents)
            formatted_hash = format_hash(hash_components)
            print(make_crypt_format(user, formatted_hash))
            print()
        # Fix: catch Exception instead of a bare except, which also
        # swallowed KeyboardInterrupt/SystemExit.  Extraction stays
        # best-effort per user.
        except Exception:
            print(u"Oops! Something went wrong trying to extract" +
                  u" {}'s password hash!".format(user))
            print()
|
from flask import Flask, request
from bot import Bot
import os
# VK callback-API settings, all supplied via environment variables.
TOKEN = os.environ.get('VK_TOKEN')  # access token passed to Bot
confirmation_code = os.environ.get('VK_CONF')  # string returned on 'confirmation' events
SECRET = os.environ.get('VK_SECRET')  # secret path segment of the webhook URL
server = Flask(__name__)
bot = Bot(token=TOKEN)
# Previously handled event payload, kept to drop duplicate deliveries.
last_msg = None
@server.route('/'+SECRET, methods=['POST'])
def handle():
    """VK callback-API webhook: confirmation handshake and new messages."""
    global last_msg
    payload = request.get_json(force=True, silent=True)
    # Reject empty bodies and payloads without an event type.
    if not payload or 'type' not in payload:
        return 'not ok'
    kind = payload['type']
    if kind == 'confirmation':
        # VK expects the confirmation string echoed back verbatim.
        return confirmation_code
    if kind == 'message_new' and payload != last_msg:
        # Skip a payload identical to the previous one (duplicate delivery).
        last_msg = payload
        bot.handle(payload)
    return 'ok'
@server.route('/', methods=["GET"])
def index():
    # Simple liveness endpoint.
    return "alive", 200
if __name__ == "__main__":
    # Development entry point; the PORT env var overrides the default 5000.
    server.run(host="0.0.0.0", port=int(os.environ.get('PORT', 5000)))
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import yaml
from st2tests.base import BaseActionTestCase
class VCloudBaseActionTestCase(BaseActionTestCase):
    """Shared base class for vCloud action tests: loads a blank and a good
    configuration fixture and provides common config sanity checks."""
    # Not a runnable test case itself; concrete subclasses enable this and
    # set action_cls.
    __test__ = False

    def setUp(self):
        super(VCloudBaseActionTestCase, self).setUp()
        self._blank_config = self.load_yaml('cfg_blank.yaml')
        self._good_config = self.load_yaml('cfg_good.yaml')

    def load_yaml(self, filename):
        """Parse a YAML fixture file from the pack's fixture directory."""
        return yaml.safe_load(self.get_fixture_content(filename))

    def load_json(self, filename):
        """Parse a JSON fixture file from the pack's fixture directory."""
        return json.loads(self.get_fixture_content(filename))

    @property
    def blank_config(self):
        return self._blank_config

    @property
    def good_config(self):
        return self._good_config

    def test_run_config_blank(self):
        # A blank config must be rejected at action construction time.
        self.assertRaises(ValueError, self.action_cls, self.blank_config)

    def test_run_config_new(self):
        action = self.get_action_instance(self.good_config)
        self.assertIsInstance(action, self.action_cls)
|
from datasets.data_utils.Crawlers import BaiduPic
class WebCrawler:
    """Thin facade over the picture crawlers."""

    @staticmethod
    def goToFind(keys, limit=8000):
        """Crawl Baidu pictures for *keys* (up to *limit* results).

        Bug fix: the crawl result was assigned to a local variable and
        discarded, so callers always received None; return it instead.
        """
        return BaiduPic.goToFind(keys, limit)
|
# -*- coding: utf-8 -*-
"""
{{cookiecutter.package_name}}.config
{{(cookiecutter.package_name + ".config") | length * '~'}}
Application configurations.
"""
import os
#pylint: disable=too-few-public-methods
class Config(object):
    """Application's base configuration."""
    # Filesystem anchors: the app package directory and the project root.
    APP_PATH = os.path.abspath(os.path.dirname(__file__))
    PROJECT_PATH = os.path.abspath(os.path.join(APP_PATH, os.pardir))
    # Read from the environment in deployment; dev fallback otherwise.
    SECRET_KEY = os.environ.get('{{cookiecutter.package_name | upper}}_SECRET', 'secret-key')
    BCRYPT_LOG_ROUNDS = 13
    # Keep SQLAlchemy's modification tracking and query echo off by default.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_ECHO = False
class ProdConfig(Config):
    """Production configuration."""
    ENV = 'prod'
class DevConfig(Config):
    """Development configuration."""
    ENV = 'dev'
    DEBUG = True
    DB_NAME = 'dev.db'
    # SQLite database file located at the project root.
    SQLALCHEMY_DATABASE_URI = 'sqlite:////{0}/{1}'.format(
        Config.PROJECT_PATH, DB_NAME)
class TestConfig(Config):
    """Test configuration."""
    ENV = 'dev'
    TESTING = True
    DEBUG = True
    # Lower bcrypt cost so the test suite runs quickly.
    BCRYPT_LOG_ROUNDS = 4
|
class InvalidAuthorizationToken(RuntimeError):
    """Raised when an authorization token is rejected."""
    pass
class NoTeamsError(RuntimeError):
    """Raised when no team is available."""
    pass
class MultipleTeamsError(RuntimeError):
    """Raised when more than one team matches; carries the candidates."""
    def __init__(self, teams):
        # The ambiguous team candidates, for the caller to disambiguate.
        self.teams = teams
class CommandError(RuntimeError):
    """Raised when a command fails."""
    pass
class DataEntryError(ValueError):
    """Raised for invalid data entry; note this is a ValueError subclass."""
    pass
class ConfigurationError(RuntimeError):
    """Raised for invalid or missing configuration."""
    pass
class ObjectNotFound(RuntimeError):
    """Raised when a requested object cannot be found."""
    pass
__all__ = [
'interfaces',
'pipelines',
'utils',
'wfmaker'
'__version__'
]
from .pipelines import Couple_Preproc_Pipeline, TV_Preproc_Pipeline
from .wfmaker import wfmaker
from .version import __version__
|
#!/usr/bin/env python
r"""
>>> Pattern('/{a}').parse('/foo.html')
PatternResult(a='foo.html')
>>> Pattern('/{a}.html').parse('/foo.html')
PatternResult(a='foo')
Each substitution pattern tries to consume as much as possible by default
>>> Pattern('/{a}').parse('/a/b/c')
PatternResult(a='a/b/c')
Override the ``default_match`` class attribute to adjust this behavior
>>> class StrictPathPattern(Pattern):
... default_match = r"[^/]*" # Consume any non-slash character
>>> StrictPathPattern('/{a}').parse('/c/b/a')
Traceback (most recent call last):
...
ValueError: '/c/b/a' does not match pattern '/{a}'
>>> StrictPathPattern('/{a}/{b}/{c}').parse('/c/b/a').kwargs == \
... dict(a="c", b="b", c="a")
True
>>> Pattern('/{0}bar').parse('/foobar')
PatternResult('foo')
Pattern matching works in reverse too.
>>> Pattern('/{root}/{branch}/{leaf}/{0}').replace(
... 'd', root="a", branch="b", leaf=3)
'/a/b/3/d'
>>> def my_callable(a, option='foo', beta=10):
... return (a + '123', beta - 10, option)
>>> Pattern('/{0}/{beta}', beta=int).parse('/alpha/5').apply(my_callable)
('alpha123', -5, 'foo')
Adjacent matches will consume one character each until the last. This behavior
currently follows the behavior of the regular expression (.+?) and is subject
to change in future versions.
>>> my_pattern = Pattern('{0}{1}{a}')
>>> my_pattern.parse('123456')
PatternResult('1', '2', a='3456')
>>> my_pattern.parse('12')
Traceback (most recent call last):
...
ValueError: '12' does not match pattern '{0}{1}{a}'
>>> my_pattern.regex()
'^(?P<_0>.+?)(?P<_1>.+?)(?P<_a>.+?)$'
If necessary, regular expressions for each object can be added to the pattern's
``matches`` attribute.
>>> my_pattern.matches['a'] = r"[0-9]{2}" # {a} matches exactly 2 digits
>>> my_pattern.parse('123456')
PatternResult('1', '234', a='56')
>>> my_pattern.parse('123')
Traceback (most recent call last):
...
ValueError: '123' does not match pattern '{0}{1}{a}'
"""
import re
class PatternResult(object):
    """Holds the positional and keyword captures from a Pattern match."""

    def __init__(self, args, kwargs):
        self.args = args
        self.kwargs = kwargs

    def apply(self, function):
        """Invoke *function* with the captured arguments."""
        return function(*self.args, **self.kwargs)

    def __repr__(self):
        pieces = [repr(arg) for arg in self.args]
        pieces.extend('{}={!r}'.format(key, val)
                      for key, val in self.kwargs.items())
        return 'PatternResult({})'.format(', '.join(pieces))
class Pattern(object):
    """Compiles a ``{field}``-style pattern string into a regex and parses
    strings against it (see the module docstring for usage examples)."""

    # Regex fragment for fields that have no override in ``self.matches``.
    default_match = r".+?"
    # Anchors wrapped around the generated expression.
    prefix = '^'
    postfix = '$'

    def __init__(self, pattern_string, *arg_xformers, **transformers):
        """pattern_string: a template with ``{name}`` / ``{0}`` fields.

        Positional ``arg_xformers`` and keyword ``transformers`` are
        callables applied to the corresponding captured substrings
        (e.g. ``int`` to convert a numeric field).
        """
        self.pattern_string = pattern_string
        self.transformers = transformers
        # Positional transformers are stored under their integer index.
        self.transformers.update({i: v for i, v in enumerate(arg_xformers)})
        # Per-field regex overrides, keyed by field name.
        self.matches = dict()

    def match(self, matchobject):
        # re.sub callback: turn an escaped ``\{name\}`` into a named group
        # ``(?P<_name>...)``; the underscore marks groups made from fields.
        return r"(?P<_%s>%s)" % (
            matchobject.group(1),
            self.matches.get(matchobject.group(1), self.default_match)
        )

    def regex(self):
        """Build the full anchored regex from the pattern string."""
        # The pattern is re.escape()d first, so fields appear as ``\{name\}``;
        # the negative lookbehind skips doubled (literal) braces.
        return self.prefix + re.sub(
            r"(?<!\\\{)\\\{(0|[_a-zA-Z1-9][_a-zA-Z0-9]*)\\\}",
            self.match,
            re.escape(self.pattern_string)
        ) + self.postfix

    def parse(self, string):
        """Match *string* against the pattern.

        Returns a PatternResult with positional and keyword captures after
        applying any registered transformers; raises ValueError when the
        string does not match.
        """
        match = re.match(self.regex(), string)
        if not match:
            raise ValueError("%r does not match pattern %r" % (
                string, self.pattern_string))
        args, values = dict(), match.groupdict()
        for key in list(values):
            if key[0] == '_':
                # Underscore-prefixed groups come from pattern fields:
                # integer names are positional captures, others keyword.
                try:
                    k, which = int(key[1:]), args
                except ValueError:
                    k, which = key[1:], values
                # Apply the registered transformer (identity by default).
                which[k] = self.transformers.get(k, lambda x: x)(
                    values.pop(key))
            else:
                # Extension hook: subclasses may add extra named groups and
                # a matching ``handle_<name>`` method to consume them.
                getattr(self, 'handle_%s' % key)(values.pop(key), args, values)
        # Order positional captures by their index.
        args = [value for key, value in sorted(args.items())]
        return PatternResult(args, values)

    def replace(self, *args, **values):
        """Reverse operation: substitute values back into the pattern."""
        return self.pattern_string.format(*args, **values)
class PathPattern(Pattern):
    """Elements do not cross slash ("/") boundaries.
    >>> PathPattern('/{0}').parse('/a')
    PatternResult('a')
    >>> PathPattern('/{0}').parse('/a/b')
    Traceback (most recent call last):
    ...
    ValueError: '/a/b' does not match pattern '/{0}'
    """
    # Greedy "anything but a slash" instead of the base class's lazy ".+?".
    default_match = r"[^/]+"
class URLPattern(PathPattern):
    # Replace the plain '$' anchor: require a '?' query string after the
    # path; parse() exposes it as the ``query`` keyword capture.
    postfix = r"(?P<_query>\?.*)$"
if __name__ == '__main__':
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 12 22:46:16 2020
@author: chens
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
from geoist.pfm import normgra
from geoist import DATA_PATH
# 1.读取数据
datapath = Path(Path(normgra.__file__).parent, 'data')
filename = Path(datapath, 'ynyx_grav.csv') #'ynyx_grav.csv') #ynp1_grav.csv
gradata = pd.read_csv(filename)
print('1. 重力数据已经读入,格式为:{}'.format(gradata.keys()))
# 2. 计算FGA
gradata['freeair'] = normgra.gamma_closed_form(gradata['lat'], gradata['elev'])
gradata['buglayer'] = normgra.bouguer_plate(gradata['elev'])
gradata['FGA'] = gradata['grav'] - gradata['freeair']
gradata['BGA_s'] = gradata['grav'] - gradata['freeair'] - gradata['buglayer']
# 2.1 正常场计算方法不同
#gradata['BGA_s1'] = gradata['grav'] - normgra.gamma_somigliana_free_air(gradata['lat'], gradata['elev']) - gradata['buglayer']
# 2.2 输出结果
print('2. 重力异常计算完成,已保存到:{}'.format(Path(DATA_PATH, 'ynp1_grav_anomaly.csv')))
# 3. 投影变换
import pyproj
p_jw = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
p_lcc = "+proj=lcc +lon_0=102.5 +lat_0=24.38 +lat_1=45 +ellps=WGS84 +datum=WGS84 +no_defs"
proj_xy = pyproj.Proj(p_lcc) #projection = pyproj.Proj(proj="merc", lat_ts=gradata['lat'].mean())
proj_coords = proj_xy(gradata['lon'].values, gradata['lat'].values)
gradata['x'] = proj_coords[0]
gradata['y'] = proj_coords[1]
proj_jw = pyproj.Proj(p_jw) # 目标坐标系统
origin_lon, origin_lat = pyproj.transform(proj_xy, proj_jw, gradata['x'].values, gradata['y'].values)
fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2,figsize=(12,6))
ax0.set_title("Locations of gravity anomlay")
ax0.plot(gradata['lon'], gradata['lat'], "ok", markersize=1.5)
ax0.set_xlabel("Longitude")
ax0.set_ylabel("Latitude")
ax0.set_aspect("equal")
ax1.set_title("Projected coordinates of gravity anomlay")
ax1.plot(gradata['x'], gradata['y'], "ok", markersize=1.5)
ax1.set_xlabel("Easting (m)")
ax1.set_ylabel("Northing (m)")
ax1.set_aspect("equal")
plt.tight_layout()
plt.show()
# 4. 网格化
from geoist.gridder import spline, mask, trend
BGA = spline.Spline().fit(proj_coords, gradata['BGA_s'].values)
res_BGA = gradata['BGA_s'].values - BGA.predict(proj_coords)
grid = BGA.grid(spacing=2e2, data_names=["BGAs"])
print('4. 网格化信息如下:',grid)
type(grid)
grid1 = mask.distance_mask(proj_coords, maxdist=5e2, grid=grid)
grid1.BGAs.plot.pcolormesh()
# 5. Remove a first-degree (planar) regional trend from the Bouguer anomaly.
# Use a distinct name so the imported ``trend`` module is not shadowed.
trend_model = trend.Trend(degree = 1).fit(proj_coords, gradata['BGA_s'].values)
# Bug fix: the original called .format(trend.coef_) on a string with no {}
# placeholder (and labeled it '4.'), so the fitted coefficients were never
# actually printed.
print('5. 拟合的趋势系数:{}'.format(trend_model.coef_))
trend_values = trend_model.predict(proj_coords)
residuals = gradata['BGA_s'].values - trend_values
gradata['resBGA'] = residuals
gradata.to_csv(Path(DATA_PATH, 'ynp1_grav_anomaly.csv'), index = False, sep = ',')
ori = proj_coords #(origin_lon, origin_lat)
rBGA = spline.Spline().fit(ori, residuals)
grid2 = rBGA.grid(spacing=200, data_names=["resBGA"])
# grid3 = mask.distance_mask(ori, maxdist=5e2, grid=grid2)
plt.figure()
grid2.resBGA.plot.pcolormesh()
# grid3.resBGAs.plot.pcolormesh()
# 6. 数据转换
from geoist.others.utils import grid2Grid,map2DGrid
from geoist.pfm.grdio import grddata
rBGAg2d = grid2Grid(grid2.resBGA.easting, grid2.resBGA.northing, grid2.resBGA.values)
print(rBGAg2d)
g1out = grddata()
g1out.cols = rBGAg2d.getGeoDict().nx
g1out.rows = rBGAg2d.getGeoDict().ny
g1out.xmin = rBGAg2d.getGeoDict().xmin
g1out.xmax = rBGAg2d.getGeoDict().xmax
g1out.ymin = rBGAg2d.getGeoDict().ymin
g1out.ymax = rBGAg2d.getGeoDict().ymax
g1out.data0 = grid2.resBGA.values
g1out.export_surfer(Path(DATA_PATH, 'ynyx_bgar.grd'), False, 'ascii')
g1 = spline.Spline().fit(ori, gradata['FGA'])
gridx = g1.grid(spacing=200, data_names=["vals"])
g1out.data0 = gridx.vals.values
g1out.export_surfer(Path(DATA_PATH, 'ynyx_fga.grd'), False, 'ascii')
g2 = spline.Spline().fit(ori, gradata['BGA_s'])
gridx = g2.grid(spacing=200, data_names=["vals"])
g1out.data0 = gridx.vals.values
g1out.export_surfer(Path(DATA_PATH, 'ynyx_bgas.grd'), False, 'ascii')
g3= spline.Spline().fit(ori, gradata['elev'])
gridx = g3.grid(spacing=200, data_names=["vals"])
g1out.data0 = gridx.vals.values
g1out.export_surfer(Path(DATA_PATH, 'ynyx_elev.grd'), False, 'ascii')
#plt.figure()
#map2DGrid(None,rBGAg2d,'residual BGA', 200,200, isLeft=True)
plt.figure()
plt.imshow(grid2.resBGA.values[::-1])
np.save('d:\grid',grid2.resBGA.values[::-1])
|
#!/usr/bin/python
## "non-linear barotropically unstable shallow water test case"
## example provided by Jeffrey Whitaker
## https://gist.github.com/jswhit/3845307
##
## Running the script should pop up a window with this image:
## http://i.imgur.com/ZlxR1.png
import numpy as np
import shtns
class Spharmt(object):
    """
    wrapper class for commonly used spectral transform operations in
    atmospheric models. Provides an interface to shtns compatible
    with pyspharm (pyspharm.googlecode.com).
    """
    def __init__(self, nlons, nlats, ntrunc, rsphere, gridtype='gaussian'):
        """initialize
        nlons: number of longitudes
        nlats: number of latitudes
        ntrunc: spectral truncation limit
        rsphere: radius of the sphere (same length unit as the winds)
        gridtype: 'gaussian' or 'regular'"""
        self._shtns = shtns.sht(ntrunc, ntrunc, 1,
                                shtns.sht_orthonormal + shtns.SHT_NO_CS_PHASE)
        if gridtype == 'gaussian':
            #self._shtns.set_grid(nlats,nlons,shtns.sht_gauss_fly|shtns.SHT_PHI_CONTIGUOUS,1.e-10)
            self._shtns.set_grid(nlats, nlons,
                                 shtns.sht_quick_init | shtns.SHT_PHI_CONTIGUOUS, 1.e-10)
        elif gridtype == 'regular':
            self._shtns.set_grid(nlats, nlons,
                                 shtns.sht_reg_dct | shtns.SHT_PHI_CONTIGUOUS, 1.e-10)
        self.lats = np.arcsin(self._shtns.cos_theta)
        self.lons = (2. * np.pi / nlons) * np.arange(nlons)
        self.nlons = nlons
        self.nlats = nlats
        self.ntrunc = ntrunc
        self.nlm = self._shtns.nlm
        self.degree = self._shtns.l
        # Fix: np.complex was removed from NumPy; use the builtin complex.
        # lap holds -l(l+1)/rsphere**2, the spherical Laplacian eigenvalues.
        self.lap = -self.degree * (self.degree + 1.0).astype(complex)
        self.invlap = np.zeros(self.lap.shape, self.lap.dtype)
        self.invlap[1:] = 1. / self.lap[1:]
        self.rsphere = rsphere
        self.lap = self.lap / rsphere**2
        self.invlap = self.invlap * rsphere**2

    def grdtospec(self, data):
        """compute spectral coefficients from gridded data"""
        return self._shtns.analys(data)

    def spectogrd(self, dataspec):
        """compute gridded data from spectral coefficients"""
        return self._shtns.synth(dataspec)

    def getuv(self, vrtspec, divspec):
        """compute wind vector from spectral coeffs of vorticity and divergence"""
        return self._shtns.synth((self.invlap / self.rsphere) * vrtspec,
                                 (self.invlap / self.rsphere) * divspec)

    def getvrtdivspec(self, u, v):
        """compute spectral coeffs of vorticity and divergence from wind vector"""
        vrtspec, divspec = self._shtns.analys(u, v)
        # Bug fix: the divergence term used the *global* name ``rsphere``
        # instead of ``self.rsphere`` — it only worked when the calling
        # script happened to define a matching global.
        return self.lap * self.rsphere * vrtspec, self.lap * self.rsphere * divspec

    def getgrad(self, divspec):
        """compute gradient vector from spectral coeffs"""
        vrtspec = np.zeros(divspec.shape, dtype=complex)
        u, v = self._shtns.synth(vrtspec, divspec)
        # Bug fix: divide by self.rsphere, not the global rsphere.
        return u / self.rsphere, v / self.rsphere
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import time
    # non-linear barotropically unstable shallow water test case
    # of Galewsky et al (2004, Tellus, 56A, 429-440).
    # "An initial-value problem for testing numerical models of the global
    # shallow-water equations" DOI: 10.1111/j.1600-0870.2004.00071.x
    # http://www-vortex.mcs.st-and.ac.uk/~rks/reprints/galewsky_etal_tellus_2004.pdf
    # requires matplotlib for plotting.
    # grid, time step info
    nlons = 256  # number of longitudes
    ntrunc = int(nlons/3)  # spectral truncation (for alias-free computations)
    nlats = int(nlons/2)  # for gaussian grid.
    dt = 150  # time step in seconds
    itmax = 6*int(86400/dt)  # number of time steps (6 days of integration)
    # parameters for test
    rsphere = 6.37122e6  # earth radius
    omega = 7.292e-5  # rotation rate
    grav = 9.80616  # gravity
    hbar = 10.e3  # resting depth
    umax = 80.  # jet speed
    phi0 = np.pi/7.; phi1 = 0.5*np.pi - phi0; phi2 = 0.25*np.pi
    en = np.exp(-4.0/(phi1-phi0)**2)
    alpha = 1./3.; beta = 1./15.
    hamp = 120.  # amplitude of height perturbation to zonal jet
    efold = 3.*3600.  # efolding timescale at ntrunc for hyperdiffusion
    ndiss = 8  # order for hyperdiffusion
    # setup up spherical harmonic instance, set lats/lons of grid
    x = Spharmt(nlons, nlats, ntrunc, rsphere, gridtype='gaussian')
    lons, lats = np.meshgrid(x.lons, x.lats)
    f = 2.*omega*np.sin(lats)  # coriolis
    # zonal jet.
    # Fix: np.float / np.complex were removed from NumPy; use explicit dtypes.
    vg = np.zeros((nlats, nlons), np.float64)
    u1 = (umax/en)*np.exp(1./((x.lats-phi0)*(x.lats-phi1)))
    ug = np.zeros((nlats), np.float64)
    ug = np.where(np.logical_and(x.lats < phi1, x.lats > phi0), u1, ug)
    ug.shape = (nlats, 1)
    ug = ug*np.ones((nlats, nlons), dtype=np.float64)  # broadcast to shape (nlats,nlonss)
    # height perturbation.
    hbump = hamp*np.cos(lats)*np.exp(-(lons/alpha)**2)*np.exp(-(phi2-lats)**2/beta)
    # initial vorticity, divergence in spectral space
    vrtspec, divspec = x.getvrtdivspec(ug, vg)
    vrtg = x.spectogrd(vrtspec)
    divg = x.spectogrd(divspec)
    # create hyperdiffusion factor
    hyperdiff_fact = np.exp((-dt/efold)*(x.lap/x.lap[-1])**(ndiss/2))
    # solve nonlinear balance eqn to get initial zonal geopotential,
    # add localized bump (not balanced).
    vrtg = x.spectogrd(vrtspec)
    tmpg1 = ug*(vrtg+f); tmpg2 = vg*(vrtg+f)
    tmpspec1, tmpspec2 = x.getvrtdivspec(tmpg1, tmpg2)
    tmpspec2 = x.grdtospec(0.5*(ug**2+vg**2))
    phispec = x.invlap*tmpspec1 - tmpspec2
    phig = grav*(hbar + hbump) + x.spectogrd(phispec)
    phispec = x.grdtospec(phig)
    # initialize spectral tendency arrays (3 time levels for Adams-Bashforth)
    ddivdtspec = np.zeros(vrtspec.shape+(3,), np.complex128)
    dvrtdtspec = np.zeros(vrtspec.shape+(3,), np.complex128)
    dphidtspec = np.zeros(vrtspec.shape+(3,), np.complex128)
    nnew = 0; nnow = 1; nold = 2
    # time loop.
    # Fix: time.clock() was removed in Python 3.8; process_time() keeps the
    # "CPU time" meaning of the final printout.
    time1 = time.process_time()
    for ncycle in range(itmax+1):
        t = ncycle*dt
        # get vort,u,v,phi on grid
        vrtg = x.spectogrd(vrtspec)
        ug, vg = x.getuv(vrtspec, divspec)
        phig = x.spectogrd(phispec)
        print('t=%6.2f hours: min/max %6.2f, %6.2f' % (t/3600., vg.min(), vg.max()))
        # compute tendencies.
        tmpg1 = ug*(vrtg+f); tmpg2 = vg*(vrtg+f)
        ddivdtspec[:, nnew], dvrtdtspec[:, nnew] = x.getvrtdivspec(tmpg1, tmpg2)
        dvrtdtspec[:, nnew] *= -1
        tmpg = x.spectogrd(ddivdtspec[:, nnew])
        tmpg1 = ug*phig; tmpg2 = vg*phig
        tmpspec, dphidtspec[:, nnew] = x.getvrtdivspec(tmpg1, tmpg2)
        dphidtspec[:, nnew] *= -1
        tmpspec = x.grdtospec(phig+0.5*(ug**2+vg**2))
        ddivdtspec[:, nnew] += -x.lap*tmpspec
        # update vort,div,phiv with third-order adams-bashforth.
        # forward euler, then 2nd-order adams-bashforth time steps to start.
        if ncycle == 0:
            dvrtdtspec[:, nnow] = dvrtdtspec[:, nnew]
            dvrtdtspec[:, nold] = dvrtdtspec[:, nnew]
            ddivdtspec[:, nnow] = ddivdtspec[:, nnew]
            ddivdtspec[:, nold] = ddivdtspec[:, nnew]
            dphidtspec[:, nnow] = dphidtspec[:, nnew]
            dphidtspec[:, nold] = dphidtspec[:, nnew]
        elif ncycle == 1:
            dvrtdtspec[:, nold] = dvrtdtspec[:, nnew]
            ddivdtspec[:, nold] = ddivdtspec[:, nnew]
            dphidtspec[:, nold] = dphidtspec[:, nnew]
        vrtspec += dt*(
            (23./12.)*dvrtdtspec[:, nnew] - (16./12.)*dvrtdtspec[:, nnow] +
            (5./12.)*dvrtdtspec[:, nold])
        divspec += dt*(
            (23./12.)*ddivdtspec[:, nnew] - (16./12.)*ddivdtspec[:, nnow] +
            (5./12.)*ddivdtspec[:, nold])
        phispec += dt*(
            (23./12.)*dphidtspec[:, nnew] - (16./12.)*dphidtspec[:, nnow] +
            (5./12.)*dphidtspec[:, nold])
        # implicit hyperdiffusion for vort and div.
        vrtspec *= hyperdiff_fact
        divspec *= hyperdiff_fact
        # switch indices, do next time step.
        nsav1 = nnew; nsav2 = nnow
        nnew = nold; nnow = nsav1; nold = nsav2
    time2 = time.process_time()
    print('CPU time = ', time2-time1)
    # make a contour plot of potential vorticity in the Northern Hem.
    fig = plt.figure(figsize=(12, 4))
    # dimensionless PV
    pvg = (0.5*hbar*grav/omega)*(vrtg+f)/phig
    print('max/min PV', pvg.min(), pvg.max())
    lons1d = (180./np.pi)*x.lons-180.; lats1d = (180./np.pi)*x.lats
    levs = np.arange(-0.2, 1.801, 0.1)
    # Fix: the 'spectral' colormap was removed from matplotlib; it was
    # renamed 'nipy_spectral'.
    cs = plt.contourf(lons1d, lats1d, pvg, levs, cmap=plt.cm.nipy_spectral, extend='both')
    cb = plt.colorbar(cs, orientation='horizontal')  # add colorbar
    cb.set_label('potential vorticity')
    plt.grid()
    plt.xlabel('degrees longitude')
    plt.ylabel('degrees latitude')
    plt.xticks(np.arange(-180, 181, 60))
    plt.yticks(np.arange(-5, 81, 20))
    plt.axis('equal')
    plt.axis('tight')
    plt.ylim(0, lats1d[0])
    plt.title('PV (T%s with hyperdiffusion, hour %6.2f)' % (ntrunc, t/3600.))
    plt.show()
|
import argparse
import editdistance
import numpy as np
import os
import sys
import multiprocessing as mp
import subprocess
# ---- command-line interface and global run configuration ----
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path', type=str, help='This should point to the folder that contains your demultiplexed R2C2 fasta and subread (ending on _subs.fastq) files for each cell.')
parser.add_argument('-o', '--output_path', type=str, help='Merged fasta and subread files will be written to this folder')
parser.add_argument('-c', '--config_file', type=str, help='Same config file used for C3POa')
parser.add_argument('-m', '--score_matrix', type=str, help='Same matrix file used for C3POa')
parser.add_argument('-t', '--threads', type=str, help='defines the number of threads the multiprocessing will use')
args = parser.parse_args()
path = args.output_path+'/'
# Scratch directory for per-process intermediate files.
# NOTE(review): os.makedirs(temp, exist_ok=True) would be safer than shelling out.
temp = path+'/temp'
os.system('mkdir '+temp)
input_path = args.input_path+'/'
config_file = args.config_file
score_matrix = args.score_matrix
threads = int(args.threads)
# Maximum number of subreads fed to each racon polishing round.
subsample = 200
def reverse_complement(sequence):
    """Return the reverse complement of *sequence*.

    Handles A/C/G/T/N and the gap character '-'. An unknown character
    raises KeyError, exactly like the original dict lookup did.

    Builds the result with str.join instead of repeated string
    concatenation, which is linear rather than quadratic in the
    sequence length.
    """
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N', '-': '-'}
    return ''.join(complement[base] for base in reversed(sequence))
def configReader(configIn):
    '''Parse the config file into a {program_name: path} dictionary.

    Lines starting with '#' and blank lines are ignored; every other line
    is tab-separated "name<TAB>path". Unknown program names raise; known
    programs missing from the file fall back to being resolved from $PATH
    (with a note on stderr).
    '''
    programs = {}
    for raw_line in open(configIn):
        if raw_line.startswith('#') or not raw_line.rstrip().split():
            continue
        fields = raw_line.rstrip().split('\t')
        programs[fields[0]] = fields[1]
    # should have minimap, poa, racon, water, consensus
    # reject config entries that aren't recognized programs
    possible = set(['poa', 'minimap2', 'gonk', 'consensus', 'racon', 'blat', 'emtrey', 'psl2pslx'])
    inConfig = set(programs)
    if inConfig - possible:
        raise Exception('Check config file')
    # fill in anything missing with a bare name so the shell finds it on $PATH
    for missing in possible - inConfig:
        programs[missing] = 'consensus.py' if missing == 'consensus' else missing
        sys.stderr.write('Using ' + str(missing)
                         + ' from your path, not the config file.\n')
    return programs
# Resolve the external tool paths once at import time.
progs = configReader(config_file)
poa = progs['poa']
minimap2 = progs['minimap2']
racon = progs['racon']
consensus = progs['consensus']
def determine_consensus(fasta, fastq, temp_folder, process_count):
    '''Polish the consensus read in *fasta* using its subreads in *fastq*.

    Subsamples up to ``subsample`` (module global) subreads into a temp
    fastq, then runs one minimap2 + racon round against the consensus and
    returns the polished sequence (falling back to the unpolished one if
    racon produced no output).

    NOTE(review): depends on module globals (subsample, minimap2, racon)
    and sibling helpers, and shells out to external aligners — not
    unit-testable in isolation.
    '''
    corrected_consensus = ''
    pc = process_count
    out_F = fasta
    fastq_reads = read_fastq_file(fastq)
    # Subsample subreads to bound the polishing runtime.
    out_Fq = temp_folder + '/subsampled.' + pc + '.fastq'
    out = open(out_Fq,'w')
    indexes = np.random.choice(np.arange(0, len(fastq_reads), 1), min(len(fastq_reads), subsample), replace=False)
    subsample_fastq_reads = []
    for index in indexes:
        subsample_fastq_reads.append(fastq_reads[index])
    for read in subsample_fastq_reads:
        out.write('@' + read[0] + '\n' + read[2] + '\n+\n' + read[3] + '\n')
    out.close()
    # Scratch file names, namespaced by the process counter.
    poa_cons = temp_folder + '/consensus.' + pc + '.fasta'
    final = temp_folder + '/corrected_consensus.' + pc + '.fasta'
    overlap = temp_folder +'/overlaps.' + pc + '.sam'
    pairwise = temp_folder + '/prelim_consensus.' + pc + '.fasta'  # unused leftover
    max_coverage = 0  # unused leftover
    reads = read_fasta(out_F)
    repeats = 0  # unused leftover
    qual = []    # unused leftover
    raw = []     # unused leftover
    before = []  # unused leftover
    after = []   # unused leftover
    # *fasta* holds a single record; this loop just binds its name to `best`.
    for read in reads:
        best = read
    out_cons_file = open(poa_cons, 'w')
    out_cons_file.write('>' + best + '\n' + reads[best].replace('-', '') + '\n')
    out_cons_file.close()
    final = poa_cons
    # Single minimap2/racon polishing round (np.arange(1, 2, 1) yields only i=1).
    for i in np.arange(1, 2, 1):
        if i == 1:
            input_cons = poa_cons
            output_cons = poa_cons.replace('.fasta', '_' + str(i) + '.fasta')
        else:
            input_cons = poa_cons.replace('.fasta', '_' + str(i-1) + '.fasta')
            output_cons = poa_cons.replace('.fasta', '_' + str(i) + '.fasta')
        minimap2_command = '%s -t 1 --secondary=no -ax map-ont \
        %s %s >%s 2>./minimap2_messages' \
        %(minimap2, input_cons, out_Fq,overlap)
        minimap2_process = subprocess.run(minimap2_command, shell=True)
        racon_command = '%s -q 5 -t 1 %s %s %s >%s 2>./racon_messages.txt' \
        %(racon, out_Fq, overlap, input_cons, output_cons)
        racon_process = subprocess.run(racon_command, shell=True)
        final = output_cons
    reads = read_fasta(final)
    # racon can emit an empty file; fall back to the unpolished consensus.
    if len(reads) == 0:
        reads = read_fasta(poa_cons)
    for read in reads:
        corrected_consensus = reads[read]
    return corrected_consensus
def read_fasta(infile):
    """Parse a FASTA file into {truncated_name: sequence}.

    Header names are shortened to the part before the first '_' and then
    to at most the first five '-'-separated fields; multi-line sequences
    are joined. Records with empty sequences are dropped.
    """
    entries = {}
    current_name = None
    chunks = []
    for raw in open(infile):
        stripped = raw.strip()
        if not stripped:
            continue
        if stripped.startswith('>'):
            if chunks:
                entries[current_name] = ''.join(chunks)
            current_name = '-'.join(stripped[1:].split('_')[0].split('-')[:5])
            chunks = []
        else:
            chunks.append(stripped)
    if chunks:
        entries[current_name] = ''.join(chunks)
    return entries
def read_subreads(seq_file):
    """Parse a subread FASTQ file into {root_name: [(root_name, seq, qual), ...]}.

    The root name is derived from the header: everything before the first
    '_', truncated to the first five '-'-separated fields, so all subreads
    of one molecule group together.

    BUG FIX: the original flushed the final record with a bare
    ``read_dict[root_name].append(...)`` after the loop, which raised
    KeyError whenever the last record's root_name was new (and NameError
    on an empty file). The final flush now uses the same guarded path as
    the in-loop flush, and is skipped entirely when no record was seen.
    """
    lineNum = 0
    lastPlus = False  # set once a '+' separator line has been consumed
    read_dict = {}
    for line in open(seq_file):
        line = line.rstrip()
        # Header line: flush the previously accumulated record first.
        if lineNum % 4 == 0:
            if line[0] == '@':
                if lastPlus:
                    if root_name not in read_dict:
                        read_dict[root_name] = []
                    read_dict[root_name].append((root_name, seq, qual))
                name = line[1:]
                root_name = ('-').join(name.split('_')[0].split('-')[:5])
        if lineNum % 4 == 1:
            seq = line
        if lineNum % 4 == 2:
            lastPlus = True
        if lineNum % 4 == 3 and lastPlus:
            qual = line
        lineNum += 1
    # Flush the last record (guarded, unlike the original).
    if lastPlus:
        read_dict.setdefault(root_name, []).append((root_name, seq, qual))
    return read_dict
def read_fastq_file(seq_file):
    '''
    Takes a FASTQ file and returns a list of tuples
    In each tuple:
        name          : str, read ID
        (placeholder) : always '' (the docstring's "seed" field is unused here)
        seq           : str, sequence
        qual          : str, quality line
        average_quals : float, average phred quality of that line
        seq_length    : int, length of the sequence

    BUG FIX: the original only appended a record when the NEXT '@' header
    was encountered, so the last read of every file was silently dropped.
    The final record is now flushed after the loop.
    '''
    lineNum = 0
    lastPlus = False  # set once a '+' separator line has been consumed
    read_list = []
    for line in open(seq_file):
        line = line.rstrip()
        if not line:
            continue
        # Header line: flush the previous, fully-parsed record.
        if lineNum % 4 == 0 and line[0] == '@':
            if lastPlus == True:
                read_list.append((name, '', seq, qual, average_quals, seq_length))
            name = line[1:]
        if lineNum % 4 == 1:
            seq = line
            seq_length = len(seq)
        if lineNum % 4 == 2:
            lastPlus = True
        if lineNum % 4 == 3 and lastPlus:
            qual = line
            quals = []
            for character in qual:
                number = ord(character) - 33  # phred+33 encoding
                quals.append(number)
            average_quals = np.average(quals)
        lineNum += 1
    # Flush the final record; lineNum % 4 == 0 guarantees it was complete.
    if lastPlus and lineNum % 4 == 0:
        read_list.append((name, '', seq, qual, average_quals, seq_length))
    return read_list
def make_consensus(Molecule, subreads, process_count):
    """Merge all reads of one UMI group into a single polished consensus.

    *Molecule* is a list of full fasta records ('>name\\nseq\\n'); their
    subreads are pooled (renamed under the first read's root name) and fed
    to determine_consensus(). Returns a fasta record string, or — when no
    subreads were found — the first read with '~N' appended to its name.

    NOTE(review): uses the module-global `temp` directory and sibling
    helpers; the caller's check for a 'nope' return value can never
    trigger, since this function never returns that string.
    """
    subread_file = temp + '/' + process_count + '.temp_subreads.fastq'
    fastaread_file = temp + '/' + process_count + '.temp_consensusreads.fasta'
    subs = open(subread_file, 'w')
    fasta = open(fastaread_file, 'w')
    raw_count = 0
    # All pooled subreads are renamed under the first read's root name.
    combined_root_name = list(Molecule)[0][1:].split('\n')[0]
    for read in Molecule:
        fasta.write(read)
        root_name = read[1:].split('\n')[0]
        if root_name in subreads:
            raw = subreads[root_name]
            for entry in raw:
                if entry[1]:
                    raw_count += 1
                    subs.write('@' + combined_root_name + '_' + str(raw_count) + '\n' + entry[1] + '\n+\n' + entry[2] + '\n')
    subs.close()
    fasta.close()
    if len(read_fastq_file(subread_file)) > 0:
        corrected_consensus = determine_consensus(fastaread_file, subread_file, temp, process_count)
        return '>%s\n%s\n' %(combined_root_name, corrected_consensus)
    else:
        # No usable subreads: pass the first read through, tagged with '~N'.
        return list(Molecule)[0].split('\n')[0] + '~N\n' + list(Molecule)[0].split('\n')[1] + '\n'
def group_reads(reads, subreads, final, final_UMI_only, final_subreads, matched_reads, process_count):
    """Group consensus reads by UMI and merge each multi-read group.

    The UMI is taken from positions 56:66 of each read's reverse
    complement. Groups of one are written through unchanged (with their
    subreads); larger groups are merged via make_consensus() and logged to
    *matched_reads*.

    NOTE(review): a UMI whose positions 4:10 read 'TTTTTT' is treated as
    unusable and replaced with a unique counter value so the read is never
    grouped — presumably a poly-T artifact filter; confirm with the assay
    design.
    """
    UMI_dict = {}
    unique_number = 0
    for read,sequence in reads.items():
        umi = reverse_complement(sequence)[56:66]
        if umi[4:10] == 'TTTTTT':
            unique_number += 1
            umi = str(unique_number)
        if umi not in UMI_dict:
            UMI_dict[umi] = []
        UMI_dict[umi].append('>' + read + '\n' + sequence + '\n')
    for umi, Molecule in UMI_dict.items():
        if len(Molecule) == 1:
            # Singleton: pass the read and its subreads through unchanged.
            final.write(list(Molecule)[0])
            if list(Molecule)[0][1:].split('\n')[0] in subreads:
                for subread in subreads[list(Molecule)[0][1:].split('\n')[0]]:
                    root_name, sequence, qual = subread[0], subread[1], subread[2]
                    final_subreads.write('@' + root_name + '\n' + sequence + '\n+\n' + qual + '\n')
        elif len(Molecule) > 1:
            # Log the group membership, then merge into one consensus.
            matched_reads.write(umi + '\t' + str(umi.count('TT')) + '\t')
            for entry in list(Molecule):
                matched_reads.write(entry[1:].split('\n')[0] + '_' + str(len(entry.split('\n')[1])) + ',')
            matched_reads.write('\n')
            new_read = make_consensus(list(Molecule), subreads, process_count)
            # NOTE(review): make_consensus never returns 'nope', so this
            # guard is effectively always true.
            if new_read != 'nope':
                final.write(new_read)
                final_UMI_only.write(new_read)
                combined_root_name = new_read[1:].split('\n')[0]
                for molecule in list(Molecule):
                    name = molecule.split('\n')[0][1:]
                    if name in subreads:
                        for subread in subreads[name]:
                            root_name, sequence, qual = subread[0], subread[1], subread[2]
                            final_subreads.write('@' + combined_root_name + '\n' + sequence + '\n+\n' + qual + '\n')
def processing(fasta_file, subreads_file, process_count):
    """Worker entry point: merge one cell's reads by UMI.

    Reads the cell's consensus fasta and subread fastq, then writes four
    per-cell outputs (merged fasta, UMI-merged-only fasta, merged subread
    fastq, and a matched-reads log) into the module-global output `path`.
    """
    final = open(path + '/' + process_count + '.merged.fasta', 'w')
    final_UMI_only = open(path + '/' + process_count + '.UMI_only.fasta', 'w')
    final_subreads = open(path + '/' + process_count + '.merged.subreads.fastq', 'w')
    matched_reads = open(path + '/' + process_count + '.matched_reads.txt', 'w')
    print('reading reads required for subprocess ' + process_count)
    reads = read_fasta(fasta_file)
    print(len(reads))
    print('reading subreads ' + process_count)
    subreads = read_subreads(subreads_file)
    print(len(subreads))
    print('grouping and merging consensus reads ' + process_count)
    group_reads(reads, subreads, final, final_UMI_only, final_subreads, matched_reads, process_count)
def main():
    """Dispatch one processing() job per demultiplexed cell fasta file.

    NOTE(review): apply_async results are never collected, so exceptions
    raised inside processing() are silently discarded; keeping the
    AsyncResult objects and calling .get() would surface worker errors.
    """
    pool = mp.Pool(processes=threads)
    print('kmer-matching UMIs')
    print('reading consensus reads')
    count = 0       # unused leftover
    big_count = 0   # unused leftover
    sub_reads = {}
    fileList = []
    # Collect per-cell fasta files (skip previously merged outputs).
    for file in os.listdir(input_path):
        if '.fasta' in file and 'cell' in file and 'merged' not in file:
            fileList.append(file)
    # Process cells in numeric order of the field after the first '_'.
    for file in sorted(fileList, key=lambda x: int(x.split('_')[1])):
        fasta_file = input_path + '/' + file
        sub_reads = input_path + '/' + file.split('.')[0] + '_subs.fastq'
        pool.apply_async(processing, [fasta_file, sub_reads, file.split('.')[0]])
        count = 0
        sub_reads = {}
    pool.close()
    pool.join()

main()
|
#!/usr/bin/env python2
"""
Client for Project 2
"""
import socket
import sys
__author__ = 'Matthew Wang and Tony Tan'
__copyright__ = "Copyright 2019, Matthew Wang and Tony Tan"
__license__ = "MIT"
__version__ = "1.0"
def arg_check():
    """Validate command line arguments and return them.

    Expects exactly three arguments: server IP, server port, username.
    Exits via usage() on a wrong argument count or an out-of-range port;
    a malformed IP makes inet_pton raise, which the caller routes to
    usage() as well.

    :return: (server_ip, server_port, username) tuple of strings
    """
    if len(sys.argv) != 4:
        usage()
        return
    # Verify ip (IPv4 when dotted, IPv6 when colon-separated)
    server_ip = str(sys.argv[1])
    if '.' in server_ip:
        socket.inet_pton(socket.AF_INET, server_ip)
    elif ':' in server_ip:
        socket.inet_pton(socket.AF_INET6, server_ip)
    else:
        usage()
    # Verify port number
    port_number = int(sys.argv[2])
    if not 0 <= port_number <= 65535:
        usage()
    return sys.argv[1], sys.argv[2], sys.argv[3]
def usage():
    """Print usage information for the program and terminate the process.

    :return: does not return (calls exit())
    """
    for text in ('Error. Usage:',
                 'To start the client and connect to the server, use the following command and format:',
                 './ttweetcli.py <ServerIP> <ServerPort> <Username>',
                 '',
                 'ServerIP, ServerPort, and Username must be valid.'):
        print(text)
    exit()
def run_client(server_host, server_port, username):
    """
    Runs the client application (Python 2: raw_input, str sockets).

    Connects, registers the username, then loops forever: the server's
    stream is split on the '\"\"\"' delimiter into individual responses,
    each of which is either a prompt for a command, an incoming tweet,
    an exit instruction, or a plain message to print.

    :param server_host: ip address of remote server.
    :param server_port: Port number of remote application.
    :param username: Username to use for the connection.
    :return: none
    """
    # Stores the client's received tweets until the user asks for them.
    messages = []
    # Connects to server.
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect((server_host, int(server_port)))
    # Sends the server the user's username.
    client_socket.send("set username " + username)
    timeline_requested = False
    while True:
        server_response = client_socket.recv(1024)
        # recv returning '' means the peer closed the connection.
        if server_response == '':
            print("Error: Server offline. Exiting.")
            return
        responses = server_response.split('"""')
        for response in responses:
            if response:
                # Server tells client it's ready for a command.
                if response == "command":
                    # Flush tweets queued since the last "timeline" request.
                    if timeline_requested:
                        for message in messages:
                            print(username + " receive message from " + message)
                        messages = []
                        timeline_requested = False
                    command = raw_input("Command: ")
                    # Carries out command locally if the command is "timeline".
                    if command == "timeline":
                        for message in messages:
                            print(username + " receive message from " + message)
                        messages = []
                        timeline_requested = True
                    # Sends command to server.
                    client_socket.send(command)
                # Server sends tweet message to client.
                elif len(response) > 6 and response[:6] == "tweet ":
                    messages.append(response[6:])
                # Server tells client to close.
                elif response == "exit":
                    print("Goodbye!")
                    return
                # Prints a message from the server.
                else:
                    print(response)
if __name__ == "__main__":
    """
    Interprets and responds to the command line arguments.
    """
    try:
        host, port, username = arg_check()
        run_client(host, port, username)
    # Ensure that any exceptions lead to a graceful exit with usage information
    # NOTE(review): this broad handler also masks genuine runtime errors
    # (connection refused, etc.) behind the usage text; catching
    # (socket.error, ValueError) would be more precise.
    except Exception:
        usage()
|
# -*- coding: utf-8 -*-
"""
Common handlers for ibpy
Created on Thu Mar 19 22:34:20 2015
@author: Jev Kuznetsov
License: BSD
"""
import pandas as pd
from ib.ext.Order import Order
class Logger(object):
    """Catch-all logger: prints every message TWS pushes to the connection.

    Registering ``self.handler`` via ``registerAll`` subscribes it to all
    incoming message types.
    """

    def __init__(self, tws):
        tws.registerAll(self.handler)

    def handler(self, msg):
        """Display one incoming message."""
        print(msg)
class Account(object):
    """Collects account values pushed by TWS.

    Data arrives through two callbacks: one per account key/value pair
    ('UpdateAccountValue') and one carrying the timestamp that closes an
    update burst ('UpdateAccountTime').
    """

    def __init__(self, tws):
        self.tws = tws
        self.tws.register(self._handler, 'UpdateAccountValue')
        self.tws.register(self._timeHandler, 'UpdateAccountTime')
        self.lastUpdate = None    # timestamp of the most recent update
        self._data = {}           # internal store: {key: (value, currency)}
        self.dataReady = False    # True once a full update has arrived

    def _handler(self, msg):
        """Store one incoming key/value pair."""
        self._data[msg.key] = (msg.value, msg.currency)

    def _timeHandler(self, msg):
        """Record the update timestamp and mark the data as usable."""
        self.lastUpdate = msg.timeStamp
        self.dataReady = True

    def data(self):
        """Return the account snapshot as a DataFrame indexed by key."""
        frame = pd.DataFrame(self._data).T
        frame.columns = ['value', 'currency']
        return frame

    def to_csv(self, fName):
        """Write the account snapshot to *fName* (requires received data)."""
        assert self.dataReady , "No data received yet "
        self.data().to_csv(fName)
class Portfolio(object):
    """Tracks position rows delivered by 'UpdatePortfolio' messages."""

    def __init__(self, tws):
        self.tws = tws
        self.tws.register(self._handler, 'UpdatePortfolio')
        self._data = {}  # {conId: [symbol, position, marketPrice, marketValue, averageCost]}
        self._header = ('symbol','position','marketPrice','marketValue','averageCost')

    def _handler(self, msg):
        """Rebuild the row for the contract carried by *msg*."""
        row = [msg.contract.m_symbol]  # init with symbol
        for field in self._header[1:]:  # remaining columns come off the message
            row.append(getattr(msg, field))
        self._data[msg.contract.m_conId] = row  # keyed by unique contract id

    def data(self):
        """Return positions as a DataFrame, one row per contract id."""
        frame = pd.DataFrame(self._data).T
        frame.columns = self._header
        return frame

    def to_csv(self, fName):
        """Dump the position table to a csv file."""
        self.data().to_csv(fName)
class Orders(object):
    """Tracks open orders via TWS callbacks and submits MKT/LMT orders."""

    def __init__(self, tws):
        self.tws = tws
        # init variables
        self._data = {}  # internal orders dictionary, orderIds are dict keys
        self.nextValidOrderId = None  # populated by the NextValidId callback
        self.endReceived = False  # will be set on OpenOrderEnd event
        # register handlers
        self.tws.register(self._h_orderStauts, 'OrderStatus')
        self.tws.register(self._h_nextValidId, 'NextValidId')
        self.tws.register(self._h_openOrder, 'OpenOrder')
        self.tws.register(self._h_openOrderEnd, 'OpenOrderEnd')

    #----------order placement
    def placeOrder(self, contract, shares, limit=None, transmit=0):
        '''
        create order object

        Parameters
        -----------
        shares: number of shares to buy or sell. Negative for sell order.
        limit : price limit, None for MKT order
        transmit: transmit immideatelly from tws

        Returns:
        -----------
        orderId : The order Id. You must specify a unique value.
            When the order status returns, it will be identified by this tag.
            This tag is also used when canceling the order.
        '''
        action = {-1: 'SELL', 1: 'BUY'}
        orderId = self.nextValidOrderId
        self.nextValidOrderId += 1  # increment
        # BUG FIX: the original used the Python-2-only builtin cmp(); the
        # rest of this module already uses Python 3 idioms (print(),
        # list(msg.items())), so cmp() would raise NameError here.
        # Compute the sign explicitly; shares == 0 still raises KeyError,
        # exactly as cmp(0, 0) == 0 did.
        sign = (shares > 0) - (shares < 0)
        o = Order()
        o.m_orderId = orderId
        o.m_action = action[sign]
        o.m_totalQuantity = abs(shares)
        o.m_transmit = transmit
        if limit is not None:
            o.m_orderType = 'LMT'
            o.m_lmtPrice = limit
        else:
            o.m_orderType = 'MKT'
        # place order
        self.tws.placeOrder(orderId, contract, o)
        return orderId

    # ----------data retrieval
    def data(self):
        """ get open order data as a pandas DataFrame """
        df = pd.DataFrame(self._data).T
        cols = ['symbol','orderId','filled','remaining','lastFillPrice','avgFillPrice']
        return df[cols]

    #-----------handlers
    def _h_orderStauts(self, msg):
        """Status handler: merge all message fields into the order's row.

        NOTE(review): the method name carries a typo ('Stauts'); kept as-is
        because it is only referenced from __init__ above.
        """
        for k, v in list(msg.items()):
            self._data[msg.orderId][k] = v

    def _h_openOrder(self, msg):
        """openOrder message handler: create the order's row."""
        self._data[msg.orderId] = {'symbol': msg.contract.m_symbol}

    def _h_nextValidId(self, msg):
        """next valid id handler"""
        self.nextValidOrderId = msg.orderId

    def _h_openOrderEnd(self, msg):
        """called at the end of sending orders"""
        self.endReceived = True
|
#!/usr/bin/python3
import logging.handlers
import sys
import signal
import time
import traceback
import discord_logging
import praw_wrapper
import argparse
from praw_wrapper import PushshiftType
log = discord_logging.init_logging(
backup_count=20
)
import counters
from database import Database
import static
import messages
import comments
import subreddits
import notifications
import utils
import stats
database = None
def signal_handler(signal, frame):
    """SIGINT handler: close the database, flush pending logs, and exit.

    NOTE(review): the ``signal`` parameter shadows the imported ``signal``
    module within this function body; harmless here, but renaming it to
    ``signum`` would be clearer.
    """
    log.info("Handling interrupt")
    database.close()
    discord_logging.flush_discord()
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Reddit UpdateMe bot")
    parser.add_argument("user", help="The reddit user account to use")
    parser.add_argument("--once", help="Only run the loop once", action='store_const', const=True, default=False)
    parser.add_argument("--debug_db", help="Use the debug database", action='store_const', const=True, default=False)
    parser.add_argument(
        "--no_post", help="Print out reddit actions instead of posting to reddit", action='store_const', const=True,
        default=False)
    parser.add_argument(
        "--no_backup", help="Don't backup the database", action='store_const', const=True, default=False)
    parser.add_argument(
        "--reset_comment", help="Reset the last comment read timestamp", action='store_const', const=True,
        default=False)
    parser.add_argument("--debug", help="Set the log level to debug", action='store_const', const=True, default=False)
    parser.add_argument(
        "--pushshift", help="Select the pushshift client to use", action='store',
        choices=["prod", "beta", "auto"], default="prod")
    args = parser.parse_args()

    # Map the CLI choice onto the praw_wrapper pushshift client enum.
    if args.pushshift == "prod":
        pushshift_client = PushshiftType.PROD
    elif args.pushshift == "beta":
        pushshift_client = PushshiftType.BETA
    elif args.pushshift == "auto":
        pushshift_client = PushshiftType.AUTO
    else:
        # Unreachable in practice: argparse `choices` already restricts the value.
        log.warning(f"Invalid pushshift client: {args.pushshift}")
        sys.exit(1)

    # Metrics endpoint on port 8000; the startup counter marks restarts.
    counters.init(8000)
    counters.errors.labels(type="startup").inc()

    if args.debug:
        discord_logging.set_level(logging.DEBUG)
    discord_logging.init_discord_logging(args.user, logging.WARNING, 1)

    static.ACCOUNT_NAME = args.user
    # Two reddit clients: one for messaging/commenting, one for searching.
    reddit_message = praw_wrapper.Reddit(
        args.user, args.no_post, "message", static.USER_AGENT, pushshift_client=pushshift_client)
    reddit_search = praw_wrapper.Reddit(args.user, args.no_post, "search", static.USER_AGENT)
    static.ACCOUNT_NAME = reddit_message.username

    database = Database(debug=args.debug_db)
    if args.reset_comment:
        log.info("Resetting comment processed timestamp")
        database.save_datetime("comment_timestamp", utils.datetime_now())

    last_backup = None
    last_comments = None
    # Main loop: each stage is individually guarded so one failing stage
    # doesn't stop the others; error counts lengthen the sleep below.
    while True:
        startTime = time.perf_counter()
        log.debug("Starting run")

        actions = 0
        errors = 0

        # Export object counts for monitoring.
        counters.objects.labels(type="subscriptions").set(database.get_count_all_subscriptions())
        counters.objects.labels(type="comments").set(database.get_count_all_comments())
        counters.objects.labels(type="submissions").set(database.get_count_all_submissions())
        counters.objects.labels(type="stats").set(database.get_count_all_stats())
        counters.objects.labels(type="users").set(database.get_count_all_users())
        counters.objects.labels(type="subreddits").set(database.get_count_all_subreddits())

        try:
            actions += messages.process_messages(reddit_message, database)
        except Exception as err:
            utils.process_error(f"Error processing messages", err, traceback.format_exc())
            errors += 1

        try:
            subreddits.scan_subreddits(reddit_search, database)
        except Exception as err:
            utils.process_error(f"Error scanning subreddits", err, traceback.format_exc())
            errors += 1

        try:
            actions += comments.process_comments(reddit_message, database)
        except Exception as err:
            utils.process_error(f"Error processing comments", err, traceback.format_exc())
            errors += 1

        try:
            actions += notifications.send_queued_notifications(reddit_message, database)
        except Exception as err:
            utils.process_error(f"Error sending notifications", err, traceback.format_exc())
            errors += 1

        try:
            subreddits.profile_subreddits(reddit_search, database)
        except Exception as err:
            utils.process_error(f"Error profiling subreddits", err, traceback.format_exc())
            errors += 1

        try:
            actions += subreddits.recheck_submissions(reddit_search, database)
        except Exception as err:
            utils.process_error(f"Error rechecking submissions", err, traceback.format_exc())
            errors += 1

        # Comment updates only every 30 minutes.
        if utils.time_offset(last_comments, minutes=30):
            try:
                actions += comments.update_comments(reddit_message, database)
                last_comments = utils.datetime_now()
            except Exception as err:
                utils.process_error(f"Error updating comments", err, traceback.format_exc())
                errors += 1

        # Roll daily stats over when the date changes.
        latest_stats = database.get_datetime("stats_day", is_date=True)
        current_day = utils.date_now()
        if latest_stats is None:
            database.save_datetime("stats_day", current_day)
        elif latest_stats != current_day:
            log.info(f"Saving stats for day {current_day.strftime('%Y-%m-%d')}")
            stats.save_stats_for_day(database, latest_stats)
            database.save_datetime("stats_day", current_day)

        # Database backup (and cleanup) at most every 8 hours.
        if not args.no_backup and utils.time_offset(last_backup, hours=8):
            try:
                database.backup()
                last_backup = utils.datetime_now()
                database.clean()
            except Exception as err:
                utils.process_error(f"Error backing up database", err, traceback.format_exc())
                errors += 1

        run_time = time.perf_counter() - startTime
        counters.run_time.observe(round(run_time, 2))
        log.debug(f"Run complete after: {int(run_time)}")

        discord_logging.flush_discord()
        database.commit()
        if args.once:
            break

        # Busy runs sleep less; errors back off harder.
        sleep_time = max(30 - actions, 0) + (30 * errors)
        counters.sleep_time.observe(sleep_time)
        log.debug(f"Sleeping {sleep_time}")
        time.sleep(sleep_time)
|
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import Log
import traceback
def assert_true(expression, msg=""):
    """Record a test failure via log_failure() unless *expression* is truthy."""
    if expression:
        return
    log_failure("%s is false. %s" % (str(expression), msg))
def assert_false(expression, msg=""):
    """Record a test failure via log_failure() if *expression* is truthy."""
    if not expression:
        return
    log_failure("%s is true. %s" % (str(expression), msg))
def assert_equal(left, right, msg=""):
    """Record a test failure via log_failure() unless left == right."""
    if left == right:
        return
    log_failure("%s != %s. %s" % (str(left), str(right), msg))
def assert_less_equal(left, right, msg=""):
    """Record a test failure via log_failure() unless left <= right."""
    if left <= right:
        return
    log_failure("%s > %s. %s" % (str(left), str(right), msg))
def assert_greater_equal(left, right, msg=""):
    """Record a test failure via log_failure() unless left >= right."""
    if left >= right:
        return
    log_failure("%s < %s. %s" % (str(left), str(right), msg))
def assert_not_in(elem, container, msg=""):
    """Record a test failure via log_failure() if *elem* occurs in *container*."""
    if elem not in container:
        return
    log_failure("%s is in %s. %s" % (str(elem), str(container), msg))
def log_failure(err_msg):
    """Fail the test via Log.fail(), appending the current call stack."""
    Log.fail("%s\n%s" % (err_msg, get_stack_frame_string()))
def get_stack_frame_string():
    """Return the formatted call stack, excluding this function's own frame."""
    frames = traceback.format_list(traceback.extract_stack())
    return "".join(frames[:-1])
|
import functools

from bottle import request, redirect
def authenticate(func):
    """Decorator that only lets logged-in beaker sessions reach *func*.

    Requires a truthy 'Logged-In' flag in the session, and additionally
    binds the session to the User-Agent header it was created with as a
    light session-hijacking defence (a UA mismatch deletes the session).
    Unauthenticated requests are redirected to /login.

    BUG FIX: the wrapper now carries functools.wraps, so the decorated
    view keeps its original __name__/__doc__ (important for routing
    frameworks and debugging).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        authenticated = True
        session = request.environ.get('beaker.session')
        if not session.get('Logged-In') or not session['Logged-In']:
            authenticated = False
            session['Logged-In'] = False
            session.save()
        elif not session.get('User-Agent') or session['User-Agent'] != request.headers.get('User-Agent'):
            # User-Agent changed mid-session: treat as hijacked, drop it.
            session.delete()
            authenticated = False
        if not authenticated:
            return redirect('/login')
        return func(*args, **kwargs)
    return wrapper
|
# terrascript/rundeck/__init__.py
import terrascript
class rundeck(terrascript.Provider):
    """Terrascript provider stub for the Rundeck Terraform provider.

    Declares no attributes of its own; all behavior is inherited from
    terrascript.Provider.
    """
    pass
|
import requests
from typing import Dict, List
class LIFX:
    '''
    Thin wrapper around the LIFX HTTP API (bearer-token authenticated).

    docs: https://api.developer.lifx.com
    selectors: https://api.developer.lifx.com/docs/selectors
    '''
    # Base URL shared by every endpoint.
    url = 'https://api.lifx.com'

    def __init__(self, token):
        """Store the auth headers used by every request."""
        self.headers = {
            'Authorization': f'Bearer {token}',
            'Content-Type': 'application/json'
        }

    def list_lights(self, selector: str = 'all'):
        '''
        Args:
            selector = what lights to list
        Returns:
            response object
        '''
        response = requests.get(
            url=f'{LIFX.url}/v1/lights/{selector}',
            headers=self.headers
        )
        return response

    def set_state(self, color: str, selector: str = 'all', power: str = 'on', brightness: float = 1, duration: float = 0, fast: bool = True):
        '''
        Args
            selector = what lights to change
            power = on|off
            color = color to change state to
            brightness = 0.0 - 1.0
            duration = how long until state is full
            fast = don't make checks and just change
        Returns
            response object
        '''
        response = requests.put(
            url=f'{LIFX.url}/v1/lights/{selector}/state',
            headers=self.headers,
            json={
                'power': power,
                'color': color,
                'brightness': brightness,
                'duration': duration,
                'fast': fast
            }
        )
        return response

    def set_states(self, states: List = None, defaults: Dict = None, fast: bool = True):
        '''
        Args:
            states = a list of state objects (defaults to an empty list)
            defaults = default parameters for each state object (defaults to {})
            fast = don't make checks and just change
        Returns:
            response object
        '''
        # BUG FIX: the original used mutable default arguments ([] and {}),
        # a classic Python pitfall (shared across calls). Default to None
        # and substitute fresh containers, keeping the request payload
        # identical for callers who omit the arguments.
        if states is None:
            states = []
        if defaults is None:
            defaults = {}
        response = requests.put(
            url=f'{LIFX.url}/v1/lights/states',
            headers=self.headers,
            json={
                'states': states,
                'defaults': defaults,
                'fast': fast
            }
        )
        return response

    def pulse_effect(self, color: str, selector: str = 'all', from_color: str = '', period: float = 2, cycles: float = 5, power_on: bool = True):
        '''
        Args:
            color = the color for the effect
            from_color = the color to start the effect from
            period = time in seconds for one cycle
            cycles = number of times to repeat
            power_on = turn on the light if not already on
        Returns:
            response object
        '''
        response = requests.post(
            url=f'{LIFX.url}/v1/lights/{selector}/effects/pulse',
            headers=self.headers,
            json={
                'color': color,
                'from_color': from_color,
                'period': period,
                'cycles': cycles,
                'power_on': power_on
            }
        )
        return response

    def effects_off(self, selector: str = 'all', power_off: bool = False):
        '''
        Args:
            power_off = also turn the lights off
        Returns:
            response object
        '''
        response = requests.post(
            url=f'{LIFX.url}/v1/lights/{selector}/effects/off',
            headers=self.headers,
            json={'power_off': power_off}
        )
        return response
from django.apps import AppConfig
class TmmConfig(AppConfig):
    """Django application configuration for the ``tmm`` app."""
    # App label/path Django uses to register this application.
    name = 'tmm'
|
import sys, requests, fire, json, xml
# Base URL of the Ensembl REST API.
server = "https://rest.ensembl.org"
# Interactive prompts reused by the endpoint wrappers below.
exp_message = "\nExport output?\nIf Yes, it will overwrite output.(json/xml)\nY or N\n"
json_search_message = "Do you want to search in the json?\nY or N\n"
def _fetch_and_report(url, format):
    """Fetch *url* from the Ensembl REST API, print it, and offer export.

    Shared worker for all endpoint wrappers below, which previously
    duplicated this logic verbatim. For ``format == "json"`` the decoded
    payload is pretty-printed and optionally written to ``output.json``;
    for ``format == "xml"`` the raw body is printed and optionally written
    to ``output.xml``. Any other format is silently ignored, matching the
    original behavior.
    """
    if format == "json":
        r = requests.get(url, headers = {"Content-Type" : "application/json"})
        data = json.loads(r.text)
        print(json.dumps(data, indent = 2))
        if input(exp_message) == "Y":
            with open("output.json", "w") as json_f:
                json.dump(data, json_f, indent=2)
    elif format == "xml":
        r = requests.get(url, headers = {"Content-Type" : "application/xml"})
        print(r.text)
        if input(exp_message) == "Y":
            with open("output.xml", "w") as xml_f:
                xml_f.write(r.text)

def GET_tax_id(id, format):
    """
    Search for a taxonomic term by its identifier or name
    | id: NCBI taxon id or a name
    | format: either JSON or XML
    """
    _fetch_and_report(server + "/taxonomy/id/" + str(id) + "?", format)

def GET_tax_name(name, format):
    """
    Search for a taxonomic id by a non-scientific name
    | name: a non-scientific species name
    | format: either JSON or XML
    """
    _fetch_and_report(server + "/taxonomy/name/" + str(name) + "?", format)

def GET_tax_classification(id, format):
    """
    Return the taxonomic classification of a taxon node
    | id: a taxon identifier. Can be a NCBI taxon id or a name
    | format: either JSON or XML
    """
    _fetch_and_report(server + "/taxonomy/classification/" + str(id) + "?", format)

def GET_onto_id(id, ontology, format):
    """
    Search for an ontological term by its identifier (digits)
    | id: ontology term identifier
    | ontology: ontology (see the README)
    | format: either JSON or XML
    """
    _fetch_and_report(server + "/ontology/id/" + str(ontology) + ":" + str(id) + "?", format)

def GET_onto_name(name, format):
    """
    Search for a list of ontological terms by their name
    | name: an ontology name
    | format: either JSON or XML
    """
    _fetch_and_report(server + "/ontology/name/" + str(name) + "?", format)
if __name__ == "__main__":
    # Expose each endpoint wrapper as a CLI subcommand via python-fire,
    # e.g. `python script.py tax_id 9606 json`.
    fire.Fire({
        "tax_id" : GET_tax_id,
        "tax_name" : GET_tax_name,
        "tax_classif" : GET_tax_classification,
        "onto_id" : GET_onto_id,
        "onto_name" : GET_onto_name
    })
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 10:31:29 2020
@author: twguest
"""
###############################################################################
import sys
sys.path.append("/opt/WPG/") # LOCAL PATH
sys.path.append("/gpfs/exfel/data/user/guestt/WPG") # DESY MAXWELL PATH
sys.path.append("/opt/spb_model") # LOCAL PATH
sys.path.append("/gpfs/exfel/data/user/guestt/spb_model") # DESY MAXWELL PATH
###############################################################################
###############################################################################
import multiprocessing
from model.beamline.structure import propagation_parameters
from model.beamline.structure import Instrument
from model.src.coherent import construct_SA1_wavefront
from wpg import srwlib
from wpg.srwlib import SRWLOptD as Drift
from felpy.model.core.wavefront import Wavefront
from felpy.model.core.beamline import Beamline
from wpg.wpg_uti_wf import calc_pulse_energy, calculate_fwhm, get_axial_power_density, get_centroid
from wpg.wpg_uti_wf import plot_intensity_map as plotIntensity
from wpg.misc import calcDivergence
from os import listdir
from tqdm import tqdm
if __name__ == '__main__':
    # Build a source wavefront on a 1124 x 1423 grid; the last two arguments
    # follow construct_SA1_wavefront's signature (presumably photon energy and
    # charge -- confirm units against model.src.coherent).
    wfr = construct_SA1_wavefront(1124, 1423, 4.96, 0.25)
    # Compute the beam centroid of the generated wavefront.
    centroid = get_centroid(wfr)
import asyncio
import aiohttp
import aiozipkin as az
from aiohttp import web
page = """
<html lang="en">
<head>
<title>aiohttp producer consumer demo</title>
</head>
<body>
<h1>Your click event send to consumer</h1>
</body>
</html>
"""
backend_service = 'http://127.0.0.1:9011/consume'
async def index(request):
    """Handle GET /: open a PRODUCER span and post a click event to the consumer."""
    tracer = az.get_tracer(request.app)
    request_span = az.request_span(request)
    session = request.app['session']
    with tracer.new_child(request_span.context) as producer_span:
        producer_span.kind(az.PRODUCER)
        producer_span.name('produce event click')
        producer_span.remote_endpoint('broker', ipv4='127.0.0.1', port=9011)
        # Propagate the trace context to the consumer via message headers.
        message = {
            'payload': 'click',
            'headers': producer_span.context.make_headers()}
        response = await session.post(backend_service, json=message)
        body = await response.text()
        assert body == 'ok'
        await asyncio.sleep(0.01)
    return web.Response(text=page, content_type='text/html')
async def make_app(host, port):
    """Build the frontend aiohttp application with zipkin tracing wired in."""
    app = web.Application()
    app.router.add_get('/', index)
    # Shared client session for posting events to the consumer.
    app['session'] = aiohttp.ClientSession()
    zipkin_address = 'http://127.0.0.1:9411'
    endpoint = az.create_endpoint('frontend', ipv4=host, port=port)
    tracer = await az.create(zipkin_address, endpoint, sample_rate=1.0)
    az.setup(app, tracer)
    return app
if __name__ == '__main__':
    host = '127.0.0.1'
    port = 9010
    # Build the app on the event loop first (make_app awaits tracer creation),
    # then hand control to aiohttp's blocking runner.
    # NOTE(review): asyncio.get_event_loop() is deprecated for this use on
    # Python 3.10+; consider asyncio.new_event_loop() when upgrading.
    loop = asyncio.get_event_loop()
    app = loop.run_until_complete(make_app(host, port))
    web.run_app(app, host=host, port=port)
|
from adventurelib_with_characters import *
""" adventure specific settings """
"""define the items available and where to find them"""
Room.items = Bag()
axe = Item('an axe', 'axe')
key = Item('a key', 'key')
letter = Item('a letter from Dàin', 'letter')
moonstone = Item('a moonstone', 'moonstone')
runepaper = Item('a runepaper', 'runepaper')
winebottle = Item('a winebottle', 'winebottle', 'bottle')
longbow = Item('a longbow', 'longbow', 'bow')
arrows = Item('some arrows', 'arrows')
ham = Item('a huge, good-looking ham', 'ham')
spoon = Item('a spoon', 'spoon')
ring = Item('a ring with elven writing', 'ring')
beads = Item('some golden beads', 'beads')
gems = Item('some beautiful gemstones', 'gemstones', 'gems')
sword = Item('a very rusty sword', 'rusty sword', 'sword')
"""define characters"""
Room.characters = Group()
Nadihm = Character('Nadihm', 'Nadihm')
Fundor = Character('Fundor', 'Fundor', 'Fundór')
Frain = Character('Frain', 'Frain', 'Fráin')
"""define the rooms available, their descriptions, contained items, people and connections"""
hall = Room("""You are in a hall.""")
hall.characters = Group({Fundor,})
hall.items = Bag({longbow,})
living = hall.north = Room("""You are in room which seems to be used as living room. """)
living.characters = Group({Nadihm,})
living.items = Bag({arrows,})
supplyI = hall.east = Room("""You are in a supply room containing tools and other useful stuff.""")
supplyI.items = Bag({axe,})
dining = living.east = Room("""You are in the dining room. This never seems to have been used, but you see a big bowl with some small treasures standing on a big cupboard at the eastern wall.""")
dining.items = Bag({ring, moonstone, beads, gems})
kitchen = dining.north = Room("""You are in the kitchen.""")
kitchen.characters = Group({Frain,})
kitchen.items = Bag({spoon,})
supplyII = kitchen.east = Room("""You are in a supply room with food and drinks. Most things in here you would need to be very desperate to eat, but surely something useful can be found here.""")
supplyII.items = Bag({winebottle,ham})
sleeping = kitchen.west = Room("""You are in a small room containing three sleeping arrangements. You don't want to go your fellow dwarfs' personal belongings, but something seems to be placed here just for you...""")
sleeping.items = Bag({runepaper,})
""" init """
current_room = hall
inventory = Bag({letter,})
UsedSteps = 0
CountVisitsToNadihm = 0 |
# main.py
# Author: Richard Gibson
#
# Launch point for the app. Defines all of the URL handles, including a
# default handler for all non-matching URLs.
#
import webapp2
import front
import signup
import login
import logout
import submit
import presubmit
import runnerpage
import gamepage
import handler
import gamelist
import runnerlist
import deleterun
import updatebkt
import xmlpage
import edit_table
import asup
import cleanup_games
import cleanup_games_now
import change_categories
import fixerupper
DEBUG = False
class Default( handler.Handler ):
    """Fallback request handler that serves the 404 page for unmatched URLs."""
    def get( self, url ):
        current_user = self.get_user()
        self.error( 404 )
        self.render( "404.html", user=current_user )
# Regex fragments shared by the routes below.
MY_RE = r'([a-zA-Z0-9_+-]+)'    # usernames / game slugs
RUN_RE = r'([0-9]+)'            # numeric run ids
# Route table. Patterns containing backslashes are raw strings so that '\.'
# reaches the regex engine as-is instead of being an invalid string escape
# (a DeprecationWarning since Python 3.6); the string values are unchanged.
app = webapp2.WSGIApplication( [ ('/', front.Front),
                                 ('/signup/?', signup.Signup),
                                 ('/login/?', login.Login),
                                 ('/logout/?', logout.Logout),
                                 ('/submit/' + MY_RE + '/?', submit.Submit),
                                 ('/submit/?', presubmit.PreSubmit),
                                 (r'/games(?:\.json)?/?', gamelist.GameList),
                                 (r'/runners(?:\.json)?/?',
                                  runnerlist.RunnerList),
                                 ('/runner/' + MY_RE + r'(?:\.json)?/?',
                                  runnerpage.RunnerPage),
                                 ('/runner/' + MY_RE + '/edit-table/?',
                                  edit_table.EditTable),
                                 ('/game/' + MY_RE + '/update-bkt/?',
                                  updatebkt.UpdateBkt),
                                 ('/game/' + MY_RE + r'(?:\.json)?/?',
                                  gamepage.GamePage),
                                 ('/delete/' + RUN_RE + '/?',
                                  deleterun.DeleteRun),
                                 ('/faq/?', xmlpage.XmlPage),
                                 ('/blog/?', xmlpage.XmlPage),
                                 ('/asup/?', asup.Asup),
                                 ('/cleanup-games-now',
                                  cleanup_games_now.CleanupGamesNow),
                                 ('/change-categories',
                                  change_categories.ChangeCategories),
                                 ('/fixerupper', fixerupper.FixerUpper),
                                 ('/' + r'(.*)', Default) ],
                               debug=DEBUG)
|
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
class SingleGFK(models.Model):
    """
    An abstract base model to simplify the creation of models with a GFK'd
    "object" relationship.
    """
    # Content-type + integer id pair backing the generic FK below.
    object_type = models.ForeignKey(ContentType, related_name="related_%(class)s")
    object_id = models.IntegerField(db_index=True)
    # Generic relation resolved from (object_type, object_id).
    object = generic.GenericForeignKey(ct_field="object_type", fk_field="object_id")
    class Meta:
        abstract = True
    def __unicode__(self):
        # Human-readable label: "<content type> - <target instance>".
        return u"%s - %s" % (self.object_type, self.object)
class DualGfk(SingleGFK):
    """
    An abstract base model to simplify the creation of models with dual-ended
    GFKs.
    """
    # Second ("parent") generic FK alongside SingleGFK's "object" end.
    parent_type = models.ForeignKey(ContentType, related_name="child_%(class)s")
    parent_id = models.IntegerField(db_index=True)
    parent = generic.GenericForeignKey(ct_field="parent_type", fk_field="parent_id")
    # Denormalized "<content type id>.<parent id>" key, refreshed on every save.
    dnorm_parent = models.CharField(max_length=200)
    class Meta:
        abstract = True
    def save(self, **kwargs):
        self.dnorm_parent = "%s.%s" % (self.parent_type.id, self.parent_id)
        # NOTE(review): calls save_base() directly instead of super().save(),
        # which bypasses save() overrides in intermediate classes -- confirm
        # this is intentional.
        self.save_base(**kwargs)
class GenericglueRelation(generic.GenericRelation):
    """
    A simple override of Django's GenericRelation class to assume the default field names DualGfk uses.
    """
    def __init__(self, model, **kwargs):
        # Default to the parent-side GFK field names declared on DualGfk,
        # while still letting callers override either one.
        defaults = dict(object_id_field="parent_id", content_type_field="parent_type")
        defaults.update(kwargs)
        # __init__ must not return a value; the original `return super(...)`
        # only worked because __init__ returns None anyway.
        super(GenericglueRelation, self).__init__(model, **defaults)
|
"""
Calculates PSNR for JPG with respect to a dataset.
Note: Currently hard-coded for Cifar10
"""
import sys
import os
sys.path.append(os.path.abspath('..'))
import argparse
import numpy as np
import torch
import cv2
from collections import namedtuple
from PIL import Image
from tqdm import tqdm
from utils import util_funcs
from models.model_utils import get_dataset
result_tuple = namedtuple('JPEG_res', ['quality', 'ratio', 'psnr', ])
def tensor2img(tensor):
    """Convert a CHW tensor (values presumably in [0, 1] -- the *255 scaling
    suggests so; confirm with callers) into a PIL Image.

    The input is cloned first, so the caller's tensor is not modified.
    """
    scaled = tensor.clone().mul_(255).add_(0.5).clamp_(0, 255)
    ndarr = scaled.permute(1, 2, 0).to('cpu', torch.uint8).numpy()
    return Image.fromarray(ndarr)
def get_PSNR(quality, dataset, size):
    """Measure mean PSNR and compression ratio of JPEG at `quality` over `dataset`.

    Args:
        quality: JPEG quality setting (0-100) passed to OpenCV.
        dataset: iterable of batches; batch[0] is an image tensor scaled to [-1, 1].
        size: image side length (images are assumed square with 3 channels).
    Returns:
        result_tuple(quality, psnr, ratio) averaged over the dataset.
    """
    total_psnr = 0
    total_steps = 0
    MAX_i = 255
    # PSNR = 20*log10(MAX) - 10*log10(MSE); precompute the constant term.
    psnr_term = 20 * np.log10(np.ones(1) * MAX_i)
    compression_ratio = 0
    _tqdm = tqdm(dataset, desc=f'Quality: {quality}')
    for batch in _tqdm:
        # Load data and rescale from [-1, 1] to [0, 255].
        image = (batch[0] + 1) * MAX_i / 2
        assert image.max() <= MAX_i and image.min() >= 0, f'bad image valuse, in [{image.max()}, {image.min()}]'
        if image.shape[0] == 3:
            # CHW -> HWC
            image = image.transpose(0, 2)
        rgb_image = np.array(image, dtype=np.uint8)
        open_cv_image = rgb_image[:, :, ::-1].copy()  # RGB -> BGR for OpenCV
        # Round-trip through JPEG. tobytes()/frombuffer() replace the
        # deprecated tostring()/fromstring() (removed in modern NumPy).
        jpg_bytes = cv2.imencode('.jpg', open_cv_image, [cv2.IMWRITE_JPEG_QUALITY, quality])[1].tobytes()
        np_arr = np.frombuffer(jpg_bytes, np.uint8)
        decoded_img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        # Calculate encoded length (raw = H*W*3 bytes).
        raw_size = size * size * 3
        jpg_size = len(jpg_bytes)
        compression_ratio += raw_size / float(jpg_size)
        # Bug fixes vs original: (1) compare the decoded BGR image against the
        # BGR original (it was compared to the RGB array, mismatching
        # channels); (2) cast to float before subtracting -- uint8 arithmetic
        # wraps around and silently corrupts the MSE.
        mse = np.mean(np.square(decoded_img.astype(np.float64) - open_cv_image.astype(np.float64)))
        psnr = psnr_term - 10 * np.log10(mse)
        total_psnr += psnr
        total_steps += 1.0
        if total_steps % 1000 == 0:
            _tqdm.set_postfix({'psnr': np.round(total_psnr/total_steps, 2), 'ratio': compression_ratio/total_steps })
    print('Calculating for JPG with quality measure: {}'.format(quality))
    print('PSNR: {}'.format(total_psnr / total_steps))
    print('compression_ratio: {}'.format(compression_ratio / total_steps))
    return result_tuple(
        quality=quality,
        psnr=total_psnr / total_steps,
        ratio=compression_ratio / total_steps,
    )
if __name__ == '__main__':
    # Build the CLI from the shared parser helpers (dataset/model/extraction
    # flags come from util_funcs).
    parser = argparse.ArgumentParser()
    parser = util_funcs.base_parser(parser)
    parser = util_funcs.vqvae_parser(parser)
    parser = util_funcs.code_extraction_parser(parser)
    args = parser.parse_args()
    print('setting up datasets')
    _, test_dataset = get_dataset(args.dataset, args.data_path, args.size)
    all_res = list()
    # Sweep JPEG quality 0..100 in steps of 10, recording PSNR/ratio per level.
    for quality in range(0, 110, 10):
        res = get_PSNR(quality, test_dataset, args.size)
        all_res.append(res)
    # Dump the results to stdout and to a CSV for later plotting.
    [print(f'{r.quality}, {r.psnr}, {r.ratio}') for r in all_res]
    with open('/tmp/jpeg_res.csv', 'w') as fp:
        [fp.write(f'{r.quality}, {r.psnr}, {r.ratio} \n') for r in all_res]
|
# Copyright 2021 Pants project contributors.
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from datetime import time
from django.http import Http404, HttpResponse
from helloworld.greet.models import Greeting
def index(request, slug):
    """Return the salutation of the greeting identified by `slug`, or 404."""
    try:
        return HttpResponse(Greeting.objects.get(slug=slug).salutation)
    except Greeting.DoesNotExist:
        raise Http404(f"No such greeting: {slug}")
def for_time_of_day(request, time_of_day: str):
    """Return the slug of a greeting suited to the given ISO-format time string."""
    greeting = Greeting.for_time_of_day(time.fromisoformat(time_of_day))
    if not greeting:
        # Fall back to a generic greeting.
        # NOTE(review): .get() raises DoesNotExist rather than returning None,
        # so the `greeting is None` branch below looks unreachable; confirm
        # whether .filter(slug="hello").first() was intended.
        greeting = Greeting.objects.get(slug="hello")
    if greeting is None:
        raise Http404(f"No greeting found for time of day: {time_of_day}")
    return HttpResponse(greeting.slug)
|
import sqlite3
import pendulum
import psycopg2
class SqliteDatabase:
    """Persist candles, tradings and fundings for market symbols.

    NOTE(review): despite the name, this class talks to PostgreSQL through
    psycopg2 (the sqlite3 import at the top of the file is unused here);
    consider renaming it.
    """
    def __init__(self, host, db, user, pw):
        """Store connection settings and ensure the schema exists."""
        self.host = host
        self.db = db
        self.user = user
        self.pw = pw
        self.create()
    def connect(self):
        """Open and return a new database connection."""
        return psycopg2.connect(host=self.host,database=self.db, user=self.user, password=self.pw)
    def create(self):
        """Create the candles, fundings and tradings tables (and indexes) if missing."""
        with self.connect() as con:
            with con.cursor() as cur:
                cur.execute("""
                create table if not exists candles (
                    symbol varchar(10) NOT NULL,
                    time timestamptz NOT NULL,
                    open decimal(18,10) NOT NULL,
                    close decimal(18,10) NOT NULL,
                    high decimal(18,10) NOT NULL,
                    low decimal(18,10) NOT NULL,
                    volume decimal(18,10) NOT NULL,
                    PRIMARY KEY (symbol, time)
                );
                create index if not exists candle_symbol_idx on candles (symbol);
                create index if not exists candle_time_idx on candles (time);
                """)
                cur.execute("""
                create table if not exists fundings (
                    symbol varchar(10) NOT NULL,
                    id int NOT NULL,
                    time timestamptz NOT NULL,
                    amount decimal(18,10) NOT NULL,
                    rate decimal(18,10) NOT NULL,
                    period smallint NOT NULL,
                    PRIMARY KEY (symbol, time)
                )
                """)
                cur.execute("""
                create table if not exists tradings (
                    symbol varchar(10) NOT NULL,
                    id int NOT NULL PRIMARY KEY,
                    time timestamptz NOT NULL,
                    amount decimal(18,10) NOT NULL,
                    price decimal(18,10) NOT NULL
                );
                create index if not exists symbol_idx on tradings (symbol);
                create index if not exists time_idx on tradings (time);
                create index if not exists symbol_time_idx on tradings (symbol, time);
                """)
        con.close()
    def insert_candles(self, symbol, candles):
        """Bulk-insert candle rows; duplicates (same symbol+time) are ignored.

        Note: mutates each candle in place by prepending the symbol.
        """
        for candle in candles:
            candle.insert(0, symbol)
        with self.connect() as con:
            with con.cursor() as cur:
                # mogrify pre-renders each value tuple for a single multi-row insert.
                args = [cur.mogrify('(%s, TO_TIMESTAMP(%s/1000), %s, %s, %s, %s, %s)', x).decode('utf-8') for x in candles]
                args_str = ','.join(args)
                cur.execute("""
                    insert into candles(
                    symbol, time, open, close, high, low, volume)
                    values """ + args_str + "on conflict do nothing")
        con.close()
    def insert_trades(self, symbol, trades):
        """Bulk-insert trade rows; duplicate ids are ignored.

        Note: mutates each trade in place by prepending the symbol.
        """
        for trade in trades:
            trade.insert(0, symbol)
        with self.connect() as con:
            with con.cursor() as cur:
                args = [cur.mogrify('(%s, %s, TO_TIMESTAMP(%s/1000), %s, %s)', x).decode('utf-8') for x in trades]
                args_str = ','.join(args)
                cur.execute("""
                    insert into tradings(
                    symbol, id, time, amount, price)
                    values""" + args_str + "on conflict do nothing")
        con.close()
    def insert_funding_trades(self, symbol, trades):
        """Bulk-insert funding rows; duplicates are ignored.

        Note: mutates each trade in place by prepending the symbol.
        """
        for trade in trades:
            trade.insert(0, symbol)
        with self.connect() as con:
            with con.cursor() as cur:
                args = [cur.mogrify('(%s, %s, TO_TIMESTAMP(%s/1000), %s, %s, %s)', x).decode('utf-8') for x in trades]
                args_str = ','.join(args)
                cur.execute("""
                    insert into fundings(
                    symbol, id, time, amount, rate, period)
                    values""" + args_str + "on conflict do nothing")
        con.close()
    def get_latest_candle_date(self, symbol):
        """
        Get the time of the most recent candle for a symbol
        """
        with self.connect() as con:
            with con.cursor() as cur:
                cur.execute('select max(time) from candles where symbol=%s',
                            (symbol,))
                result = cur.fetchone()[0]
        # Bug fix: previously the early returns above sat inside the with
        # block, so con.close() was unreachable and every call leaked a
        # connection. Close first, then return.
        con.close()
        if result is None:
            return None
        return pendulum.instance(result)
    def get_latest_trading_date(self, symbol):
        """
        Get the time of the most recent trading for a symbol
        """
        with self.connect() as con:
            with con.cursor() as cur:
                cur.execute('select max(time) from tradings where symbol=%s',
                            (symbol,))
                result = cur.fetchone()[0]
        # Same leak fix as get_latest_candle_date: close before returning.
        con.close()
        if result is None:
            return None
        return pendulum.instance(result)
    def get_latest_funding_date(self, symbol):
        """
        Get the time of the most recent funding for a symbol
        """
        with self.connect() as con:
            with con.cursor() as cur:
                cur.execute('select max(time) from fundings where symbol=%s',
                            (symbol,))
                result = cur.fetchone()[0]
        # Same leak fix as get_latest_candle_date: close before returning.
        con.close()
        if result is None:
            return None
        return pendulum.instance(result)
|
"""See README.md for package information."""
__version__ = '0.6.0'
if "bpy" in locals():
import importlib
importlib.reload(cli)
importlib.reload(client)
importlib.reload(parser)
importlib.reload(server)
importlib.reload(stats)
else:
from . import cli
from . import client
from . import parser
from . import server
from . import stats
import bpy |
'''
Write and read csv file
'''
import csv
def write_to_csv(v):
    """Write the rows in `v` (an iterable of sequences) to data.csv."""
    # The unused offset/size/lines locals from the original were removed.
    # NOTE(review): on Python 3 prefer open(..., newline='') so the csv module
    # controls line endings; kept as-is since this file still uses py2 print.
    with open('data.csv', 'w') as f:
        csvout = csv.writer(f)
        csvout.writerows(v)
def read_from_csv():
    """Print every row of data.csv as a list."""
    with open('data.csv', 'r') as f:
        csvin = csv.reader(f)
        for row in csvin:
            # print(row) is valid on both Python 2 (single argument) and 3;
            # the original `print row` is a SyntaxError on Python 3.
            print(row)
def read_from_csv_dict():
    """Print every row of data.csv as a mapping keyed by 'first' and 'last'."""
    with open('data.csv', 'rt') as f:
        csvin = csv.DictReader(f, fieldnames=['first', 'last'])
        for row in csvin:
            # print(row) is valid on both Python 2 and 3; the original
            # `print row` is a SyntaxError on Python 3.
            print(row)
if __name__ == '__main__':
    # Demo data: Bond villains as (first name, last name) rows.
    villains = [
        ['Doctor', 'No'],
        ['Rosa', 'Klebb'],
        ['Mister', 'Big'],
        ['Auric', 'Goldfinger'],
        ['Ernst', 'Blofeld']
    ]
    # Round-trip: write the rows, then read them back twice (list and dict form).
    write_to_csv(villains)
    read_from_csv()
    read_from_csv_dict()
"""
Construct a FeatureExtraction class to retrieve
'key points', 'partitions', `saliency map' of an image
in a black-box or grey-box pattern.
Author: Min Wu
Email: min.wu@cs.ox.ac.uk
"""
import copy
import numpy as np
import cv2
import random
from scipy.stats import norm
from keras import backend as K
from matplotlib import pyplot as plt
# Define a Feature Extraction class.
class FeatureExtraction:
    """Retrieve 'key points', 'partitions' and a 'saliency map' of an image.

    Two patterns are supported:
      - 'black-box': SIFT key points drive the pixel partitioning.
      - 'grey-box' : a model-derived saliency map drives it (needs a model).
    """
    def __init__(self, pattern='grey-box'):
        self.PATTERN = pattern
        # black-box parameters
        self.IMG_ENLARGE_RATIO = 1
        self.IMAGE_SIZE_BOUND = 100
        self.MAX_NUM_OF_PIXELS_PER_KEY_POINT = 1000000
        # grey-box parameters
        self.NUM_PARTITION = 10
        self.PIXEL_BOUNDS = (0, 1)
        self.NUM_OF_PIXEL_MANIPULATION = 2

    # Get key points of an image.
    def get_key_points(self, image, num_partition=10):
        """Return SIFT key points (black-box) or partition ids (grey-box)."""
        self.NUM_PARTITION = num_partition
        # Black-box pattern: get key points from SIFT,
        # enlarge the image if it is small.
        if self.PATTERN == 'black-box':
            image = copy.deepcopy(image)
            sift = cv2.xfeatures2d.SIFT_create()  # cv2.SIFT() # cv2.SURF(400)
            if np.max(image) <= 1:
                # Values look normalized to [0, 1]: rescale to 0-255 for SIFT.
                image = (image * 255).astype(np.uint8)
            else:
                image = image.astype(np.uint8)
            if max(image.shape) < self.IMAGE_SIZE_BOUND:
                # For a small image, SIFT works by enlarging the image.
                image = cv2.resize(image, (0, 0), fx=self.IMG_ENLARGE_RATIO, fy=self.IMG_ENLARGE_RATIO)
                key_points, _ = sift.detectAndCompute(image, None)
                # Map key-point coordinates back onto the original image scale.
                for i in range(len(key_points)):
                    old_pt = (key_points[i].pt[0], key_points[i].pt[1])
                    key_points[i].pt = (int(old_pt[0] / self.IMG_ENLARGE_RATIO),
                                        int(old_pt[1] / self.IMG_ENLARGE_RATIO))
            else:
                key_points, _ = sift.detectAndCompute(image, None)
        # Grey-box pattern: get key points from partition ID.
        elif self.PATTERN == 'grey-box':
            key_points = [key for key in range(self.NUM_PARTITION)]
        else:
            # NOTE(review): key_points is unbound on this path, so the return
            # below raises NameError for unknown patterns -- confirm desired
            # behavior.
            print("Unrecognised feature extraction pattern. "
                  "Try 'black-box' or 'grey-box'.")
        return key_points

    # Get partitions of an image.
    def get_partitions(self, image, model=None, num_partition=10, pixel_bounds=(0, 1)):
        """Partition pixel coordinates into groups.

        Grey-box: ranks pixels by saliency (requires `model`).
        Black-box: assigns each pixel to its most likely SIFT key point.
        Returns a dict mapping partition id -> list of (x, y) tuples, or None
        when grey-box is requested without a model.
        """
        self.NUM_PARTITION = num_partition
        self.PIXEL_BOUNDS = pixel_bounds
        # Grey-box pattern: must specify a neural network.
        if self.PATTERN == 'grey-box' and model is None:
            print("For 'grey-box' feature extraction, please specify a neural network.")
            # Bug fix: the original had a bare `exit` (a no-op expression, not
            # a call), so execution fell through and crashed later in
            # get_saliency_map; return explicitly instead.
            return None
        # Grey-box pattern: get partitions from saliency map.
        if self.PATTERN == 'grey-box':
            print("Extracting image features using '%s' pattern." % self.PATTERN)
            saliency_map = self.get_saliency_map(image, model)
            partitions = {}
            # Split the saliency-ordered pixels evenly; the final partition
            # absorbs the remainder.
            quotient, remainder = divmod(len(saliency_map), self.NUM_PARTITION)
            for key in range(self.NUM_PARTITION):
                partitions[key] = [(int(saliency_map[idx, 0]), int(saliency_map[idx, 1])) for idx in
                                   range(key * quotient, (key + 1) * quotient)]
                if key == self.NUM_PARTITION - 1:
                    partitions[key].extend((int(saliency_map[idx, 0]), int(saliency_map[idx, 1])) for idx in
                                           range((key + 1) * quotient, len(saliency_map)))
            return partitions
        # Black-box pattern: get partitions from key points.
        elif self.PATTERN == 'black-box':
            print("Extracting image features using '%s' pattern." % self.PATTERN)
            key_points = self.get_key_points(image)
            print("%s keypoints are found. " % (len(key_points)))
            partitions = {}
            # For small images, such as MNIST, CIFAR10.
            if max(image.shape) < self.IMAGE_SIZE_BOUND:
                for x in range(max(image.shape)):
                    for y in range(max(image.shape)):
                        ps = 0
                        maxk = -1
                        for i in range(len(key_points)):
                            # NOTE(review): indexing with i - 1 measures the
                            # distance against the previous key point while
                            # `maxk` records i; confirm whether key_points[i]
                            # was intended.
                            k = key_points[i - 1]
                            dist2 = np.linalg.norm(np.array([x, y]) - np.array([k.pt[0], k.pt[1]]))
                            ps2 = norm.pdf(dist2, loc=0.0, scale=k.size)
                            if ps2 > ps:
                                ps = ps2
                                maxk = i
                        if maxk in partitions.keys():
                            partitions[maxk].append((x, y))
                        else:
                            partitions[maxk] = [(x, y)]
                # If a partition gets too many pixels, randomly remove some pixels.
                if self.MAX_NUM_OF_PIXELS_PER_KEY_POINT > 0:
                    for mk in partitions.keys():
                        begining_num = len(partitions[mk])
                        for i in range(begining_num - self.MAX_NUM_OF_PIXELS_PER_KEY_POINT):
                            partitions[mk].remove(random.choice(partitions[mk]))
                return partitions
            # For large images, such as ImageNet.
            else:
                # Cap the number of key points and give each an equal share of
                # pixels in raster order.
                key_points = key_points[:200]
                each_num = max(image.shape) ** 2 / len(key_points)
                maxk = 1
                partitions[maxk] = []
                for x in range(max(image.shape)):
                    for y in range(max(image.shape)):
                        if len(partitions[maxk]) <= each_num:
                            partitions[maxk].append((x, y))
                        else:
                            maxk += 1
                            partitions[maxk] = [(x, y)]
                return partitions
        else:
            print("Unrecognised feature extraction pattern."
                  "Try 'black-box' or 'grey-box'.")

    # Get saliency map of an image.
    def get_saliency_map(self, image, model, pixel_bounds=(0, 1)):
        """Rank pixels by how much manipulating them lowers the predicted
        class's softmax logit; most influential pixels come first.

        Returns an array with rows [x, y, min_logit_value, min_pixel_value].
        """
        self.PIXEL_BOUNDS = pixel_bounds
        image_class, _ = model.predict(image)
        new_pixel_list = np.linspace(self.PIXEL_BOUNDS[0], self.PIXEL_BOUNDS[1], self.NUM_OF_PIXEL_MANIPULATION)
        image_batch = np.kron(np.ones((self.NUM_OF_PIXEL_MANIPULATION, 1, 1, 1)), image)
        manipulated_images = []
        (row, col, chl) = image.shape
        for i in range(0, row):
            for j in range(0, col):
                # need to be very careful about image.copy()
                changed_image_batch = image_batch.copy()
                for p in range(0, self.NUM_OF_PIXEL_MANIPULATION):
                    changed_image_batch[p, i, j, :] = new_pixel_list[p]
                manipulated_images.append(changed_image_batch)  # each loop append [pixel_num, row, col, chl]
        manipulated_images = np.asarray(manipulated_images)  # [row*col, pixel_num, row, col, chl]
        manipulated_images = manipulated_images.reshape(row * col * self.NUM_OF_PIXEL_MANIPULATION, row, col, chl)
        # Use softmax logits instead of probabilities,
        # as probabilities may not reflect precise influence of one single pixel change.
        features_list = model.softmax_logits(manipulated_images)
        feature_change = features_list[:, image_class].reshape(-1, self.NUM_OF_PIXEL_MANIPULATION).transpose()
        min_indices = np.argmin(feature_change, axis=0)
        min_values = np.amin(feature_change, axis=0)
        min_idx_values = min_indices.astype('float32') / (self.NUM_OF_PIXEL_MANIPULATION - 1)
        [x, y] = np.meshgrid(np.arange(row), np.arange(col))
        x = x.flatten('F')  # to flatten in column-major order
        y = y.flatten('F')  # to flatten in column-major order
        target_feature_list = np.hstack((np.split(x, len(x)),
                                         np.split(y, len(y)),
                                         np.split(min_values, len(min_values)),
                                         np.split(min_idx_values, len(min_idx_values))))
        saliency_map = target_feature_list[target_feature_list[:, 2].argsort()]
        return saliency_map

    def plot_saliency_map(self, image, partitions, path):
        """Save a heatmap image where each pixel is colored by its partition id."""
        heatmap = np.zeros(image.shape[0:2])
        for partitionID in partitions.keys():
            pixels = partitions[partitionID]
            for pixel in pixels:
                heatmap[pixel] = partitionID + 1
        plt.imsave(path, heatmap)
|
print('''
so this is a simple
milti-line
print
=================
| |
| |
| Box |
| |
| |
=================
''')
|
from category.models import Category
from django.db.models import Q
from django.utils import timezone
from profile.models import UserProfile
from rest_framework.generics import get_object_or_404
from rest_framework.pagination import PageNumberPagination
from rest_framework.request import Request
from .models import Event
def filter_from_datetime_query(events_serializer):
    """Q filter on start_datetime >= from_datetime, or a no-op Q when absent."""
    from_datetime = events_serializer.data.get("from_datetime")
    if not from_datetime:
        return Q()
    return Q(start_datetime__gte=from_datetime)
def filter_until_datetime_query(events_serializer):
    """Q filter on end_datetime <= until_datetime, or a no-op Q when absent."""
    until_datetime = events_serializer.data.get("until_datetime")
    if not until_datetime:
        return Q()
    return Q(end_datetime__lte=until_datetime)
def filter_owner_query(owner):
    """Q filter restricting events to those owned by `owner`."""
    return Q(**{"owner": owner})
def filter_participant_query(events_serializer):
    """Q filter matching events whose participants include participant_id."""
    participant_id = events_serializer.data.get("participant_id")
    if not participant_id:
        return Q()
    return Q(participants__in=[participant_id])
def filter_type_query(events_serializer):
    """Map the serializer's `type` choice onto a Q filter.

    'all' matches everything, 'running' matches events currently in progress,
    'done' matches finished events. Unknown values previously fell off the
    end and returned None, which crashed the `&`-composition in callers;
    they now behave like 'all'.
    """
    event_type = events_serializer.data.get("type").value
    if event_type == "running":
        # Evaluate now() once so both bounds use the same instant.
        # NOTE(review): timezone.datetime.now() is naive; consider
        # django.utils.timezone.now() if the fields are timezone-aware.
        now = timezone.datetime.now()
        return Q(start_datetime__lt=now) & Q(end_datetime__gt=now)
    if event_type == "done":
        return Q(end_datetime__lt=timezone.datetime.now())
    # 'all' and anything unrecognized: no restriction.
    return Q()
def final_filter_query(events_serializer, owner):
    """Combine all owner-side filters: title search, dates, owner, participant, type."""
    query = Q(title__icontains=events_serializer.data.get("search_query"))
    query &= filter_from_datetime_query(events_serializer)
    query &= filter_until_datetime_query(events_serializer)
    query &= filter_owner_query(owner)
    query &= filter_participant_query(events_serializer)
    query &= filter_type_query(events_serializer)
    return query
def final_filter_query_client(events_serializer):
    """Combine the client-side filters (same as final_filter_query minus owner)."""
    query = Q(title__icontains=events_serializer.data.get("search_query"))
    query &= filter_from_datetime_query(events_serializer)
    query &= filter_until_datetime_query(events_serializer)
    query &= filter_participant_query(events_serializer)
    query &= filter_type_query(events_serializer)
    return query
def get_sorted_events(events_serializer, owner):
    """Events matching the owner-side filters, ordered by the requested sort field."""
    sort_field = events_serializer.data.get("sort").value
    filtered = Event.objects.filter(final_filter_query(events_serializer, owner))
    return filtered.order_by(sort_field)
def get_sorted_events_client(events_serializer):
    """Events matching the client-side filters, ordered by the requested sort field."""
    sort_field = events_serializer.data.get("sort").value
    filtered = Event.objects.filter(final_filter_query_client(events_serializer))
    return filtered.order_by(sort_field)
def create_an_event(validated_data, owner):
    """Persist and return a new Event owned by `owner` built from validated data."""
    category = get_object_or_404(Category, id=validated_data["category_id"])
    return Event.objects.create(
        start_datetime=validated_data["start_datetime"],
        end_datetime=validated_data["end_datetime"],
        title=validated_data["title"],
        discount=validated_data["discount"],
        description=validated_data["description"],
        category=category,
        owner=owner,
        price=validated_data["price"]
    )
def get_event_history_client(client: UserProfile, page: int, page_count: int, request: Request):
    """Return one page of the events the client participated in, newest-updated first."""
    paginator = PageNumberPagination()
    paginator.page_size = page_count
    # NOTE(review): DRF's PageNumberPagination reads the page number from the
    # request's query params, not from a `page` attribute; verify that this
    # assignment has any effect.
    paginator.page = page
    events = Event.objects.filter(participants=client).order_by('-updated_at')
    return paginator.paginate_queryset(events, request)
|
from datetime import date, datetime
import robin_stocks.robinhood as r
from pyrh import Robinhood
import pandas as pd
import csv
from collections import Counter
from robin_stocks.robinhood.export import export_completed_option_orders
import xlsxwriter as xl
from jproperties import Properties
def getCredentials():
    """Load the Robinhood email/password pair from the local properties file."""
    configs = Properties()
    with open(r'C:\Users\cthax\git\Stonk-Tracker\credentials.properties', 'rb') as read_prop:
        configs.load(read_prop)
    return configs.get("email").data, configs.get("password").data
def loginToRH(email, password):
    """Log the session into Robinhood; robin_stocks keeps the token internally.

    The call is made for its side effect only -- the return value was
    previously bound to an unused local, which has been removed.
    """
    r.login(email, password)
def getAllOptions(allPositions):
    """Fetch and return every option position from Robinhood.

    The `allPositions` parameter is kept for backward compatibility, but it
    was always overwritten by the API call in the original and is ignored.
    """
    return r.get_all_option_positions()
def getOptionTrades(allPositions):
    """Build display names and entry prices for every other position.

    Returns (optionNames, entryPrices, calls, puts) where calls/puts count
    the option types encountered.
    """
    optionNames = []
    entryPrices = []
    calls = 0
    puts = 0
    for idx in range(1, len(allPositions), 2):
        option = r.get_option_instrument_data_by_id(allPositions[idx]["option_id"])
        # local renamed from `date`, which shadowed the imported datetime.date
        expiry = option["expiration_date"]
        optionNames.append("{} ${} {} {}".format(
            option["chain_symbol"], option["strike_price"], option["type"], expiry))
        entryPrices.append("$" + allPositions[idx]["average_price"])
        if "call" in option["type"]:
            calls += 1
        else:
            puts += 1
    return optionNames, entryPrices, calls, puts
def getFrequentTickers(allPositions):
    """Count chain symbols over every other position; return the top 10 as a dict."""
    symbols = [allPositions[idx]['chain_symbol'] for idx in range(1, len(allPositions), 2)]
    return dict(Counter(symbols).most_common(10))
def CallPutChart(chartCell, writer, workbook, worksheet, label1, label2):
    """Insert a call-vs-put pie chart into the Options sheet at `chartCell`."""
    workbook = writer.book
    worksheet = writer.sheets["Options"]
    writer.sheets['Options'] = worksheet
    pie = workbook.add_chart({'type': 'pie'})
    # Series ranges use the list syntax:
    # [sheet, first_row, first_col, last_row, last_col].
    pie.add_series({
        'name': 'Pie sales data',
        'data_labels': {'percentage': True, 'category': True},
        'categories': ["Options", 0, 4, 0, 5],
        'values': ["Options", 1, 4, 1, 5]
    })
    pie.set_title({'name': 'Call vs Put Frequency'})
    pie.set_style(10)  # preset: colors with white outline and shadow
    worksheet.insert_chart(chartCell, pie)
def tickerFrequencyChart(chartCell, writer, workbook, worksheet, label1, label2):
    """Insert a ticker-frequency pie chart into the Options sheet at `chartCell`."""
    workbook = writer.book
    worksheet = writer.sheets["Options"]
    writer.sheets['Options'] = worksheet
    pie = workbook.add_chart({'type': 'pie'})
    # Series ranges use the list syntax:
    # [sheet, first_row, first_col, last_row, last_col].
    pie.add_series({
        'name': 'Ticker Frequency',
        'data_labels': {'value': True, 'category': True},
        'categories': ["Options", 1, 13, 10, 13],
        'values': ["Options", 1, 14, 10, 14]
    })
    pie.set_title({'name': 'Ticker Frequency'})
    pie.set_style(10)  # preset: colors with white outline and shadow
    worksheet.insert_chart(chartCell, pie)
def writeOptionInfo(listOfTickers, optionNames, entryPrices, calls, puts):
    """Write option stats and charts to an Excel workbook; return (writer, path).

    The caller is expected to finish with closeAndSave(writer). The original
    trailing `writer.save` was a bare attribute access (missing parentheses)
    and did nothing, so it has been removed.
    """
    excelPath = r"C:\Users\cthax\Desktop\OptionTrades.xlsx"
    df1 = pd.DataFrame({"Ticker" : listOfTickers.keys(), "Frequency" : listOfTickers.values()})
    df2 = pd.DataFrame({"Option Name" : optionNames, "Entry Price" : entryPrices})
    df3 = pd.DataFrame({"Calls" : [calls], "Puts" : [puts]})
    df4 = pd.DataFrame({"Last updated:" : [datetime.now().strftime("%d/%m/%Y %H:%M")]})
    writer = pd.ExcelWriter(excelPath, engine = 'xlsxwriter', datetime_format='mmm d yyyy hh:mm')
    workbook = writer.book
    worksheet = workbook.add_worksheet('Options')
    writer.sheets['Options'] = worksheet
    df1.to_excel(writer, sheet_name="Options", startcol=13, startrow=0, index= False)
    df2.to_excel(writer, sheet_name="Options", startcol=0, startrow=0, index= False)
    df3.to_excel(writer, sheet_name="Options", startcol=4, startrow=0, index= False)
    df4.to_excel(writer, sheet_name="Options", startcol=7, startrow=0, index= False)
    # Auto-size the option-name/price columns to the longest value or header.
    for i, col in enumerate(df2.columns):
        column_len = df2[col].astype(str).str.len().max()
        column_len = max(column_len, len(col)) + 2
        worksheet.set_column(i, i, column_len)
    CallPutChart("E12", writer, workbook, worksheet, "Calls", "Puts")
    tickerFrequencyChart("N12", writer, workbook, worksheet, "Ticker", "Amount")
    return writer, excelPath
def closeAndSave(writer):
    """Flush the Excel workbook to disk and log the session out of Robinhood."""
    writer.save()
    r.logout()
# =============================================================================
# Imports
# =============================================================================
import seaborn as sns
from main import main
import numpy as np
import matplotlib.pyplot as plt
import random
import math
import sys
import pandas as pd
# =============================================================================
# Define the experimentation function, i.e. a function that will perform a series
# of experiments to analyse the behavior of main(), wrt p
# main (function name, network parameter, number of iterations, network type, network status)
# =============================================================================
def experiment_curve_p(rep, tau, y, pstart, pend, pincr, x, z):
    """Sweep a network parameter and record the mean fitness of each run.

    Calls ``main(y, p, tau, x, z)`` ``rep`` times for every parameter value p,
    where p starts at ``pstart`` and is incremented by ``pincr`` while
    p <= ``pend``.

    Args:
        rep: repetitions per parameter value.
        tau: number of iterations handed to ``main``.
        y: test-function name ('ackley', 'rastrigin' or 'sphere').
        pstart, pend, pincr: sweep range and step of the network parameter.
        x: network type ('erdos' or 'albert').
        z: network status ('cons' or 'dyn').

    Returns:
        Tuple ``(final_results, p_axis, fitness_history, time_history)``:
        ``final_results`` has shape (num_parameter_values, rep), ``p_axis``
        lists the swept parameter values, and the two histories come from the
        LAST run only (None if the sweep is empty).
    """
    random.seed(9)
    p = pstart
    result_rows = []  # one row of `rep` mean-fitness values per parameter value
    p_axis = []
    # Defined up front so an empty sweep (pstart > pend) returns cleanly; the
    # original raised NameError on the return statement in that case.
    fitness_history, time_history = None, None
    while p <= pend:
        print(np.round(p, 2))
        row = np.zeros(rep)
        for r in range(rep):
            # main(function name, network parameter, iterations, type, status)
            mean_fitness, fitness_history, time_history, A = main(y, p, tau, x, z)
            row[r] = mean_fitness
        result_rows.append(row)
        p_axis.append(p)
        p += pincr
    # BUG FIX: stack the rows at the end instead of pre-allocating
    # ceil((pend - pstart) / pincr) rows. With float stepping of p the
    # pre-computed row count is not guaranteed to match the number of loop
    # iterations, which could raise IndexError or leave trailing zero rows.
    final_results = np.array(result_rows) if result_rows else np.zeros((0, rep))
    return final_results, p_axis, fitness_history, time_history
# =============================================================================
# Experiments programme
# 1- ER network, p in [0,1,0.01], tau=20, cons
# 1b- ER network, p in [0,1,0.01], tau=20, dyn (optional)
# 2- ER network, p in [0,1,0.01], tau=50, cons
# 2b- ER network, p in [0,1,0.01], tau=50, dyn (optional)
# 3- ER network, p in [0,1,0.01], tau=100, cons
# 3b- ER network, p in [0,1,0.01], tau=100, dyn (optional)
# 4- AB network, m in [1,49,1], tau=20, cons
# 4b- AB network, m in [1,49,1], tau=20, dyn (optional)
# 5- AB network, m in [1,49,1], tau=50, cons
# 5b- AB network, m in [1,49,1], tau=50, dyn (optional)
# 6- AB network, m in [1,49,1], tau=100, cons
# 6b- AB network, m in [1,49,1], tau=100, dyn (optional)
# =============================================================================
# Numerical experiments to get the data for Erdos
# =============================================================================
# ER sweeps with the constant ("cons") update rule: 10 repetitions per p,
# p swept over [0, 1] in steps of 0.01 (pend = 1.01 guards the endpoint
# against float stepping), at horizons tau = 20, 50 and 100.
# NOTE(review): every call rebinds p_axis/time_history, and the name
# fitness_history_sphere is reused even for the ackley/rastrigin runs, so
# only the LAST call's histories survive — presumably intentional; confirm.
ackley_results_1, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,20,'ackley',0,1.01,0.01,'erdos','cons')
ackley_results_2, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,50,'ackley',0,1.01,0.01,'erdos','cons')
ackley_results_3, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,100,'ackley',0,1.01,0.01,'erdos','cons')
rastrigin_results_1, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,20,'rastrigin',0,1.01,0.01,'erdos','cons')
rastrigin_results_2, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,50,'rastrigin',0,1.01,0.01,'erdos','cons')
rastrigin_results_3, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,100,'rastrigin',0,1.01,0.01,'erdos','cons')
sphere_results_1, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,20,'sphere',0,1.01,0.01,'erdos','cons')
sphere_results_2, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,50,'sphere',0,1.01,0.01,'erdos','cons')
sphere_results_3, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,100,'sphere',0,1.01,0.01,'erdos','cons')
# =============================================================================
# Saving the data
# =============================================================================
# Persist the raw per-repetition ER results, one CSV per function/horizon.
# (The intermediate sphere1..ackley3 frame variables were write-only, so the
# frames are built inline.)
er_raw_runs = (
    ('er_sphere1', sphere_results_1),
    ('er_sphere2', sphere_results_2),
    ('er_sphere3', sphere_results_3),
    ('er_rastrigin1', rastrigin_results_1),
    ('er_rastrigin2', rastrigin_results_2),
    ('er_rastrigin3', rastrigin_results_3),
    ('er_ackley1', ackley_results_1),
    ('er_ackley2', ackley_results_2),
    ('er_ackley3', ackley_results_3),
)
for stem, raw_results in er_raw_runs:
    pd.DataFrame(raw_results).to_csv(stem + '.csv')
# =============================================================================
# Plotting the results for ERDOS
# =============================================================================
# 3x3 grid of results: rows = Rastrigin / Sphere / Ackley, columns =
# horizons t in {20, 50, 100}; rows share a y axis, all panels share x.
fig, axes = plt.subplots(3, 3, sharey='row', sharex=True, figsize=(11, 5))
er_rows = (
    ('Rastrigin function', (rastrigin_results_1, rastrigin_results_2, rastrigin_results_3)),
    ('Sphere function', (sphere_results_1, sphere_results_2, sphere_results_3)),
    ('Ackley function', (ackley_results_1, ackley_results_2, ackley_results_3)),
)
for row_idx, (ylabel, row_results) in enumerate(er_rows):
    for col_idx, raw_results in enumerate(row_results):
        # Scatter of per-p mean fitness with a 4th-order polynomial trend.
        sns.regplot(p_axis, np.mean(raw_results, axis=1), order=4, ci=95,
                    line_kws={"color": "red"},
                    scatter_kws={'s': 2, "color": "blue"},
                    ax=axes[row_idx][col_idx], truncate=True)
    axes[row_idx][0].set_ylabel(ylabel)
for col_idx, title in enumerate(('$t=20$', '$t=50$', '$t=100$')):
    axes[0][col_idx].set_title(title)
    axes[2][col_idx].set_xlabel('$p$')
fig.suptitle(' ER network ')
plt.savefig("results_erdos.png", format="png", dpi=300)
plt.show()
# =============================================================================
# Numerical experiments to get the data for Albert
# =============================================================================
# AB (Barabasi-Albert) sweeps with the constant ("cons") update rule:
# 10 repetitions per m, m swept over [1, 49] in steps of 1 (pend = 49.01
# guards the endpoint against float stepping), at tau = 20, 50 and 100.
# NOTE(review): as in the ER runs, the histories are rebound by every call,
# so only the last call's histories survive — presumably intentional; confirm.
ackley_results_4, p_axis, fitness_history_ackley_4, time_history = experiment_curve_p(10,20,'ackley',1,49.01,1,'albert','cons')
ackley_results_5, p_axis, fitness_history_ackley_5, time_history = experiment_curve_p(10,50,'ackley',1,49.01,1,'albert','cons')
ackley_results_6, p_axis, fitness_history_ackley_6, time_history = experiment_curve_p(10,100,'ackley',1,49.01,1,'albert','cons')
rastrigin_results_4, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,20,'rastrigin',1,49.01,1,'albert','cons')
rastrigin_results_5, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,50,'rastrigin',1,49.01,1,'albert','cons')
rastrigin_results_6, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,100,'rastrigin',1,49.01,1,'albert','cons')
sphere_results_4, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,20,'sphere',1,49.01,1,'albert','cons')
sphere_results_5, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,50,'sphere',1,49.01,1,'albert','cons')
sphere_results_6, p_axis, fitness_history_sphere, time_history = experiment_curve_p(10,100,'sphere',1,49.01,1,'albert','cons')
# =============================================================================
# Saving data from Albert runs
# =============================================================================
# Persist the raw per-repetition AB results, one CSV per function/horizon.
ab_raw_runs = (
    ('ab_sphere4', sphere_results_4),
    ('ab_sphere5', sphere_results_5),
    ('ab_sphere6', sphere_results_6),
    ('ab_rastrigin4', rastrigin_results_4),
    ('ab_rastrigin5', rastrigin_results_5),
    ('ab_rastrigin6', rastrigin_results_6),
    ('ab_ackley4', ackley_results_4),
    ('ab_ackley5', ackley_results_5),
    ('ab_ackley6', ackley_results_6),
)
for stem, raw_results in ab_raw_runs:
    pd.DataFrame(raw_results).to_csv(stem + '.csv')
# Per-parameter mean fitness (averaged over the repetitions), one summary
# file per network type: ER first, then AB.
er_summary_columns = (
    ('sphere1', sphere_results_1), ('sphere2', sphere_results_2),
    ('sphere3', sphere_results_3),
    ('ackley1', ackley_results_1), ('ackley2', ackley_results_2),
    ('ackley3', ackley_results_3),
    ('rastrigin1', rastrigin_results_1), ('rastrigin2', rastrigin_results_2),
    ('rastrigin3', rastrigin_results_3),
)
df = pd.DataFrame()
for column_name, raw_results in er_summary_columns:
    df[column_name] = np.mean(raw_results, axis=1)
df.to_csv('data_nw_er.csv')
ab_summary_columns = (
    ('sphere4', sphere_results_4), ('sphere5', sphere_results_5),
    ('sphere6', sphere_results_6),
    ('ackley4', ackley_results_4), ('ackley5', ackley_results_5),
    ('ackley6', ackley_results_6),
    ('rastrigin4', rastrigin_results_4), ('rastrigin5', rastrigin_results_5),
    ('rastrigin6', rastrigin_results_6),
)
df = pd.DataFrame()
for column_name, raw_results in ab_summary_columns:
    df[column_name] = np.mean(raw_results, axis=1)
df.to_csv('data_nw_ab.csv')
# =============================================================================
# Plotting the results for ALBERT
# =============================================================================
# 3x3 grid of AB results: rows = Rastrigin / Sphere / Ackley, columns =
# horizons t in {20, 50, 100}; rows share a y axis, all panels share x.
fig, axes = plt.subplots(3, 3, sharey='row', sharex=True, figsize=(11, 5))
ab_rows = (
    ('Rastrigin function', (rastrigin_results_4, rastrigin_results_5, rastrigin_results_6)),
    ('Sphere function', (sphere_results_4, sphere_results_5, sphere_results_6)),
    ('Ackley function', (ackley_results_4, ackley_results_5, ackley_results_6)),
)
for row_idx, (ylabel, row_results) in enumerate(ab_rows):
    for col_idx, raw_results in enumerate(row_results):
        # Scatter of per-m mean fitness with a 4th-order polynomial trend.
        sns.regplot(p_axis, np.mean(raw_results, axis=1), order=4, ci=95,
                    line_kws={"color": "red"},
                    scatter_kws={'s': 2, "color": "blue"},
                    ax=axes[row_idx][col_idx], truncate=True)
    axes[row_idx][0].set_ylabel(ylabel)
for col_idx, title in enumerate(('$t=20$', '$t=50$', '$t=100$')):
    axes[0][col_idx].set_title(title)
    axes[2][col_idx].set_xlabel('$m$')
fig.suptitle('AB network')
plt.savefig("results_albert.png", format="png", dpi=300)
plt.show()
# =============================================================================
# Data for the results table
# What we are recording:
# For each test function, at the three terminal times considered:
# - The best fitness scores, averaged across the 10 runs, across all networks of the ER and AB types
# - The average fitness score of the traditional "complete" GA
# =============================================================================
# rastrigin
# Best score of each sweep = minimum of the per-parameter mean fitness
# (fitness is minimised). The *_GA rows instead take the mean fitness at the
# LAST parameter value of the ER sweep (p = 1, an essentially complete
# graph) as the traditional "complete" GA baseline.
# NOTE(review): there is no AB-based *_GA baseline — presumably the ER p=1
# value is shared across both tables; confirm.
rastrigin_avg20_ER = np.round(np.min(np.mean(rastrigin_results_1,axis=1)),3)
rastrigin_avg50_ER = np.round(np.min(np.mean(rastrigin_results_2,axis=1)),3)
rastrigin_avg100_ER = np.round(np.min(np.mean(rastrigin_results_3,axis=1)),3)
rastrigin_avg20_AB = np.round(np.min(np.mean(rastrigin_results_4,axis=1)),3)
rastrigin_avg50_AB = np.round(np.min(np.mean(rastrigin_results_5,axis=1)),3)
rastrigin_avg100_AB = np.round(np.min(np.mean(rastrigin_results_6,axis=1)),3)
rastrigin_avg20_GA = np.round(np.mean(rastrigin_results_1,axis=1)[-1],3)
rastrigin_avg50_GA = np.round(np.mean(rastrigin_results_2,axis=1)[-1],3)
rastrigin_avg100_GA = np.round(np.mean(rastrigin_results_3,axis=1)[-1],3)
# sphere
sphere_avg20_ER = np.round(np.min(np.mean(sphere_results_1,axis=1)),3)
sphere_avg50_ER = np.round(np.min(np.mean(sphere_results_2,axis=1)),3)
sphere_avg100_ER = np.round(np.min(np.mean(sphere_results_3,axis=1)),3)
sphere_avg20_AB = np.round(np.min(np.mean(sphere_results_4,axis=1)),3)
sphere_avg50_AB = np.round(np.min(np.mean(sphere_results_5,axis=1)),3)
sphere_avg100_AB = np.round(np.min(np.mean(sphere_results_6,axis=1)),3)
sphere_avg20_GA = np.round(np.mean(sphere_results_1,axis=1)[-1],3)
sphere_avg50_GA = np.round(np.mean(sphere_results_2,axis=1)[-1],3)
sphere_avg100_GA = np.round(np.mean(sphere_results_3,axis=1)[-1],3)
# ackley
ackley_avg20_ER = np.round(np.min(np.mean(ackley_results_1,axis=1)),3)
ackley_avg50_ER = np.round(np.min(np.mean(ackley_results_2,axis=1)),3)
ackley_avg100_ER = np.round(np.min(np.mean(ackley_results_3,axis=1)),3)
ackley_avg20_AB = np.round(np.min(np.mean(ackley_results_4,axis=1)),3)
ackley_avg50_AB = np.round(np.min(np.mean(ackley_results_5,axis=1)),3)
ackley_avg100_AB = np.round(np.min(np.mean(ackley_results_6,axis=1)),3)
ackley_avg20_GA = np.round(np.mean(ackley_results_1,axis=1)[-1],3)
ackley_avg50_GA = np.round(np.mean(ackley_results_2,axis=1)[-1],3)
ackley_avg100_GA = np.round(np.mean(ackley_results_3,axis=1)[-1],3)
|
from __future__ import print_function
from builtins import range
import numpy as np
from sklearn.base import BaseEstimator, clone
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
# from scipy.spatial.distance import cdist
from sklearn.metrics.pairwise import pairwise_distances as cdist
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from clr import best_clr
class CLRcRegressor(BaseEstimator):
    """Constrained cluster-wise linear regression.

    Rows sharing the same value of feature ``constr_id`` are constrained to
    be assigned to the same linear model ("plane"); at predict time the plane
    is looked up from that constraint value.
    """
    def __init__(self, num_planes, kmeans_coef, constr_id,
                 num_tries=1, clr_lr=None, max_iter=5):
        self.num_planes = num_planes
        self.kmeans_coef = kmeans_coef
        self.num_tries = num_tries
        self.constr_id = constr_id
        self.clr_lr = clr_lr
        self.max_iter = max_iter
    def fit(self, X, y, init_labels=None,
            seed=None, verbose=False):
        """Cluster the data into planes with ``best_clr`` under the constraint.

        Returns self, per the scikit-learn convention (the original returned
        None; callers ignoring the return value are unaffected).
        """
        if seed is not None:
            np.random.seed(seed)
        # Re-encode the raw constraint values as consecutive integers.
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        constr = np.empty(X.shape[0], dtype=int)
        for i, c_id in enumerate(np.unique(X[:, self.constr_id])):
            constr[X[:, self.constr_id] == c_id] = i
        self.labels_, self.models_, _, _ = best_clr(
            X, y, k=self.num_planes, kmeans_X=self.kmeans_coef,
            constr=constr, max_iter=self.max_iter, num_tries=self.num_tries,
            lr=self.clr_lr,
        )
        # Map each raw constraint value to its learned plane label.
        # TODO: optimize this
        self.constr_to_label = {}
        for i in range(X.shape[0]):
            self.constr_to_label[X[i, self.constr_id]] = self.labels_[i]
        return self
    def init_fit(self, labels, models, constr_to_label):
        """Install precomputed labels/models/lookup without refitting."""
        self.labels_ = labels
        self.models_ = models
        self.constr_to_label = constr_to_label
        return self
    def predict(self, X, test_constr=None):
        """Predict y by routing each row to the plane of its constraint value."""
        check_is_fitted(self, ['labels_', 'models_'])
        if test_constr is None:
            test_constr = X[:, self.constr_id]
        # TODO: optimize this
        test_labels = np.zeros(X.shape[0], dtype=int)  # was np.int (removed in NumPy 1.24)
        for i in range(X.shape[0]):
            test_labels[i] = self.constr_to_label[test_constr[i]]
        preds = np.empty(X.shape[0])
        for cl_idx in range(self.num_planes):
            if np.sum(test_labels == cl_idx) == 0:
                continue
            y_pred = self.models_[cl_idx].predict(X[test_labels == cl_idx])
            preds[test_labels == cl_idx] = y_pred
        return preds
class FuzzyCLRRegressor(BaseEstimator):
    """Fuzzy cluster-wise linear regression.

    Fits ``num_planes`` linear models with soft membership weights; the
    prediction is the weight-blended combination of all per-plane predictions.
    """
    def __init__(self, num_planes, kmeans_coef,
                 clr_lr=None, num_tries=1):
        self.num_planes = num_planes
        self.kmeans_coef = kmeans_coef
        self.num_tries = num_tries
        self.clr_lr = clr_lr
    def fit(self, X, y, init_labels=None, max_iter=20,
            seed=None, verbose=False):
        """Run fuzzy best_clr and store labels, models and mixing weights."""
        if seed is not None:
            np.random.seed(seed)
        fit_result = best_clr(
            X, y, k=self.num_planes, kmeans_X=self.kmeans_coef,
            max_iter=max_iter, num_tries=self.num_tries,
            lr=self.clr_lr, fuzzy=True
        )
        self.labels_, self.models_, self.weights_, _ = fit_result
        self.X_ = X
    def predict(self, X):
        """Blend every plane's prediction by the fitted fuzzy weights."""
        check_is_fitted(self, ['labels_', 'models_', 'weights_'])
        per_plane = np.empty((X.shape[0], self.num_planes))
        for plane in range(self.num_planes):
            per_plane[:, plane] = self.models_[plane].predict(X)
        return np.sum(per_plane * self.weights_, axis=1)
class CLRpRegressor(BaseEstimator):
    """Cluster-wise linear regression with a learned plane-assignment classifier.

    After clustering the training data into ``num_planes`` linear models with
    ``best_clr``, a classifier (``clf``) learns to predict the cluster label
    from the features, so unseen points are routed to (or, if ``weighted``,
    softly mixed over) the per-cluster models.
    """
    def __init__(self, num_planes, kmeans_coef, clr_lr=None, max_iter=5,
                 num_tries=1, clf=None, weighted=False, fuzzy=False):
        self.num_planes = num_planes
        self.kmeans_coef = kmeans_coef
        self.num_tries = num_tries
        self.weighted = weighted
        self.clr_lr = clr_lr
        self.fuzzy = fuzzy
        self.max_iter = max_iter
        # Default label classifier; callers may supply any sklearn-style
        # classifier instead.
        if clf is None:
            self.clf = RandomForestClassifier(n_estimators=20)
        else:
            self.clf = clf
    def fit(self, X, y, init_labels=None,
            seed=None, verbose=False):
        """Cluster with best_clr, then fit ``clf`` on the resulting labels."""
        if seed is not None:
            np.random.seed(seed)
        self.labels_, self.models_, _, _ = best_clr(
            X, y, k=self.num_planes, kmeans_X=self.kmeans_coef,
            max_iter=self.max_iter, num_tries=self.num_tries,
            lr=self.clr_lr, fuzzy=self.fuzzy
        )
        self.X_ = X
        if verbose:
            # NOTE(review): get_label_score_ already returns the mean, so
            # .mean()/.std() here act on a scalar and the printed "+-" term is
            # always 0 — confirm intent.
            label_score = self.get_label_score_()
            print("Label prediction: {:.6f} +- {:.6f}".format(
                label_score.mean(), label_score.std()))
        # Some classifiers refuse to fit on a single class; flip the first
        # label so at least two classes are present.
        if np.unique(self.labels_).shape[0] == 1:
            self.labels_[0] = 1 if self.labels_[0] == 0 else 0
        self.clf.fit(X, self.labels_)
    def init_fit(self, X, labels, models):
        """Install precomputed labels/models and fit only the classifier."""
        self.labels_ = labels
        self.models_ = models
        self.X_ = X
        self.clf.fit(X, self.labels_)
    def get_label_score_(self):
        """Cross-validated accuracy of the label classifier on training data."""
        return cross_val_score(self.clf, self.X_, self.labels_, cv=3).mean()
    def predict(self, X):
        """Predict by routing each sample through its predicted plane.

        With ``weighted``, predictions are a probability-weighted mixture over
        all planes instead of a hard assignment.
        """
        check_is_fitted(self, ['labels_', 'models_'])
        if self.weighted:
            # If clf saw fewer classes than num_planes, expand its class
            # probabilities into the full plane set (missing planes get 0).
            if 'n_classes_' in self.clf.__dict__ and self.clf.n_classes_ == self.num_planes:
                planes_probs = self.clf.predict_proba(X)
            else:
                planes_probs = np.zeros((X.shape[0], self.num_planes))
                planes_probs[:, self.clf.classes_] = self.clf.predict_proba(X)
            preds = np.empty((X.shape[0], self.num_planes))
            for cl_idx in range(self.num_planes):
                preds[:, cl_idx] = self.models_[cl_idx].predict(X)
            preds = np.sum(preds * planes_probs, axis=1)
        else:
            test_labels = self.clf.predict(X)
            preds = np.empty(X.shape[0])
            for cl_idx in range(self.num_planes):
                # Skip planes that received no samples.
                if np.sum(test_labels == cl_idx) == 0:
                    continue
                y_pred = self.models_[cl_idx].predict(X[test_labels == cl_idx])
                preds[test_labels == cl_idx] = y_pred
        return preds
class KPlaneLabelPredictor(BaseEstimator):
    """Classifier-style helper that assigns points to k-plane clusters.

    weight_mode:
      * 'kplane' (default): a point's label is its nearest cluster center.
      * 'size': labels/probabilities come from cluster sizes only, ignoring X.
    """
    def __init__(self, num_planes, weight_mode='kplane'):
        self.num_planes = num_planes
        self.n_classes_ = num_planes
        self.weight_mode = weight_mode
    def fit(self, X, y):
        """Record per-cluster sizes ('size' mode) or cluster centers."""
        if self.weight_mode == 'size':
            self.weights = np.empty(self.num_planes)
            for cl in range(self.num_planes):
                self.weights[cl] = np.sum(y == cl)
            self.weights /= np.sum(self.weights)
        else:
            self.centers_ = np.empty((self.num_planes, X.shape[1]))
            for cl in range(self.num_planes):
                if np.sum(y == cl) == 0:
                    # filling with inf empty clusters
                    self.centers_[cl] = np.ones(X.shape[1]) * 1e5
                    continue
                self.centers_[cl] = np.mean(X[y == cl], axis=0)
    def predict(self, X):
        """Return a cluster label for every row of X."""
        if self.weight_mode == 'size':
            # BUG FIX: the original read `probs = self.predict_proba` (the
            # bound method, never called) and passed it to np.argmax, which
            # raises a TypeError. In 'size' mode the probabilities are the
            # global cluster-size weights, so every sample gets the largest
            # cluster's label; return one label per row as callers expect.
            return np.full(X.shape[0], int(np.argmax(self.weights)))
        dst = cdist(self.centers_, X)
        return np.argmin(dst, axis=0)
    def predict_proba(self, X):
        """Per-plane weights: cluster sizes ('size') or distance-derived."""
        if self.weight_mode == 'size':
            # NOTE(review): returns a 1-D (num_planes,) vector regardless of
            # len(X); downstream code relies on broadcasting — confirm.
            return self.weights
        dst = cdist(self.centers_, X)
        # NOTE(review): this assigns LARGER weight to farther clusters (the
        # distances are not inverted) — looks suspicious but is preserved
        # as-is; confirm intended semantics.
        return dst.T / np.sum(dst.T, axis=1, keepdims=True)
    def score(self, X, y):
        """Accuracy of the hard label assignment."""
        return np.mean(self.predict(X) == y)
class KPlaneRegressor(CLRpRegressor):
    """CLRpRegressor preconfigured with a KPlaneLabelPredictor as the
    plane-assignment classifier."""
    def __init__(self, num_planes, kmeans_coef, fuzzy=False, max_iter=5,
                 num_tries=1, weighted=False, clr_lr=None):
        # 'size' is one of KPlaneLabelPredictor's weight modes; for the parent
        # class it simply means "use the weighted (probabilistic) path".
        if weighted == 'size':
            weighted_param = True
        else:
            weighted_param = weighted
        super(KPlaneRegressor, self).__init__(
            num_planes, kmeans_coef,
            num_tries=num_tries, fuzzy=fuzzy, max_iter=max_iter,
            clf=KPlaneLabelPredictor(num_planes, weight_mode=weighted),
            weighted=weighted_param, clr_lr=clr_lr,
        )
class RegressorEnsemble(BaseEstimator):
    """Average the predictions of ``n_estimators`` clones of a base regressor."""
    def __init__(self, rgr, n_estimators=10):
        self.rgr = rgr
        self.n_estimators = n_estimators
        # Each member is an unfitted clone of the prototype regressor.
        self.rgrs = [clone(self.rgr) for _ in range(self.n_estimators)]
    def fit(self, X, y, init_labels=None,
            seed=None, verbose=False):
        """Fit every ensemble member on the same data."""
        if seed is not None:
            np.random.seed(seed)
        for member in self.rgrs:
            member.fit(X, y, init_labels, verbose=verbose)
    def predict(self, X):
        """Return the element-wise mean of the members' predictions."""
        total = np.zeros(X.shape[0])
        for member in self.rgrs:
            total += member.predict(X)
        return total / len(self.rgrs)
|
from functools import partial
import itertools
import numpy as np
from rlkit.core.distribution import DictDistribution
from rlkit.samplers.data_collector.contextual_path_collector import (
ContextualPathCollector
)
from rlkit.envs.contextual import ContextualRewardFn
from gym.spaces import Box
from rlkit.samplers.rollout_functions import contextual_rollout
from rlkit import pythonplusplus as ppp
from collections import OrderedDict
from typing import Any, Callable, Dict
Observation = Dict
Goal = Any
class MaskDictDistribution(DictDistribution):
    """Dict goal distribution that pairs environment goals with goal-space masks.

    Three mask flavours are mixed according to ``mask_distr``:
      * 'atomic': one of the provided base masks,
      * 'subset': a 0/1-weighted combination of a subset of atomic masks,
      * 'full':   the combination of all atomic masks.
    """
    def __init__(
        self,
        env,
        desired_goal_keys=('desired_goal',),
        mask_format='vector',
        masks=None,
        mask_distr=None,
        max_subtasks_to_focus_on=None,
        prev_subtask_weight=None,
        mask_ids=None,
    ):
        """
        Args:
            env: environment providing ``observation_space`` and
                ``sample_goals``.
            desired_goal_keys: observation keys holding desired goals.
            mask_format: e.g. 'vector' or 'distribution' (only these two are
                referenced in this class).
            masks: dict mapping mask key -> array of atomic masks stacked
                along axis 0.
            mask_distr: probabilities for 'atomic'/'subset'/'full'; missing
                keys default to 0 and the values must sum to at most 1.
            max_subtasks_to_focus_on: if set, subset/full masks combine at
                most this many atomic masks.
            prev_subtask_weight: validated but not used in this class —
                presumably consumed by the path collector; confirm.
            mask_ids: ids of the atomic masks to sample from (defaults to
                all of them).
        """
        self._env = env
        self._desired_goal_keys = desired_goal_keys
        self.mask_keys = list(masks.keys())
        self.mask_dims = []
        for key in self.mask_keys:
            self.mask_dims.append(masks[key].shape[1:])
        env_spaces = self._env.observation_space.spaces
        self._spaces = {
            k: env_spaces[k]
            for k in self._desired_goal_keys
        }
        # Each mask lives in a [0, 1] box of its own shape.
        for mask_key, mask_dim in zip(self.mask_keys, self.mask_dims):
            self._spaces[mask_key] = Box(
                low=np.zeros(mask_dim),
                high=np.ones(mask_dim),
                dtype=np.float32,
            )
        self.mask_format = mask_format
        self.masks = masks
        self.mask_ids = mask_ids
        if self.mask_ids is None:
            # Default: one id per atomic mask.
            self.mask_ids = np.arange(next(iter(masks.values())).shape[0])
        self.mask_ids = np.array(self.mask_ids)
        self._num_atomic_masks = len(self.mask_ids)
        self._max_subtasks_to_focus_on = max_subtasks_to_focus_on
        if self._max_subtasks_to_focus_on is not None:
            assert isinstance(self._max_subtasks_to_focus_on, int)
        self._prev_subtask_weight = prev_subtask_weight
        if self._prev_subtask_weight is not None:
            assert isinstance(self._prev_subtask_weight, float)
        # Validate the mask-flavour distribution; missing entries default to 0.
        for key in mask_distr:
            assert key in ['atomic', 'subset', 'full']
            assert mask_distr[key] >= 0
        for key in ['atomic', 'subset', 'full']:
            if key not in mask_distr:
                mask_distr[key] = 0.0
        if np.sum(list(mask_distr.values())) > 1:
            raise ValueError("Invalid distribution sum: {}".format(
                np.sum(list(mask_distr.values()))
            ))
        self.mask_distr = mask_distr
        # Subset/full masks are enumerated lazily on first use (the subset
        # enumeration is combinatorial in the number of atomic masks).
        self.subset_masks = None
        self.full_masks = None
    @property
    def spaces(self):
        return self._spaces
    def sample(self, batch_size: int):
        """Sample ``batch_size`` goal dicts: masks plus desired goals."""
        goals = self.sample_masks(batch_size)
        ### sample the desired_goal ###
        if self.mask_format == 'distribution':
            ### the desired goal is exactly the same as mu ###
            goals.update({
                k: goals['mask_mu']
                for k in self._desired_goal_keys
            })
        else:
            env_samples = self._env.sample_goals(batch_size)
            goals.update({
                k: env_samples[k]
                for k in self._desired_goal_keys
            })
        return goals
    def sample_masks(self, batch_size):
        """Sample a batch of masks, mixing flavours per ``mask_distr``."""
        num_atomic_masks = int(batch_size * self.mask_distr['atomic'])
        num_subset_masks = int(batch_size * self.mask_distr['subset'])
        # 'full' absorbs the remainder so the three counts sum to batch_size.
        num_full_masks = batch_size - num_atomic_masks - num_subset_masks
        mask_goals = []
        if num_atomic_masks > 0:
            mask_goals.append(self.sample_atomic_masks(num_atomic_masks))
        if num_subset_masks > 0:
            mask_goals.append(self.sample_subset_masks(num_subset_masks))
        if num_full_masks > 0:
            mask_goals.append(self.sample_full_masks(num_full_masks))
        def concat(*x):
            return np.concatenate(x, axis=0)
        # Concatenate the per-flavour dicts key-wise into one batch.
        mask_goals = ppp.treemap(concat, *tuple(mask_goals),
                                 atomic_type=np.ndarray)
        return mask_goals
    def sample_atomic_masks(self, batch_size):
        """Sample atomic masks uniformly (with replacement) over mask_ids."""
        sampled_masks = {}
        sampled_mask_ids = np.random.choice(self.mask_ids, batch_size)
        for mask_key in self.mask_keys:
            sampled_masks[mask_key] = self.masks[mask_key][sampled_mask_ids]
        return sampled_masks
    def sample_subset_masks(self, batch_size):
        """Sample uniformly over the enumerated subset-combination masks."""
        if self.subset_masks is None:
            self.create_subset_and_full_masks()
        sampled_masks = {}
        sampled_mask_ids = np.random.choice(self._num_subset_masks, batch_size)
        for mask_key in self.mask_keys:
            sampled_masks[mask_key] = self.subset_masks[mask_key][sampled_mask_ids]
        return sampled_masks
    def sample_full_masks(self, batch_size):
        """Sample uniformly over the enumerated full-combination masks."""
        if self.full_masks is None:
            self.create_subset_and_full_masks()
        sampled_masks = {}
        sampled_mask_ids = np.random.choice(self._num_full_masks, batch_size)
        for mask_key in self.mask_keys:
            sampled_masks[mask_key] = self.full_masks[mask_key][sampled_mask_ids]
        return sampled_masks
    def create_subset_and_full_masks(self):
        """Enumerate every subset combination (and the full combination) of
        the atomic masks as 0/1-weighted sums of the flattened masks."""
        self.subset_masks = {k: [] for k in self.mask_keys}
        self.full_masks = {k: [] for k in self.mask_keys}
        def nCkBitmaps(n, k):
            """
            Shamelessly pilfered from
            https://stackoverflow.com/questions/1851134/generate-all-binary-strings-of-length-n-with-k-bits-set
            """
            result = []
            for bits in itertools.combinations(range(n), k):
                s = [0] * n
                for bit in bits:
                    s[bit] = 1
                result.append(s)
            return np.array(result)
        def npify(d):
            # Convert each list of masks into a single stacked ndarray.
            for key in d.keys():
                d[key] = np.array(d[key])
            return d
        def append_to_dict(d, keys, bm):
            # bm is a 0/1 vector over atomic masks; the combined mask is the
            # bm-weighted sum of the flattened atomic masks, reshaped back to
            # the mask's space shape.
            for k in keys:
                d[k].append(
                    (bm @ self.masks[k].reshape((self._num_atomic_masks, -1))).reshape(list(self._spaces[k].shape))
                )
        n = self._max_subtasks_to_focus_on \
            if (self._max_subtasks_to_focus_on is not None) \
            else self._num_atomic_masks
        for k in range(1, n + 1):
            list_of_bitmaps = nCkBitmaps(n, k)
            for bm in list_of_bitmaps:
                append_to_dict(self.subset_masks, self.mask_keys, bm)
                # The k == n case has exactly one bitmap: all masks combined.
                if k == n:
                    append_to_dict(self.full_masks, self.mask_keys, bm)
        self.subset_masks = npify(self.subset_masks)
        self.full_masks = npify(self.full_masks)
        self._num_subset_masks = next(iter(self.subset_masks.values())).shape[0]
        self._num_full_masks = next(iter(self.full_masks.values())).shape[0]
    def get_atomic_mask_to_indices(self, masks):
        """Map each atomic 'mask' vector to the indices in ``masks`` equal to
        it (only supported for the 'vector' mask format)."""
        assert self.mask_format in ['vector']
        atomic_masks_to_indices = OrderedDict()
        for mask in self.masks['mask']:
            atomic_masks_to_indices[tuple(mask)] = np.where(np.all(masks == mask, axis=1))[0]
        return atomic_masks_to_indices
class MaskPathCollector(ContextualPathCollector):
    """Path collector that feeds a per-step mask schedule into each rollout.

    Rollout flavours (mixed according to ``mask_distr``):
      * 'full':       one full-combination mask held for the whole path,
      * 'atomic':     one atomic mask held for the whole path,
      * 'atomic_seq': a sequence of atomic masks, one per path segment,
      * 'cumul_seq':  like atomic_seq, but each segment's mask also includes
                      the (optionally down-weighted) previous subtasks.
    """
    def __init__(
        self,
        *args,
        mask_sampler=None,
        mask_distr=None,
        mask_ids=None,
        max_path_length=100,
        rollout_mask_order='fixed',
        concat_context_to_obs_fn=None,
        prev_subtask_weight=False,
        max_subtasks_to_focus_on=None,
        max_subtasks_per_rollout=None,
        **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.mask_sampler = mask_sampler
        # Validate the rollout-type distribution; missing entries default to 0.
        for key in mask_distr:
            assert key in ['atomic', 'full', 'atomic_seq', 'cumul_seq']
            assert mask_distr[key] >= 0
        for key in ['atomic', 'full', 'atomic_seq', 'cumul_seq']:
            if key not in mask_distr:
                mask_distr[key] = 0.0
        if np.sum(list(mask_distr.values())) > 1:
            raise ValueError("Invalid distribution sum: {}".format(
                np.sum(list(mask_distr.values()))
            ))
        self.mask_distr = mask_distr
        if mask_ids is None:
            mask_ids = self.mask_sampler.mask_ids.copy()
        self.mask_ids = np.array(mask_ids)
        assert rollout_mask_order in ['fixed', 'random']
        self.rollout_mask_order = rollout_mask_order
        self.max_path_length = max_path_length
        # Per-step mask schedule for the CURRENT rollout; built by
        # reset_callback and consumed one entry per step by obs_processor.
        self.rollout_masks = []
        self._concat_context_to_obs_fn = concat_context_to_obs_fn
        self._prev_subtask_weight = prev_subtask_weight
        self._max_subtasks_to_focus_on = max_subtasks_to_focus_on
        self._max_subtasks_per_rollout = max_subtasks_per_rollout
        def obs_processor(o):
            # Pop the next scheduled mask (if any) into both the observation
            # and the env's rollout context, then concatenate observation and
            # context into the flat policy input.
            if len(self.rollout_masks) > 0:
                mask_dict = self.rollout_masks[0]
                self.rollout_masks = self.rollout_masks[1:]
                for k in mask_dict:
                    o[k] = mask_dict[k]
                    self._env._rollout_context_batch[k] = mask_dict[k][None]
            obs_and_context = {
                'observations': o[self._observation_key][None],
                'next_observations': o[self._observation_key][None],
            }
            for k in self._context_keys_for_policy:
                obs_and_context[k] = o[k][None]
            return self._concat_context_to_obs_fn(obs_and_context)['observations'][0]
        def unbatchify(d):
            # Strip the leading batch dimension from every entry.
            for k in d:
                d[k] = d[k][0]
            return d
        def reset_callback(env, agent, o):
            # Build the per-step mask schedule for the upcoming rollout.
            self.rollout_masks = []
            rollout_types = list(self.mask_distr.keys())
            probs = list(self.mask_distr.values())
            rollout_type = np.random.choice(rollout_types, 1, replace=True, p=probs)[0]
            if rollout_type == 'full':
                # One full-combination mask, repeated for every step.
                mask = unbatchify(self.mask_sampler.sample_full_masks(1))
                for _ in range(self.max_path_length):
                    self.rollout_masks.append(mask)
            else:
                atomic_masks = self.mask_sampler.masks
                mask_ids_for_rollout = self.mask_ids.copy()
                if self.rollout_mask_order == 'random':
                    np.random.shuffle(mask_ids_for_rollout)
                if self._max_subtasks_per_rollout is not None:
                    mask_ids_for_rollout = mask_ids_for_rollout[:self._max_subtasks_per_rollout]
                if rollout_type == 'atomic':
                    mask_ids_for_rollout = mask_ids_for_rollout[:1]
                # Split the path evenly among the scheduled subtasks.
                num_steps_per_mask = self.max_path_length // len(mask_ids_for_rollout)
                for i in range(len(mask_ids_for_rollout)):
                    mask = {}
                    for k in atomic_masks.keys():
                        if rollout_type in ['atomic_seq', 'atomic']:
                            mask[k] = atomic_masks[k][mask_ids_for_rollout[i]]
                        elif rollout_type == 'cumul_seq':
                            # Combine the current subtask's mask with (a
                            # window of) the previous subtasks' masks.
                            if self._max_subtasks_to_focus_on is not None:
                                start_idx = max(0, i + 1 - self._max_subtasks_to_focus_on)
                                end_idx = i + 1
                                atomic_mask_ids_for_rollout_mask = mask_ids_for_rollout[start_idx:end_idx]
                            else:
                                atomic_mask_ids_for_rollout_mask = mask_ids_for_rollout[0:i + 1]
                            atomic_mask_weights = np.ones(len(atomic_mask_ids_for_rollout_mask))
                            # NOTE(review): the default prev_subtask_weight is
                            # False, which is "not None", and isinstance(False,
                            # float) fails — a cumul_seq rollout with the
                            # default would trip this assert. Confirm callers
                            # always pass None or a float.
                            if self._prev_subtask_weight is not None:
                                assert isinstance(self._prev_subtask_weight, float)
                                atomic_mask_weights[:-1] = self._prev_subtask_weight
                            mask[k] = np.sum(
                                atomic_masks[k][atomic_mask_ids_for_rollout_mask] * atomic_mask_weights[:, np.newaxis],
                                axis=0
                            )
                        else:
                            raise NotImplementedError
                    num_steps = num_steps_per_mask
                    # The last subtask absorbs any leftover steps so the
                    # schedule covers exactly max_path_length entries.
                    if i == len(mask_ids_for_rollout) - 1:
                        num_steps = self.max_path_length - len(self.rollout_masks)
                    self.rollout_masks += num_steps*[mask]
        self._rollout_fn = partial(
            contextual_rollout,
            context_keys_for_policy=self._context_keys_for_policy,
            observation_key=self._observation_key,
            obs_processor=obs_processor,
            reset_callback=reset_callback,
        )
class ContextualMaskingRewardFn(ContextualRewardFn):
    """Reward function: negative masked goal distance, read from the context.

    Thin wrapper around ``default_masked_reward_fn``: the achieved goal is
    extracted from the next state, while the desired goal and the mask
    entries are taken from the context dict.
    """
    def __init__(
        self,
        achieved_goal_from_observation: Callable[[Observation], Goal],
        desired_goal_key='desired_goal',
        achieved_goal_key='achieved_goal',
        mask_keys=None,
        mask_format=None,
        use_g_for_mean=True,
        use_squared_reward=False,
    ):
        self._desired_goal_key = desired_goal_key
        self._achieved_goal_key = achieved_goal_key
        self._achieved_goal_from_observation = achieved_goal_from_observation
        self._mask_keys = mask_keys
        self._mask_format = mask_format
        self._use_g_for_mean = use_g_for_mean
        self._use_squared_reward = use_squared_reward
    def __call__(self, states, actions, next_states, contexts):
        # The reward depends only on where the agent ended up; the previous
        # state is intentionally ignored.
        achieved = self._achieved_goal_from_observation(next_states)
        reward_obs = {
            self._achieved_goal_key: achieved,
            self._desired_goal_key: contexts[self._desired_goal_key],
        }
        for mask_key in self._mask_keys:
            reward_obs[mask_key] = contexts[mask_key]
        return default_masked_reward_fn(
            actions, reward_obs,
            mask_format=self._mask_format,
            use_g_for_mean=self._use_g_for_mean,
            use_squared_reward=self._use_squared_reward,
        )
def default_masked_reward_fn(actions, obs, mask_format, use_g_for_mean, use_squared_reward):
    """Compute the negative masked distance between achieved and desired goals.

    Args:
        actions: unused; kept for the contextual reward-function signature.
        obs: dict with 'state_achieved_goal', 'state_desired_goal' and the
            mask entries required by ``mask_format``.
        mask_format: one of 'vector', 'matrix', 'distribution',
            'cond_distribution'.
        use_g_for_mean: for 'cond_distribution' only — if True, use the
            desired goal directly as the Gaussian mean instead of
            conditioning the mean on it.
        use_squared_reward: return -dist**2 instead of -dist.

    Returns:
        Array of shape (batch,) of non-positive rewards.

    Raises:
        TypeError: if ``mask_format`` is not one of the supported values.
    """
    achieved_goals = obs['state_achieved_goal']
    if mask_format == 'vector':
        # Element-wise mask: zero out the unmasked state dimensions, then
        # take the Euclidean norm of the remaining error.
        desired_goals = obs['state_desired_goal']
        mask = obs['mask']
        prod = (achieved_goals - desired_goals) * mask
        dist = np.linalg.norm(prod, axis=-1)
    elif mask_format in ['matrix', 'distribution', 'cond_distribution']:
        mu = obs['state_desired_goal']
        if mask_format == 'matrix':
            mask = obs['mask']
        elif mask_format == 'distribution':
            mask = obs['mask_sigma_inv']
        else:  # 'cond_distribution'
            mask = obs['mask_sigma_inv']
            if not use_g_for_mean:
                # Condition the Gaussian mean on the desired goal g:
                # mu = mu_w + A (g - mu_g)
                mu_w = obs['mask_mu_w']
                mu_g = obs['mask_mu_g']
                mu_A = obs['mask_mu_mat']
                mu = mu_w + np.squeeze(
                    mu_A @ np.expand_dims(obs['state_desired_goal'] - mu_g, axis=-1),
                    axis=-1
                )
        batch_size, state_dim = achieved_goals.shape
        # Mahalanobis-style distance sqrt(d^T M d), computed per sample.
        diff = (achieved_goals - mu).reshape((batch_size, state_dim, 1))
        prod = (diff.transpose(0, 2, 1) @ mask @ diff).reshape(batch_size)
        dist = np.sqrt(prod)
    else:
        # The original raised a bare TypeError; give the failure a message.
        raise TypeError('unknown mask_format: {!r}'.format(mask_format))
    return -dist ** 2 if use_squared_reward else -dist
#!/usr/local/miniconda2/bin/python
# _*_ coding: utf-8 _*_
"""
python train_word2vec_model.py wiki.en.text(语料库) word2vec_wiki.en.text.model word2vec_wiki.en.text.vector
得到了一个gensim中默认格式的word2vec model和一个原始c版本word2vec的vector格式的模型: wiki.en.text.vector
@author: MarkLiu
@time : 17-8-30 下午4:46
"""
from __future__ import absolute_import, division, print_function
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
sys.path.append(module_path)
import logging
import os
import sys
import multiprocessing
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
if __name__ == '__main__':
    # Set up a logger named after this script.
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # check and process input arguments
    if len(sys.argv) < 4:
        # Print the module usage (the docstring) and exit with an error code.
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp1, outp2 = sys.argv[1:4]
    # Train word2vec on one-sentence-per-line corpus `inp` using all cores.
    # NOTE(review): `size` is the gensim<4 keyword (renamed `vector_size`
    # in gensim 4) -- confirm the pinned gensim version.
    model = Word2Vec(LineSentence(inp), size=400, window=5, min_count=5,
                     workers=multiprocessing.cpu_count())
    # trim unneeded model memory = use(much) less RAM
    # model.init_sims(replace=True)
    model.save(outp1)  # native gensim model format
    model.wv.save_word2vec_format(outp2, binary=False)  # original C word2vec text format
|
from django.shortcuts import render, get_object_or_404
from django.views import generic
from django.views.generic import DetailView, CreateView
# from django.contrib.auth.forms import (
# UserCreationForm,
# UserChangeForm,
# PasswordChangeForm,
# )
from django.contrib.auth.views import PasswordChangeView
from django.urls import reverse_lazy
from .forms import (
SignUpForm,
EditProfileForm,
PasswordChangingForm,
ProfilePageForm
)
from blogy.models import Profile
# Create your views here.
class CreateProfilePageView(CreateView):
    """Create a Profile page for the currently logged-in user."""
    model = Profile
    form_class = ProfilePageForm
    template_name = 'registration/create_user_profile_page.html'
    # fields = '__all__'
    # This function let me know the user ID. Substitutes the javascript code in the HTML file.
    def form_valid(self, form):
        # Attach the requesting user before saving so the new profile is
        # owned by whoever submitted the form.
        form.instance.user = self.request.user
        return super().form_valid(form)
class EditProfilePageView(generic.UpdateView):
    """Edit the bio/social fields of an existing Profile; redirects home."""
    model = Profile
    template_name = 'registration/edit_profile_page.html'
    fields = [
        'bio', 'profile_pic', 'website_url',
        'facebook_url', 'twitter_url', 'instagram_url',
        'pinterest_url'
    ]
    success_url = reverse_lazy('home')
class ShowProfilePageView(DetailView):
    """Display a single user's profile page."""
    model = Profile
    template_name = 'registration/user_profile.html'

    def get_context_data(self, *args, **kwargs):
        """Expose the viewed Profile to the template as 'page_user'."""
        context = super().get_context_data(*args, **kwargs)
        profile = get_object_or_404(Profile, id=self.kwargs['pk'])
        context['page_user'] = profile
        return context
class PasswordsChangeView(PasswordChangeView):
    """Change password using the custom PasswordChangingForm."""
    form_class = PasswordChangingForm
    # form_class = PasswordChangeForm
    success_url = reverse_lazy('password-success')
    # success_url = reverse_lazy('home')
def password_success(request):
    """Render the password-change confirmation page."""
    return render(request, 'registration/password_success.html', {})
class UserRegisterView(generic.CreateView):
    """Sign-up view; redirects to login after successful registration."""
    form_class = SignUpForm
    template_name = 'registration/register.html'
    success_url = reverse_lazy('login')
class UserEditView(generic.UpdateView):
    """Edit the built-in User fields of the requesting user."""
    form_class = EditProfileForm
    template_name = 'registration/edit_profile.html'
    success_url = reverse_lazy('home')
    def get_object(self):
        # Always edit the requesting user; no pk needed in the URL.
        return self.request.user
|
# Maciej Izydorek
# prints out a random fruit
import random

# tuple of fruits to pick from
fruits = ('apple', 'banana', 'kiwi', 'orange', 'grapefruit', 'raspberry')

# random.choice picks one element directly; this also avoids rebinding the
# name `random`, which previously shadowed the module after first use.
fruit = random.choice(fruits)
print('A random fruit: {}'.format(fruit))
from setuptools import setup
# Package metadata passed straight to setuptools.setup() below.
s_args = {
    'name': 'eqclustering',
    'version': '0.1.0',
    'description': 'Statistical earthquake clustering algorithm',
    'author': 'Mark Williams',
    'maintainer': 'Nevada Seismological Laboratory',
    'maintainer_email': 'nvseismolab@gmail.com',
    # Fixed malformed URL: was 'https//github.com/...' (missing colon).
    'url': 'https://github.com/NVSeismoLab/eqclustering',
    'py_modules': ['eqclustering'],
    'install_requires': [
        'numpy',
    ],
}
# Invoke setuptools with the metadata assembled above.
setup(**s_args)
|
import ast
import inspect
from .handler import handle
def compile(func):
    """Transpile *func* to C, build it into a shared library with gcc, and
    return the library loaded via ctypes.

    Note: intentionally shadows the builtin ``compile`` (existing API).
    """
    import os
    import subprocess
    import tempfile
    import ctypes
    source = transpile(func)
    # delete=False so gcc can reopen the files by name; close our handles
    # immediately to avoid leaking file descriptors (the originals were
    # never closed).
    src = tempfile.NamedTemporaryFile(delete=False, suffix='.c')
    src.close()
    lib = tempfile.NamedTemporaryFile(delete=False, suffix='.out')
    lib.close()
    with open(src.name, 'w') as f:
        f.write(source)
    # Argument list (not a shell string) avoids quoting problems with temp
    # paths; check=True surfaces gcc failures instead of ignoring them.
    subprocess.run(['gcc', '-fPIC', '--shared', src.name, '-o', lib.name],
                   check=True)
    return ctypes.cdll.LoadLibrary(lib.name)
def transpile(func):
    """Return C source for *func*, transpiling each top-level statement of
    its body via `handle`."""
    tree = ast.parse(inspect.getsource(func))
    fdef = tree.body[0]
    # Translate every statement of the function body (the unused `name`
    # local from the original has been dropped).
    exprs = [handle(expr) for expr in fdef.body]
    return '\n'.join(exprs).strip()
class _callable_str:
def __init__(self, s):
self.s = s
def __call__(self, *args, **kwargs):
return self.s
def __str__(self):
return self.s
def __repr__(self):
return self.s
def transpiler(f):
    """Functional alias for `transpile`: return the C source for *f*."""
    return transpile(f)
|
from ezweb.objects.soup import EzSoup
from ezweb.objects.source import EzSource
from ezweb.objects.product import EzProduct |
#!/usr/bin/env python
'''Farmware Tools: Device.'''
from __future__ import print_function
import os
import sys
import uuid
from functools import wraps
import requests
from ._util import _request_write, _response_read, _mqtt_request, _mqtt_status
from .auxiliary import Color
from .env import Env
# Shared helpers: terminal colorizer and Farmware environment detection.
COLOR = Color()
ENV = Env()
# Accepted argument values for the axis/message/package commands below.
ALLOWED_AXIS_VALUES = ['x', 'y', 'z', 'all']
ALLOWED_MESSAGE_TYPES = [
    'success', 'busy', 'warn', 'error', 'info', 'fun', 'debug']
ALLOWED_MESSAGE_CHANNELS = ['ticker', 'toast', 'email', 'espeak']
ALLOWED_PACKAGES = ['farmbot_os', 'arduino_firmware', 'farmware']
# Sentinel log label used by _device_request to avoid recursively logging
# errors about the error log itself.
RESPONSE_ERROR_LOG_UUID = str(uuid.uuid4())
def _on_error():
    # Exit only when running on a device (Farmware API available); when
    # developing locally, fall through so callers can continue.
    if ENV.farmware_api_available():
        sys.exit(1)
def _check_celery_script(command):
    # Validate a Celery Script command dict and return (kind, args, body).
    # NOTE(review): if validation fails and _on_error() returns (API not
    # available), the final return raises UnboundLocalError because
    # `kind`/`args`/`body` were never bound -- confirm intended behavior
    # for off-device use.
    try:
        kind = command['kind']
        args = command['args']
    except (KeyError, TypeError):
        _cs_error('celery script', command)
        _on_error()
    else:
        body = command.get('body')
        if body is not None:
            # A body, when present, must be a list of child nodes.
            if not isinstance(body, list):
                _cs_error(kind, body)
                _on_error()
    return kind, args, body
def rpc_wrapper(command, rpc_id=None):
    """Wrap a command in `rpc_request` with the given `rpc_id`."""
    # Fall back to a fresh UUID when no label was supplied.
    label = rpc_id if rpc_id else str(uuid.uuid4())
    return {
        'kind': 'rpc_request',
        'args': {'label': label},
        'body': [command],
    }
def _device_request(method, endpoint, payload=None):
    'Make a request to the device Farmware API.'
    try:
        base_url = os.environ['FARMWARE_URL']
        token = os.environ['FARMWARE_TOKEN']
    except KeyError:
        # Not running on a FarmBot: silently no-op (returns None).
        return
    url = base_url + 'api/v1/' + endpoint
    request_kwargs = {}
    request_kwargs['headers'] = {
        'Authorization': 'Bearer ' + token,
        'content-type': 'application/json'}
    response_error_log = False
    if payload is not None:
        request_kwargs['json'] = payload
        # True when this request IS the error log itself; prevents
        # infinite error-log recursion below.
        response_error_log = payload.get(
            'args', {}).get('label') == RESPONSE_ERROR_LOG_UUID
    response = requests.request(method, url, **request_kwargs)
    if response.status_code != 200 and not response_error_log:
        log('{} request `{}` error ({})'.format(
            endpoint, payload or '', response.status_code), 'error',
            rpc_id=RESPONSE_ERROR_LOG_UUID)
        _on_error()
    return response
def _device_request_v2(payload):
    'Make a request to the device Farmware API (v2).'
    if not ENV.farmware_api_available():
        return
    # v2 uses request/response files instead of HTTP.
    _request_write(payload)
    rpc_uuid = payload.get('args', {}).get('label')
    return _response_read(rpc_uuid)
def _device_state_fetch_v2():
    'Get info from the device Farmware API (v2).'
    if ENV.bot_state_dir is None:
        return
    def _crawl(path):
        # Recursively mirror the state directory tree as nested dicts;
        # leaf files become their text content (None when empty).
        if os.path.isdir(path):
            return {n: _crawl(os.path.join(path, n)) for n in os.listdir(path)}
        with open(path, 'r') as value_file:
            value = value_file.read()
        return value if value != '' else None
    return _crawl(ENV.bot_state_dir)
def _post(endpoint, payload):
    """Post a payload to the device Farmware API.
    Since the only available endpoint is 'celery_script',
    use `send_celery_script(command)` instead.
    Args:
        endpoint (str): 'celery_script'
        payload (dict): i.e., {'kind': 'take_photo', 'args': {}}
    Returns:
        requests response object
    """
    # Prefer the v2 file-based API, then MQTT, then the v1 HTTP API.
    if ENV.use_v2():
        return _device_request_v2(payload)
    if ENV.use_mqtt():
        return _mqtt_request(payload)
    return _device_request('POST', endpoint, payload)
def _get(endpoint):
    """Get info from the device Farmware API.
    Since the only available endpoint is 'bot/state',
    use `get_bot_state()` instead.
    Args:
        endpoint (str): 'bot/state'
    Returns:
        requests response object
    """
    if ENV.use_v2():
        return _device_state_fetch_v2()
    if ENV.use_mqtt():
        return _mqtt_status()
    return _device_request('GET', endpoint)
def get_bot_state():
    """Get the device state."""
    bot_state = _get('bot/state')
    if bot_state is None:
        _error('Device info could not be retrieved.')
        _on_error()
        return {}
    # v2/MQTT return the state dict directly; v1 returns a requests
    # response that still needs decoding.
    return bot_state if (ENV.use_v2() or ENV.use_mqtt()) else bot_state.json()
def _send(function):
    # Decorator: the wrapped function assembles a Celery Script command;
    # the wrapper sends it to the device.
    @wraps(function)
    def wrapper(*args, **kwargs):
        'Send Celery Script to the device.'
        # `rpc_id` is consumed here; it is not a parameter of the wrapped
        # command-assembly function.
        rpc_id = kwargs.pop('rpc_id', None)
        if not isinstance(rpc_id, str):
            return send_celery_script(function(*args, **kwargs))
        return send_celery_script(function(*args, **kwargs), rpc_id=rpc_id)
    return wrapper
def send_celery_script(command, rpc_id=None):
    """Send a Celery Script command."""
    kind, args, body = _check_celery_script(command)
    # These kinds could not be wrapped in rpc_request before FBOS v7.0.1.
    temp_no_rpc_kinds = ['read_pin', 'write_pin',
                         'set_pin_io_mode', 'update_farmware']
    no_rpc = kind in temp_no_rpc_kinds and not ENV.fbos_at_least(7, 0, 1)
    if kind == 'rpc_request' or no_rpc:
        rpc = command
    else:
        rpc = rpc_wrapper(command, rpc_id=rpc_id)
    response = _post('celery_script', rpc)
    if response is None:
        # Not connected to a device: pretty-print the command instead.
        print(COLOR.colorize_celery_script(kind, args, body))
    return {
        'command': command,
        'sent': rpc,
        'response': response if (ENV.use_v2() or ENV.use_mqtt()) else {}
    }
def log(message, message_type='info', channels=None, rpc_id=None):
    """Send a send_message command to post a log to the Web App.
    Args:
        message (str): log message contents
        message_type (str, optional): One of ALLOWED_MESSAGE_TYPES.
            Defaults to 'info'.
        channels (list, optional): Any of ALLOWED_MESSAGE_CHANNELS.
            Defaults to None.
        rpc_id (str, optional): RPC request label. Defaults to None.
    """
    return send_message(message, message_type, channels, rpc_id=rpc_id)
def _assemble(kind, args, body=None):
'Assemble a celery script command.'
if body is None:
return {'kind': kind, 'args': args}
return {'kind': kind, 'args': args, 'body': body}
def _error(error_text):
    # Report an error: log to the Web App on-device, else print locally.
    if ENV.farmware_api_available():
        log(error_text, 'error')
    else:
        print(COLOR.error(error_text))
def _cs_error(kind, arg):
    # Report an invalid Celery Script argument (same on/off-device split).
    if ENV.farmware_api_available():
        log('Invalid arg `{}` for `{}`'.format(arg, kind), 'error')
    else:
        print(COLOR.error('Invalid input `{arg}` in `{kind}`'.format(
            arg=arg, kind=kind)))
def _check_arg(kind, arg, accepted):
'Error and exit for invalid command arguments.'
arg_ok = True
if arg not in accepted:
_cs_error(kind, arg)
_on_error()
arg_ok = False
return arg_ok
def assemble_coordinate(coord_x, coord_y, coord_z):
    """Assemble a coordinate Celery Script node from x, y, and z."""
    args = {'x': coord_x, 'y': coord_y, 'z': coord_z}
    return {'kind': 'coordinate', 'args': args}
def _assemble_channel(name):
    'Assemble a channel body item (for `send_message`).'
    return {
        'kind': 'channel',
        'args': {'channel_name': name}}
def assemble_pair(label, value):
    """Assemble a 'pair' Celery Script node (for use as a body item)."""
    return {
        'kind': 'pair',
        'args': {'label': label, 'value': value}}
def _nothing():
    # Empty placeholder Celery Script node.
    return {'kind': 'nothing', 'args': {}}
def _check_coordinate(coordinate):
coordinate_ok = True
try:
coordinate_ok = coordinate['kind'] == 'coordinate'
coordinate_ok = sorted(coordinate['args'].keys()) == ['x', 'y', 'z']
except (KeyError, TypeError):
coordinate_ok = False
if not coordinate_ok:
_cs_error('coordinate', coordinate)
_on_error()
return coordinate_ok
@_send
def send_message(message, message_type, channels=None):
    """Send command: send_message.
    Args:
        message (str): log message contents
        message_type (str): One of ALLOWED_MESSAGE_TYPES.
        channels (list, optional): Any of ALLOWED_MESSAGE_CHANNELS.
            Defaults to None.
    """
    kind = 'send_message'
    args_ok = _check_arg(kind, message_type, ALLOWED_MESSAGE_TYPES)
    if channels is not None:
        for channel in channels:
            # Accumulate results: one invalid channel must invalidate the
            # whole command (previously each check overwrote the last, so
            # only the final channel's validity mattered). _check_arg is
            # still called for every channel to keep its error reporting.
            args_ok = _check_arg(
                kind, channel, ALLOWED_MESSAGE_CHANNELS) and args_ok
    if args_ok:
        if channels is None:
            return _assemble(
                kind, {'message': message, 'message_type': message_type})
        return _assemble(
            kind,
            args={'message': message, 'message_type': message_type},
            body=[_assemble_channel(channel) for channel in channels])
@_send
def calibrate(axis):
    """Send command: calibrate.
    Args:
        axis (str): One of ALLOWED_AXIS_VALUES.
    """
    kind = 'calibrate'
    args_ok = _check_arg(kind, axis, ALLOWED_AXIS_VALUES)
    if args_ok:
        return _assemble(kind, {'axis': axis})
@_send
def check_updates(package):
    """Send command: check_updates.
    Args:
        package (str): One of ALLOWED_PACKAGES.
    """
    kind = 'check_updates'
    args_ok = _check_arg(kind, package, ALLOWED_PACKAGES)
    if args_ok:
        return _assemble(kind, {'package': package})
@_send
def emergency_lock():
    """Send command: emergency_lock."""
    kind = 'emergency_lock'
    return _assemble(kind, {})
@_send
def emergency_unlock():
    """Send command: emergency_unlock."""
    kind = 'emergency_unlock'
    return _assemble(kind, {})
@_send
def execute(sequence_id):
    """Send command: execute.
    Args:
        sequence_id (int): Web App Sequence ID.
            Sequence must be synced to FarmBot OS before execution.
    """
    kind = 'execute'
    args = {'sequence_id': sequence_id}
    return _assemble(kind, args)
@_send
def execute_script(label, inputs=None):
    """Send command: execute_script (Run Farmware).
    Args:
        label (str): Name of the Farmware to execute. Must be installed.
        inputs (dict, optional): Farmware configs, i.e., {'input_0': 0}.
            Defaults to None.
    """
    kind = 'execute_script'
    args = {'label': label}
    if inputs is None:
        return _assemble(kind, args)
    # Input names must be prefixed with the normalized Farmware name.
    farmware = label.replace(' ', '_').replace('-', '_').lower()
    body = []
    for key, value in inputs.items():
        if key.startswith(farmware):
            input_name = key
        else:
            input_name = '{}_{}'.format(farmware, key)
        body.append(assemble_pair(input_name, str(value)))
    return _assemble(kind, args, body)
def _set_docstring_for_execute_script_alias(func):
    # Copy execute_script's docstring onto the alias below.
    func.__doc__ = execute_script.__doc__
    return func
@_set_docstring_for_execute_script_alias
def run_farmware(label, inputs=None, rpc_id=None):
    """Alias for `execute_script`"""
    return execute_script(label, inputs, rpc_id=rpc_id)
@_send
def factory_reset(package):
    """Send command: factory_reset.
    Args:
        package (str): One of ALLOWED_PACKAGES.
    """
    kind = 'factory_reset'
    args_ok = _check_arg(kind, package, ALLOWED_PACKAGES)
    if args_ok:
        return _assemble(kind, {'package': package})
@_send
def find_home(axis):
    """Send command: find_home.
    Args:
        axis (str): One of ALLOWED_AXIS_VALUES.
    """
    kind = 'find_home'
    args_ok = _check_arg(kind, axis, ALLOWED_AXIS_VALUES)
    if args_ok:
        return _assemble(kind, {'axis': axis})
@_send
def home(axis):
    """Send command: home.
    Args:
        axis (str): One of ALLOWED_AXIS_VALUES.
    """
    kind = 'home'
    args_ok = _check_arg(kind, axis, ALLOWED_AXIS_VALUES)
    if args_ok:
        return _assemble(kind, {'axis': axis})
@_send
def install_farmware(url):
    """Send command: install_farmware.
    Args:
        url (str): URL for the Farmware's manifest.
    """
    kind = 'install_farmware'
    return _assemble(kind, {'url': url})
@_send
def install_first_party_farmware():
    """Send command: install_first_party_farmware."""
    kind = 'install_first_party_farmware'
    return _assemble(kind, {})
@_send
def move_absolute(location, speed=100, offset=None):
    """Send command: move_absolute.
    Celery Script 'coordinate' nodes can be assembled using
    `assemble_coordinate(coord_x, coord_y, coord_z)`.
    Args:
        location (dict): Celery Script 'coordinate' node.
        speed (int): Percent of max speed.
        offset (dict): Celery Script 'coordinate' node.
    """
    kind = 'move_absolute'
    args_ok = _check_coordinate(location)
    if offset is None:
        offset = assemble_coordinate(0, 0, 0)
    # Accumulate check results: previously each assignment overwrote the
    # previous one, so an invalid location/offset could slip through when
    # the later checks passed. Checks still run for their error reporting.
    args_ok = _check_coordinate(offset) and args_ok
    args_ok = _check_arg(kind, speed, range(1, 101)) and args_ok
    if args_ok:
        return _assemble(kind, {'location': location,
                                'speed': speed,
                                'offset': offset})
@_send
def move_relative(x=0, y=0, z=0, speed=100):
    """Send command: move_relative.
    Args:
        x (int): Distance.
        y (int): Distance.
        z (int): Distance.
        speed (int): Percent of max speed.
    """
    kind = 'move_relative'
    args_ok = _check_arg(kind, speed, range(1, 101))
    if args_ok:
        return _assemble(kind, {'x': x,
                                'y': y,
                                'z': z,
                                'speed': speed})
class Move():
    'Computed move command handler. Usable as a context manager.'
    def __init__(self):
        # Body items (axis overwrites/additions) are accumulated, then
        # sent as one `move` command.
        self.command = {'kind': 'move', 'args': {}, 'body': []}
        self.sent = False
    def __enter__(self):
        return self
    @staticmethod
    def create_numeric_operand(value):
        'Create numeric operand.'
        return {'kind': 'numeric', 'args': {'number': value}}
    @staticmethod
    def create_random_operand(variance):
        'Create random operand.'
        return {'kind': 'random', 'args': {'variance': variance}}
    def add_item(self, item):
        'Add movement item to body.'
        self.command['body'].append(item)
    def add_axis_overwrite(self, axis, operand):
        'Add axis overwrite.'
        self.add_item({'kind': 'axis_overwrite',
                       'args': {'axis': axis, 'axis_operand': operand}})
    def add_axis_addition(self, axis, operand):
        'Add axis addition.'
        self.add_item({'kind': 'axis_addition',
                       'args': {'axis': axis, 'axis_operand': operand}})
    def set_position(self, axis, value):
        'Set axis position.'
        self.add_axis_overwrite(axis, self.create_numeric_operand(value))
    def add_offset(self, axis, value):
        'Add axis offset.'
        self.add_axis_addition(axis, self.create_numeric_operand(value))
    def add_random_offset(self, axis, value):
        'Add axis random movement.'
        self.add_axis_addition(axis, self.create_random_operand(value))
    @_send
    def send(self):
        'Send movement command.'
        self.sent = True
        return self.command
    def clear(self):
        'Clear all items from body.'
        self.command['body'] = []
    def __exit__(self, *_args):
        # Auto-send on context exit when the user never called send().
        if not self.sent:
            self.send()
@_send
def power_off():
    """Send command: power_off (shuts the device down)."""
    kind = 'power_off'
    return _assemble(kind, {})
@_send
def read_pin(pin_number, label, pin_mode):
    """Send command: read_pin.
    Args:
        pin_number (int): Arduino pin (0 through 69).
        label (str): Any string.
        pin_mode (int): 0 (digital) or 1 (analog).
    """
    kind = 'read_pin'
    args_ok = _check_arg(kind, pin_number, range(0, 70))
    # Accumulate: previously the second check overwrote the first, so an
    # invalid pin_number with a valid pin_mode slipped through.
    args_ok = _check_arg(kind, pin_mode, [0, 1]) and args_ok
    if args_ok:
        return _assemble(kind, {'pin_number': pin_number,
                                'label': label,
                                'pin_mode': pin_mode})
@_send
def read_status():
    """Send command: read_status."""
    kind = 'read_status'
    return _assemble(kind, {})
@_send
def reboot(package='farmbot_os'):
    """Send command: reboot."""
    kind = 'reboot'
    args_ok = _check_arg(kind, package, ALLOWED_PACKAGES)
    if args_ok:
        return _assemble(kind, {'package': package})
@_send
def register_gpio(sequence_id, pin_number):
    """Send command: register_gpio (DEPRECATED).
    Deprecated. Use
    `app.post('pin_bindings', {'sequence_id': 0, 'pin_num': 0})`
    instead.
    Args:
        sequence_id (int): Web App Sequence ID.
            Sequence must be synced to FarmBot OS before registration.
        pin_number (int): Raspberry Pi GPIO BCM pin number.
    """
    kind = 'register_gpio'
    # BCM pin range for the Raspberry Pi header.
    args_ok = _check_arg(kind, pin_number, range(1, 30))
    if args_ok:
        return _assemble(kind, {'sequence_id': sequence_id,
                                'pin_number': pin_number})
@_send
def remove_farmware(package):
    """Send command: remove_farmware.
    Args:
        package (str): Name of the Farmware to uninstall.
    """
    kind = 'remove_farmware'
    return _assemble(kind, {'package': package})
@_send
def set_pin_io_mode(pin_io_mode, pin_number):
    """Send command: set_pin_io_mode.
    Args:
        pin_io_mode (int): 0 (input), 1 (output), or 2 (input_pullup)
        pin_number (int): Arduino pin (0 through 69).
    """
    kind = 'set_pin_io_mode'
    args_ok = _check_arg(kind, pin_io_mode, [0, 1, 2])
    # Accumulate: previously the second check overwrote the first, so an
    # invalid pin_io_mode with a valid pin_number slipped through.
    args_ok = _check_arg(kind, pin_number, range(0, 70)) and args_ok
    if args_ok:
        return _assemble(kind, {'pin_io_mode': pin_io_mode,
                                'pin_number': pin_number})
@_send
def set_servo_angle(pin_number, pin_value):
    """Send command: set_servo_angle.
    Args:
        pin_number (int): Arduino servo pin (4, 5, 6, or 11).
        pin_value (int): Servo angle (0 through 180).
    """
    kind = 'set_servo_angle'
    args_ok = _check_arg(kind, pin_number, [4, 5, 6, 11])
    # Accumulate: previously the second check overwrote the first, so an
    # invalid pin_number with a valid angle slipped through.
    args_ok = _check_arg(kind, pin_value, range(0, 181)) and args_ok
    if args_ok:
        return _assemble(kind, {'pin_number': pin_number,
                                'pin_value': pin_value})
@_send
def set_user_env(key, value):
    """Send command: set_user_env.
    Args:
        key (str): ENV key
        value (str): ENV value
    """
    kind = 'set_user_env'
    # The key/value pair goes in the body; args stay empty for this kind.
    body = [assemble_pair(key, str(value))]
    return _assemble(kind, {}, body)
@_send
def sync():
    """Send command: sync."""
    kind = 'sync'
    return _assemble(kind, {})
@_send
def take_photo():
    """Send command: take_photo."""
    kind = 'take_photo'
    return _assemble(kind, {})
@_send
def toggle_pin(pin_number):
    """Send command: toggle_pin.
    Args:
        pin_number (int): Arduino pin (0 through 69).
    """
    kind = 'toggle_pin'
    args_ok = _check_arg(kind, pin_number, range(0, 70))
    if args_ok:
        return _assemble(kind, {'pin_number': pin_number})
@_send
def unregister_gpio(pin_number):
    """Send command: unregister_gpio (DEPRECATED).
    Deprecated.
    Use `app.delete('pin_bindings', _id=0)` instead.
    Args:
        pin_number (int): Arduino pin (0 through 69).
    """
    kind = 'unregister_gpio'
    args_ok = _check_arg(kind, pin_number, range(0, 70))
    if args_ok:
        return _assemble(kind, {'pin_number': pin_number})
@_send
def update_farmware(package):
    """Send command: update_farmware.
    Args:
        package (str): Name of the Farmware to update.
    """
    kind = 'update_farmware'
    return _assemble(kind, {'package': package})
@_send
def wait(milliseconds):
    """Send command: wait.
    Args:
        milliseconds (int): Time to wait in milliseconds.
    """
    kind = 'wait'
    return _assemble(kind, {'milliseconds': milliseconds})
@_send
def write_pin(pin_number, pin_value, pin_mode):
    """Send command: write_pin.
    Args:
        pin_number (int): Arduino pin (0 through 69).
        pin_value (int): Value to write to pin.
        pin_mode (int): 0 (digital) or 1 (analog).
    """
    kind = 'write_pin'
    args_ok = _check_arg(kind, pin_number, range(0, 70))
    # Accumulate: previously the second check overwrote the first, so an
    # invalid pin_number with a valid pin_mode slipped through.
    args_ok = _check_arg(kind, pin_mode, [0, 1]) and args_ok
    if args_ok:
        return _assemble(kind, {'pin_number': pin_number,
                                'pin_value': pin_value,
                                'pin_mode': pin_mode})
@_send
def zero(axis):
    """Send command: zero.
    Args:
        axis (str): One of ALLOWED_AXIS_VALUES.
    """
    kind = 'zero'
    args_ok = _check_arg(kind, axis, ALLOWED_AXIS_VALUES)
    if args_ok:
        return _assemble(kind, {'axis': axis})
def get_current_position(axis='all', _get_bot_state=get_bot_state):
    """Get the current position.
    Args:
        axis (str, optional): One of ALLOWED_AXIS_VALUES. Defaults to 'all'.
        _get_bot_state (callable, optional): state provider; injectable
            for testing. Defaults to get_bot_state.
    Returns:
        'all': FarmBot position, i.e., {'x': 0.0, 'y': 0.0, 'z': 0.0}
        'x', 'y', or 'z': FarmBot axis position, i.e., 0.0
    """
    args_ok = _check_arg('get_current_position', axis, ALLOWED_AXIS_VALUES)
    if args_ok:
        if axis in ['x', 'y', 'z']:
            try:
                axis_val = _get_bot_state()['location_data']['position'][axis]
            except KeyError:
                # Missing state: report and fall through (returns None).
                _error('Position `{}` value unknown.'.format(axis))
            else:
                return float(axis_val)
        else:
            try:
                position = _get_bot_state()['location_data']['position']
            except KeyError:
                _error('Position unknown.')
            else:
                # Coerce all axis values to float.
                return {axis: float(value) for axis, value in position.items()}
def get_pin_value(pin_number, _get_bot_state=get_bot_state):
    """Get a value from a pin.
    Args:
        pin_number (int): Arduino pin (0 through 69).
        _get_bot_state (callable, optional): state provider; injectable
            for testing. Defaults to get_bot_state.
    """
    try:
        value = _get_bot_state()['pins'][str(pin_number)]['value']
    except KeyError:
        # Pin state not reported yet: report and return None.
        _error('Pin `{}` value unknown.'.format(pin_number))
    else:
        return value
if __name__ == '__main__':
    # Demo: exercise most commands (destructive ones left commented out).
    send_celery_script({'kind': 'read_status', 'args': {}})
    log('Hello World!')
    send_message('Hello World!', 'success')
    # calibrate('x')
    check_updates('farmbot_os')
    emergency_lock()
    emergency_unlock()
    execute(1)
    execute_script('take-photo')
    # factory_reset('farmbot_os')
    find_home('x')
    home('all')
    URL = 'https://raw.githubusercontent.com/FarmBot-Labs/farmware_manifests/' \
        'master/packages/take-photo/manifest.json'
    install_farmware(URL)
    install_first_party_farmware()
    COORD = assemble_coordinate(0, 0, 0)
    move_absolute(COORD, 100, COORD)
    move_relative(0, 0, 0, 100)
    # power_off()
    read_pin(1, 'label', 0)
    read_status()
    # reboot()
    # register_gpio(1, 1)
    remove_farmware('farmware')
    set_pin_io_mode(0, 47)
    set_servo_angle(4, 1)
    sync()
    take_photo()
    toggle_pin(1)
    # unregister_gpio(1)
    update_farmware('take-photo')
    wait(100)
    write_pin(1, 1, 0)
    zero('z')
    # preferred method for logging position
    log('At position: ({{x}}, {{y}}, {{z}})')
    # get position for calculations
    POSITION = get_current_position()
    log('At position: ({}, {}, {})'.format(
        POSITION['x'], POSITION['y'], POSITION['z']))
    # preferred method for logging pin value
    log('pin 13 value: {{pin13}}')
    # get pin value for calculations
    VALUE = get_pin_value(13)
    log('pin 13 value: {}'.format(VALUE))
|
# !/usr/bin/python
"""
Author: Thomas Laurenson
Email: thomas@thomaslaurenson.com
Website: thomaslaurenson.com
Date: 2016/02/23
Description:
printTimeTaken.py prints processing time of RegXML generated by CellXML-Registry.
Copyright (c) 2016, Thomas Laurenson
"""
import os
import sys
import glob
################################################################################
def _metric_value(line):
    """Return the text after the final ':' of a '>>>'-delimited log line."""
    return line.split(">>>")[1].split(":")[1]

def parseLogFile(logfile):
    """Parse one CellXML-Registry log file and print a CSV summary line:
    logfile,keys,values,deleted_keys,deleted_values,time_seconds

    Counters are summed across all matching lines in the file.
    """
    keys = 0
    values = 0
    dkeys = 0
    dvalues = 0
    time = 0
    with open(logfile) as f:
        for l in f:
            l = l.strip()
            # The repeated split(">>>")/split(":") extraction is factored
            # into _metric_value; the redundant f.close() inside the
            # `with` block has been removed.
            if "ExportDataToXMLFormat INFO >>> total_values:" in l:
                values += int(_metric_value(l))
            elif "ExportDataToXMLFormat INFO >>> total_keys:" in l:
                keys += int(_metric_value(l))
            elif "ExportDataToXMLFormat INFO >>> total_deleted_keys:" in l:
                dkeys += int(_metric_value(l))
            elif "ExportDataToXMLFormat INFO >>> total_deleted_values:" in l:
                dvalues += int(_metric_value(l))
            elif "CellXMLRegistry.Program Main INFO >>> Processing time:" in l:
                # Strip the trailing 's' unit, e.g. 'Processing time: 1.23s'.
                time += float(_metric_value(l).split("s")[0])
    print("%s,%d,%d,%d,%d,%f" % (logfile, keys, values, dkeys, dvalues, time))
################################################################################
if __name__=="__main__":
    import argparse
    parser = argparse.ArgumentParser(description='''printTimeTaken.py''', formatter_class = argparse.RawTextHelpFormatter)
    parser.add_argument("logfile",
                        help = "CellXML-Registry log file, or directory of log files")
    args = parser.parse_args()
    logfile = args.logfile
    # Print header
    # NOTE(review): the first header cell is the logfile path itself, not
    # a column title -- confirm whether a literal "Logfile" was intended.
    print("%s,%s,%s,%s,%s,%s" % (logfile,
                                 "Keys",
                                 "Values",
                                 "DeletedKeys",
                                 "DeletedValues",
                                 "Time"))
    if os.path.isdir(logfile):
        # Directory argument: summarize every *.log file inside it.
        logs = glob.glob(logfile + "/*.log")
        for log in logs:
            parseLogFile(log)
    else:
        parseLogFile(logfile)
|
class demomethod:
    """Demo class showing @staticmethod and @classmethod usage."""
    # Class-level attributes shared by all instances (unused in the demo).
    x = 0
    y = 0
    z = []
    @staticmethod #decorator for a static method (no self/cls argument)
    def static_mthd(): #define the static method
        print("static method")
    @classmethod #decorator for a class method (receives the class as cls)
    def class_mthd(cls): #define the class method
        print("class method")
def demothod(a, b):
    """Demo function: binds two (unused) locals, then calls both method
    kinds directly on the class object."""
    x = a
    y = b
    demomethod.static_mthd()
    demomethod.class_mthd()
# Both method kinds can also be called on an instance.
ab = demomethod()
ab.static_mthd()
ab.class_mthd()
"""
We can use the staticmethod and the class method to use an unspecified class
"""
|
import csv
import random
import cv2
import numpy as np
import os
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def load_data(paths, undersamples = [], set = 'train'):
    """For given list of paths to csv files, load images, labels and bboxes.
    Args:
        paths (list[str]): CSV files with rows of
            filename,label,startX,startY,w,h (first row is a header).
        undersamples (list, optional): labels to skip; currently unused,
            the check below is commented out. Defaults to [].
        set (str, optional): unused; also shadows the builtin `set`.
    Returns:
        tuple: (data, labels, bboxes) numpy arrays; each bbox is
        (startX, startY, endX, endY) normalized to [0, 1] by the true
        image size, while images are resized to 224x224.
    """
    data = []
    labels = []
    bboxes = []
    i = 0
    for csv_path in paths:
        with open(csv_path) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            next(csv_reader, None)  # skip the header row
            for row in csv_reader:
                # Check
                filename, label, startX, startY, w, h = row
                # Skip validation set
                if 'valid' in filename:
                    continue
                # check if lavel is in undersample list
                # if label in undersamples:
                # continue
                endX = float(startX) + float(w)
                endY = float(startY) + float(h)
                # NOTE(review): cv2.imread returns None for a missing or
                # unreadable file, which would crash on .shape -- confirm
                # all CSV paths exist.
                image = cv2.imread(filename)
                (h,w)=image.shape[:2]
                # Normalize the box by the actual image dimensions.
                startX = float(startX) / w
                startY = float(startY) / h
                endX = float(endX) / w
                endY = float(endY) / h
                image = load_img(filename, target_size=(224, 224))
                image = img_to_array(image)
                data.append(image)
                labels.append(label)
                bboxes.append((startX, startY, endX, endY))
                # Just testing on my pc remove for colab
                # if i > 10:
                # break
                # i += 1
    data = np.array(data, dtype=np.float64)# / 255.
    labels = np.array(labels)
    bboxes = np.array(bboxes, dtype=np.float64)
    return data, labels, bboxes
# Generator parametes
# NOTE(review): zoom_range=-0.01 produces the zoom interval [1.01, 0.99];
# a positive value (e.g. 0.01) is the conventional usage -- confirm intent.
datagen = ImageDataGenerator(rotation_range=5,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             zoom_range=-0.01,
                             brightness_range=[1,2.5],
                             fill_mode='nearest',
                             # fill_mode='wrap',
                             # fill_mode='reflect',
                             # horizontal_flip=True,
                             )
def add_noise(img):
    """Return a noisy copy of `img`: reverse the channel (last) axis,
    scale to [0, 1], add 0.1-scaled unit Gaussian noise, then clip and
    rescale back to [0, 255]."""
    scaled = img[..., ::-1] / 255.0
    noise = np.random.normal(loc=0, scale=1, size=scaled.shape)
    noisy = np.clip(scaled + noise * 0.1, 0, 1) * 255.0
    return noisy
# Load data then generate synthetic images applying basic augmentation + noise,
# preserving bbox positions on images (acts as regularisation).
images, labels, bboxes = load_data(['data/data.csv'])

# Create one output folder per class label.
augmented_folder = 'data/augmented/'
unique = np.unique(labels)
for i in unique:
    if not os.path.exists(augmented_folder + str(i)):
        os.makedirs(augmented_folder + str(i))

# Index CSV of the synthetic samples: filename, label, startX, startY, w, h
csv_data = open('data/data_augmented.csv', 'w')
csv_data.write('im_path,label,x,y,w,h\n')

for data in zip(images, labels, bboxes):
    image, label, bbox = data
    # Count existing files so new names do not overwrite previous runs.
    count = int(len(os.listdir(augmented_folder + str(label))))
    for i in range(12):
        aug_filename = augmented_folder + str(label) + '/' + str(label) + '_' + str(count + i) + '.jpg'
        # Geometric/brightness augmentation (rotation, shift, zoom, brightness).
        current_img = datagen.random_transform(image)
        # Jitter the bbox slightly.
        noisy_bbox = bbox + np.random.normal(0, 0.02, bbox.shape)
        # Add pixel noise, 25% chance.
        if np.random.rand() > 0.75:
            current_img = add_noise(current_img)
        # Horizontal flip, 25% chance.
        if np.random.rand() > 0.75:
            current_img = cv2.flip(current_img, 1)
            # Mirror only the x coordinates: startX' = 1 - endX and
            # endX' = 1 - startX. (The old `1 - noisy_bbox` also inverted
            # y and swapped the start/end ordering.)
            noisy_bbox[[0, 2]] = 1.0 - noisy_bbox[[2, 0]]
        # Swap two color channels, 40% chance.
        if np.random.rand() > 0.6:
            a, b = random.sample([0, 1, 2], 2)
            # Index the channel (last) axis; `current_img[[a, b]]` swapped
            # image ROWS a and b instead of channels.
            current_img[..., [a, b]] = current_img[..., [b, a]]
        # Gaussian blur, 50% chance.
        if np.random.rand() > 0.5:
            # Assign the result back: previously it was stored in an unused
            # `dst` variable, so the blur never took effect.
            current_img = cv2.GaussianBlur(current_img, (3, 3), cv2.BORDER_DEFAULT)
        # Clip the jittered bbox back into image bounds.
        noisy_bbox = np.clip(noisy_bbox, 0., 1.)
        h, w = current_img.shape[:2]
        startX = int(noisy_bbox[0] * w)
        startY = int(noisy_bbox[1] * h)
        w = abs(startX - int(noisy_bbox[2] * w))
        h = abs(startY - int(noisy_bbox[3] * h))
        cv2.imwrite(aug_filename, current_img)
        csv_data.write(aug_filename + ',' + str(label) + ',' + str(startX) + ',' + str(startY) + ',' + str(w) + ',' + str(h) + '\n')
    # Noise-only variants of the original image.
    for j in range(3):
        # `i` is 11 after the loop above, so numbering continues from there.
        aug_filename = augmented_folder + str(label) + '/' + str(label) + '_' + str(count + i + j + 1) + '.jpg'
        current_img = add_noise(image)
        # Jitter the bbox slightly and clip into bounds.
        noisy_bbox = bbox + np.random.normal(0, 0.02, bbox.shape)
        noisy_bbox = np.clip(noisy_bbox, 0., 1.)
        h, w = current_img.shape[:2]
        startX = int(noisy_bbox[0] * w)
        startY = int(noisy_bbox[1] * h)
        w = abs(startX - int(noisy_bbox[2] * w))
        h = abs(startY - int(noisy_bbox[3] * h))
        cv2.imwrite(aug_filename, current_img)
        csv_data.write(aug_filename + ',' + str(label) + ',' + str(startX) + ',' + str(startY) + ',' + str(w) + ',' + str(h) + '\n')
# Flush the augmented-data index to disk (it was never closed before).
csv_data.close()
|
from io import StringIO
from collections import defaultdict
import logging
from typing import Dict, List
from ror.RORModel import RORModel
from ror.RORParameters import RORParameters
from ror.RORResult import RORResult
from ror.ResultAggregator import AbstractResultAggregator
from ror.alpha import AlphaValue, AlphaValues
from ror.loader_utils import RORParameter
from ror.result_aggregator_utils import BIG_NUMBER, Rank, RankItem, create_flat_ranks, get_position_in_rank, group_equal_alternatives_in_ranking
import pandas as pd
import numpy as np
import os
class WeightedResultAggregator(AbstractResultAggregator):
    """Aggregates the intermediate ranks (one per alpha value) into a final
    rank by weighting each alternative's distance and summing the weighted
    distances.

    A weight > 1.0 increases a rank's importance (it lowers the weighted
    distance), a weight < 1.0 decreases it, and a weight of 0 effectively
    excludes the rank by substituting BIG_NUMBER for the distance.
    """

    def __init__(self) -> None:
        super().__init__('WeightedResultAggregator')
        # alternative name -> weighted distances, one entry per alpha value
        self.weighted_data: Dict[str, List[float]] = dict()
        self.alpha_values: AlphaValues = None

    def aggregate_results(self, result: RORResult, parameters: RORParameters) -> RORResult:
        """Produce the final rank from per-alpha results using user weights.

        :param result: solved ROR result holding per-alpha distances.
        :param parameters: must provide ALPHA_WEIGHTS (list of values >= 0,
            one per alpha value), ALPHA_VALUES and EPS.
        :returns: the same RORResult with final_rank and intermediate ranks set.
        """
        super().aggregate_results(result, parameters)
        weights_parameter = parameters.get_parameter(
            RORParameter.ALPHA_WEIGHTS)
        assert type(
            weights_parameter) is list, 'Weights must be provided as a list with values >= 0 that corresponds to alpha values'
        self.alpha_values = AlphaValues.from_list(
            parameters.get_parameter(RORParameter.ALPHA_VALUES))
        assert len(weights_parameter) == len(
            self.alpha_values.values), 'Number of weights must correspond to the number of alpha values'
        weights = {
            f'alpha_{alpha_value}': weight
            for alpha_value, weight in zip(self.alpha_values.values, weights_parameter)
        }
        assert all([weight >= 0.0 for weight in weights.values()]
                   ), 'All weights must be greater or equal 0.0'
        eps = parameters.get_parameter(RORParameter.EPS)
        data = result.get_results_dict(self.alpha_values)
        # divide values by weights - alternative value is the distance to the ideal
        # alternative, so we need to divide instead of multiplying
        for alternative in data:
            alternative_data = data[alternative]
            self.weighted_data[alternative] = [
                value / weight if weight > 0 else BIG_NUMBER for value, weight in zip(alternative_data, weights.values())
            ]
        flat_ranks = create_flat_ranks(self.weighted_data)
        # sum weighted distances over all ranks, per alternative
        values_per_alternative: Dict[str, float] = defaultdict(lambda: 0)
        for rank in flat_ranks:
            for rank_item in rank:
                values_per_alternative[rank_item.alternative] += rank_item.value
        # sort by alternative's value, ascending: smaller distance is better
        sorted_final_rank = sorted(
            values_per_alternative.items(), key=lambda alternative: alternative[1])
        # wrap sorted final rank into RankItem
        final_rank = [RankItem(alternative, value)
                      for alternative, value in sorted_final_rank]
        # place same results into same positions
        final_rank = group_equal_alternatives_in_ranking(final_rank, eps)
        resolved_final_rank = self._tie_resolver.resolve_rank(final_rank, result, parameters)
        # draw positions
        # get dir for all ranks because dir contains datetime so must be one for all
        for alpha_value, intermediate_flat_rank in zip(self.alpha_values.values, flat_ranks):
            # create intermediate ranks for drawing
            grouped_rank = group_equal_alternatives_in_ranking(
                intermediate_flat_rank, eps)
            name = f'alpha_{round(alpha_value, 4)}'
            image_filename = self.draw_rank(grouped_rank, result.output_dir, f'weighted_{name}')
            # BUGFIX: store the grouped intermediate rank; previously this passed
            # the stale `rank` loop variable left over from the summation loop above
            result.add_intermediate_rank(
                name, Rank(grouped_rank, image_filename, AlphaValue.from_value(alpha_value)))
        final_rank_image_filename = self.draw_rank(resolved_final_rank, result.output_dir, 'weighted_final_rank')
        final_rank_object = Rank(
            resolved_final_rank,
            final_rank_image_filename
        )
        result.final_rank = final_rank_object
        return result

    def explain_result(self, alternative_1: str, alternative_2: str) -> str:
        """Return a human-readable explanation of why the two alternatives
        occupy their relative positions in the final rank (based on the sums
        of their weighted distances; smaller sum means a higher position).
        """
        assert alternative_1 in self.weighted_data, f'No results for alternative {alternative_1} found'
        assert alternative_2 in self.weighted_data, f'No results for alternative {alternative_2} found'
        alternative_1_weights = self.weighted_data[alternative_1]
        alternative_2_weights = self.weighted_data[alternative_2]
        precision = 3

        def rounded(number):
            return round(number, precision)
        explanation = StringIO()
        explanation.write(
            f'First alternative {alternative_1} has the following results for alpha values:\n')
        alternative_1_sum: float = 0
        for alpha_value, result in zip(self.alpha_values.values, alternative_1_weights):
            explanation.write(f'Alpha {alpha_value}: {rounded(result)}\n')
            alternative_1_sum += result
        explanation.write(f'Sum is {rounded(alternative_1_sum)}\n')
        explanation.write(
            f'Second alternative {alternative_2} has the following results for alpha values:\n')
        alternative_2_sum: float = 0
        for alpha_value, result in zip(self.alpha_values.values, alternative_2_weights):
            explanation.write(f'Alpha {alpha_value}: {rounded(result)}\n')
            alternative_2_sum += result
        explanation.write(f'Sum is {rounded(alternative_2_sum)}\n')
        final_rank = self._ror_result.final_rank
        final_rank_alt_1_position = get_position_in_rank(
            alternative_1, final_rank)
        final_rank_alt_2_position = get_position_in_rank(
            alternative_2, final_rank)
        if alternative_1_sum > alternative_2_sum:
            explanation.write(
                f'First alternative {alternative_1} has bigger distance (sum)\n')
            explanation.write(
                f'therefore it is on the lower position in the rank ({final_rank_alt_1_position})\n')
            explanation.write(
                f'than the second alternative {alternative_2} (position {final_rank_alt_2_position})\n')
        # BUGFIX: this branch previously repeated the `>` comparison, which made
        # it unreachable; it must cover the "lower distance" case
        elif alternative_1_sum < alternative_2_sum:
            explanation.write(
                f'First alternative {alternative_1} has lower distance (sum)\n')
            explanation.write(
                f'therefore it is on the higher position in the rank ({final_rank_alt_1_position})\n')
            explanation.write(
                f'than the second alternative {alternative_2} (position {final_rank_alt_2_position})\n')
        else:
            explanation.write(
                f'First {alternative_1} and second {alternative_2} alternative have same distance (sum)\n')
            explanation.write(
                f'with the precision of eps value {self._ror_parameters.get_parameter(RORParameter.EPS)}\n')
            explanation.write(
                f'therefore they are on the same position {final_rank_alt_1_position} in the final rank\n')
        return explanation.getvalue()

    def get_alpha_values(self, model: RORModel, parameters: RORParameters) -> AlphaValues:
        """Read the alpha values used by this aggregation from parameters."""
        return AlphaValues.from_list(parameters.get_parameter(RORParameter.ALPHA_VALUES))

    def get_weighted_distances(self) -> pd.DataFrame:
        """Return a DataFrame (alternatives x alpha columns + 'sum') with the
        weighted distances computed by aggregate_results.

        :raises AssertionError: if called before the model was solved.
        """
        assert self.weighted_data is not None and self._ror_result is not None,\
            'Model must be solved before getting weighted distances'
        alternatives = self._ror_result.model.dataset.alternatives
        # number of columns is equal to the number of ranks == number of alpha values
        # plus 1 for column with sum
        alpha_values: List[float] = self._ror_parameters.get_parameter(RORParameter.ALPHA_VALUES)
        columns = len(alpha_values) + 1
        data = np.zeros(shape=(len(alternatives), columns))
        column_names = [f'alpha_{round(alpha_value, 3)}' for alpha_value in alpha_values]
        column_names.append('sum')
        for row, alternative in enumerate(alternatives):
            values = self.weighted_data[alternative]
            data[row, :len(values)] = values
            data[row, -1] = sum(values)
        return pd.DataFrame(
            data=data,
            index=alternatives,
            columns=column_names
        )

    def save_weighted_distances(self, filename: str, directory: str = None) -> str:
        """Save the weighted-distances table as a ';'-separated CSV file.

        :param filename: name of the output file.
        :param directory: target directory; defaults to the result's output dir.
        :returns: full path of the written file.
        """
        _directory = directory if directory is not None else self._ror_result.output_dir
        data = self.get_weighted_distances()
        logging.info(f'Alpha weights {self._ror_parameters.get_parameter(RORParameter.ALPHA_WEIGHTS)}')
        fullpath = os.path.join(_directory, filename)
        data.to_csv(fullpath, sep=';')
        logging.info(f'Saved weighted distances to "{fullpath}"')
        return fullpath

    def help(self) -> str:
        """Short description of this aggregator for end users."""
        return '''
Function that aggregates results from ranks: R, Q and S by adding weights to ranks.
Weights must be greater or equal 0.0
Weight > 1.0 increases importance of the rank (lowers value)
Weight < 1.0 decreases importance of the rank (increases value)
Weight == 1.0 doesn't change the importance of the rank
        '''
|
# Copyright Epic Games, Inc. All Rights Reserved.
import time
from . import utilities
from ..dependencies import remote_execution
unreal_response = ''
def run_unreal_python_commands(remote_exec, commands, failed_connection_attempts=0):
    """
    This function finds the open unreal editor with remote connection enabled, and sends it python commands.

    :param object remote_exec: A RemoteExecution instance.
    :param str commands: A formatted string of python commands that will be run by the engine.
    :param int failed_connection_attempts: A counter that keeps track of how many times an editor connection attempt
    was made. Retries recursively up to 50 times before reporting an error.
    """
    # wait a tenth of a second before attempting to connect
    time.sleep(0.1)
    try:
        # try to connect to an editor
        for node in remote_exec.remote_nodes:
            remote_exec.open_command_connection(node.get("node_id"))

        # if a connection is made
        if remote_exec.has_command_connection():
            # run the import commands and save the response in the global unreal_response variable
            global unreal_response
            unreal_response = remote_exec.run_command(commands, unattended=False)

        # otherwise make another attempt to connect to the engine
        else:
            if failed_connection_attempts < 50:
                # NOTE(review): each recursive retry runs inside this try, so the
                # finally below calls stop() once per recursion level as the calls
                # unwind - presumably harmless; confirm stop() is idempotent.
                run_unreal_python_commands(remote_exec, commands, failed_connection_attempts + 1)
            else:
                remote_exec.stop()
                utilities.report_error("Could not find an open Unreal Editor instance!")

    # shutdown the connection
    finally:
        remote_exec.stop()
def import_asset(asset_data, properties):
    """
    This function imports an asset to unreal based on the asset data in the provided dictionary.

    :param dict asset_data: A dictionary of import parameters (fbx path, game path, asset type flags, etc.).
    :param object properties: The property group that contains variables that maintain the addon's correct state.
    :return bool: True if the import succeeded, False if the editor reported an error.
    """
    # start a connection to the engine that lets you send python strings
    remote_exec = remote_execution.RemoteExecution()
    remote_exec.start()

    # send over the python code as a string; each list entry becomes one line
    # of the script executed inside the Unreal editor
    run_unreal_python_commands(
        remote_exec,
        '\n'.join([
            f'import_task = unreal.AssetImportTask()',
            f'import_task.filename = r"{asset_data.get("fbx_file_path")}"',
            f'import_task.destination_path = r"{asset_data.get("game_path")}"',
            f'import_task.automated = {not properties.advanced_ui_import}',
            f'import_task.replace_existing = True',
            f'options = unreal.FbxImportUI()',
            f'options.auto_compute_lod_distances = False',
            f'options.lod_number = 0',
            f'options.import_as_skeletal = {bool(asset_data.get("skeletal_mesh"))}',
            f'options.import_animations = {bool(asset_data.get("animation"))}',
            f'options.import_materials = {properties.import_materials}',
            f'options.import_textures = {properties.import_textures}',
            f'options.import_mesh = {bool(asset_data.get("import_mesh"))}',
            f'options.static_mesh_import_data.generate_lightmap_u_vs = False',
            f'options.lod_distance0 = 1.0',
            # if this is a skeletal mesh import
            f'if {bool(asset_data.get("skeletal_mesh"))}:',
            f'\toptions.mesh_type_to_import = unreal.FBXImportType.FBXIT_SKELETAL_MESH',
            f'\toptions.skeletal_mesh_import_data.import_mesh_lo_ds = {bool(asset_data.get("lods"))}',
            f'\toptions.skeletal_mesh_import_data.normal_import_method = unreal.FBXNormalImportMethod.FBXNIM_IMPORT_NORMALS',
            # if this is a static mesh import
            f'if {not bool(asset_data.get("skeletal_mesh"))}:',
            f'\toptions.mesh_type_to_import = unreal.FBXImportType.FBXIT_STATIC_MESH',
            f'\toptions.static_mesh_import_data.import_mesh_lo_ds = {bool(asset_data.get("lods"))}',
            f'\toptions.static_mesh_import_data.normal_import_method = unreal.FBXNormalImportMethod.FBXNIM_IMPORT_NORMALS',
            # try to load the provided skeleton
            f'skeleton_asset = unreal.load_asset(r"{asset_data.get("skeleton_game_path")}")',
            f'if skeleton_asset:',
            f'\toptions.set_editor_property("skeleton", skeleton_asset)',
            # if this is an animation import
            f'if {bool(asset_data.get("animation"))}:',
            f'\toptions.set_editor_property("original_import_type", unreal.FBXImportType.FBXIT_ANIMATION)',
            f'\toptions.set_editor_property("mesh_type_to_import", unreal.FBXImportType.FBXIT_ANIMATION)',
            f'\toptions.anim_sequence_import_data.set_editor_property("preserve_local_transform", True)',
            # assign the options object to the import task and import the asset
            f'import_task.options = options',
            f'unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([import_task])',
            # when "import object name as root" is off, verify the asset actually
            # appeared in the project - a missing asset indicates a multi-root skeleton
            f'if {not properties.import_object_name_as_root}:',
            f'\tgame_asset = unreal.load_asset(r"{asset_data.get("game_path")}")',
            f'\tif not game_asset:',
            f'\t\traise RuntimeError("Multiple roots are found in the bone hierarchy. Unreal will only support a single root bone.")',
        ]))

    # if there is an error report it
    if unreal_response:
        if unreal_response['result'] != 'None':
            utilities.report_error(unreal_response['result'])
            return False
    return True
def asset_exists(game_path):
    """
    This function checks to see if an asset exists in unreal.

    :param str game_path: The game path to the unreal asset.
    :return bool: Whether or not the asset exists.
    """
    # start a connection to the engine that lets you send python strings
    remote_exec = remote_execution.RemoteExecution()
    remote_exec.start()

    # send over the python code as a string; the remote script raises when the
    # asset cannot be loaded, which flips the response's success flag
    run_unreal_python_commands(
        remote_exec,
        '\n'.join([
            f'game_asset = unreal.load_asset(r"{game_path}")',
            f'if game_asset:',
            f'\tpass',
            f'else:',
            f'\traise RuntimeError("Asset not found")',
        ]))

    # NOTE(review): if no editor connection was ever made, the module-level
    # `unreal_response` is still the initial empty string and this subscript
    # raises TypeError - confirm callers only run with an editor open.
    return bool(unreal_response['success'])
def delete_asset(game_path):
    """
    This function deletes an asset in unreal.

    :param str game_path: The game path to the unreal asset.
    """
    # start a connection to the engine that lets you send python strings
    remote_exec = remote_execution.RemoteExecution()
    remote_exec.start()

    # send over the python code as a string
    run_unreal_python_commands(
        remote_exec,
        '\n'.join([
            f'unreal.EditorAssetLibrary.delete_asset(r"{game_path}")',
        ]))
|
# --- Day 2: Password Philosophy ---
# Your flight departs in a few days from the coastal airport; the easiest way down to the coast from here is via toboggan.
# The shopkeeper at the North Pole Toboggan Rental Shop is having a bad day. "Something's wrong with our computers; we can't log in!" You ask if you can take a look.
# Their password database seems to be a little corrupted: some of the passwords wouldn't have been allowed by the Official Toboggan Corporate Policy that was in effect when they were chosen.
# To try to debug the problem, they have created a list (your puzzle input) of passwords (according to the corrupted database) and the corporate policy when that password was set.
# For example, suppose you have the following list:
# 1-3 a: abcde
# 1-3 b: cdefg
# 2-9 c: ccccccccc
# Each line gives the password policy and then the password. The password policy indicates the lowest and highest number of times a given letter must appear for the password to be valid. For example, 1-3 a means that the password must contain a at least 1 time and at most 3 times.
# In the above example, 2 passwords are valid. The middle password, cdefg, is not; it contains no instances of b, but needs at least 1. The first and third passwords are valid: they contain one a or nine c, both within the limits of their respective policies.
# How many passwords are valid according to their policies?
from re import split, fullmatch
inpf = open(r".\2020\input\d2-password-philosophy.txt")  # Opens the input file
matches = 0
while (inp := inpf.readline().strip()):  # Runs until the pointer reaches the end
    # "lo-hi c: password" -> policy bounds, policy letter, candidate password.
    # Renamed from min/max: the original names shadowed the builtins.
    lo, hi, char, password = split("-| |: ", inp)
    # Checks if the password contains the intended number of characters
    if fullmatch(f"[^{char}]*(?:{char}[^{char}]*){{{lo},{hi}}}", password):
        matches += 1
print(matches)
# --- Part Two ---
# While it appears you validated the passwords correctly, they don't seem to be what the Official Toboggan Corporate Authentication System is expecting.
# The shopkeeper suddenly realizes that he just accidentally explained the password policy rules from his old job at the sled rental place down the street! The Official Toboggan Corporate Policy actually works a little differently.
# Each policy actually describes two positions in the password, where 1 means the first character, 2 means the second character, and so on. (Be careful; Toboggan Corporate Policies have no concept of "index zero"!) Exactly one of these positions must contain the given letter. Other occurrences of the letter are irrelevant for the purposes of policy enforcement.
# Given the same example list from above:
# 1-3 a: abcde is valid: position 1 contains a and position 3 does not.
# 1-3 b: cdefg is invalid: neither position 1 nor position 3 contains b.
# 2-9 c: ccccccccc is invalid: both position 2 and position 9 contain c.
# How many passwords are valid according to the new interpretation of the policies?
inpf.seek(0)  # Rewind so part two re-reads the same input
matches = 0
while (inp := inpf.readline().strip()):  # Runs until the pointer reaches the end
    pos1, pos2, char, password = split("-| |: ", inp)  # Reads and splits input
    # Exactly one of the two 1-based positions may hold the letter (XOR)
    letter_at_first = password[int(pos1) - 1] == char
    letter_at_second = password[int(pos2) - 1] == char
    if letter_at_first ^ letter_at_second:
        matches += 1
print(matches)
inpf.close()  # Closes the input file
import torch
import torch.nn as nn
import numpy as np
import math
import yaml
from ..utils.config import cfg
from ..rpn.generate_anchors import generate_anchors
from ..utils.bbox import bbox_transform_inv ,clip_boxes, clip_boxes_batch
from ..nms.nms_wrapper import nms
import pdb
DEBUG = False
class _Proposallayer(nn.Module):
    """
    Outputs object detection proposals by applying estimated bounding-box
    transformations to a set of regular boxes (called 'anchors').
    """
    def __init__(self, feat_stride, scales, ratios):
        # feat_stride: total downsampling stride of the backbone feature map.
        # scales/ratios: anchor generation parameters forwarded to generate_anchors.
        super(_Proposallayer, self).__init__()

        self._feat_stride = feat_stride
        self._anchors = torch.from_numpy(generate_anchors(scales=np.array(scales),
            ratios= np.array(ratios))).float()
        self._num_anchors = self._anchors.size(0)

        # rois blob: holds R regions of interest, each is a 5-tuple
        # (n, x1, y1, x2, y2) specifying an image batch index n and a
        # rectangle (x1, y1, x2, y2)
        # top[0].reshape(1, 5)
        #
        # # scores blob: holds scores for R regions of interest
        # if len(top) > 1:
        # top[1].reshape(1, 1, 1, 1)

    def forward(self, cls_score, bbox_deltas, im_info):
        """Generate scored object proposals from RPN outputs.

        :param cls_score: per-anchor classification scores; the first
            _num_anchors channels are background probs, the rest foreground.
        :param bbox_deltas: per-anchor box regression deltas.
        :param im_info: per-image (height, width, scale) information.
        :returns: tensor of shape (batch, pre_nms_topN, 5) with
            (x1, y1, x2, y2, score) per proposal (zero-padded).
        """
        # Algorithm:
        #
        # for each (H, W) location i
        # generate A anchor boxes centered on cell i
        # apply predicted bbox deltas at cell i to each of the A anchors
        # clip predicted boxes to image
        # remove predicted boxes with either height or width < threshold
        # sort all (proposal, score) pairs by score from highest to lowest
        # take top pre_nms_topN proposals before NMS
        # apply NMS with threshold 0.7 to remaining proposals
        # take after_nms_topN proposals after NMS
        # return the top proposals (-> RoIs top, scores top)

        # the first set of _num_anchors channels are bg probs
        # the second set are the fg probs
        scores = cls_score[:, self._num_anchors:, :, :]

        # pick the TRAIN or TEST config section depending on module mode
        cfg_key = 'TRAIN' if self.training else 'TEST'

        pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
        post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
        nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
        min_size = cfg[cfg_key].RPN_MIN_SIZE

        batch_size = bbox_deltas.size(0)

        # Enumerate all shifts (one per feature-map cell, in input-image pixels)
        feat_height, feat_width = scores.size(2), scores.size(3)
        shift_x = np.arange(0, feat_width) * self._feat_stride
        shift_y = np.arange(0, feat_height) * self._feat_stride
        shift_x, shift_y = np.meshgrid(shift_x, shift_y)
        shifts = torch.from_numpy(np.vstack((shift_x.ravel(), shift_y.ravel(),
                                             shift_x.ravel(), shift_y.ravel())).transpose())
        shifts = shifts.contiguous().type_as(scores).float()

        # Enumerate all shifted anchors:
        #
        # add A anchors (1, A, 4) to
        # cell K shifts (K, 1, 4) to get
        # shift anchors (K, A, 4)
        # reshape to (K*A, 4) shifted anchors
        A = self._num_anchors
        K = shifts.size(0)

        self._anchors = self._anchors.type_as(scores)
        # anchors = self._anchors.view(1, A, 4) + shifts.view(1, K, 4).permute(1, 0, 2).contiguous()
        anchors = self._anchors.view(1, A, 4) + shifts.view(K, 1, 4)
        anchors = anchors.view(1, K * A, 4).expand(batch_size, K * A, 4)

        # Transpose and reshape predicted bbox transformations to get them
        # into the same order as the anchors:
        #
        # bbox deltas will be (1, 4 * A, H, W) format
        # transpose to (1, H, W, 4 * A)
        # reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)
        # in slowest to fastest order
        bbox_deltas = bbox_deltas.permute(0, 2, 3, 1).contiguous()
        bbox_deltas = bbox_deltas.view(batch_size, -1, 4)

        # Same story for the scores:
        #
        # scores are (1, A, H, W) format
        # transpose to (1, H, W, A)
        # reshape to (1 * H * W * A, 1) where rows are ordered by (h, w, a)
        scores = scores.permute(0, 2, 3, 1).contiguous()
        scores = scores.view(batch_size, -1)

        # Convert anchors into proposals via bbox transformations
        proposals = bbox_transform_inv(anchors, bbox_deltas, batch_size)

        # 2. clip predicted boxes to image
        proposals = clip_boxes(proposals, im_info, batch_size)

        # 3. remove predicted boxes with either height or width < threshold
        # (NOTE: convert min_size to input image scale stored in im_info[2])
        keep = _filter_boxes(proposals, min_size * im_info[0,2].float())
        # NOTE(review): keep[0] applies the first image's mask to the whole
        # batch - presumably assumes all batch images share one size/scale;
        # confirm against the caller.
        proposals = proposals[:, keep[0], :]
        scores = scores[:, keep[0]]

        scores_keep = scores
        proposals_keep = proposals

        # sort proposals by score, highest first, per batch element
        _, order = torch.sort(scores_keep, 1, True)

        output = scores.new_zeros(batch_size, pre_nms_topN, 5)
        for i in range(batch_size):
            # # 3. remove predicted boxes with either height or width < threshold
            # # (NOTE: convert min_size to input image scale stored in im_info[2])
            proposals_single = proposals_keep[i]
            scores_single = scores_keep[i]

            # # 4. sort all (proposal, score) pairs by score from highest to lowest
            # # 5. take top pre_nms_topN (e.g. 6000)
            order_single = order[i]

            if pre_nms_topN > 0 and pre_nms_topN < scores_keep.numel():
                order_single = order_single[:pre_nms_topN]

            proposals_single = proposals_single[order_single, :]
            scores_single = scores_single[order_single].view(-1,1)

            # 6. apply nms (e.g. threshold = 0.7)
            # 7. take after_nms_topN (e.g. 300)
            # 8. return the top proposals (-> RoIs top)

            # DURING TEST WE HAVE NMS OUTSIDE OF THIS FUNCTION
            # keep_idx_i = nms(torch.cat((proposals_single, scores_single), 1), nms_thresh, force_cpu=not cfg.USE_GPU_NMS)
            # keep_idx_i = keep_idx_i.long().view(-1)
            #
            # if post_nms_topN > 0:
            # keep_idx_i = keep_idx_i[:post_nms_topN]
            # proposals_single = proposals_single[keep_idx_i, :]
            # scores_single = scores_single[keep_idx_i, :]

            # pack (x1, y1, x2, y2, score) rows; rows past num_proposal stay zero
            num_proposal = proposals_single.size(0)
            output[i,:num_proposal, 4] = scores_single[:,0]
            output[i,:num_proposal,0:4] = proposals_single[:,0:4]

        return output
def _filter_boxes( boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, :, 2] - boxes[:, :, 0] + 1
hs = boxes[:, :, 3] - boxes[:, :, 1] + 1
keep = ((ws >= min_size.view(-1, 1).expand_as(ws)) & (hs >= min_size.view(-1, 1).expand_as(hs)))
return keep |
# pylint: disable=missing-module-docstring, missing-function-docstring
# pylint: disable=missing-class-docstring
from unittest import TestCase
from datetime import time
import numpy as np
import pytz
from candystore import CandyStore
from tests.helpers import ColumnAssertionMixin
from augury.pipelines.betting import nodes as betting
YEAR_RANGE = (2013, 2015)
REQUIRED_OUTPUT_COLS = ["home_team", "year", "round_number"]
class TestBetting(TestCase, ColumnAssertionMixin):
    """Unit tests for the betting pipeline nodes."""

    def setUp(self):
        # Fixture: synthetic betting odds covering the configured season range.
        self.raw_betting_data = CandyStore(seasons=YEAR_RANGE).betting_odds()

    def test_clean_data(self):
        """clean_data must add 'year', drop raw-only columns, keep the required
        output columns and produce UTC, non-midnight timestamps."""
        clean_data = betting.clean_data(self.raw_betting_data)

        self.assertIn("year", clean_data.columns)

        # Raw-only columns must not survive the cleaning step
        invalid_cols = clean_data.filter(regex="_paid|_margin|venue|^round$").columns
        self.assertFalse(any(invalid_cols))

        self.assertEqual(
            {*REQUIRED_OUTPUT_COLS}, {*clean_data.columns} & {*REQUIRED_OUTPUT_COLS}
        )

        # Dates must be UTC-localised and carry a real (non-midnight) time part
        self.assertEqual(clean_data["date"].dt.tz, pytz.UTC)
        self.assertFalse((clean_data["date"].dt.time == time()).any())

    def test_add_betting_pred_win(self):
        """add_betting_pred_win must add its column given the required odds columns."""
        feature_function = betting.add_betting_pred_win
        match_data = CandyStore(seasons=YEAR_RANGE).match_results()
        # Attach randomised odds columns that the feature function requires
        valid_data_frame = match_data.assign(
            win_odds=np.random.randint(0, 2, len(match_data)),
            oppo_win_odds=np.random.randint(0, 2, len(match_data)),
            line_odds=np.random.randint(-50, 50, len(match_data)),
            oppo_line_odds=np.random.randint(-50, 50, len(match_data)),
        )

        self._make_column_assertions(
            column_names=["betting_pred_win"],
            req_cols=("win_odds", "oppo_win_odds", "line_odds", "oppo_line_odds"),
            valid_data_frame=valid_data_frame,
            feature_function=feature_function,
        )
|
'''
Description:
Given the root of a Binary Search Tree (BST), convert it to a Greater Tree such that every key of the original BST is changed to the original key plus sum of all keys greater than the original key in BST.
As a reminder, a binary search tree is a tree that satisfies these constraints:
The left subtree of a node contains only nodes with keys less than the node's key.
The right subtree of a node contains only nodes with keys greater than the node's key.
Both the left and right subtrees must also be binary search trees.
Note: This question is the same as 1038: https://leetcode.com/problems/binary-search-tree-to-greater-sum-tree/
Example 1:
Input: root = [4,1,6,0,2,5,7,null,null,null,3,null,null,null,8]
Output: [30,36,21,36,35,26,15,null,null,null,33,null,null,null,8]
Example 2:
Input: root = [0,null,1]
Output: [1,null,1]
Example 3:
Input: root = [1,0,2]
Output: [3,3,2]
Example 4:
Input: root = [3,2,4,1]
Output: [7,9,4,10]
Constraints:
The number of nodes in the tree is in the range [0, 104].
-104 <= Node.val <= 104
All the values in the tree are unique.
root is guaranteed to be a valid binary search tree.
'''
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node holding a value and optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    def convertBST(self, root: TreeNode) -> TreeNode:
        """Convert a BST into a greater tree in place.

        Walks the tree in reverse in-order (right, node, left) while carrying
        the running sum of every key seen so far, so each node ends up holding
        its original key plus the sum of all greater keys. Returns the root.
        """
        running_sum = 0

        def visit(node):
            nonlocal running_sum
            if node is None:
                return
            visit(node.right)
            node.val += running_sum
            running_sum = node.val
            visit(node.left)

        visit(root)
        return root
## Time Complexity: O( n )
#
# The overhead in time is the cost of DFS, which is of O( n )
## Space Complexity: O( n )
#
# The overhead in space is the storage for recurison call stack, which is of O( n )
# ----------------------------------
def inorder(node):
    """Yield the values of a binary tree in left-root-right order."""
    if node is None:
        return
    yield from inorder(node.left)
    yield node.val
    yield from inorder(node.right)
# ----------------------------------
import unittest
class Testing(unittest.TestCase):
    def test_case_1(self):
        """convertBST on the sample BST yields the expected greater-sum values."""
        nodes = {value: TreeNode(value) for value in range(9)}
        nodes[4].left, nodes[4].right = nodes[1], nodes[6]
        nodes[1].left, nodes[1].right = nodes[0], nodes[2]
        nodes[6].left, nodes[6].right = nodes[5], nodes[7]
        nodes[2].right = nodes[3]
        nodes[7].right = nodes[8]

        converted = Solution().convertBST(nodes[4])
        self.assertEqual(list(inorder(converted)), [36, 36, 35, 33, 30, 26, 21, 15, 8])


if __name__ == '__main__':
    unittest.main()
"""
https://blog.csdn.net/freeking101/article/details/64461574
"""
import pathlib
from xml.etree import cElementTree as ET
PATH = pathlib.Path(__file__).parent.absolute()
INPUT = PATH / "RSS.xml"
has_img = PATH / "has_img.txt"
not_has_img = PATH / "not_has_img.txt"
tree = ET.ElementTree(file=INPUT)
root = tree.getroot()

# Split feed entries into two files depending on whether they carry a
# media thumbnail element.
with open(has_img, "w") as has, open(not_has_img, "w") as not_has:
    for entry in root.iter(tag="{http://www.w3.org/2005/Atom}entry"):
        link = next(entry.iter(tag="{http://www.w3.org/2005/Atom}link"))
        try:
            thumbnail = next(
                entry.iter(tag="{http://search.yahoo.com/mrss/}thumbnail")
            )
            print(thumbnail.attrib["url"], link.attrib["href"], file=has)
        except StopIteration:  # entry has no thumbnail element
            print(link.attrib["href"], file=not_has)
|
"""
Dane są ciągi: A[n+1] = sqrt(A[n] ∗ B[n]) oraz B[n+1] = (A[n] + B[n])/2.0. Ciągi te są zbieżne do
wspólnej granicy nazywanej średnią arytmetyczno-geometryczną. Napisać program wyznaczający średnią
arytmetyczno-geometryczną dwóch liczb.
"""
from math import sqrt

# Accept real numbers (the original only accepted ints); the AGM is defined
# for non-negative reals, so sqrt() below requires a0, b0 >= 0.
a0 = float(input("Enter a0: "))
b0 = float(input("Enter b0: "))
epsilon = 0.00001

# BUGFIX: compare with abs() so the loop also runs when a0 < b0; the original
# condition `a0 - b0 > epsilon` was false in that case and the raw input was
# printed unchanged. After one iteration a0 >= b0 holds anyway (AM >= GM).
while abs(a0 - b0) > epsilon:
    a1 = (a0 + b0) / 2
    b1 = sqrt(a0 * b0)
    a0 = (a1 + b1) / 2
    b0 = sqrt(a1 * b1)

print(a0)
|
from abc import ABC, abstractmethod
import json
from enum import Enum
import numpy as np
import tensorflow as tf
class TensorflowObjectDetector:
def __init__(self, model_path, category_labels_path):
"""
:param model_path: string location of serialized frozen model
:param category_labels_path: string location of class labels json
"""
self.graph = self._load_model(str(model_path))
self.category_labels = self._load_category_labels(str(category_labels_path))
# Creating shared session to get performance benefits of cached graph
self.session = tf.Session(graph=self.graph)
def predict(self, image_array):
"""
Run object detection inference on a a single image
:param image_array: numpy array with shape [?,?,3]
:returns: dict containing lists of bounding box co-ordinates (ymin, xmin, ymax, xmax),
scores (class probabilities) and class ids (integers)
"""
assert image_array.ndim == 3 and image_array.shape[2] == 3, 'Input array must have shape [?,?,3]'
# Get handles to input and output tensors
ops = self.graph.get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in ['detection_boxes', 'detection_scores', 'detection_classes']:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = self.graph.get_tensor_by_name(tensor_name)
image_tensor = self.graph.get_tensor_by_name('image_tensor:0')
# Create feed dict in format required by graph
feed_dict={image_tensor: np.expand_dims(image_array, 0)}
# Run inference
output_dict = self.session.run(tensor_dict, feed_dict=feed_dict)
# Returning boxes as dicts to avoid ambiguous list of coords
raw_boxes = output_dict.pop('detection_boxes')[0]
output_dict['boxes'] = self.map_boxes(raw_boxes)
output_dict['scores'] = output_dict.pop('detection_scores')[0]
output_dict['category_ids'] = output_dict.pop('detection_classes')[0].astype(np.uint8)
return output_dict
def _load_category_labels(self, labels_path):
"""
Load class labels from json ("category_label": category_id) and convert to Enum mapping
:returns: Enum object with containing label/id mappings for two-way lookup
"""
with open(labels_path, 'rb') as json_file:
labels_dict = json.load(json_file)
return Enum('Labels', labels_dict)
def _load_model(self, model_path):
"""
Load serialized frozen graph model
:param model_path: path to frozen graph
:returns: Tensorflow Graph
"""
detection_graph = tf.Graph()
with detection_graph.as_default():
# Create GraphDef object to parse serialized frozen graph
graph_def = tf.GraphDef()
with tf.gfile.GFile(model_path, 'rb') as fid:
serialized_graph = fid.read()
graph_def.ParseFromString(serialized_graph)
# Import graph definition into default graph
tf.import_graph_def(graph_def, name='')
return detection_graph
def draw_bounding_boxes(self, image_array, boxes, scores, category_ids, threshold=0.2, **draw_kwargs):
"""
Draw all bounding boxes for a single image
:param image_array: numpy array with shape [?,?,3]
:param boxes: list of list of (ymin, xmin, ymax, xmax) box co-ordinates
:param scores: list of class probabilities for boxes
:param category_ids: list of class ids for boxes
:returns: numpy array with all boxes annootated
"""
for box, score, category_id in zip(boxes, scores, category_ids):
if score >= threshold:
#TODO add class labels to box
category_label = self.category_labels(category_id).name
label = f'{score:.0%}'
image_array = draw_bounding_box(image_array=image_array, label=label,
**box, **draw_kwargs)
else:
continue
return image_array
@staticmethod
def map_boxes(boxes):
''' Convert list of box coords in TF format to dict '''
return [dict(zip(['ymin', 'xmin', 'ymax', 'xmax'], box)) for box in boxes] |
import EulerRunner
def problem3_iter():
    """Project Euler #3: return the largest prime factor of 600851475143.

    Divides out each prime in ascending order; the last prime that divided
    the number is its largest prime factor.
    """
    to_factor = 600851475143
    gen = EulerRunner.prime_generator()
    current_prime = None
    while to_factor > 1:
        # BUGFIX: gen.next() is Python-2-only; next(gen) works on Py2.6+ and Py3
        current_prime = next(gen)
        while to_factor % current_prime == 0:
            # BUGFIX: use floor division - `/=` yields a float on Python 3,
            # which would make the remaining factorisation operate on floats
            to_factor //= current_prime
    return current_prime
EulerRunner.solve_problem(problem3_iter)
|
from django.db import migrations
def initial_models(apps, *args):
    """
    An initial data migration to create the necessary account related models.

    Creates one default account tier (every account must belong to a tier) and
    the Stencila account that owns the default job queue etc.
    """
    # Need at least one account tier for accounts to belong to
    AccountTier = apps.get_model("accounts", "AccountTier")
    AccountTier.objects.create()

    # Need a Stencila account to own the default job queue etc
    Account = apps.get_model("accounts", "Account")
    Account.objects.create(name="stencila", display_name="Stencila")
class Migration(migrations.Migration):
    """Data migration seeding the default account tier and Stencila account."""

    dependencies = [
        ("accounts", "0001_initial"),
    ]

    operations = [
        # A no-op reverse makes the migration unapplyable; RunPython without
        # reverse_code would make it irreversible.
        migrations.RunPython(initial_models, migrations.RunPython.noop),
    ]
|
import re
from config import Config, BankDict, Wealth, Logger
import os
from utils.nlp_util import transfer_to_yuan
from utils.nlp_util import UTIL_CN_NUM
class ManualText(object):
    """Extract wealth-product fields from the free text of PDF-like documents
    using regular expressions, supplementing labels that table extraction
    failed to capture.

    Subclasses may define ``labels_check_config`` / ``labels_check_one_config``
    (sets of label names) to extend the labels searched for. Every label in
    ``labels_check`` must have a matching ``extract_<label>`` method.
    """

    log_path = os.path.join(Config.LOG_DIR, 'ManualText.log')
    log = Logger(log_path, level='warning').logger

    # Labels that are always safe to extract by scanning the whole text.
    labels_check = {
        'product_type',
        'risk',
        'rate_type', 'promise_type', 'redeem_type',
        'amount_buy_min',
    }
    # Labels only extracted from the whole text when the document describes a
    # single product (otherwise a full-text match could belong to any product).
    labels_check_one = {
        'name', 'code', 'code_register',
        'term', 'term_looped',
    }
    # NOTE: the regular expressions here intentionally differ from the ones in
    # the ManualTable class.
    list_risk = BankDict.list_risk
    list_currency = BankDict.list_currency
    # Wealth attributes never backfilled from a codeless wealth.
    list_ignore = ['ukey', 'bank_level', 'bank_name', 'name', 'code', 'do_dump', 'do_load']

    def __init__(self, bank_name: str, dict_wealth: dict, text: str):
        """
        :param bank_name: name of the bank the document belongs to
        :param dict_wealth: mapping of product code -> Wealth; may contain a
            'codeless' placeholder for a wealth whose code is unknown
        :param text: raw document text; brackets, slashes and whitespace are
            stripped before matching
        """
        self.bank_name = bank_name
        self.dict_wealth = dict_wealth
        self.text = re.sub(r'[【】()()/\s]+', '', text)
        # BUGFIX: work on per-instance copies — the original code mutated the
        # class-level sets via update(), leaking subclass configuration and
        # the single-wealth expansion between instances.
        self.labels_check = set(self.labels_check)
        self.labels_check_one = set(self.labels_check_one)
        config = getattr(self, 'labels_check_config', None)
        if config:
            if not isinstance(config, set):
                raise ValueError("labels_check_config must be type of set")
            self.labels_check.update(config)
        config_one = getattr(self, 'labels_check_one_config', None)
        if config_one:
            if not isinstance(config_one, set):
                raise ValueError("labels_check_one_config must be type of set")
            self.labels_check_one.update(config_one)
        self.wealth_codeless = None
        # With two or more wealths, remove the 'codeless' placeholder and keep
        # it aside so its fields can later backfill the coded wealths.
        if len(self.dict_wealth) > 1:
            if 'codeless' in self.dict_wealth.keys():
                self.wealth_codeless = self.dict_wealth['codeless']
                self.dict_wealth.pop('codeless')
        # With exactly one wealth, every label can safely be searched for in
        # the whole text.
        if len(self.dict_wealth) == 1:
            self.labels_check.update(self.labels_check_one)

    @classmethod
    def start(cls, bank_name: str, dict_wealth: dict, text: str):
        """Entry point: run extraction and return a dict of code -> Wealth."""
        manual_text_in = cls(bank_name, dict_wealth, text)
        return manual_text_in._start()

    def _start(self):
        """Run every configured extract_* method on each wealth, then finalize."""
        list_wealth_new = []
        for wealth in self.dict_wealth.values():
            wealth = self.makeup_wealth_table(self.text, wealth)
            list_wealth_new.append(wealth)
        return self.final_makeup_wealth(list_wealth_new)

    def makeup_wealth_table(self, value: str, wealth: Wealth):
        """Apply each extract_<label> method in ``labels_check`` to ``wealth``."""
        for one in self.labels_check:
            extract_method = getattr(self, 'extract_' + one, None)
            if extract_method is not None and callable(extract_method):
                wealth = extract_method(value, wealth)
            else:
                self.log.error('没有找到相应的方法:extract_%s方法' % one)
        return wealth

    def parse_text_manual(self, wealth: Wealth):
        """Hook for subclasses to add custom post-processing; default no-op."""
        return wealth

    def final_makeup_wealth(self, list_wealth: list):
        """Drop wealths without a code and key the remainder by code.

        Tables headed by product name/code may have been merged into the
        previous table; for single-table files the code is searched for in the
        full text, so a wealth still lacking a code here can be discarded.

        :returns: dict of code -> Wealth
        """
        dict_need = {}
        for wealth in list_wealth:
            code = wealth.code
            if code:
                wealth = self.makeup_from_codeless(wealth, self.wealth_codeless)
                wealth = self.parse_text_manual(wealth)
                dict_need[code] = wealth
        return dict_need

    def makeup_from_codeless(self, wealth: Wealth, wealth_codeless: Wealth):
        """Backfill empty attributes of ``wealth`` from the codeless wealth.

        Safe when ``wealth_codeless`` is None: getattr then yields None and
        nothing is copied.
        """
        wealth_instance_elements = [
            one for one in dir(wealth)
            if not (one.startswith('_') or one in self.list_ignore)
        ]
        for element in wealth_instance_elements:
            codeless_element = getattr(wealth_codeless, element, None)
            current_element = getattr(wealth, element, None)
            if not current_element and codeless_element:
                setattr(wealth, element, codeless_element)
        return wealth

    def extract_product_type(self, value: str, wealth: Wealth):
        """Derive promise/fixed/redeem/rate type from a product-type phrase."""
        pattern_product_type = re.compile(r'理财[计划]*产品类型[属于为是::\s]*[非保本证固定浮动收益开放式封闭净值型类公私募]{5,}|本[期理财]*产品[属于为是::\s]*[非保本证固定浮动收益开放式封闭净值型类公私募\s]{5,}[理财]*产品')
        result = pattern_product_type.search(value)
        if result:
            text = result.group(0)
            if not wealth.promise_type:
                # '非保本' must be tested first because it contains '保本'.
                if '非保本' in text:
                    wealth.promise_type = '非保本'
                elif '保本' in text:
                    wealth.promise_type = '保本'
            if not wealth.fixed_type:
                if '浮动' in text:
                    wealth.fixed_type = '浮动收益'
                elif '固定' in text:
                    wealth.fixed_type = '固定收益'
            if not wealth.redeem_type:
                # NOTE(review): these checks scan the whole text (``value``),
                # not just the matched phrase — confirm this is intended.
                if '封闭式' in value:
                    wealth.redeem_type = '封闭式'
                elif '开放式' in value:
                    wealth.redeem_type = '开放式'
            if not wealth.rate_type:
                if '净值型' in value:
                    wealth.rate_type = '净值型'
                    wealth.fixed_type = '浮动收益'
        return wealth

    def extract_risk(self, value: str, wealth: Wealth):
        """Extract the risk level as a number."""
        if not wealth.risk:
            pattern_risk_dig = re.compile(r'风险[评等分][级类][结果属于为是::\s]*([0-9A-Za-z零一二三四五]+)级?')
            pattern_risk_cn = re.compile(r'风险[评等分][级类][结果属于为是::\s]*[基本]*([无低较中等高极]+风险)')
            risk = None
            # Prefer the descriptive form ('低风险' etc.), mapped via list_risk.
            result = pattern_risk_cn.search(value)
            if result:
                risk_raw = result.group(1)
                for key in self.list_risk.keys():
                    if key == risk_raw:
                        risk = self.list_risk[key]
                        break
            if not risk:
                # Fall back to a digit or Chinese-numeral grade.
                res = pattern_risk_dig.search(value)
                if res:
                    risk_raw = res.group(1)
                    res_num = re.search(r'[0-9]', risk_raw)
                    if res_num:
                        risk = int(res_num.group(0))
                        if risk > 5:
                            # Out-of-range grade is reported but still stored.
                            print('风险评级数字超出范围,内容为:%s' % value)
                    else:
                        res_cn_num = re.search(r'[零一二三四五]', risk_raw)
                        if res_cn_num:
                            risk = UTIL_CN_NUM[res_cn_num.group(0)]
            wealth.risk = risk
        return wealth

    def extract_rate_type(self, value: str, wealth: Wealth):
        """Classify the product as net-value type or expected-return type."""
        if not wealth.rate_type:
            pattern_rate_type = re.compile(r'净值型|业绩比较基准|比较业绩基准|预期收益率|年化收益率|预期理财收益率|预期年化收益率|预期到期利率|结构性存款')
            result = pattern_rate_type.search(value)
            if result:
                rate_type = result.group(0)
                if rate_type in ['净值型', '比较业绩基准', '业绩比较基准']:
                    wealth.rate_type = '净值型'
                    # Net-value products imply floating returns.
                    wealth.fixed_type = '浮动收益'
                elif rate_type in ['预期收益率', '年化收益率', '预期理财收益率', '预期年化收益率', '预期到期利率', '结构性存款']:
                    wealth.rate_type = '预期收益型'
        return wealth

    def extract_promise_type(self, value: str, wealth: Wealth):
        """Decide principal-protected vs not from guarantee phrases."""
        if not wealth.promise_type:
            pattern_promise_type = re.compile(r'([不无]?)[提供]*本金[完全]*保障|([不无]?)[保证障]{2}[理财购买资金]*[金额本]{2}')
            result = pattern_promise_type.search(value)
            if result:
                word = result.group(0)
                if word:
                    # A negation captured in either alternative means
                    # "not principal-protected".
                    one_no = result.group(1)
                    two_no = result.group(2)
                    if one_no or two_no:
                        wealth.promise_type = '非保本'
                    else:
                        wealth.promise_type = '保本'
        return wealth

    def extract_amount_buy_min(self, value: str, wealth: Wealth):
        """Extract the minimum purchase amount, converted to yuan."""
        if not wealth.amount_buy_min:
            # Pattern is only suitable for search(), not match().
            pattern_amount_buy_min = re.compile(r'[起点份额认购金最低余申]{4,}[::为不低于\s]*(人民币|美元|欧元|英镑|日元)*\s*([1-9][0-9]*)\s*([亿万千百元]+)起?')
            results = pattern_amount_buy_min.search(value)
            if results:
                text = results.group(0)
                wealth.amount_buy_min = transfer_to_yuan(text)
        return wealth

    def extract_redeem_type(self, value: str, wealth: Wealth):
        """Decide open/closed type from statements about redemption rights."""
        if not wealth.redeem_type:
            pattern_redeem_type_sub = re.compile(r'(如果|若|假设)[封闭期内投资理财计划成立后]*(投资者|投资人|客户)[不没]?(得|享有|开放|可以|可|能|能够|无|有)[提前]*赎回')
            pattern_redeem_type = re.compile(r'(投资者|投资人|客户)[不没]?(得|享有|开放|可以|可|能|能够|无|有)[提前]*赎回')
            # Strip hypothetical clauses ("if the investor redeems early ...")
            # before looking for an actual statement of redemption rights.
            value = pattern_redeem_type_sub.sub('', value)
            result = pattern_redeem_type.search(value)
            if result:
                text = result.group(0)
                if '不' in text or '无' in text or '没' in text:
                    wealth.redeem_type = '封闭式'
                else:
                    wealth.redeem_type = '开放式'
        return wealth

    def extract_name(self, value: str, wealth: Wealth):
        """Extract the product name from 《...》 document titles.

        Searches the whole text for book-title quotes and keeps the longest
        candidate — TODO: confirm "longest" is the right heuristic.
        """
        if not wealth.name:
            pattern_name = re.compile(r'《([\u4e00-\u9fa5、,,::“”"+\[\]\s()()A-Za-z0-9\-]+)(风险揭示及说明书|产品说明书|协议书)》')
            results = pattern_name.findall(value)
            if results:
                word_longest = ''
                for one in results:
                    name_raw = one[0]
                    # Ignore titles that are nothing but '结构性存款'.
                    name_net = name_raw.replace('结构性存款', '')
                    if len(name_net) > 0:
                        if len(name_raw) > len(word_longest):
                            word_longest = name_raw
                wealth.name = word_longest
        return wealth

    def extract_code(self, value: str, wealth: Wealth):
        """Extract the product code."""
        if not wealth.code:
            pattern_code = re.compile(r'(产品|单元|理财|计划)[的认购]*[代码编号]{2}[为是::\s]*([A-Za-z0-9][-+A-Za-z0-9]+)')
            result = pattern_code.search(value)
            if result:
                # BUGFIX: group(1) is the leading keyword alternative
                # ('产品' etc.); the code itself is captured by group(2)
                # (cf. extract_code_register, which already uses group(2)).
                wealth.code = result.group(2)
        return wealth

    def extract_code_register(self, value: str, wealth: Wealth):
        """Extract the registration code (6+ alphanumeric characters)."""
        if not wealth.code_register:
            pattern_code_register = re.compile(r'(登记|注册)[编码代号]+[为是::\s]*([A-Za-z0-9]{6,})')
            result = pattern_code_register.search(value)
            if result:
                wealth.code_register = result.group(2)
        return wealth

    def extract_term(self, value: str, wealth: Wealth):
        """Extract the product term, normalised to days."""
        if not wealth.term:
            pattern_term = re.compile(r'理财[计划]*期限[为是::\s]*([0-9,,]+)\s*([天日月年])')
            pattern_term_sub = re.compile(r'([0-9]{2,4}年)?[0-9]{1,2}月[0-9]{1,2}日')
            # Remove date-like substrings (YYYY年MM月DD日) so they are not
            # mistaken for a term length.
            res = pattern_term_sub.finditer(value)
            for one in res:
                date = one.group(0)
                value = value.replace(date, '')
            result = pattern_term.search(value)
            if result:
                # Strip both ASCII and full-width thousands separators.
                num = result.group(1)
                num = num.replace(',', '')
                num = num.replace(',', '')
                num = int(num)
                unit = result.group(2)
                if unit == '月':
                    num = num * 30
                elif unit == '年':
                    num = num * 365
                if num < 7301:  # terms longer than 20 years are rejected
                    wealth.term = num
        return wealth

    def extract_term_looped(self, value: str, wealth: Wealth):
        """Flag products that roll over automatically / have no fixed term."""
        if not wealth.term_looped:
            if '投资周期顺延' in value or '自动再投资' in value or '无固定期限' in value:
                wealth.term_looped = 'YES'
        return wealth
|
def read_csv_data(file_name, skip=1, samples=120, sep=','):
    """Read numeric CSV data into per-column lists.

    Consumes ``skip`` lines per sample (keeping only the last of each group)
    and returns the values transposed: one list per column, each of length
    ``samples``.

    :param file_name: path to the CSV file
    :param skip: lines consumed per sample; only the last one is kept
    :param samples: number of samples (kept rows) to read
    :param sep: field separator
    :returns: list of per-column lists; [] for an empty file
    :raises ValueError: if the file holds fewer than ``samples * skip`` rows
        (float('') on the blank read)
    """
    data = []
    # First pass: determine the number of columns from the first row.
    with open(file_name) as csv_file:
        first = csv_file.readline()
        if len(first) == 0:
            return data
        cols = len(first.split(sep))
    data = [[0] * samples for _ in range(cols)]
    # Second pass: fill the transposed table sample by sample.
    with open(file_name) as csv_file:
        for i in range(samples):
            for _ in range(skip):
                row = csv_file.readline().strip()
            data_row = [float(item) for item in row.split(sep)]
            for j in range(cols):
                data[j][i] = data_row[j]
    return data
|
from __future__ import division, absolute_import, print_function
from beets.dbcore import FieldQuery
from beets.plugins import BeetsPlugin
# Copied from beets.dbcore with two additions of 'not'
class NotNoneQuery(FieldQuery):
    """Match items whose value for the field is present (not NULL)."""

    def col_clause(self):
        # SQL fragment plus an (empty) parameter tuple.
        clause = "{0} IS NOT NULL".format(self.field)
        return clause, ()

    def match(self, item):
        # A missing field counts the same as a null value.
        try:
            value = item[self.field]
        except KeyError:
            return False
        return value is not None

    def __repr__(self):
        return "{0.__class__.__name__}({0.field!r}, {0.fast})".format(self)
class OtherQueriesPlugin(BeetsPlugin):
    """Register the extra query prefixes provided by this plugin."""

    def queries(self):
        # '%' prefix: field-is-set (not null) query.
        return {'%': NotNoneQuery}
|
class Config:
    """Static configuration flags for the air-conditioner integration."""

    # Toggle for new-version behavior (exact semantics defined by the
    # consumers of this flag — TODO confirm).
    is_new_version: bool = False
    # Device model switch: True = "Jinzhi air" C611, False = DS-AIR B611.
    is_c611: bool = True
|
#!/usr/bin/env python3
import os
import gzip
import re
from collections import defaultdict
from pathlib import Path
# set variables from snakemake params, wildcards, input and threads
# (this script is run via snakemake's `script:` directive, which injects the
# global `snakemake` object)
ROOT = str(snakemake.wildcards.root)
MASKS = snakemake.params.mask_ids + [32630,111789,6] # mask synthetic constructs by default
NODES = snakemake.input.nodes
INDIR = snakemake.params.indir
PREFIX = snakemake.wildcards.name
def graph_to_filenames(graph, root, masks, indir, prefix):
    """
    Generate a list of filenames for the sequences of nested taxids from a root.

    :param graph: dict of parent taxid -> {child taxid: rank}
    :param root: taxid to start the descent from
    :param masks: iterable of int taxids whose whole subtree is skipped
    :param indir: directory holding per-taxid .fa files, bucketed by the
        last two digits of the taxid
    :param prefix: unused; kept for interface compatibility with callers
    :returns: defaultdict mapping bucket (last two digits of taxid) -> list
        of file path stems (without the .fa extension)
    """
    filenames = defaultdict(list)

    def descend(node):
        """Recursively collect filenames for descendants of ``node``,
        skipping any child whose taxid appears in ``masks``."""
        if node not in graph:
            return
        for child, rank in graph[node].items():
            if masks and int(child) in masks:
                continue
            # Sequence files are bucketed by the last two zero-padded digits.
            taxid_file = "%s/%s/%s" % (indir, child[-2:].zfill(2), child)
            if Path("%s.fa" % taxid_file).is_file():
                filenames[child[-2:]].append(taxid_file)
            descend(child)

    descend(root)
    return filenames
def node_graph(nodes_file):
"""
Read an NCBI nodes.dmp file and return a dict of child taxids and ranks for
each taxid with descendants.
"""
graph = defaultdict(dict)
with open(nodes_file,'r') as fh:
lines = fh.readlines()
for l in lines:
parts = re.split(r'\t\|\t',l)
tax_id = parts[0]
parent_id = parts[1]
rank = parts[2]
if parent_id == tax_id:
continue
graph[parent_id].update({tax_id:rank})
return graph
# Write a file containing a list of per-taxon sequence filenames needed to
# create a custom database containing all descendants of a specified root,
# optionally with one or more lineages masked.
taxonomy = node_graph(NODES)
filenames = graph_to_filenames(taxonomy, ROOT, MASKS, INDIR, PREFIX)
if filenames:
    suffix = ".minus.%s" % ".".join(str(mask) for mask in MASKS) if MASKS else ""
    outdir = "%s.root.%s%s/" % (PREFIX, ROOT, suffix)
    os.makedirs(os.path.dirname(outdir), exist_ok=True)
    # Master list records the path of every per-bucket list file.
    dir_list = outdir + "list"
    with open(dir_list, 'w') as master:
        for bucket, files in filenames.items():
            bucket_list = "%s%s.list" % (outdir, bucket)
            with open(bucket_list, 'w') as out:
                out.write('\n'.join(files))
            master.write("%s\n" % bucket_list)
|
# ------------------------------------------------------------------------------------------------
# Given a list of numbers and a number k, return whether any two numbers from the list add up to k #
# ------------------------------------------------------------------------------------------------
def find_k(l, k):
    """Return True if two distinct elements of ``l`` sum to ``k``.

    Single O(n) pass with a set of previously-seen values. This also fixes
    the original membership test pairing an element with itself:
    ``k - l[i] in l`` was True for l=[5], k=10 even though only one 5 exists.
    """
    seen = set()
    for x in l:
        if k - x in seen:
            return True
        seen.add(x)
    return False
def main():
    """Demo: report whether any pair in the sample list sums to the target."""
    values = [10, 15, 3, 7]
    target = 17
    print(find_k(values, target))


if __name__ == "__main__":
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.